# ParamManagerScripts/backend/script_groups/S7_DB_Utils/x7_value_updater.py


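"""
x7_value_updater: refreshes the values of an S7 "_format" data block source
from its matching "_data" source.

Pipeline (as implemented below):
  1. Locate the *_data and *_format S7 source files in the working directory.
  2. Parse both with S7Parser (x3) and dump the results as JSON under ./json.
  3. Compare the flattened DB structures; abort on any type or count mismatch.
  4. Copy initial/current values from _data into a deep copy of _format.
  5. Regenerate S7 source code (x4) and write it as the *_updated file.
"""
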
import json
import os
import glob
import copy
from typing import List, Dict, Optional, Tuple, Any

# Assuming x3.py and x4.py are in the same directory or accessible via PYTHONPATH.
# Imports from x3.py:
from x3 import (
    S7Parser,
    ParsedData,  # Dataclass for the top-level parse result
    # VariableInfo, ArrayDimension, UdtInfo and DbInfo are also defined in
    # x3.py and used by S7Parser, but this script works with their dict
    # representations loaded from JSON, so they are not imported explicitly.
    custom_json_serializer,
    find_working_directory,
)
# Imports from x4.py (or reimplementations if direct import is problematic).
# These functions from x4.py work on dictionary representations of the parsed data.
from x4 import (
    format_data_type_for_source,
    generate_variable_declaration_for_source,
    generate_struct_members_for_source,
    generate_begin_block_assignments,
    generate_s7_source_code_lines,
)


# --- Helper Functions ---
def find_data_format_files(working_dir: str) -> Tuple[Optional[str], Optional[str]]:
    """Finds the _data and _format files in the working directory."""
    data_file: Optional[str] = None
    format_file: Optional[str] = None
    extensions = ["*.db", "*.awl", "*.db.txt", "*.awl.txt"]
    all_s7_files = []
    for ext_pattern in extensions:
        all_s7_files.extend(glob.glob(os.path.join(working_dir, ext_pattern)))
    # Prioritize longer paths so that e.g. "file.db.txt" is considered before
    # "file.db" and a partial match does not shadow the full name.
    all_s7_files.sort(key=len, reverse=True)
    for f_path in all_s7_files:
        basename = os.path.basename(f_path)
        # Ignore _updated files produced by a previous run.
        if "_updated" in basename:
            continue
        # Strip the known extensions (handles double extensions like ".db.txt")
        # to get the true stem: "file_data.db.txt" -> "file_data".
        stem = basename
        for known_ext in (".txt", ".db", ".awl"):
            if stem.lower().endswith(known_ext):
                stem = stem[: -len(known_ext)]
        # We expect "PREFIX_data.EXT" / "PREFIX_format.EXT"; checking the stem
        # avoids matching e.g. "some_other_data_related_file".
        if stem.endswith("_data") and data_file is None:
            data_file = f_path  # Take the first one found (after sorting).
        elif stem.endswith("_format") and format_file is None:
            format_file = f_path
    if data_file:
        print(f"Found _data file: {data_file}")
    else:
        print("Warning: No _data file found.")
    if format_file:
        print(f"Found _format file: {format_file}")
    else:
        print("Warning: No _format file found.")
    return data_file, format_file
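
# Example: a directory containing "MyDB_data.db.txt" and "MyDB_format.db.txt"
# yields that pair of full paths; any file with "_updated" in its name is skipped.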


def parse_s7_to_json_file(s7_filepath: str, json_dir: str) -> Optional[str]:
    """Parses an S7 source file to JSON and saves it."""
    parser = S7Parser()
    filename = os.path.basename(s7_filepath)
    print(f"Parsing S7 file: {filename}...")
    try:
        parsed_result = parser.parse_file(s7_filepath)
    except Exception as e:
        print(f"Error parsing {filename}: {e}")
        return None
    # Derive the true base name, handling double extensions like ".db.txt":
    # "file_data.db.txt" -> "file_data", "file_data.db" -> "file_data".
    output_filename_base = filename
    for known_ext in (".txt", ".db", ".awl"):  # strip from right to left
        if output_filename_base.lower().endswith(known_ext):
            output_filename_base = output_filename_base[: -len(known_ext)]
    # The stem already carries its "_data"/"_format" suffix, so this yields
    # e.g. "base_data.json".
    json_output_filename = os.path.join(json_dir, f"{output_filename_base}.json")
    print(f"Serializing to JSON: {json_output_filename}")
    try:
        json_output = json.dumps(
            parsed_result, default=custom_json_serializer, indent=2
        )
        with open(json_output_filename, "w", encoding="utf-8") as f:
            f.write(json_output)
        print(f"JSON saved: {json_output_filename}")
        return json_output_filename
    except Exception as e:
        print(f"Error during JSON serialization or writing for {filename}: {e}")
        return None
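
# Example: parse_s7_to_json_file("/wd/MyDB_data.db.txt", "/wd/json") writes and
# returns "/wd/json/MyDB_data.json" (the stem keeps its _data/_format suffix).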


def load_json_file(json_filepath: str) -> Optional[Dict[str, Any]]:
    """Loads a JSON file into a Python dictionary."""
    try:
        with open(json_filepath, "r", encoding="utf-8") as f:
            return json.load(f)
    except Exception as e:
        print(f"Error loading JSON file {json_filepath}: {e}")
        return None


def flatten_db_variables_for_compare(
    members_list: List[Dict[str, Any]], parent_path: str = ""
) -> List[Tuple[str, Dict[str, Any]]]:
    """
    Flattens DB members for comparison.
    Collects all 'leaf' nodes (primitives, arrays of primitives, strings)
    together with their full dotted paths.
    """
    flat_list = []
    for var_info in members_list:
        var_name_segment = var_info["name"]
        current_var_path = (
            f"{parent_path}{var_name_segment}" if parent_path else var_name_segment
        )
        if var_info.get("children"):
            flat_list.extend(
                flatten_db_variables_for_compare(
                    var_info["children"], f"{current_var_path}."
                )
            )
        else:
            flat_list.append((current_var_path, var_info))
    return flat_list
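
# Illustration (hypothetical member dicts; real entries carry more keys):
#   members = [{"name": "Motor", "children": [
#       {"name": "Speed", "children": None},
#       {"name": "Torque", "children": None}]}]
#   flatten_db_variables_for_compare(members)
#   -> [("Motor.Speed", {...}), ("Motor.Torque", {...})]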


def compare_db_structures(data_db: Dict[str, Any], format_db: Dict[str, Any]) -> bool:
    """
    Compares the structure of two DBs (as dicts loaded from JSON).
    Returns True if compatible, False otherwise.
    """
    db_name = format_db.get("name", "UnknownDB")
    print(f"Comparing structure of DB: {db_name}")
    flat_data_vars_with_paths = flatten_db_variables_for_compare(
        data_db.get("members", [])
    )
    flat_format_vars_with_paths = flatten_db_variables_for_compare(
        format_db.get("members", [])
    )
    if len(flat_data_vars_with_paths) != len(flat_format_vars_with_paths):
        print(f"Error: DB '{db_name}' has a different number of expanded (leaf) variables.")
        print(f"  Variables in the _data file: {len(flat_data_vars_with_paths)}")
        print(f"  Variables in the _format file: {len(flat_format_vars_with_paths)}")
        min_len = min(len(flat_data_vars_with_paths), len(flat_format_vars_with_paths))
        divergence_found_early = False
        # Check whether a type mismatch already occurs before the end of the
        # shorter list. Only types are compared: names may differ if the
        # internal structure of a UDT/Struct changed, and the _data paths are
        # approximate in that case.
        for k in range(min_len):
            path_data_k, var_data_k = flat_data_vars_with_paths[k]
            path_format_k, var_format_k = flat_format_vars_with_paths[k]
            type_str_data_k = format_data_type_for_source(var_data_k)
            type_str_format_k = format_data_type_for_source(var_format_k)
            if type_str_data_k != type_str_format_k:
                print(f"  Additionally, a type mismatch was found BEFORE the end of the shorter list (index {k}):")
                print(f"    _format variable: Path='{path_format_k}', Name='{var_format_k['name']}', Type='{type_str_format_k}'")
                print(f"    _data variable:   Path='{path_data_k}' (approx.), Name='{var_data_k['name']}', Type='{type_str_data_k}'")
                divergence_found_early = True
                break
        if not divergence_found_early:
            # No early mismatch, so the difference comes from extra variables at the end.
            if len(flat_data_vars_with_paths) > len(flat_format_vars_with_paths):
                print(f"  The _data file has {len(flat_data_vars_with_paths) - min_len} extra variable(s).")
                print(f"  First extra variables in _data (path, name, type) from index {min_len}:")
                for j in range(min_len, min(min_len + 5, len(flat_data_vars_with_paths))):  # show up to 5
                    path, var = flat_data_vars_with_paths[j]
                    print(f"    - Path: '{path}', Name: '{var['name']}', Type: '{format_data_type_for_source(var)}'")
            else:
                print(f"  The _format file has {len(flat_format_vars_with_paths) - min_len} extra variable(s).")
                print(f"  First extra variables in _format (path, name, type) from index {min_len}:")
                for j in range(min_len, min(min_len + 5, len(flat_format_vars_with_paths))):  # show up to 5
                    path, var = flat_format_vars_with_paths[j]
                    print(f"    - Path: '{path}', Name: '{var['name']}', Type: '{format_data_type_for_source(var)}'")
        return False
    for i in range(len(flat_format_vars_with_paths)):
        path_data, var_data = flat_data_vars_with_paths[i]
        path_format, var_format = flat_format_vars_with_paths[i]
        type_str_data = format_data_type_for_source(var_data)
        type_str_format = format_data_type_for_source(var_format)
        if type_str_data != type_str_format:
            print(f"Error: Type mismatch in DB '{db_name}' at index {i} (0-based) of the expanded variable list.")
            print("  Comparing:")
            print(f"    _format variable: Path='{path_format}', Name='{var_format['name']}', Declared Type='{type_str_format}'")
            print(f"                      Offset: {var_format.get('byte_offset')}, Size: {var_format.get('size_in_bytes')} bytes")
            print(f"    _data variable:   Path='{path_data}' (approx.), Name='{var_data['name']}', Declared Type='{type_str_data}'")
            print(f"                      Offset: {var_data.get('byte_offset')}, Size: {var_data.get('size_in_bytes')} bytes")
            return False
    print(f"The structure of DB '{db_name}' is compatible.")
    return True
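
# Note: the comparison is positional over the flattened leaf lists and checks
# declared types only, so member renames inside structs/UDTs are tolerated,
# while any change in type, order, or leaf count marks the DBs as incompatible.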


def update_format_db_members_recursive(
    format_members: List[Dict[str, Any]], data_members: List[Dict[str, Any]]
):
    """
    Recursively updates 'initial_value', 'current_value', and 'current_element_values'
    in format_members using values from data_members.
    Assumes the structures are compatible and the lists have the same length.
    """
    for fm_var, dm_var in zip(format_members, data_members):
        fm_var["initial_value"] = dm_var.get("initial_value")
        fm_var["current_value"] = dm_var.get("current_value")
        if "current_element_values" in dm_var:
            fm_var["current_element_values"] = dm_var["current_element_values"]
        elif "current_element_values" in fm_var:
            # The _data side has no per-element values; drop any stale ones.
            del fm_var["current_element_values"]
        if fm_var.get("children") and dm_var.get("children"):
            if len(fm_var["children"]) == len(dm_var["children"]):
                update_format_db_members_recursive(
                    fm_var["children"], dm_var["children"]
                )
            else:
                print(
                    f"Warning: Mismatch in children count for {fm_var['name']} "
                    "during update. This is unexpected."
                )
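
# Values are copied by position, mirroring compare_db_structures; this is safe
# only because the structures were verified compatible beforehand.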


def get_updated_filename(format_filename_basename: str) -> str:
    """Generates the output filename for the _updated file."""
    suffixes_map = {
        "_format.db.txt": "_updated.db.txt",
        "_format.awl.txt": "_updated.awl.txt",
        "_format.db": "_updated.db",
        "_format.awl": "_updated.awl",
    }
    # Longer suffixes are listed first so ".db.txt" wins over ".db".
    for s_format, s_updated in suffixes_map.items():
        if format_filename_basename.lower().endswith(s_format.lower()):
            base = format_filename_basename[: -len(s_format)]
            return base + s_updated
    if "_format" in format_filename_basename:
        return format_filename_basename.replace("_format", "_updated")
    name, ext = os.path.splitext(format_filename_basename)
    return f"{name}_updated{ext}"
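
# Example: "MyDB_format.db.txt" -> "MyDB_updated.db.txt". A name with an
# unmapped suffix such as "MyDB_format.scl" falls back to the plain
# "_format" -> "_updated" replacement, giving "MyDB_updated.scl".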


# --- Main Script Logic ---
def main():
    working_dir = find_working_directory()
    print(f"Using working directory: {working_dir}")
    data_s7_filepath, format_s7_filepath = find_data_format_files(working_dir)
    if not data_s7_filepath or not format_s7_filepath:
        print("Error: Both _data and _format S7 source files must be present. Aborting.")
        return
    json_dir = os.path.join(working_dir, "json")
    os.makedirs(json_dir, exist_ok=True)
    data_json_filepath = parse_s7_to_json_file(data_s7_filepath, json_dir)
    if not data_json_filepath:
        print("Failed to parse the _data file. Aborting.")
        return
    data_parsed_dict = load_json_file(data_json_filepath)
    if not data_parsed_dict:
        print("Failed to load the _data JSON. Aborting.")
        return
    format_json_filepath = parse_s7_to_json_file(format_s7_filepath, json_dir)
    if not format_json_filepath:
        print("Failed to parse the _format file. Aborting.")
        return
    format_parsed_dict = load_json_file(format_json_filepath)
    if not format_parsed_dict:
        print("Failed to load the _format JSON. Aborting.")
        return
    data_dbs = data_parsed_dict.get("dbs", [])
    format_dbs = format_parsed_dict.get("dbs", [])
    if not format_dbs:
        print("No Data Blocks found in the _format file. Nothing to update. Aborting.")
        return
    if len(data_dbs) != len(format_dbs):
        print(
            f"Error: Mismatch in the number of Data Blocks. "
            f"_data file has {len(data_dbs)} DBs, _format file has {len(format_dbs)} DBs. Aborting."
        )
        return
    all_dbs_compatible = True
    for current_format_db in format_dbs:
        # DBs are matched by name, not by position.
        current_data_db = next(
            (db for db in data_dbs if db["name"] == current_format_db["name"]), None
        )
        if not current_data_db:
            print(
                f"Error: DB '{current_format_db['name']}' from the _format file "
                f"was not found in the _data file. Aborting."
            )
            all_dbs_compatible = False
            break
        if not compare_db_structures(current_data_db, current_format_db):
            all_dbs_compatible = False
            break
    if not all_dbs_compatible:
        print("Comparison failed. Aborting generation of the _updated file.")
        return
    print("\nAll DB structures are compatible. Proceeding to generate the _updated file.")
    updated_parsed_dict = copy.deepcopy(format_parsed_dict)
    # The deep copy already carries the UDTs from the _format file; just make
    # sure the key exists for the downstream source generator.
    updated_parsed_dict.setdefault("udts", [])
    updated_dbs_list = updated_parsed_dict.get("dbs", [])
    for updated_db_ref in updated_dbs_list:
        data_db_original = next(
            (db for db in data_dbs if db["name"] == updated_db_ref["name"]), None
        )
        if not data_db_original:
            print(
                f"Critical Error: Could not find data DB {updated_db_ref['name']} "
                f"during the update phase. Aborting."
            )
            return
        if "members" in updated_db_ref and "members" in data_db_original:
            update_format_db_members_recursive(
                updated_db_ref["members"], data_db_original["members"]
            )
        # Carry over the BEGIN-block assignments from the _data file so the
        # generated source reproduces its current values.
        updated_db_ref["_begin_block_assignments_ordered"] = data_db_original.get(
            "_begin_block_assignments_ordered", []
        )
        updated_db_ref["_initial_values_from_begin_block"] = data_db_original.get(
            "_initial_values_from_begin_block", {}
        )
    s7_output_lines = generate_s7_source_code_lines(updated_parsed_dict)
    output_s7_filename_basename = get_updated_filename(
        os.path.basename(format_s7_filepath)
    )
    output_s7_filepath = os.path.join(working_dir, output_s7_filename_basename)
    try:
        with open(output_s7_filepath, "w", encoding="utf-8") as f:
            for line in s7_output_lines:
                f.write(line + "\n")
        print(f"\nSuccessfully generated the _updated S7 file: {output_s7_filepath}")
    except Exception as e:
        print(f"Error writing the _updated S7 file {output_s7_filepath}: {e}")


if __name__ == "__main__":
    main()
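
# Typical working directory before and after a run (illustrative filenames):
#   MyDB_data.db.txt    -> json/MyDB_data.json   (source of values)
#   MyDB_format.db.txt  -> json/MyDB_format.json (source of structure)
#   MyDB_updated.db.txt <- generated: the _format structure carrying _data values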