# Original file: ParamManagerScripts/backend/script_groups/S7_DB_Utils/x7_value_updater.py
# (704 lines, 34 KiB, Python)

# --- x7_refactored.py ---
import json
import os
import glob
import sys
import copy
import shutil # Para copiar archivos
from typing import Dict, List, Tuple, Any, Optional
# Importar para el path
script_root = os.path.dirname(
os.path.dirname(os.path.dirname(os.path.dirname(__file__)))
)
sys.path.append(script_root)
from backend.script_utils import load_configuration
# Importar desde x3
from x3 import S7Parser, find_working_directory, custom_json_serializer, flatten_db_structure, format_address_for_display
from x4 import format_data_type_for_source
# Importar desde x4 para generar archivos
from x4 import generate_s7_source_code_lines, generate_markdown_table
def find_matching_files(working_dir: str) -> List[Tuple[str, str]]:
"""
Busca pares de archivos _data y _format con extensión .db o .awl.
"""
# [Código existente]
data_files_db = glob.glob(os.path.join(working_dir, "*_data.db"))
data_files_awl = glob.glob(os.path.join(working_dir, "*_data.awl"))
all_data_files = data_files_db + data_files_awl
format_files_db = glob.glob(os.path.join(working_dir, "*_format.db"))
format_files_awl = glob.glob(os.path.join(working_dir, "*_format.awl"))
all_format_files = format_files_db + format_files_awl
matched_pairs = []
for data_file in all_data_files:
base_name = os.path.basename(data_file).replace("_data", "").split('.')[0]
format_candidates = [f for f in all_format_files if os.path.basename(f).startswith(f"{base_name}_format")]
if format_candidates:
matched_pairs.append((data_file, format_candidates[0]))
return matched_pairs
# [Otras funciones existentes: parse_files_to_json, compare_structures_by_offset, update_values_recursive, create_updated_json]
def parse_files_to_json(data_file: str, format_file: str, json_dir: str) -> Tuple[Dict, Dict]:
"""
Parsea los archivos _data y _format usando S7Parser y guarda los resultados como JSON.
"""
data_parser = S7Parser()
format_parser = S7Parser()
print(f"Parseando archivo data: {os.path.basename(data_file)}")
data_result = data_parser.parse_file(data_file)
print(f"Parseando archivo format: {os.path.basename(format_file)}")
format_result = format_parser.parse_file(format_file)
data_base = os.path.splitext(os.path.basename(data_file))[0]
format_base = os.path.splitext(os.path.basename(format_file))[0]
data_json_path = os.path.join(json_dir, f"{data_base}.json")
format_json_path = os.path.join(json_dir, f"{format_base}.json")
data_json = json.dumps(data_result, default=custom_json_serializer, indent=2)
format_json = json.dumps(format_result, default=custom_json_serializer, indent=2)
with open(data_json_path, "w", encoding='utf-8') as f:
f.write(data_json)
with open(format_json_path, "w", encoding='utf-8') as f:
f.write(format_json)
print(f"Archivos JSON generados: {os.path.basename(data_json_path)} y {os.path.basename(format_json_path)}")
data_obj = json.loads(data_json)
format_obj = json.loads(format_json)
return data_obj, format_obj
def compare_structures_by_offset(data_vars: List[Dict], format_vars: List[Dict]) -> Tuple[bool, List[str]]:
"""
Compara variables por offset, verificando compatibilidad.
Usa las listas aplanadas de flatten_db_structure.
"""
issues = []
# Crear diccionarios para búsqueda rápida por offset
data_by_offset = {var["byte_offset"]: var for var in data_vars}
format_by_offset = {var["byte_offset"]: var for var in format_vars}
# Recopilar todos los offsets únicos de ambos conjuntos
all_offsets = sorted(set(list(data_by_offset.keys()) + list(format_by_offset.keys())))
# Verificar que todos los offsets existan en ambos conjuntos
for offset in all_offsets:
if offset not in data_by_offset:
issues.append(f"Offset {offset} existe en _format pero no en _data")
continue
if offset not in format_by_offset:
issues.append(f"Offset {offset} existe en _data pero no en _format")
continue
# Obtener las variables para comparar
data_var = data_by_offset[offset]
format_var = format_by_offset[offset]
# Verificar coincidencia de tipos
data_type = data_var["data_type"].upper()
format_type = format_var["data_type"].upper()
if data_type != format_type:
issues.append(f"Tipo de dato diferente en offset {offset}: {data_type} ({data_var['full_path']}) vs {format_type} ({format_var['full_path']})")
# Verificar tamaño
data_size = data_var["size_in_bytes"]
format_size = format_var["size_in_bytes"]
if data_size != format_size:
issues.append(f"Tamaño diferente en offset {offset}: {data_size} bytes ({data_var['full_path']}) vs {format_size} bytes ({format_var['full_path']})")
# Verificar tamaño en bits para BOOLs
data_bit_size = data_var.get("bit_size", 0)
format_bit_size = format_var.get("bit_size", 0)
if data_bit_size != format_bit_size:
issues.append(f"Tamaño en bits diferente en offset {offset}: {data_bit_size} ({data_var['full_path']}) vs {format_bit_size} ({format_var['full_path']})")
return len(issues) == 0, issues
def process_updated_json(updated_json: Dict, updated_json_path: str, working_dir: str, documentation_dir: str, original_format_file: str):
"""
Genera los archivos markdown y S7 a partir del JSON actualizado, y copia el archivo S7
al directorio de trabajo con la extensión correcta.
"""
# Obtener nombre base y extensión original
format_file_name = os.path.basename(original_format_file)
base_name = format_file_name.replace("_format", "_updated").split('.')[0]
original_extension = os.path.splitext(format_file_name)[1] # .db o .awl
# Generar archivo markdown para documentación
for db in updated_json.get("dbs", []):
md_output_filename = os.path.join(documentation_dir, f"{base_name}.md")
try:
md_lines = []
md_lines.append(f"# Documentación S7 para {base_name}")
md_lines.append(f"_Fuente JSON: {os.path.basename(updated_json_path)}_")
md_lines.append("")
# Generar tabla markdown usando generate_markdown_table importado de x4
db_md_lines = generate_markdown_table(db)
md_lines.extend(db_md_lines)
with open(md_output_filename, 'w', encoding='utf-8') as f:
for line in md_lines:
f.write(line + "\n")
print(f"Archivo Markdown generado: {md_output_filename}")
except Exception as e:
print(f"Error al generar Markdown para {base_name}: {e}")
# Generar archivo de código fuente S7
s7_txt_filename = os.path.join(documentation_dir, f"{base_name}.txt")
try:
s7_lines = generate_s7_source_code_lines(updated_json)
with open(s7_txt_filename, 'w', encoding='utf-8') as f:
for line in s7_lines:
f.write(line + "\n")
print(f"Archivo S7 generado: {s7_txt_filename}")
# Copiar al directorio de trabajo con la extensión original
s7_output_filename = os.path.join(working_dir, f"{base_name}{original_extension}")
shutil.copy2(s7_txt_filename, s7_output_filename)
print(f"Archivo S7 copiado a: {s7_output_filename}")
except Exception as e:
print(f"Error al generar archivo S7 para {base_name}: {e}")
def create_updated_json(data_json: Dict, format_json: Dict) -> Dict:
"""
Creates an updated JSON based on the structure of _format with values from _data.
Uses offset as the primary key for finding corresponding variables.
Reports errors if a corresponding offset is not found.
"""
# Deep copy of format_json to avoid modifying the original
updated_json = copy.deepcopy(format_json)
# Process each DB
for db_idx, format_db in enumerate(format_json.get("dbs", [])):
# Find corresponding DB in data_json
data_db = next((db for db in data_json.get("dbs", []) if db["name"] == format_db["name"]), None)
if not data_db:
print(f"Error: DB '{format_db['name']}' not found in data_json")
continue # No corresponding DB in data_json
# Flatten variables from both DBs
flat_data_vars = flatten_db_structure(data_db)
flat_format_vars = flatten_db_structure(format_db)
# Create offset to variable map for data - ONLY include usable variables (SIMPLE_VAR and ARRAY_ELEMENT)
# This is the key fix: filter by element_type to avoid matching STRUCT and other non-value types
data_by_offset = {
var["byte_offset"]: var for var in flat_data_vars
if var.get("element_type") in ["SIMPLE_VAR", "ARRAY_ELEMENT"]
}
# For each variable in format, find its corresponding in data by offset
for format_var in flat_format_vars:
# Only process variables and array elements, not structures or UDT instances
if format_var.get("element_type") not in ["SIMPLE_VAR", "ARRAY_ELEMENT"]:
continue
offset = format_var["byte_offset"]
path = format_var["full_path"]
# Find the corresponding variable in data_json by offset
if offset in data_by_offset:
data_var = data_by_offset[offset]
# Even though we've filtered the data variables, double-check element types
format_element_type = format_var.get("element_type")
data_element_type = data_var.get("element_type")
# Only copy values if element types are compatible
if format_element_type == data_element_type or (
format_element_type in ["SIMPLE_VAR", "ARRAY_ELEMENT"] and
data_element_type in ["SIMPLE_VAR", "ARRAY_ELEMENT"]
):
# Find the original variable in the hierarchical structure
path_parts = format_var["full_path"].split('.')
current_node = updated_json["dbs"][db_idx]
# Variable to track if the path was found
path_found = True
# Navigate the hierarchy to find the parent node
for i in range(len(path_parts) - 1):
if "members" in current_node:
# Find the corresponding member
member_name = path_parts[i]
matching_members = [m for m in current_node["members"] if m["name"] == member_name]
if matching_members:
current_node = matching_members[0]
else:
print(f"Error: Member '{member_name}' not found in path '{path}'")
path_found = False
break # Path not found
elif "children" in current_node:
# Find the corresponding child
child_name = path_parts[i]
matching_children = [c for c in current_node["children"] if c["name"] == child_name]
if matching_children:
current_node = matching_children[0]
else:
print(f"Error: Child '{child_name}' not found in path '{path}'")
path_found = False
break # Path not found
else:
print(f"Error: Cannot navigate further in path '{path}', current node has no members or children")
path_found = False
break # Cannot navigate further
# If parent node found, update the child
if path_found and ("members" in current_node or "children" in current_node):
target_list = current_node.get("members", current_node.get("children", []))
target_name = path_parts[-1]
# If it's an array element, extract the base name and index
if '[' in target_name and ']' in target_name:
base_name = target_name.split('[')[0]
index_str = target_name[target_name.find('[')+1:target_name.find(']')]
# Find the base array
array_var = next((var for var in target_list if var["name"] == base_name), None)
if array_var:
# Ensure current_element_values exists
if "current_element_values" not in array_var:
array_var["current_element_values"] = {}
# Copy the array element value
if "current_value" in data_var:
array_var["current_element_values"][index_str] = {
"value": data_var["current_value"],
"offset": data_var["byte_offset"]
}
else:
# Find the variable to update
target_var_found = False
for target_var in target_list:
if target_var["name"] == target_name:
target_var_found = True
# Clean and copy initial_value if exists
if "initial_value" in target_var:
del target_var["initial_value"]
if "initial_value" in data_var and data_var["initial_value"] is not None:
target_var["initial_value"] = data_var["initial_value"]
# Clean and copy current_value if exists
if "current_value" in target_var:
del target_var["current_value"]
if "current_value" in data_var and data_var["current_value"] is not None:
target_var["current_value"] = data_var["current_value"]
# Clean and copy current_element_values if exists
if "current_element_values" in target_var:
del target_var["current_element_values"]
if "current_element_values" in data_var and data_var["current_element_values"]:
target_var["current_element_values"] = copy.deepcopy(data_var["current_element_values"])
break
if not target_var_found and not ('[' in target_name and ']' in target_name):
print(f"Error: Variable '{target_name}' not found in path '{path}'")
else:
print(f"Warning: Element types don't match at offset {offset} for '{path}': {format_element_type} vs {data_element_type}")
else:
# Offset not found in data_json, report error
print(f"Error: Offset {offset} (for '{path}') not found in source data (_data)")
# Clear values if it's not an array element
if '[' not in path or ']' not in path:
# Find the original variable in the hierarchical structure
path_parts = path.split('.')
current_node = updated_json["dbs"][db_idx]
# Navigate to the parent node to clean values
path_found = True
for i in range(len(path_parts) - 1):
if "members" in current_node:
member_name = path_parts[i]
matching_members = [m for m in current_node["members"] if m["name"] == member_name]
if matching_members:
current_node = matching_members[0]
else:
path_found = False
break
elif "children" in current_node:
child_name = path_parts[i]
matching_children = [c for c in current_node["children"] if c["name"] == child_name]
if matching_children:
current_node = matching_children[0]
else:
path_found = False
break
else:
path_found = False
break
if path_found and ("members" in current_node or "children" in current_node):
target_list = current_node.get("members", current_node.get("children", []))
target_name = path_parts[-1]
for target_var in target_list:
if target_var["name"] == target_name:
# Remove initial and current values
if "initial_value" in target_var:
del target_var["initial_value"]
if "current_value" in target_var:
del target_var["current_value"]
if "current_element_values" in target_var:
del target_var["current_element_values"]
break
return updated_json
def generate_comparison_excel(format_json: Dict, data_json: Dict, updated_json: Dict, excel_filename: str):
"""
Generates a comprehensive Excel file comparing values between format, data and updated JSONs.
Uses flatten_db_structure and matches by offset, leveraging element_type for better filtering.
Args:
format_json: JSON with the structure and names from format
data_json: JSON with the source data
updated_json: JSON with the updated data
excel_filename: Path to the Excel file to generate
"""
import openpyxl
from openpyxl.utils import get_column_letter
from openpyxl.styles import PatternFill, Font, Alignment, Border, Side
# Create a new Excel workbook
workbook = openpyxl.Workbook()
sheet = workbook.active
sheet.title = "Value_Comparison"
# Define styles
diff_fill = PatternFill(start_color="FFFF00", end_color="FFFF00", fill_type="solid")
type_mismatch_fill = PatternFill(start_color="FF9999", end_color="FF9999", fill_type="solid") # Light red
header_font = Font(bold=True)
header_fill = PatternFill(start_color="DDDDDD", end_color="DDDDDD", fill_type="solid")
thin_border = Border(left=Side(style='thin'), right=Side(style='thin'),
top=Side(style='thin'), bottom=Side(style='thin'))
# Set up headers
headers = ["Address", "Name", "Type", "Element Type",
"Format Initial", "Data Initial", "Updated Initial",
"Format Current", "Data Current", "Updated Current",
"Type Match", "Value Differences"]
for col_num, header in enumerate(headers, 1):
cell = sheet.cell(row=1, column=col_num, value=header)
cell.font = header_font
cell.fill = header_fill
cell.border = thin_border
cell.alignment = Alignment(horizontal='center')
# Freeze top row
sheet.freeze_panes = "A2"
current_row = 2
# Process each DB
for db_idx, format_db in enumerate(format_json.get("dbs", [])):
db_name = format_db["name"]
data_db = next((db for db in data_json.get("dbs", []) if db["name"] == db_name), None)
updated_db = next((db for db in updated_json.get("dbs", []) if db["name"] == db_name), None)
if not data_db or not updated_db:
print(f"Error: DB '{db_name}' not found in one of the JSON files")
continue
# Add DB name as section header with merged cells
sheet.merge_cells(f'A{current_row}:L{current_row}')
header_cell = sheet.cell(row=current_row, column=1, value=f"DB: {db_name}")
header_cell.font = Font(bold=True, size=12)
header_cell.fill = PatternFill(start_color="CCCCFF", end_color="CCCCFF", fill_type="solid") # Light blue
header_cell.alignment = Alignment(horizontal='center')
current_row += 1
# Get flattened variables from all sources
flat_format_vars = flatten_db_structure(format_db)
flat_data_vars = flatten_db_structure(data_db)
flat_updated_vars = flatten_db_structure(updated_db)
# Create maps by offset for quick lookup
data_by_offset = {var["byte_offset"]: var for var in flat_data_vars}
updated_by_offset = {var["byte_offset"]: var for var in flat_updated_vars}
# Process each variable from format_json
for format_var in flat_format_vars:
# Skip certain types based on element_type
element_type = format_var.get("element_type", "UNKNOWN")
# Skip STRUCT types with no values, but include ARRAY and UDT_INSTANCE types
if element_type == "STRUCT" and not format_var.get("children"):
continue
offset = format_var["byte_offset"]
path = format_var["full_path"]
data_type = format_data_type_for_source(format_var)
address = format_var.get("address_display", format_address_for_display(offset, format_var.get("bit_size", 0)))
# Find corresponding variables by offset
data_var = data_by_offset.get(offset)
updated_var = updated_by_offset.get(offset)
# Compare element types
data_element_type = data_var.get("element_type", "MISSING") if data_var else "MISSING"
updated_element_type = updated_var.get("element_type", "MISSING") if updated_var else "MISSING"
# Determine type compatibility
type_match = "Yes"
if data_var and element_type != data_element_type:
# Check for compatible types
if (element_type in ["SIMPLE_VAR", "ARRAY_ELEMENT"] and
data_element_type in ["SIMPLE_VAR", "ARRAY_ELEMENT"]):
type_match = "Compatible"
else:
type_match = "No"
elif not data_var:
type_match = "Missing"
# Get values (with empty string defaults)
format_initial = str(format_var.get("initial_value", ""))
data_initial = str(data_var.get("initial_value", "")) if data_var else ""
updated_initial = str(updated_var.get("initial_value", "")) if updated_var else ""
format_current = str(format_var.get("current_value", ""))
data_current = str(data_var.get("current_value", "")) if data_var else ""
updated_current = str(updated_var.get("current_value", "")) if updated_var else ""
# Check for differences
has_initial_diff = (format_initial != data_initial or
format_initial != updated_initial or
data_initial != updated_initial)
has_current_diff = (format_current != data_current or
format_current != updated_current or
data_current != updated_current)
# Create detailed difference description
diff_desc = []
if has_initial_diff:
diff_desc.append("Initial values differ")
if has_current_diff:
diff_desc.append("Current values differ")
if not diff_desc:
diff_desc.append("None")
# Write data
sheet.cell(row=current_row, column=1, value=address)
sheet.cell(row=current_row, column=2, value=path)
sheet.cell(row=current_row, column=3, value=data_type)
sheet.cell(row=current_row, column=4, value=element_type)
sheet.cell(row=current_row, column=5, value=format_initial)
sheet.cell(row=current_row, column=6, value=data_initial)
sheet.cell(row=current_row, column=7, value=updated_initial)
sheet.cell(row=current_row, column=8, value=format_current)
sheet.cell(row=current_row, column=9, value=data_current)
sheet.cell(row=current_row, column=10, value=updated_current)
sheet.cell(row=current_row, column=11, value=type_match)
sheet.cell(row=current_row, column=12, value=", ".join(diff_desc))
# Add borders to all cells
for col in range(1, 13):
sheet.cell(row=current_row, column=col).border = thin_border
# Highlight differences
if has_initial_diff:
for col in range(5, 8):
sheet.cell(row=current_row, column=col).fill = diff_fill
if has_current_diff:
for col in range(8, 11):
sheet.cell(row=current_row, column=col).fill = diff_fill
# Highlight type mismatches
if type_match == "No" or type_match == "Missing":
sheet.cell(row=current_row, column=11).fill = type_mismatch_fill
current_row += 1
# Add filter to headers
sheet.auto_filter.ref = f"A1:L{current_row-1}"
# Auto-adjust column widths
for col_idx, column_cells in enumerate(sheet.columns, 1):
max_length = 0
column = get_column_letter(col_idx)
for cell in column_cells:
try:
if len(str(cell.value)) > max_length:
max_length = len(str(cell.value))
except:
pass
adjusted_width = min(max_length + 2, 100) # Limit maximum width
sheet.column_dimensions[column].width = adjusted_width
# Add a summary sheet
summary_sheet = workbook.create_sheet(title="Summary")
summary_sheet.column_dimensions['A'].width = 30
summary_sheet.column_dimensions['B'].width = 15
summary_sheet.column_dimensions['C'].width = 50
# Add header to summary
summary_headers = ["Database", "Item Count", "Notes"]
for col_num, header in enumerate(summary_headers, 1):
cell = summary_sheet.cell(row=1, column=col_num, value=header)
cell.font = header_font
cell.fill = header_fill
# Add summary data
summary_row = 2
for db_idx, format_db in enumerate(format_json.get("dbs", [])):
db_name = format_db["name"]
data_db = next((db for db in data_json.get("dbs", []) if db["name"] == db_name), None)
updated_db = next((db for db in updated_json.get("dbs", []) if db["name"] == db_name), None)
if not data_db or not updated_db:
continue
flat_format_vars = flatten_db_structure(format_db)
flat_data_vars = flatten_db_structure(data_db)
# Count by element type
format_type_counts = {}
for var in flat_format_vars:
element_type = var.get("element_type", "UNKNOWN")
format_type_counts[element_type] = format_type_counts.get(element_type, 0) + 1
# Count value differences
data_by_offset = {var["byte_offset"]: var for var in flat_data_vars}
diff_count = 0
type_mismatch_count = 0
for format_var in flat_format_vars:
offset = format_var["byte_offset"]
data_var = data_by_offset.get(offset)
if data_var:
# Check for type mismatch
if format_var.get("element_type") != data_var.get("element_type"):
type_mismatch_count += 1
# Check for value differences
format_initial = str(format_var.get("initial_value", ""))
data_initial = str(data_var.get("initial_value", ""))
format_current = str(format_var.get("current_value", ""))
data_current = str(data_var.get("current_value", ""))
if format_initial != data_initial or format_current != data_current:
diff_count += 1
# Write to summary
summary_sheet.cell(row=summary_row, column=1, value=db_name)
summary_sheet.cell(row=summary_row, column=2, value=len(flat_format_vars))
notes = []
for element_type, count in format_type_counts.items():
notes.append(f"{element_type}: {count}")
notes.append(f"Value differences: {diff_count}")
notes.append(f"Type mismatches: {type_mismatch_count}")
summary_sheet.cell(row=summary_row, column=3, value=", ".join(notes))
summary_row += 1
try:
workbook.save(excel_filename)
print(f"Comparison Excel file generated: {excel_filename}")
except Exception as e:
print(f"Error writing Excel file {excel_filename}: {e}")
def main():
working_dir = find_working_directory()
print(f"Using working directory: {working_dir}")
output_json_dir = os.path.join(working_dir, "json")
documentation_dir = os.path.join(working_dir, "documentation")
os.makedirs(output_json_dir, exist_ok=True)
os.makedirs(documentation_dir, exist_ok=True)
print(f"Los archivos JSON se guardarán en: {output_json_dir}")
print(f"Los archivos de documentación se guardarán en: {documentation_dir}")
matched_pairs = find_matching_files(working_dir)
if not matched_pairs:
print("No se encontraron pares de archivos _data y _format para procesar.")
return
print(f"Se encontraron {len(matched_pairs)} pares de archivos para procesar.")
for data_file, format_file in matched_pairs:
print(f"\n--- Procesando par de archivos ---")
print(f"Data file: {os.path.basename(data_file)}")
print(f"Format file: {os.path.basename(format_file)}")
# Parsear archivos a JSON
data_json, format_json = parse_files_to_json(data_file, format_file, output_json_dir)
# Verificar compatibilidad usando listas aplanadas
all_compatible = True
for db_idx, format_db in enumerate(format_json.get("dbs", [])):
# Buscar el DB correspondiente en data_json
data_db = next((db for db in data_json.get("dbs", []) if db["name"] == format_db["name"]), None)
if not data_db:
print(f"Error: No se encontró DB '{format_db['name']}' en el archivo data")
all_compatible = False
continue
# Aplanar variables de ambos DBs
flat_data_vars = flatten_db_structure(data_db)
flat_format_vars = flatten_db_structure(format_db)
print(f"Comparando estructuras para DB '{format_db['name']}': {len(flat_data_vars)} variables en _data, {len(flat_format_vars)} variables en _format")
compatible, issues = compare_structures_by_offset(flat_data_vars, flat_format_vars)
if not compatible:
all_compatible = False
print(f"\nSe encontraron problemas de compatibilidad en DB '{format_db['name']}':")
for issue in issues:
print(f" - {issue}")
print(f"Abortando el proceso para este DB.")
if all_compatible:
print("\nLos archivos son compatibles. Creando el archivo _updated...")
# Crear JSON actualizado
updated_json = create_updated_json(data_json, format_json)
# Guardar la versión actualizada
base_name = os.path.basename(format_file).replace("_format", "").split('.')[0]
updated_json_path = os.path.join(output_json_dir, f"{base_name}_updated.json")
with open(updated_json_path, "w", encoding='utf-8') as f:
json.dump(updated_json, f, default=custom_json_serializer, indent=2)
print(f"Archivo _updated generado: {updated_json_path}")
# Generar archivo de comparación Excel
comparison_excel_path = os.path.join(documentation_dir, f"{base_name}_comparison.xlsx")
generate_comparison_excel(format_json, data_json, updated_json, comparison_excel_path)
# Procesar el JSON actualizado para generar archivos Markdown y S7
process_updated_json(updated_json, updated_json_path, working_dir, documentation_dir, format_file)
print("\n--- Proceso completado ---")
if __name__ == "__main__":
main()