# --- x7_refactored.py ---
import json
import os
import glob
import sys
import copy
import shutil  # For copying files
from typing import Dict, List, Tuple, Any, Optional

# Make the project root importable
script_root = os.path.dirname(
    os.path.dirname(os.path.dirname(os.path.dirname(__file__)))
)
sys.path.append(script_root)
from backend.script_utils import load_configuration

# Imports from x3
from x3 import S7Parser, find_working_directory, custom_json_serializer, flatten_db_structure, format_address_for_display
from x4 import format_data_type_for_source

# Imports from x4 for source-file generation
from x4 import generate_s7_source_code_lines, generate_markdown_table


def find_matching_files(working_dir: str) -> List[Tuple[str, str]]:
    """
    Finds pairs of _data and _format files with a .db or .awl extension.
    """
    data_files_db = glob.glob(os.path.join(working_dir, "*_data.db"))
    data_files_awl = glob.glob(os.path.join(working_dir, "*_data.awl"))
    all_data_files = data_files_db + data_files_awl

    format_files_db = glob.glob(os.path.join(working_dir, "*_format.db"))
    format_files_awl = glob.glob(os.path.join(working_dir, "*_format.awl"))
    all_format_files = format_files_db + format_files_awl

    matched_pairs = []
    for data_file in all_data_files:
        base_name = os.path.basename(data_file).replace("_data", "").split('.')[0]
        format_candidates = [f for f in all_format_files if os.path.basename(f).startswith(f"{base_name}_format")]
        if format_candidates:
            matched_pairs.append((data_file, format_candidates[0]))

    return matched_pairs
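
# Pairing sketch with hypothetical file names: "motor_data.db" pairs with
# "motor_format.db"; a "pump_data.awl" with no matching "pump_format.*"
# file is silently skipped:
#
#   find_matching_files("/plc")
#   # -> [("/plc/motor_data.db", "/plc/motor_format.db")]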


def parse_files_to_json(data_file: str, format_file: str, json_dir: str) -> Tuple[Dict, Dict]:
    """
    Parses the _data and _format files with S7Parser and saves the results as JSON.
    """
    data_parser = S7Parser()
    format_parser = S7Parser()

    print(f"Parsing data file: {os.path.basename(data_file)}")
    data_result = data_parser.parse_file(data_file)

    print(f"Parsing format file: {os.path.basename(format_file)}")
    format_result = format_parser.parse_file(format_file)

    data_base = os.path.splitext(os.path.basename(data_file))[0]
    format_base = os.path.splitext(os.path.basename(format_file))[0]

    data_json_path = os.path.join(json_dir, f"{data_base}.json")
    format_json_path = os.path.join(json_dir, f"{format_base}.json")

    data_json = json.dumps(data_result, default=custom_json_serializer, indent=2)
    format_json = json.dumps(format_result, default=custom_json_serializer, indent=2)

    with open(data_json_path, "w", encoding='utf-8') as f:
        f.write(data_json)

    with open(format_json_path, "w", encoding='utf-8') as f:
        f.write(format_json)

    print(f"JSON files generated: {os.path.basename(data_json_path)} and {os.path.basename(format_json_path)}")

    # Round-trip through json.loads so both results become plain dicts
    data_obj = json.loads(data_json)
    format_obj = json.loads(format_json)

    return data_obj, format_obj
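
# Hedged usage sketch (paths are hypothetical):
#
#   data_obj, format_obj = parse_files_to_json(
#       "/plc/motor_data.db", "/plc/motor_format.db", "/plc/json"
#   )
#   # Both results have been round-tripped through json.dumps/json.loads,
#   # so data_obj["dbs"] is a list of plain dicts that deepcopy handles.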


def compare_structures_by_offset(data_vars: List[Dict], format_vars: List[Dict]) -> Tuple[bool, List[str]]:
    """
    Compares variables by offset, checking compatibility.
    Works on the flattened lists produced by flatten_db_structure.
    """
    issues = []

    # Build offset -> variable lookups
    data_by_offset = {var["byte_offset"]: var for var in data_vars}
    format_by_offset = {var["byte_offset"]: var for var in format_vars}

    # Collect every unique offset from both sets
    all_offsets = sorted(set(list(data_by_offset.keys()) + list(format_by_offset.keys())))

    # Check that every offset exists in both sets
    for offset in all_offsets:
        if offset not in data_by_offset:
            issues.append(f"Offset {offset} exists in _format but not in _data")
            continue

        if offset not in format_by_offset:
            issues.append(f"Offset {offset} exists in _data but not in _format")
            continue

        # Fetch the variables to compare
        data_var = data_by_offset[offset]
        format_var = format_by_offset[offset]

        # Check that the data types match
        data_type = data_var["data_type"].upper()
        format_type = format_var["data_type"].upper()

        if data_type != format_type:
            issues.append(f"Different data type at offset {offset}: {data_type} ({data_var['full_path']}) vs {format_type} ({format_var['full_path']})")

        # Check the size
        data_size = data_var["size_in_bytes"]
        format_size = format_var["size_in_bytes"]

        if data_size != format_size:
            issues.append(f"Different size at offset {offset}: {data_size} bytes ({data_var['full_path']}) vs {format_size} bytes ({format_var['full_path']})")

        # Check the bit size for BOOLs
        data_bit_size = data_var.get("bit_size", 0)
        format_bit_size = format_var.get("bit_size", 0)

        if data_bit_size != format_bit_size:
            issues.append(f"Different bit size at offset {offset}: {data_bit_size} ({data_var['full_path']}) vs {format_bit_size} ({format_var['full_path']})")

    return len(issues) == 0, issues
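
# Minimal self-contained demo, assuming the flattened-variable dict shape
# produced by flatten_db_structure (keys used above: byte_offset, data_type,
# size_in_bytes, bit_size, full_path). Not called anywhere; illustration only.
def _compare_structures_demo() -> None:
    data_vars = [
        {"byte_offset": 0.0, "data_type": "INT", "size_in_bytes": 2,
         "bit_size": 0, "full_path": "Speed"},
    ]
    format_vars = [
        {"byte_offset": 0.0, "data_type": "REAL", "size_in_bytes": 4,
         "bit_size": 0, "full_path": "Speed"},
    ]
    compatible, issues = compare_structures_by_offset(data_vars, format_vars)
    # compatible is False; issues report the INT vs REAL type mismatch
    # and the 2-byte vs 4-byte size mismatch at offset 0.0.
    for issue in issues:
        print(issue)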


def create_updated_json(data_json: Dict, format_json: Dict) -> Dict:
    """
    Creates an updated JSON based on the _format structure with values from _data.
    Uses the offset as the primary key to find corresponding variables.
    Reports errors when no corresponding offset is found.
    """
    # Deep copy of format_json so the original is not modified
    updated_json = copy.deepcopy(format_json)

    # Process each DB
    for db_idx, format_db in enumerate(format_json.get("dbs", [])):
        # Find the corresponding DB in data_json
        data_db = next((db for db in data_json.get("dbs", []) if db["name"] == format_db["name"]), None)
        if not data_db:
            print(f"Error: DB '{format_db['name']}' not found in data_json")
            continue  # No corresponding DB in data_json

        # Flatten the variables of both DBs
        flat_data_vars = flatten_db_structure(data_db)
        flat_format_vars = flatten_db_structure(format_db)

        # Map offset -> variable for the data side
        data_by_offset = {var["byte_offset"]: var for var in flat_data_vars}

        # For each variable in format, find its counterpart in data by offset
        for format_var in flat_format_vars:
            offset = format_var["byte_offset"]
            path = format_var["full_path"]

            # Look up the corresponding variable in data_json by offset
            if offset in data_by_offset:
                data_var = data_by_offset[offset]

                # Locate the original variable in the hierarchical structure
                path_parts = format_var["full_path"].split('.')
                current_node = updated_json["dbs"][db_idx]

                # Tracks whether the path could be resolved
                path_found = True

                # Walk the hierarchy down to the parent node
                for i in range(len(path_parts) - 1):
                    if "members" in current_node:
                        # Find the matching member
                        member_name = path_parts[i]
                        matching_members = [m for m in current_node["members"] if m["name"] == member_name]
                        if matching_members:
                            current_node = matching_members[0]
                        else:
                            print(f"Error: member '{member_name}' not found in path '{path}'")
                            path_found = False
                            break  # Path could not be resolved
                    elif "children" in current_node:
                        # Find the matching child
                        child_name = path_parts[i]
                        matching_children = [c for c in current_node["children"] if c["name"] == child_name]
                        if matching_children:
                            current_node = matching_children[0]
                        else:
                            print(f"Error: child '{child_name}' not found in path '{path}'")
                            path_found = False
                            break  # Path could not be resolved
                    else:
                        print(f"Error: cannot descend further along path '{path}'; current node has neither members nor children")
                        path_found = False
                        break  # Cannot navigate any further

                # If the parent node was found, update the child
                if path_found and ("members" in current_node or "children" in current_node):
                    target_list = current_node.get("members", current_node.get("children", []))
                    target_name = path_parts[-1]

                    # For an array element, extract the base name and the index
                    if '[' in target_name and ']' in target_name:
                        base_name = target_name.split('[')[0]
                        index_str = target_name[target_name.find('[')+1:target_name.find(']')]

                        # Find the underlying array variable
                        array_var = next((var for var in target_list if var["name"] == base_name), None)
                        if array_var:
                            # Make sure current_element_values exists
                            if "current_element_values" not in array_var:
                                array_var["current_element_values"] = {}

                            # Copy the array element's value
                            if "current_value" in data_var:
                                array_var["current_element_values"][index_str] = {
                                    "value": data_var["current_value"],
                                    "offset": data_var["byte_offset"]
                                }
                    else:
                        # Find the variable to update
                        target_var_found = False
                        for target_var in target_list:
                            if target_var["name"] == target_name:
                                target_var_found = True

                                # Clear initial_value, then copy it over if data has one
                                if "initial_value" in target_var:
                                    del target_var["initial_value"]
                                if "initial_value" in data_var and data_var["initial_value"] is not None:
                                    target_var["initial_value"] = data_var["initial_value"]

                                # Clear current_value, then copy it over if data has one
                                if "current_value" in target_var:
                                    del target_var["current_value"]
                                if "current_value" in data_var and data_var["current_value"] is not None:
                                    target_var["current_value"] = data_var["current_value"]

                                # Clear current_element_values, then copy it over if data has one
                                if "current_element_values" in target_var:
                                    del target_var["current_element_values"]
                                if "current_element_values" in data_var and data_var["current_element_values"]:
                                    target_var["current_element_values"] = copy.deepcopy(data_var["current_element_values"])

                                break

                        if not target_var_found and not ('[' in target_name and ']' in target_name):
                            print(f"Error: variable '{target_name}' not found in path '{path}'")
            else:
                # The offset does not exist in data_json; report it
                print(f"Error: offset {offset} (for '{path}') not found in the source (_data) data")

                # Remove values when the variable is not an array element
                if '[' not in path or ']' not in path:
                    # Locate the original variable in the hierarchical structure
                    path_parts = path.split('.')
                    current_node = updated_json["dbs"][db_idx]

                    # Walk down to the parent node to clear its values
                    path_found = True
                    for i in range(len(path_parts) - 1):
                        if "members" in current_node:
                            member_name = path_parts[i]
                            matching_members = [m for m in current_node["members"] if m["name"] == member_name]
                            if matching_members:
                                current_node = matching_members[0]
                            else:
                                path_found = False
                                break
                        elif "children" in current_node:
                            child_name = path_parts[i]
                            matching_children = [c for c in current_node["children"] if c["name"] == child_name]
                            if matching_children:
                                current_node = matching_children[0]
                            else:
                                path_found = False
                                break
                        else:
                            path_found = False
                            break

                    if path_found and ("members" in current_node or "children" in current_node):
                        target_list = current_node.get("members", current_node.get("children", []))
                        target_name = path_parts[-1]

                        for target_var in target_list:
                            if target_var["name"] == target_name:
                                # Remove the initial and current values
                                if "initial_value" in target_var:
                                    del target_var["initial_value"]
                                if "current_value" in target_var:
                                    del target_var["current_value"]
                                if "current_element_values" in target_var:
                                    del target_var["current_element_values"]
                                break

    return updated_json
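
# Merge semantics in brief: the _format structure wins, the _data values win.
# A format variable whose byte offset exists in _data receives that variable's
# initial_value / current_value; a format offset missing from _data is
# reported and its stale values are stripped instead.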


def process_updated_json(updated_json: Dict, updated_json_path: str, working_dir: str, documentation_dir: str, original_format_file: str):
    """
    Generates the markdown and S7 files from the updated JSON, and copies the
    S7 file to the working directory with the correct extension.
    """
    # Get the base name and the original extension
    format_file_name = os.path.basename(original_format_file)
    base_name = format_file_name.replace("_format", "_updated").split('.')[0]
    original_extension = os.path.splitext(format_file_name)[1]  # .db or .awl

    # Generate a markdown file for documentation
    # (note: the filename does not include the DB name, so if a source file
    # holds several DBs, each iteration overwrites the same markdown file)
    for db in updated_json.get("dbs", []):
        md_output_filename = os.path.join(documentation_dir, f"{base_name}.md")
        try:
            md_lines = []
            md_lines.append(f"# S7 Documentation for {base_name}")
            md_lines.append(f"_Source JSON: {os.path.basename(updated_json_path)}_")
            md_lines.append("")

            # Build the markdown table with generate_markdown_table imported from x4
            db_md_lines = generate_markdown_table(db)
            md_lines.extend(db_md_lines)

            with open(md_output_filename, 'w', encoding='utf-8') as f:
                for line in md_lines:
                    f.write(line + "\n")
            print(f"Markdown file generated: {md_output_filename}")
        except Exception as e:
            print(f"Error generating markdown for {base_name}: {e}")

    # Generate the S7 source code file
    s7_txt_filename = os.path.join(documentation_dir, f"{base_name}.txt")
    try:
        s7_lines = generate_s7_source_code_lines(updated_json)
        with open(s7_txt_filename, 'w', encoding='utf-8') as f:
            for line in s7_lines:
                f.write(line + "\n")
        print(f"S7 file generated: {s7_txt_filename}")

        # Copy to the working directory with the original extension
        s7_output_filename = os.path.join(working_dir, f"{base_name}{original_extension}")
        shutil.copy2(s7_txt_filename, s7_output_filename)
        print(f"S7 file copied to: {s7_output_filename}")
    except Exception as e:
        print(f"Error generating the S7 file for {base_name}: {e}")


def generate_comparison_excel(format_json: Dict, data_json: Dict, updated_json: Dict, excel_filename: str):
    """
    Generates an Excel file with two sheets per DB comparing the initial and
    current values across format_json, data_json and updated_json.
    Filters out STRUCTs and only compares variables that hold real values.

    Args:
        format_json: JSON with the format structure and names
        data_json: JSON with the source data
        updated_json: JSON with the updated data
        excel_filename: path of the Excel file to generate
    """
    import openpyxl
    from openpyxl.utils import get_column_letter
    from openpyxl.styles import PatternFill, Font

    # Create a new Excel workbook
    workbook = openpyxl.Workbook()

    # Styles used to highlight differences
    diff_fill = PatternFill(start_color="FFFF00", end_color="FFFF00", fill_type="solid")  # Yellow
    header_font = Font(bold=True)

    # Process each DB
    for db_idx, format_db in enumerate(format_json.get("dbs", [])):
        # Find the corresponding DBs
        db_name = format_db["name"]
        data_db = next((db for db in data_json.get("dbs", []) if db["name"] == db_name), None)
        updated_db = next((db for db in updated_json.get("dbs", []) if db["name"] == db_name), None)

        if not data_db or not updated_db:
            print(f"Error: DB '{db_name}' not found in one of the JSON files")
            continue

        # Create the initial-values and current-values sheets for this DB
        initial_sheet = workbook.active if db_idx == 0 else workbook.create_sheet()
        initial_sheet.title = f"{db_name}_Initial"[:31]  # Sheet names are capped at 31 chars

        current_sheet = workbook.create_sheet()
        current_sheet.title = f"{db_name}_Current"[:31]

        # Flatten the variables of the three DBs
        flat_format_vars = flatten_db_structure(format_db)
        flat_data_vars = flatten_db_structure(data_db)
        flat_updated_vars = flatten_db_structure(updated_db)

        # Filter out STRUCTs - only keep variables that hold real values
        flat_format_vars = [var for var in flat_format_vars
                            if var["data_type"].upper() != "STRUCT" and not var.get("children")]

        # Offset -> variable maps for fast lookups
        data_by_offset = {var["byte_offset"]: var for var in flat_data_vars
                          if var["data_type"].upper() != "STRUCT" and not var.get("children")}
        updated_by_offset = {var["byte_offset"]: var for var in flat_updated_vars
                             if var["data_type"].upper() != "STRUCT" and not var.get("children")}

        # Headers for the initial-values sheet
        headers_initial = ["Address", "Name", "Type", "Format Initial", "Data Initial", "Updated Initial", "Difference"]
        for col_num, header in enumerate(headers_initial, 1):
            cell = initial_sheet.cell(row=1, column=col_num, value=header)
            cell.font = header_font

        # Headers for the current-values sheet
        headers_current = ["Address", "Name", "Type", "Format Current", "Data Current", "Updated Current", "Difference"]
        for col_num, header in enumerate(headers_current, 1):
            cell = current_sheet.cell(row=1, column=col_num, value=header)
            cell.font = header_font

        # Fill the sheets with data
        initial_row = 2
        current_row = 2

        for format_var in flat_format_vars:
            offset = format_var["byte_offset"]
            path = format_var["full_path"]
            data_type = format_data_type_for_source(format_var)
            address = format_var.get("address_display", format_address_for_display(offset, format_var.get("bit_size", 0)))

            # Fetch the corresponding variables by offset
            data_var = data_by_offset.get(offset)
            updated_var = updated_by_offset.get(offset)

            # Initial values (only if the variable can carry an initial_value)
            format_initial = format_var.get("initial_value", "")
            data_initial = data_var.get("initial_value", "") if data_var else ""
            updated_initial = updated_var.get("initial_value", "") if updated_var else ""

            # Only add a row to the initial-values sheet if at least one side has a value
            if format_initial or data_initial or updated_initial:
                # Check whether the initial values differ
                has_initial_diff = (format_initial != data_initial or
                                    format_initial != updated_initial or
                                    data_initial != updated_initial)

                # Write the initial-value row
                initial_sheet.cell(row=initial_row, column=1, value=address)
                initial_sheet.cell(row=initial_row, column=2, value=path)
                initial_sheet.cell(row=initial_row, column=3, value=data_type)
                initial_sheet.cell(row=initial_row, column=4, value=str(format_initial))
                initial_sheet.cell(row=initial_row, column=5, value=str(data_initial))
                initial_sheet.cell(row=initial_row, column=6, value=str(updated_initial))

                # Highlight differing initial values
                if has_initial_diff:
                    initial_sheet.cell(row=initial_row, column=7, value="Yes")
                    for col in range(4, 7):
                        initial_sheet.cell(row=initial_row, column=col).fill = diff_fill
                else:
                    initial_sheet.cell(row=initial_row, column=7, value="No")

                initial_row += 1

            # Current values
            format_current = format_var.get("current_value", "")
            data_current = data_var.get("current_value", "") if data_var else ""
            updated_current = updated_var.get("current_value", "") if updated_var else ""

            # Only add a row to the current-values sheet if at least one side has a value
            if format_current or data_current or updated_current:
                # Check whether the current values differ
                has_current_diff = (format_current != data_current or
                                    format_current != updated_current or
                                    data_current != updated_current)

                # Write the current-value row
                current_sheet.cell(row=current_row, column=1, value=address)
                current_sheet.cell(row=current_row, column=2, value=path)
                current_sheet.cell(row=current_row, column=3, value=data_type)
                current_sheet.cell(row=current_row, column=4, value=str(format_current))
                current_sheet.cell(row=current_row, column=5, value=str(data_current))
                current_sheet.cell(row=current_row, column=6, value=str(updated_current))

                # Highlight differing current values
                if has_current_diff:
                    current_sheet.cell(row=current_row, column=7, value="Yes")
                    for col in range(4, 7):
                        current_sheet.cell(row=current_row, column=col).fill = diff_fill
                else:
                    current_sheet.cell(row=current_row, column=7, value="No")

                current_row += 1

            # For arrays, also process the per-element values
            if format_var.get("current_element_values") or (data_var and data_var.get("current_element_values")) or (updated_var and updated_var.get("current_element_values")):
                format_elements = format_var.get("current_element_values", {})
                data_elements = data_var.get("current_element_values", {}) if data_var else {}
                updated_elements = updated_var.get("current_element_values", {}) if updated_var else {}

                # Union of all available indices
                all_indices = set(list(format_elements.keys()) +
                                  list(data_elements.keys()) +
                                  list(updated_elements.keys()))

                # Sort the indices numerically
                sorted_indices = sorted(all_indices, key=lambda x: [int(i) for i in x.split(',')]) if all_indices else []

                for idx in sorted_indices:
                    elem_path = f"{path}[{idx}]"

                    # Current values of the array elements
                    format_elem_val = ""
                    if idx in format_elements:
                        if isinstance(format_elements[idx], dict) and "value" in format_elements[idx]:
                            format_elem_val = format_elements[idx]["value"]
                        else:
                            format_elem_val = format_elements[idx]

                    data_elem_val = ""
                    if idx in data_elements:
                        if isinstance(data_elements[idx], dict) and "value" in data_elements[idx]:
                            data_elem_val = data_elements[idx]["value"]
                        else:
                            data_elem_val = data_elements[idx]

                    updated_elem_val = ""
                    if idx in updated_elements:
                        if isinstance(updated_elements[idx], dict) and "value" in updated_elements[idx]:
                            updated_elem_val = updated_elements[idx]["value"]
                        else:
                            updated_elem_val = updated_elements[idx]

                    # Check whether the element values differ
                    has_elem_diff = (str(format_elem_val) != str(data_elem_val) or
                                     str(format_elem_val) != str(updated_elem_val) or
                                     str(data_elem_val) != str(updated_elem_val))

                    # Write the array-element row (current-values sheet only)
                    current_sheet.cell(row=current_row, column=1, value=address)
                    current_sheet.cell(row=current_row, column=2, value=elem_path)
                    current_sheet.cell(row=current_row, column=3, value=data_type.replace("ARRAY", "").strip())
                    current_sheet.cell(row=current_row, column=4, value=str(format_elem_val))
                    current_sheet.cell(row=current_row, column=5, value=str(data_elem_val))
                    current_sheet.cell(row=current_row, column=6, value=str(updated_elem_val))

                    # Highlight differences
                    if has_elem_diff:
                        current_sheet.cell(row=current_row, column=7, value="Yes")
                        for col in range(4, 7):
                            current_sheet.cell(row=current_row, column=col).fill = diff_fill
                    else:
                        current_sheet.cell(row=current_row, column=7, value="No")

                    current_row += 1

        # Auto-fit the column widths
        for sheet in [initial_sheet, current_sheet]:
            for col_idx, column_cells in enumerate(sheet.columns, 1):
                max_length = 0
                column = get_column_letter(col_idx)
                for cell in column_cells:
                    try:
                        if len(str(cell.value)) > max_length:
                            max_length = len(str(cell.value))
                    except Exception:
                        pass
                adjusted_width = min(max_length + 2, 100)  # Cap the maximum width
                sheet.column_dimensions[column].width = adjusted_width

    # Save the Excel file
    try:
        workbook.save(excel_filename)
        print(f"Excel comparison file generated: {excel_filename}")
    except Exception as e:
        print(f"Error writing the Excel file {excel_filename}: {e}")


def main():
    working_dir = find_working_directory()
    print(f"Using working directory: {working_dir}")

    output_json_dir = os.path.join(working_dir, "json")
    documentation_dir = os.path.join(working_dir, "documentation")
    os.makedirs(output_json_dir, exist_ok=True)
    os.makedirs(documentation_dir, exist_ok=True)
    print(f"JSON files will be saved to: {output_json_dir}")
    print(f"Documentation files will be saved to: {documentation_dir}")

    matched_pairs = find_matching_files(working_dir)

    if not matched_pairs:
        print("No _data/_format file pairs found to process.")
        return

    print(f"Found {len(matched_pairs)} file pairs to process.")

    for data_file, format_file in matched_pairs:
        print("\n--- Processing file pair ---")
        print(f"Data file: {os.path.basename(data_file)}")
        print(f"Format file: {os.path.basename(format_file)}")

        # Parse the files into JSON
        data_json, format_json = parse_files_to_json(data_file, format_file, output_json_dir)

        # Check compatibility using the flattened lists
        all_compatible = True
        for db_idx, format_db in enumerate(format_json.get("dbs", [])):
            # Find the corresponding DB in data_json
            data_db = next((db for db in data_json.get("dbs", []) if db["name"] == format_db["name"]), None)
            if not data_db:
                print(f"Error: DB '{format_db['name']}' not found in the data file")
                all_compatible = False
                continue

            # Flatten the variables of both DBs
            flat_data_vars = flatten_db_structure(data_db)
            flat_format_vars = flatten_db_structure(format_db)

            print(f"Comparing structures for DB '{format_db['name']}': {len(flat_data_vars)} variables in _data, {len(flat_format_vars)} variables in _format")
            compatible, issues = compare_structures_by_offset(flat_data_vars, flat_format_vars)

            if not compatible:
                all_compatible = False
                print(f"\nCompatibility problems found in DB '{format_db['name']}':")
                for issue in issues:
                    print(f"  - {issue}")
                print("Aborting the process for this DB.")

        if all_compatible:
            print("\nThe files are compatible. Creating the _updated file...")

            # Create the updated JSON
            updated_json = create_updated_json(data_json, format_json)

            # Save the updated version
            base_name = os.path.basename(format_file).replace("_format", "").split('.')[0]
            updated_json_path = os.path.join(output_json_dir, f"{base_name}_updated.json")

            with open(updated_json_path, "w", encoding='utf-8') as f:
                json.dump(updated_json, f, default=custom_json_serializer, indent=2)

            print(f"_updated file generated: {updated_json_path}")

            # Generate the Excel comparison file
            comparison_excel_path = os.path.join(documentation_dir, f"{base_name}_comparison.xlsx")
            generate_comparison_excel(format_json, data_json, updated_json, comparison_excel_path)

            # Process the updated JSON to generate the markdown and S7 files
            process_updated_json(updated_json, updated_json_path, working_dir, documentation_dir, format_file)

    print("\n--- Process completed ---")


if __name__ == "__main__":
    main()