# --- x6.py ---
"""Generate Excel (.xlsx) documentation for each DB found in the JSON files
under <working_directory>/json, writing the resulting workbooks to
<working_directory>/documentation."""
import json
from typing import List, Dict, Any
import openpyxl  # For Excel export
from openpyxl.styles import Font
from openpyxl.utils import get_column_letter
import sys
import os
import glob  # To find the JSON input files

script_root = os.path.dirname(
    os.path.dirname(os.path.dirname(os.path.dirname(__file__)))
)
sys.path.append(script_root)
from backend.script_utils import load_configuration


def find_working_directory():
    configs = load_configuration()
    working_directory = configs.get("working_directory")
    if not working_directory:
        print("No working directory specified in the configuration file.")
        sys.exit(1)
    return working_directory


# format_data_type_for_source (copied from x4.py because it is needed here as well)
def format_data_type_for_source(var_info: Dict[str, Any]) -> str:
    base_type = var_info.get("udt_source_name") if var_info.get("udt_source_name") else var_info["data_type"]
    type_str = ""
    if var_info.get("array_dimensions"):
        dims_str = ",".join([f"{d['lower_bound']}..{d['upper_bound']}" for d in var_info["array_dimensions"]])
        type_str += f"ARRAY [{dims_str}] OF "
    type_str += base_type
    if var_info["data_type"].upper() == "STRING" and var_info.get("string_length") is not None:
        type_str += f"[{var_info['string_length']}]"
    return type_str


def generate_excel_table(db_info: Dict[str, Any], excel_filename: str):
    """Generates an Excel file with DB documentation."""
    workbook = openpyxl.Workbook()
    sheet = workbook.active

    db_name_safe = db_info['name'].replace('"', '').replace(' ', '_').replace('/', '_')
    sheet.title = f"DB_{db_name_safe}"[:31]  # Sheet names have a length limit

    headers = ["Address", "Name", "Type", "Initial Value", "Actual Value", "Comment"]
    for col_num, header in enumerate(headers, 1):
        cell = sheet.cell(row=1, column=col_num, value=header)
        cell.font = Font(bold=True)

    current_row = 2
    processed_expanded_members = set()  # To handle expanded UDT members correctly

    def flatten_members_for_excel(members: List[Dict[str, Any]], prefix: str = "",
                                  base_offset: float = 0.0, is_expansion: bool = False):
        nonlocal current_row
        for var_idx, var in enumerate(members):
            member_id = f"{prefix}{var['name']}_{var_idx}"  # Unique ID for processed check
            if is_expansion and member_id in processed_expanded_members:
                continue
            if is_expansion:
                processed_expanded_members.add(member_id)

            name_for_display = f"{prefix}{var['name']}"
            address = f"{var['byte_offset']:.1f}" if isinstance(var['byte_offset'], float) else str(var['byte_offset'])
            # Adjust address formatting for bits as in the markdown generation
            if var.get("bit_size", 0) > 0 and isinstance(var['byte_offset'], float) and var['byte_offset'] != int(var['byte_offset']):
                pass  # Already formatted like X.Y
            elif var.get("bit_size", 0) > 0:
                address = f"{int(var['byte_offset'])}.0"  # Ensure X.0 for bits at the start of a byte

            data_type_str = format_data_type_for_source(var)
            initial_value = str(var.get("initial_value", ""))
            actual_value = str(var.get("current_value", ""))
            comment = str(var.get("comment", ""))

            is_struct_container = (
                var["data_type"].upper() == "STRUCT"
                and not var.get("udt_source_name")
                and var.get("children")
            )
            is_udt_instance_container = bool(var.get("udt_source_name")) and var.get("children")

            # Plain members (and expanded UDT members) get their own row; pure containers do not.
            if (not is_struct_container and not is_udt_instance_container) or var.get("is_udt_expanded_member"):
                row_data = [address, name_for_display, data_type_str, initial_value, actual_value, comment]
                for col_num, value in enumerate(row_data, 1):
                    sheet.cell(row=current_row, column=col_num, value=value)
                current_row += 1

            if var.get("children"):
                flatten_members_for_excel(
                    var["children"],
                    f"{name_for_display}.",
                    var['byte_offset'],  # Pass the parent's offset (currently unused by the flattener)
                    is_expansion=bool(var.get("udt_source_name")),  # Mark if we are expanding a UDT
                )

    flatten_members_for_excel(db_info.get("members", []))

    # Auto-size columns for better readability
    for col_idx, column_cells in enumerate(sheet.columns, 1):
        max_length = 0
        column = get_column_letter(col_idx)
        for cell in column_cells:
            try:
                if len(str(cell.value)) > max_length:
                    max_length = len(str(cell.value))
            except Exception:
                pass
        adjusted_width = max_length + 2
        sheet.column_dimensions[column].width = adjusted_width

    try:
        workbook.save(excel_filename)
        print(f"Excel documentation generated: {excel_filename}")
    except Exception as e:
        print(f"Error writing Excel file {excel_filename}: {e}")
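
# The JSON files consumed by main() below are produced elsewhere; the sketch that
# follows is only an illustration inferred from the fields accessed above (names and
# values such as "DB_Example" or "Motor" are hypothetical):
#
# {
#   "dbs": [
#     {
#       "name": "DB_Example",
#       "members": [
#         {"name": "Counter", "data_type": "INT", "byte_offset": 0,
#          "initial_value": "0", "current_value": "42", "comment": "Cycle counter"},
#         {"name": "Motor", "data_type": "STRUCT", "udt_source_name": "UDT_Motor",
#          "byte_offset": 2.0,
#          "children": [
#            {"name": "Running", "data_type": "BOOL", "byte_offset": 2.0,
#             "bit_size": 1, "is_udt_expanded_member": true}
#          ]}
#       ]
#     }
#   ]
# }
#
# Optional member fields handled above include "array_dimensions" (with
# "lower_bound"/"upper_bound") and "string_length" for STRING types.
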
f"{name_for_display}.", var['byte_offset'], # Pass the parent's offset is_expansion=bool(var.get("udt_source_name"))) # Mark if we are expanding a UDT flatten_members_for_excel(db_info.get("members", [])) # Auto-size columns for better readability for col_idx, column_cells in enumerate(sheet.columns, 1): max_length = 0 column = get_column_letter(col_idx) for cell in column_cells: try: if len(str(cell.value)) > max_length: max_length = len(str(cell.value)) except: pass adjusted_width = (max_length + 2) sheet.column_dimensions[column].width = adjusted_width try: workbook.save(excel_filename) print(f"Excel documentation generated: {excel_filename}") except Exception as e: print(f"Error writing Excel file {excel_filename}: {e}") def main(): working_dir = find_working_directory() print(f"Using working directory: {working_dir}") input_json_dir = os.path.join(working_dir, "json") documentation_dir = os.path.join(working_dir, "documentation") os.makedirs(documentation_dir, exist_ok=True) print(f"Los archivos Excel de documentación se guardarán en: {documentation_dir}") json_files_to_process = glob.glob(os.path.join(input_json_dir, "*.json")) if not json_files_to_process: print(f"No se encontraron archivos .json en {input_json_dir}") return print(f"Archivos JSON encontrados para procesar: {len(json_files_to_process)}") for json_input_filepath in json_files_to_process: current_json_filename = os.path.basename(json_input_filepath) print(f"\n--- Procesando archivo JSON para Excel: {current_json_filename} ---") try: with open(json_input_filepath, 'r', encoding='utf-8') as f: data_from_json = json.load(f) print(f"Archivo JSON '{current_json_filename}' cargado correctamente.") except FileNotFoundError: print(f"Error: El archivo JSON de entrada '{current_json_filename}' no fue encontrado en {json_input_filepath}.") continue except json.JSONDecodeError: print(f"Error: El archivo JSON '{current_json_filename}' no tiene un formato JSON válido.") continue except Exception as e: print(f"Error al cargar/leer {current_json_filename}: {e}") continue if data_from_json.get("dbs"): for db_to_document in data_from_json["dbs"]: # Construir el path completo para el archivo Excel de salida excel_output_filename = os.path.join(documentation_dir, f"{current_json_filename}.xlsx") print(f"Generando documentación Excel para DB: '{db_to_document['name']}' (desde {current_json_filename}) -> {excel_output_filename}") try: generate_excel_table(db_to_document, excel_output_filename) except Exception as e_excel: print(f"Error al generar Excel para DB '{db_to_document['name']}': {e_excel}") else: print(f"No se encontraron DBs en el archivo JSON '{current_json_filename}' para generar documentación Excel.") print("\n--- Proceso de generación de documentación Excel completado ---") if __name__ == "__main__": main()