# --- x4.py (modifications v_final_2) ---
import json
import sys
import os
import glob  # used to locate the JSON input files
from typing import List, Dict, Any

# Make the project root importable so that backend.script_utils resolves.
script_root = os.path.dirname(
    os.path.dirname(os.path.dirname(os.path.dirname(__file__)))
)
sys.path.append(script_root)
from backend.script_utils import load_configuration


def find_working_directory():
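    """Read the shared configuration and return the working directory, exiting if it is not set."""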
    configs = load_configuration()
    working_directory = configs.get("working_directory")
    if not working_directory:
        print("No working directory specified in the configuration file.")
        sys.exit(1)
    return working_directory


# format_data_type_for_source (unchanged from the earlier v5)
def format_data_type_for_source(var_info: Dict[str, Any]) -> str:
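    """Build the S7 type string for a variable: UDT name or base type, with ARRAY [...] OF and STRING[n] decorations."""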
    base_type = var_info.get("udt_source_name") or var_info["data_type"]
    type_str = ""
    if var_info.get("array_dimensions"):
        dims_str = ",".join(f"{d['lower_bound']}..{d['upper_bound']}" for d in var_info["array_dimensions"])
        type_str += f"ARRAY [{dims_str}] OF "
    type_str += base_type
    if var_info["data_type"].upper() == "STRING" and var_info.get("string_length") is not None:
        type_str += f"[{var_info['string_length']}]"
    return type_str


def generate_variable_declaration_for_source(var_info: Dict[str, Any], indent_level: int) -> str:
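    """Render one variable declaration line, including initial value, trailing ';' and an optional // comment."""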
    indent_str = " " * indent_level
    type_declaration_str = format_data_type_for_source(var_info)
    line = f'{indent_str}{var_info["name"]} : {type_declaration_str}'
    if var_info.get("initial_value") is not None:
        initial_val = var_info["initial_value"]
        if isinstance(initial_val, bool):
            initial_val_str = "TRUE" if initial_val else "FALSE"
        else:
            initial_val_str = str(initial_val)
        line += f' := {initial_val_str}'

    is_multiline_struct_def = (var_info["data_type"].upper() == "STRUCT"
                               and not var_info.get("udt_source_name")
                               and var_info.get("children"))
    if not is_multiline_struct_def:  # only add ';' when this is not the header of a multi-line STRUCT
        line += ';'

    if var_info.get("comment"):
        line += f'\t// {var_info["comment"]}'
    return line


def generate_struct_members_for_source(members: List[Dict[str, Any]], indent_level: int) -> List[str]:
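    """Recursively render the declaration lines of a member list, expanding inline STRUCTs over multiple lines."""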
    lines = []
    for var_info in members:
        if var_info.get("is_udt_expanded_member"):
            continue
        if var_info["data_type"].upper() == "STRUCT" \
                and not var_info.get("udt_source_name") \
                and var_info.get("children"):
            current_indent_str = " " * indent_level
            lines.append(f'{current_indent_str}{var_info["name"]} : STRUCT')  # no ';' on the STRUCT header
            lines.extend(generate_struct_members_for_source(var_info["children"], indent_level + 1))
            lines.append(f'{current_indent_str}END_STRUCT;')  # ';' required after END_STRUCT
        else:
            lines.append(generate_variable_declaration_for_source(var_info, indent_level))
    return lines


def generate_begin_block_assignments(db_info: Dict[str, Any], indent_level: int) -> List[str]:
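    """Render the 'path := value;' lines of a DB's BEGIN block from the ordered assignment list in the JSON."""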
    indent_str = " " * indent_level
    lines = []
    # Use the ordered assignment list from the JSON, which x3.py should now populate.
    ordered_assignments = db_info.get("_begin_block_assignments_ordered")

    if ordered_assignments and isinstance(ordered_assignments, list):
        print(f"INFO: Using '_begin_block_assignments_ordered' to generate the BEGIN block of DB '{db_info['name']}'.")
        for path, value_obj in ordered_assignments:
            value_str = str(value_obj)
            if value_str.lower() == "true":
                value_str = "TRUE"
            elif value_str.lower() == "false":
                value_str = "FALSE"
            lines.append(f"{indent_str}{path} := {value_str};")  # assignments always end with ';'
    else:
        print(f"WARNING: '_begin_block_assignments_ordered' not found for DB '{db_info['name']}'. "
              "The BEGIN block may be incomplete or out of order if the fallback is used.")
        # (A fallback to _generate_assignments_recursive_from_current_values could go here if desired.)
        # fallback_lines = _generate_assignments_recursive_from_current_values(db_info.get("members", []), "", indent_str)
        # if fallback_lines: lines.extend(fallback_lines)

    return lines


def generate_s7_source_code_lines(data: Dict[str, Any]) -> List[str]:
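    """Reassemble the full S7 source text (TYPE blocks for UDTs, then DATA_BLOCK blocks) from the parsed JSON."""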
    lines = []
    for udt in data.get("udts", []):
        lines.append(f'TYPE "{udt["name"]}"')
        if udt.get("family"):
            lines.append(f' FAMILY : {udt["family"]}')  # no ';'
        if udt.get("version"):
            lines.append(f' VERSION : {udt["version"]}')  # no ';'
        lines.append("")
        lines.append(" STRUCT")  # no ';'
        lines.extend(generate_struct_members_for_source(udt["members"], 2))
        lines.append(" END_STRUCT;")  # with ';'
        lines.append('END_TYPE')  # no ';', per your last comment
        lines.append("")

    for db in data.get("dbs", []):
        lines.append(f'DATA_BLOCK "{db["name"]}"')
        if db.get("title"):  # TITLE = { ... } is emitted verbatim, no ';'
            lines.append(f' TITLE = {db["title"]}')
        if db.get("family"):
            lines.append(f' FAMILY : {db["family"]}')  # no ';'
        if db.get("version"):
            lines.append(f' VERSION : {db["version"]}')  # no ';'
        lines.append("")
        lines.append(" STRUCT")  # no ';'
        lines.extend(generate_struct_members_for_source(db["members"], 2))
        lines.append(" END_STRUCT;")  # with ';'

        begin_assignments = generate_begin_block_assignments(db, 1)  # indent level 1 for the assignments
        if begin_assignments:
            lines.append("BEGIN")  # no ';'
            lines.extend(begin_assignments)

        lines.append('END_DATA_BLOCK')  # no ';', per your last comment
        lines.append("")
    return lines


# generate_markdown_table (unchanged from v5)
def generate_markdown_table(db_info: Dict[str, Any]) -> List[str]:
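    """Produce a Markdown table documenting every member of one DB (address, name, type, values, comment)."""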
    lines = []
    lines.append(f"## Documentation for DB: {db_info['name']}")  # changed to H2 because one file may hold several DBs
    lines.append("")
    lines.append("| Address | Name | Type | Initial Value | Actual Value | Comment |")
    lines.append("|---|---|---|---|---|---|")
    processed_expanded_members = set()

    def flatten_members_for_markdown(members: List[Dict[str, Any]], prefix: str = "", base_offset: float = 0.0, is_expansion: bool = False):
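        """Walk the member tree depth-first and emit one table row per leaf, prefixing nested names with the parent path."""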
        md_lines = []
        for var_idx, var in enumerate(members):
            member_id = f"{prefix}{var['name']}_{var_idx}"
            if is_expansion and member_id in processed_expanded_members:
                continue
            if is_expansion:
                processed_expanded_members.add(member_id)
            name_for_display = f"{prefix}{var['name']}"
            address = f"{var['byte_offset']:.1f}" if isinstance(var['byte_offset'], float) else str(var['byte_offset'])
            # Bit variables with a fractional offset already read as BYTE.BIT; whole-byte bit offsets get ".0" appended.
            if var.get("bit_size", 0) > 0 and isinstance(var['byte_offset'], float) and var['byte_offset'] != int(var['byte_offset']):
                pass
            elif var.get("bit_size", 0) > 0:
                address = f"{int(var['byte_offset'])}.0"
            data_type_str = format_data_type_for_source(var)
            initial_value = str(var.get("initial_value", "")).replace("|", "\\|").replace("\n", " ")
            actual_value = str(var.get("current_value", "")).replace("|", "\\|").replace("\n", " ")
            comment = str(var.get("comment", "")).replace("|", "\\|").replace("\n", " ")
            is_struct_container = var["data_type"].upper() == "STRUCT" and not var.get("udt_source_name") and var.get("children")
            is_udt_instance_container = bool(var.get("udt_source_name")) and var.get("children")
            if (not is_struct_container and not is_udt_instance_container) or var.get("is_udt_expanded_member"):
                md_lines.append(f"| {address} | {name_for_display} | {data_type_str} | {initial_value} | {actual_value} | {comment} |")
            if var.get("children"):
                md_lines.extend(flatten_members_for_markdown(var["children"],
                                                             f"{name_for_display}.",
                                                             var['byte_offset'],
                                                             is_expansion=bool(var.get("udt_source_name"))))
        return md_lines

    lines.extend(flatten_members_for_markdown(db_info.get("members", [])))
    return lines


def main():
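    """For each JSON file in <working_dir>/json, write a reconstructed S7 source (.txt) and a Markdown documentation file (.md)."""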
    working_dir = find_working_directory()
    print(f"Using working directory: {working_dir}")

    input_json_dir = os.path.join(working_dir, "json")
    documentation_dir = os.path.join(working_dir, "documentation")
    os.makedirs(documentation_dir, exist_ok=True)
    print(f"Generated documentation files will be saved to: {documentation_dir}")

    json_files_to_process = glob.glob(os.path.join(input_json_dir, "*.json"))

    if not json_files_to_process:
        print(f"No .json files found in {input_json_dir}")
        return

    print(f"JSON files found to process: {len(json_files_to_process)}")

    for json_input_filepath in json_files_to_process:
        json_filename_base = os.path.splitext(os.path.basename(json_input_filepath))[0]
        current_json_filename = os.path.basename(json_input_filepath)
        print(f"\n--- Processing JSON file: {current_json_filename} ---")

        s7_output_filename = os.path.join(documentation_dir, f"{json_filename_base}.txt")
        md_output_filename = os.path.join(documentation_dir, f"{json_filename_base}.md")

        try:
            with open(json_input_filepath, 'r', encoding='utf-8') as f:
                data_from_json = json.load(f)
            print(f"JSON file '{current_json_filename}' loaded successfully.")
        except Exception as e:
            print(f"Error loading/reading {current_json_filename}: {e}")
            continue  # skip to the next JSON file

        # Generate the S7 source file (.txt)
        s7_code_lines = generate_s7_source_code_lines(data_from_json)
        try:
            with open(s7_output_filename, 'w', encoding='utf-8') as f:
                for line in s7_code_lines:
                    f.write(line + "\n")
            print(f"Reconstructed S7 file generated: {s7_output_filename}")
        except Exception as e:
            print(f"Error writing S7 file {s7_output_filename}: {e}")

        # Generate the Markdown file (.md) covering all DBs in this JSON
        all_db_markdown_lines = []
        if data_from_json.get("dbs"):
            all_db_markdown_lines.append(f"# S7 documentation for {json_filename_base}")
            all_db_markdown_lines.append(f"_JSON source: {current_json_filename}_")
            all_db_markdown_lines.append("")

            for db_index, db_to_document in enumerate(data_from_json["dbs"]):
                if db_index > 0:
                    all_db_markdown_lines.append("\n---\n")  # visual separator between DBs

                markdown_lines_for_one_db = generate_markdown_table(db_to_document)
                all_db_markdown_lines.extend(markdown_lines_for_one_db)
                all_db_markdown_lines.append("")  # spacing after each DB table

            try:
                with open(md_output_filename, 'w', encoding='utf-8') as f:
                    for line in all_db_markdown_lines:
                        f.write(line + "\n")
                print(f"Markdown documentation file generated: {md_output_filename}")
            except Exception as e:
                print(f"Error writing Markdown file {md_output_filename}: {e}")
        else:
            print(f"No DBs found in {current_json_filename} to generate Markdown documentation.")
            # Optionally, create an MD file with a short notice
            with open(md_output_filename, 'w', encoding='utf-8') as f:
                f.write(f"# S7 documentation for {json_filename_base}\n\n_JSON source: {current_json_filename}_\n\nNo Data Blocks (DBs) were found in this JSON file.\n")
            print(f"Markdown file generated (no DBs): {md_output_filename}")

    print("\n--- Documentation generation completed ---")


if __name__ == "__main__":
    main()