# --- x4.py (Modificaciones v_final_2) ---

import json
from typing import List, Dict, Any


# format_data_type_for_source (unchanged from the earlier v5)
def format_data_type_for_source(var_info: Dict[str, Any]) -> str:
    """Render the S7 source-text type declaration for one variable.

    Prefers ``udt_source_name`` over the raw ``data_type``, prefixes an
    ``ARRAY [lo..hi,...] OF`` clause when dimensions are present, and
    appends ``[n]`` for STRINGs that carry an explicit length.
    """
    base_type = var_info.get("udt_source_name") or var_info["data_type"]

    parts = []
    dims = var_info.get("array_dimensions")
    if dims:
        bounds = ",".join(f"{d['lower_bound']}..{d['upper_bound']}" for d in dims)
        parts.append(f"ARRAY [{bounds}] OF ")
    parts.append(base_type)

    # STRING length applies even inside an ARRAY declaration.
    if var_info["data_type"].upper() == "STRING" and var_info.get("string_length") is not None:
        parts.append(f"[{var_info['string_length']}]")
    return "".join(parts)
def generate_variable_declaration_for_source(var_info: Dict[str, Any], indent_level: int) -> str:
    """Build a single S7 source declaration line for *var_info*.

    Appends an ``:= value`` initializer when one is present (booleans
    become TRUE/FALSE), terminates with ``;`` unless this variable is
    the header of an inline multi-line STRUCT, then tacks on any
    trailing ``// comment``.
    """
    pieces = ["  " * indent_level, var_info["name"], " : ",
              format_data_type_for_source(var_info)]

    init = var_info.get("initial_value")
    if init is not None:
        if isinstance(init, bool):
            rendered = "TRUE" if init else "FALSE"
        else:
            rendered = str(init)
        pieces.append(f" := {rendered}")

    # An inline STRUCT with children opens a multi-line block, so its
    # header line carries no terminating semicolon.
    opens_struct_block = (var_info["data_type"].upper() == "STRUCT"
                          and not var_info.get("udt_source_name")
                          and var_info.get("children"))
    if not opens_struct_block:
        pieces.append(";")

    if var_info.get("comment"):
        pieces.append(f'\t// {var_info["comment"]}')
    return "".join(pieces)
def generate_struct_members_for_source(members: List[Dict[str, Any]], indent_level: int) -> List[str]:
    """Emit S7 declaration lines for *members*, recursing into inline STRUCTs.

    Members flagged ``is_udt_expanded_member`` are skipped: they are the
    expansion of a UDT instance, not standalone declarations.
    """
    out: List[str] = []
    pad = "  " * indent_level
    for member in members:
        if member.get("is_udt_expanded_member"):
            continue
        inline_struct = (member["data_type"].upper() == "STRUCT"
                         and not member.get("udt_source_name")
                         and member.get("children"))
        if inline_struct:
            out.append(f'{pad}{member["name"]} : STRUCT')  # header: no ';'
            out.extend(generate_struct_members_for_source(member["children"], indent_level + 1))
            out.append(f'{pad}END_STRUCT;')                # terminator keeps ';'
        else:
            out.append(generate_variable_declaration_for_source(member, indent_level))
    return out
def generate_begin_block_assignments(db_info: Dict[str, Any], indent_level: int) -> List[str]:
    """Produce the ``path := value;`` lines for a DB's BEGIN block.

    Relies on the ordered assignment list the upstream parser (x3.py)
    stores under ``_begin_block_assignments_ordered``; when it is
    missing, a warning is printed and no lines are produced.
    """
    pad = "  " * indent_level
    ordered = db_info.get("_begin_block_assignments_ordered")

    if not (ordered and isinstance(ordered, list)):
        print(f"ADVERTENCIA: '_begin_block_assignments_ordered' no encontrado para DB '{db_info['name']}'. "
              "El bloque BEGIN puede estar incompleto o desordenado si se usa el fallback.")
        # (A fallback over _generate_assignments_recursive_from_current_values
        # could be inserted here if ever needed.)
        return []

    print(f"INFO: Usando '_begin_block_assignments_ordered' para generar bloque BEGIN de DB '{db_info['name']}'.")
    result: List[str] = []
    for path, raw_value in ordered:
        text = str(raw_value)
        # Normalize boolean renderings to the S7 keywords.
        lowered = text.lower()
        if lowered == "true":
            text = "TRUE"
        elif lowered == "false":
            text = "FALSE"
        result.append(f"{pad}{path} := {text};")  # assignments always end with ';'
    return result
def generate_s7_source_code_lines(data: Dict[str, Any]) -> List[str]:
    """Reassemble the full S7 source (all UDTs, then all DBs) as text lines.

    Semicolon placement follows the S7 source convention used throughout
    this file: STRUCT / BEGIN headers and END_TYPE / END_DATA_BLOCK carry
    no ';', END_STRUCT does.
    """
    out: List[str] = []

    for udt in data.get("udts", []):
        out.append(f'TYPE "{udt["name"]}"')
        if udt.get("family"):
            out.append(f'  FAMILY : {udt["family"]}')    # no ';'
        if udt.get("version"):
            out.append(f'  VERSION : {udt["version"]}')  # no ';'
        out.append("")
        out.append("  STRUCT")                           # no ';'
        out.extend(generate_struct_members_for_source(udt["members"], 2))
        out.append("  END_STRUCT;")
        out.append('END_TYPE')                           # no ';'
        out.append("")

    for db in data.get("dbs", []):
        out.append(f'DATA_BLOCK "{db["name"]}"')
        if db.get("title"):
            # TITLE = { ... } is emitted verbatim and without ';'
            out.append(f'  TITLE = {db["title"]}')
        if db.get("family"):
            out.append(f'  FAMILY : {db["family"]}')     # no ';'
        if db.get("version"):
            out.append(f'  VERSION : {db["version"]}')   # no ';'
        out.append("")
        out.append("  STRUCT")                           # no ';'
        out.extend(generate_struct_members_for_source(db["members"], 2))
        out.append("  END_STRUCT;")

        # Assignments indented one level inside the BEGIN block.
        assignments = generate_begin_block_assignments(db, 1)
        if assignments:
            out.append("BEGIN")                          # no ';'
            out.extend(assignments)

        out.append('END_DATA_BLOCK')                     # no ';'
        out.append("")

    return out
# generate_markdown_table (unchanged from the earlier v5)
def generate_markdown_table(db_info: Dict[str, Any]) -> List[str]:
    """Build a Markdown documentation table for a single DB.

    Emits a title plus one table row per leaf variable, flattening nested
    STRUCTs and expanded UDT instances into dotted names.
    """
    lines = []
    lines.append(f"# Documentación para DB: {db_info['name']}")
    lines.append("")
    lines.append("| Address | Name | Type | Initial Value | Actual Value | Comment |")
    lines.append("|---|---|---|---|---|---|")
    # Ids of UDT-expansion members already emitted, so revisiting the same
    # expansion does not produce duplicate rows.
    processed_expanded_members = set()

    def flatten_members_for_markdown(members: List[Dict[str, Any]], prefix: str = "", base_offset: float = 0.0, is_expansion: bool = False):
        # Recursively walk `members` and return Markdown rows. `prefix` is the
        # dotted path of the enclosing containers. NOTE(review): `base_offset`
        # is received but never read here — offsets appear to be absolute
        # already; confirm against the producer (x3.py).
        md_lines = []
        for var_idx, var in enumerate(members):
            member_id = f"{prefix}{var['name']}_{var_idx}"
            if is_expansion and member_id in processed_expanded_members: continue
            if is_expansion: processed_expanded_members.add(member_id)
            name_for_display = f"{prefix}{var['name']}"
            # Float offsets are rendered with one decimal place; others verbatim.
            address = f"{var['byte_offset']:.1f}" if isinstance(var['byte_offset'], float) else str(var['byte_offset'])
            # Bit-sized members: a float offset with a fractional part is kept
            # as-is (presumably it already encodes the bit position — TODO
            # confirm); otherwise the address is forced to a ".0" suffix.
            if var.get("bit_size", 0) > 0 and isinstance(var['byte_offset'], float) and var['byte_offset'] != int(var['byte_offset']): pass
            elif var.get("bit_size", 0) > 0 : address = f"{int(var['byte_offset'])}.0"
            data_type_str = format_data_type_for_source(var)
            # Escape Markdown table delimiters and flatten newlines in cell text.
            initial_value = str(var.get("initial_value", "")).replace("|", "\\|").replace("\n", " ")
            actual_value = str(var.get("current_value", "")).replace("|", "\\|").replace("\n", " ")
            comment = str(var.get("comment", "")).replace("|", "\\|").replace("\n", " ")
            is_struct_container = var["data_type"].upper() == "STRUCT" and not var.get("udt_source_name") and var.get("children")
            is_udt_instance_container = bool(var.get("udt_source_name")) and var.get("children")
            # Precedence note: this reads as (not struct AND not udt-instance)
            # OR is_udt_expanded_member — i.e. emit rows for leaves, and for
            # members that came from a UDT expansion.
            if not is_struct_container and not is_udt_instance_container or var.get("is_udt_expanded_member"):
                md_lines.append(f"| {address} | {name_for_display} | {data_type_str} | {initial_value} | {actual_value} | {comment} |")
            # Recurse into children; mark the walk as an expansion when the
            # container is a UDT instance so duplicates are suppressed above.
            if var.get("children"):
                md_lines.extend(flatten_members_for_markdown(var["children"],
                                                             f"{name_for_display}.",
                                                             var['byte_offset'],
                                                             is_expansion=bool(var.get("udt_source_name"))))
        return md_lines

    lines.extend(flatten_members_for_markdown(db_info.get("members", [])))
    return lines
def main():
    """Load the parsed JSON, rebuild the S7 source file, then write one
    Markdown documentation file per DB."""
    json_input_filename = "parsed_s7_data_stat.json"  # expects the JSON emitted by x3_v_final_2
    s7_output_filename = "reconstructed_s7_source_stat.txt"

    try:
        with open(json_input_filename, 'r', encoding='utf-8') as f:
            data_from_json = json.load(f)
        print(f"Archivo JSON '{json_input_filename}' cargado correctamente.")
    except Exception as e:
        print(f"Error al cargar/leer {json_input_filename}: {e}")
        return

    s7_code_lines = generate_s7_source_code_lines(data_from_json)
    try:
        with open(s7_output_filename, 'w', encoding='utf-8') as f:
            for line in s7_code_lines:
                f.write(line + "\n")
        print(f"Archivo S7 reconstruido generado: {s7_output_filename}")
    except Exception as e:
        print(f"Error al escribir el archivo S7 {s7_output_filename}: {e}")

    if not data_from_json.get("dbs"):
        print("No se encontraron DBs en el archivo JSON para generar documentación.")
        return

    for db_to_document in data_from_json["dbs"]:
        # Sanitize the DB name into a filesystem-safe filename.
        db_name_safe = db_to_document['name'].replace('"', '').replace(' ', '_').replace('/', '_')
        md_filename_specific = f"documentation_db_{db_name_safe}.md"
        print(f"\nGenerando documentación Markdown para DB: {db_to_document['name']}...")
        markdown_lines = generate_markdown_table(db_to_document)
        try:
            with open(md_filename_specific, 'w', encoding='utf-8') as f:
                for line in markdown_lines:
                    f.write(line + "\n")
            print(f"Archivo Markdown de documentación generado: {md_filename_specific}")
        except Exception as e:
            print(f"Error al escribir {md_filename_specific}: {e}")
# Script entry point: run only when executed directly, not on import.
if __name__ == "__main__":
    main()