From 546705f8caee2474371d0c7456b890ac68d8b714 Mon Sep 17 00:00:00 2001 From: Miguel Date: Sun, 20 Apr 2025 17:57:48 +0200 Subject: [PATCH] Funciona con UDT y Tags --- ToUpload/parsers/__init__.py | 0 ToUpload/parsers/parse_lad_fbd.py | 548 ++++++++ ToUpload/parsers/parse_scl.py | 253 ++++ ToUpload/parsers/parse_stl.py | 278 ++++ ToUpload/parsers/parser_utils.py | 478 +++++++ ToUpload/processors/process_call.py | 121 +- ToUpload/x0_main.py | 256 ++-- ToUpload/x1_to_json.py | 1777 +++++------------------- ToUpload/x2_process.py | 148 +- ToUpload/x3_generate_scl.py | 655 ++++++--- create_processor_files.py | 144 -- parsers/__init__.py | 0 parsers/parse_lad_fbd.py | 548 ++++++++ parsers/parse_scl.py | 253 ++++ parsers/parse_stl.py | 278 ++++ parsers/parser_utils.py | 387 ++++++ paste.py | 514 ++++--- x1_to_json.py | 1955 ++++++--------------------- x2_process.py | 286 ++-- x3_generate_scl.py | 451 +++--- 20 files changed, 5071 insertions(+), 4259 deletions(-) create mode 100644 ToUpload/parsers/__init__.py create mode 100644 ToUpload/parsers/parse_lad_fbd.py create mode 100644 ToUpload/parsers/parse_scl.py create mode 100644 ToUpload/parsers/parse_stl.py create mode 100644 ToUpload/parsers/parser_utils.py delete mode 100644 create_processor_files.py create mode 100644 parsers/__init__.py create mode 100644 parsers/parse_lad_fbd.py create mode 100644 parsers/parse_scl.py create mode 100644 parsers/parse_stl.py create mode 100644 parsers/parser_utils.py diff --git a/ToUpload/parsers/__init__.py b/ToUpload/parsers/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/ToUpload/parsers/parse_lad_fbd.py b/ToUpload/parsers/parse_lad_fbd.py new file mode 100644 index 0000000..f3e7ad9 --- /dev/null +++ b/ToUpload/parsers/parse_lad_fbd.py @@ -0,0 +1,548 @@ +# ToUpload/parsers/parse_lad_fbd.py +# -*- coding: utf-8 -*- +from lxml import etree +from collections import defaultdict +import copy +import traceback + +# Importar desde las utilidades del parser +from .parser_utils import ( + ns, + parse_access, + parse_part, + parse_call, + get_multilingual_text, +) + +# Sufijo usado en x2 para identificar instrucciones procesadas (útil para EN/ENO) +SCL_SUFFIX = "_sympy_processed" # Asumimos que este es el sufijo de x2 + + +def parse_lad_fbd_network(network_element): + """ + Parsea una red LAD/FBD/GRAPH, extrae lógica y añade conexiones EN/ENO implícitas. + Devuelve un diccionario representando la red para el JSON. + """ + if network_element is None: + return { + "id": "ERROR", + "title": "Invalid Network Element", + "logic": [], + "error": "Input element was None", + } + + network_id = network_element.get("ID") + # Usar get_multilingual_text de utils + title_element = network_element.xpath( + ".//iface:MultilingualText[@CompositionName='Title']", namespaces=ns + ) + network_title = ( + get_multilingual_text(title_element[0]) + if title_element + else f"Network {network_id}" + ) + comment_element = network_element.xpath( + "./ObjectList/MultilingualText[@CompositionName='Comment']", namespaces=ns + ) # OJO: Path relativo a CompileUnit? 
+ if not comment_element: # Intentar path alternativo si el anterior falla + comment_element = network_element.xpath( + ".//MultilingualText[@CompositionName='Comment']", namespaces=ns + ) # Más genérico dentro de la red + network_comment = ( + get_multilingual_text(comment_element[0]) if comment_element else "" + ) + + # --- Determinar Lenguaje (ya que este parser maneja varios) --- + network_lang = "Unknown" + attr_list_net = network_element.xpath("./AttributeList") + if attr_list_net: + lang_node_net = attr_list_net[0].xpath("./ProgrammingLanguage/text()") + if lang_node_net: + network_lang = lang_node_net[0].strip() + + # --- Buscar FlgNet --- + # Buscar NetworkSource y luego FlgNet (ambos usan namespace flg) + network_source_node = network_element.xpath(".//flg:NetworkSource", namespaces=ns) + flgnet = None + if network_source_node: + flgnet_list = network_source_node[0].xpath("./flg:FlgNet", namespaces=ns) + if flgnet_list: + flgnet = flgnet_list[0] + else: # Intentar buscar FlgNet directamente si no hay NetworkSource + flgnet_list = network_element.xpath(".//flg:FlgNet", namespaces=ns) + if flgnet_list: + flgnet = flgnet_list[0] + + if flgnet is None: + return { + "id": network_id, + "title": network_title, + "comment": network_comment, + "language": network_lang, + "logic": [], + "error": "FlgNet not found inside NetworkSource or CompileUnit", + } + + # 1. Parse Access, Parts, Calls (usan utils) + access_map = {} + # Corregir XPath para buscar Access dentro de FlgNet/Parts + for acc in flgnet.xpath(".//flg:Parts/flg:Access", namespaces=ns): + acc_info = parse_access(acc) + if acc_info and acc_info.get("uid") and "error" not in acc_info.get("type", ""): + access_map[acc_info["uid"]] = acc_info + elif acc_info: + print( + f"Advertencia: Ignorando Access inválido o con error UID={acc_info.get('uid')} en red {network_id}" + ) + + parts_and_calls_map = {} + # Corregir XPath para buscar Part y Call dentro de FlgNet/Parts + instruction_elements = flgnet.xpath( + ".//flg:Parts/flg:Part | .//flg:Parts/flg:Call", namespaces=ns + ) + for element in instruction_elements: + parsed_info = None + tag_name = etree.QName(element.tag).localname + if tag_name == "Part": + parsed_info = parse_part(element) # Usa utils + elif tag_name == "Call": + parsed_info = parse_call(element) # Usa utils + + if ( + parsed_info + and parsed_info.get("uid") + and "error" not in parsed_info.get("type", "") + ): + parts_and_calls_map[parsed_info["uid"]] = parsed_info + elif parsed_info: + # Si parse_call/parse_part devolvió error, lo guardamos para tener el UID + print( + f"Advertencia: {tag_name} con error UID={parsed_info.get('uid')} en red {network_id}. Error: {parsed_info.get('error')}" + ) + parts_and_calls_map[parsed_info["uid"]] = ( + parsed_info # Guardar aunque tenga error + ) + + # 2. Parse Wires (lógica compleja, mantener aquí) + wire_connections = defaultdict(list) # destination -> [source1, source2] + source_connections = defaultdict(list) # source -> [dest1, dest2] + eno_outputs = defaultdict(list) + qname_powerrail = etree.QName(ns["flg"], "Powerrail") + qname_identcon = etree.QName( + ns["flg"], "IdentCon" + ) # Conexión a/desde Access (variable/constante) + qname_namecon = etree.QName( + ns["flg"], "NameCon" + ) # Conexión a/desde Part/Call (pin con nombre) + qname_openbranch = etree.QName( + ns["flg"], "Openbranch" + ) # Rama abierta (normalmente ignorada o tratada como TRUE?) 
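+ # Illustrative note (typical FlgNet wiring, assumed): each <flg:Wire> lists the
+ # source connector first (Powerrail, IdentCon or NameCon) followed by one or more
+ # destinations, e.g. <Wire><NameCon UId="21" Name="out"/><NameCon UId="22" Name="in"/></Wire>
+ # is recorded by the loop below as wire_connections[("22","in")] == [("21","out")].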
+ qname_opencon = etree.QName( + ns["flg"], "OpenCon" + ) # Conexión abierta (pin no conectado) + + # Corregir XPath para buscar Wire dentro de FlgNet/Wires + for wire in flgnet.xpath(".//flg:Wires/flg:Wire", namespaces=ns): + children = wire.getchildren() + if len(children) < 2: + continue # Necesita al menos origen y destino + + source_elem = children[0] + source_uid, source_pin = None, None + + # Determinar origen + if source_elem.tag == qname_powerrail: + source_uid, source_pin = "POWERRAIL", "out" + elif source_elem.tag == qname_identcon: # Origen es una variable/constante + source_uid = source_elem.get("UId") + source_pin = "value" # Salida implícita de un Access + elif source_elem.tag == qname_namecon: # Origen es pin de instrucción + source_uid = source_elem.get("UId") + source_pin = source_elem.get("Name") + elif source_elem.tag == qname_openbranch: + # ¿Cómo manejar OpenBranch como fuente? Podría ser TRUE o una condición OR implícita + source_uid = "OPENBRANCH_" + wire.get( + "UId", "Unknown" + ) # UID único para la rama + source_pin = "out" + print( + f"Advertencia: OpenBranch encontrado como fuente en Wire UID={wire.get('UId')} (Red {network_id}). Tratando como fuente especial." + ) + # No lo añadimos a parts_and_calls_map, get_sympy_representation necesitará manejarlo + # Ignorar OpenCon como fuente (no tiene sentido) + if source_uid is None or source_pin is None: + # print(f"Advertencia: Fuente de wire inválida o no soportada: {source_elem.tag} en Wire UID={wire.get('UId')}") + continue + + source_info = (source_uid, source_pin) + + # Procesar destinos + for dest_elem in children[1:]: + dest_uid, dest_pin = None, None + + if ( + dest_elem.tag == qname_identcon + ): # Destino es una variable/constante (asignación) + dest_uid = dest_elem.get("UId") + dest_pin = "value" # Entrada implícita de un Access + elif dest_elem.tag == qname_namecon: # Destino es pin de instrucción + dest_uid = dest_elem.get("UId") + dest_pin = dest_elem.get("Name") + # Ignorar Powerrail, OpenBranch, OpenCon como destinos válidos de conexión lógica principal + + if dest_uid is not None and dest_pin is not None: + dest_key = (dest_uid, dest_pin) + if source_info not in wire_connections[dest_key]: + wire_connections[dest_key].append(source_info) + + # Mapa inverso: source -> list of destinations + source_key = (source_uid, source_pin) + dest_info = (dest_uid, dest_pin) + if dest_info not in source_connections[source_key]: + source_connections[source_key].append(dest_info) + + # Trackear salidas ENO específicamente si la fuente es una instrucción + if source_pin == "eno" and source_uid in parts_and_calls_map: + if dest_info not in eno_outputs[source_uid]: + eno_outputs[source_uid].append(dest_info) + + # 3. 
Build Initial Logic Structure (incorporando errores) + all_logic_steps = {} + # Lista de tipos funcionales (usados para inferencia EN) + # Estos son los tipos *originales* de las instrucciones + functional_block_types = [ + "Move", + "Add", + "Sub", + "Mul", + "Div", + "Mod", + "Convert", + "Call", # Call ya está aquí + "TON", + "TOF", + "TP", + "CTU", + "CTD", + "CTUD", + "BLKMOV", # Añadidos + "Se", + "Sd", # Estos son tipos LAD que se mapearán a timers SCL + ] + # Lista de generadores RLO (usados para inferencia EN) + rlo_generators = [ + "Contact", + "O", + "Eq", + "Ne", + "Gt", + "Lt", + "Ge", + "Le", + "And", + "Xor", + "PBox", + "NBox", + "Not", + ] + + # Iterar sobre UIDs válidos (los que se pudieron parsear, aunque sea con error) + valid_instruction_uids = list(parts_and_calls_map.keys()) + + for instruction_uid in valid_instruction_uids: + instruction_info = parts_and_calls_map[instruction_uid] + # Hacer copia profunda para no modificar el mapa original + instruction_repr = copy.deepcopy(instruction_info) + instruction_repr["instruction_uid"] = instruction_uid # Asegurar UID + instruction_repr["inputs"] = {} + instruction_repr["outputs"] = {} + + # Si la instrucción ya tuvo un error de parseo, añadirlo aquí + if "error" in instruction_info: + instruction_repr["parsing_error"] = instruction_info["error"] + # No intentar poblar inputs/outputs si el parseo base falló + all_logic_steps[instruction_uid] = instruction_repr + continue + + original_type = instruction_repr.get("type", "") # Tipo de la instrucción + + # --- Poblar Entradas --- + # Lista base de pines posibles (podría obtenerse de XSDs o dinámicamente) + possible_input_pins = set(["en", "in", "in1", "in2", "pre"]) + # Añadir pines dinámicamente basados en el tipo de instrucción + if original_type in ["Contact", "Coil", "SCoil", "RCoil", "SdCoil"]: + possible_input_pins.add("operand") + elif original_type in [ + "Add", + "Sub", + "Mul", + "Div", + "Mod", + "Eq", + "Ne", + "Gt", + "Lt", + "Ge", + "Le", + ]: + possible_input_pins.update(["in1", "in2"]) + elif original_type in ["TON", "TOF", "TP"]: + possible_input_pins.update(["IN", "PT"]) # Pines SCL + elif original_type in ["Se", "Sd"]: + possible_input_pins.update(["s", "tv", "timer"]) # Pines LAD + elif original_type in ["CTU", "CTD", "CTUD"]: + possible_input_pins.update(["CU", "CD", "R", "LD", "PV"]) # Pines SCL/LAD + elif original_type in ["PBox", "NBox"]: + possible_input_pins.update( + ["bit", "clk", "in"] + ) # PBox/NBox usa 'in' y 'bit' + elif original_type == "BLKMOV": + possible_input_pins.add("SRCBLK") + elif original_type == "Move": + possible_input_pins.add("in") + elif original_type == "Convert": + possible_input_pins.add("in") + elif original_type == "Call": + # Para Calls, los nombres de los parámetros reales se definen en el XML + # El Xpath busca Parameter DENTRO de CallInfo, que está DENTRO de Call + call_xml_element_list = flgnet.xpath( + f".//flg:Parts/flg:Call[@UId='{instruction_uid}']", namespaces=ns + ) + if call_xml_element_list: + call_xml_element = call_xml_element_list[0] + call_info_node_list = call_xml_element.xpath( + "./flg:CallInfo", namespaces=ns + ) + if call_info_node_list: + call_param_names = call_info_node_list[0].xpath( + "./flg:Parameter/@Name", namespaces=ns + ) + possible_input_pins.update(call_param_names) + # print(f"DEBUG Call UID={instruction_uid}: Params={call_param_names}") + else: # Fallback si no hay namespace (menos probable) + call_info_node_list_no_ns = call_xml_element.xpath("./CallInfo") + if call_info_node_list_no_ns: 
+ possible_input_pins.update( + call_info_node_list_no_ns[0].xpath("./Parameter/@Name") + ) + + # Iterar sobre pines posibles y buscar conexiones + for pin_name in possible_input_pins: + dest_key = (instruction_uid, pin_name) + if dest_key in wire_connections: + sources_list = wire_connections[dest_key] + input_sources_repr = [] + for source_uid, source_pin in sources_list: + source_repr = None + if source_uid == "POWERRAIL": + source_repr = {"type": "powerrail"} + elif source_uid.startswith("OPENBRANCH_"): + source_repr = { + "type": "openbranch", + "uid": source_uid, + } # Fuente especial + elif source_uid in access_map: + source_repr = copy.deepcopy(access_map[source_uid]) + elif source_uid in parts_and_calls_map: + source_instr_info = parts_and_calls_map[source_uid] + source_repr = { + "type": "connection", + "source_instruction_type": source_instr_info.get( + "type", "Unknown" + ), # Usar tipo base + "source_instruction_uid": source_uid, + "source_pin": source_pin, + } + else: + # Fuente desconocida (ni Access, ni Part/Call válido) + print( + f"Advertencia: Fuente desconocida UID={source_uid} conectada a {instruction_uid}.{pin_name}" + ) + source_repr = {"type": "unknown_source", "uid": source_uid} + input_sources_repr.append(source_repr) + + # Guardar la representación de la entrada (lista o dict) + instruction_repr["inputs"][pin_name] = ( + input_sources_repr[0] + if len(input_sources_repr) == 1 + else input_sources_repr + ) + + # --- Poblar Salidas (simplificado: solo conexiones a Access) --- + possible_output_pins = set( + [ + "out", + "out1", + "Q", + "q", + "eno", + "RET_VAL", + "DSTBLK", + "rt", + "cv", + "QU", + "QD", + "ET", # Añadir pines de salida estándar SCL + ] + ) + if original_type == "BLKMOV": + possible_output_pins.add("DSTBLK") + if ( + original_type == "Call" + ): # Para Calls, las salidas dependen del bloque llamado + call_xml_element_list = flgnet.xpath( + f".//flg:Parts/flg:Call[@UId='{instruction_uid}']", namespaces=ns + ) + if call_xml_element_list: + call_info_node_list = call_xml_element_list[0].xpath( + "./flg:CallInfo", namespaces=ns + ) + if call_info_node_list: + # Buscar parámetros con Section="Output" o "InOut" o "Return" + output_param_names = call_info_node_list[0].xpath( + "./flg:Parameter[@Section='Output' or @Section='InOut' or @Section='Return']/@Name", + namespaces=ns, + ) + possible_output_pins.update(output_param_names) + + for pin_name in possible_output_pins: + source_key = (instruction_uid, pin_name) + if source_key in source_connections: + if pin_name not in instruction_repr["outputs"]: + instruction_repr["outputs"][pin_name] = [] + for dest_uid, dest_pin in source_connections[source_key]: + if ( + dest_uid in access_map + ): # Solo registrar si va a una variable/constante + dest_operand_copy = copy.deepcopy(access_map[dest_uid]) + if ( + dest_operand_copy + not in instruction_repr["outputs"][pin_name] + ): + instruction_repr["outputs"][pin_name].append( + dest_operand_copy + ) + + all_logic_steps[instruction_uid] = instruction_repr + + # 4. 
Inferencia EN (modificado para usar tipos originales) + processed_blocks_en_inference = set() + try: + # Ordenar UIDs numéricamente si es posible + sorted_uids_for_en = sorted( + all_logic_steps.keys(), + key=lambda x: ( + int(x) if isinstance(x, str) and x.isdigit() else float("inf") + ), + ) + except ValueError: + sorted_uids_for_en = sorted(all_logic_steps.keys()) # Fallback sort + + ordered_logic_list_for_en = [ + all_logic_steps[uid] for uid in sorted_uids_for_en if uid in all_logic_steps + ] + + for i, instruction in enumerate(ordered_logic_list_for_en): + part_uid = instruction["instruction_uid"] + # Usar el tipo original para la lógica de inferencia + part_type_original = ( + instruction.get("type", "").replace(SCL_SUFFIX, "").replace("_error", "") + ) + + # Inferencia solo para tipos funcionales que no tengan EN explícito + if ( + part_type_original in functional_block_types + and "en" not in instruction.get("inputs", {}) + and part_uid not in processed_blocks_en_inference + and "error" not in part_type_original + ): # No inferir para errores + + inferred_en_source = None + # Buscar hacia atrás en la lista ordenada + if i > 0: + for j in range(i - 1, -1, -1): + prev_instr = ordered_logic_list_for_en[j] + if "error" in prev_instr.get("type", ""): + continue # Saltar errores previos + + prev_uid = prev_instr["instruction_uid"] + prev_type_original = ( + prev_instr.get("type", "") + .replace(SCL_SUFFIX, "") + .replace("_error", "") + ) + + if prev_type_original in rlo_generators: # Fuente RLO encontrada + inferred_en_source = { + "type": "connection", + "source_instruction_uid": prev_uid, + "source_instruction_type": prev_type_original, # Tipo original + "source_pin": "out", + } + break # Detener búsqueda + elif ( + prev_type_original in functional_block_types + ): # Bloque funcional previo + # Comprobar si este bloque tiene salida ENO conectada + if (prev_uid, "eno") in source_connections: + inferred_en_source = { + "type": "connection", + "source_instruction_uid": prev_uid, + "source_instruction_type": prev_type_original, # Tipo original + "source_pin": "eno", + } + # Si no tiene ENO conectado, el flujo RLO se detiene aquí + break # Detener búsqueda + elif prev_type_original in [ + "Coil", + "SCoil", + "RCoil", + "SdCoil", + "SetCoil", + "ResetCoil", + ]: + # Bobinas terminan el flujo RLO + break # Detener búsqueda + + # Si no se encontró fuente, conectar a PowerRail + if inferred_en_source is None: + inferred_en_source = {"type": "powerrail"} + + # Actualizar la instrucción EN el diccionario principal + if part_uid in all_logic_steps: + # Asegurar que inputs exista + if "inputs" not in all_logic_steps[part_uid]: + all_logic_steps[part_uid]["inputs"] = {} + all_logic_steps[part_uid]["inputs"]["en"] = inferred_en_source + processed_blocks_en_inference.add(part_uid) + + # 5. Lógica ENO (añadir destinos ENO si existen) + for source_instr_uid, eno_destinations in eno_outputs.items(): + if source_instr_uid in all_logic_steps and "error" not in all_logic_steps[ + source_instr_uid + ].get("type", ""): + all_logic_steps[source_instr_uid]["eno_destinations"] = eno_destinations + + # 6. 
Ordenar y Devolver + final_logic_list = [ + all_logic_steps[uid] for uid in sorted_uids_for_en if uid in all_logic_steps + ] + + return { + "id": network_id, + "title": network_title, + "comment": network_comment, + "language": network_lang, # Lenguaje original de la red + "logic": final_logic_list, + # No añadir 'error' aquí a menos que el parseo completo falle + } + + +# --- Función de Información del Parser --- +def get_parser_info(): + """Devuelve la información para este parser.""" + # Este parser maneja LAD, FBD y GRAPH + return { + "language": ["LAD", "FBD", "GRAPH"], # Lista de lenguajes soportados + "parser_func": parse_lad_fbd_network, # Función a llamar + } diff --git a/ToUpload/parsers/parse_scl.py b/ToUpload/parsers/parse_scl.py new file mode 100644 index 0000000..b88e779 --- /dev/null +++ b/ToUpload/parsers/parse_scl.py @@ -0,0 +1,253 @@ +# ToUpload/parsers/parse_scl.py +# -*- coding: utf-8 -*- +from lxml import etree +import re + +# Importar desde las utilidades del parser +from .parser_utils import ns, get_multilingual_text + +def reconstruct_scl_from_tokens(st_node): + """ + Reconstruye SCL desde , mejorando el manejo de + variables, constantes literales, tokens básicos, espacios y saltos de línea. + """ + if st_node is None: + return "// Error: StructuredText node not found.\n" + + scl_parts = [] + # Usar st:* para obtener todos los elementos hijos dentro del namespace st + children = st_node.xpath("./st:*", namespaces=ns) + + for elem in children: + tag = etree.QName(elem.tag).localname + + if tag == "Token": + scl_parts.append(elem.get("Text", "")) + elif tag == "Blank": + # Añadir espacios solo si es necesario o más de uno + num_spaces = int(elem.get("Num", 1)) + if not scl_parts or not scl_parts[-1].endswith(" "): + scl_parts.append(" " * num_spaces) + elif num_spaces > 1: + scl_parts.append(" " * (num_spaces -1)) + + elif tag == "NewLine": + # Quitar espacios finales antes del salto de línea + if scl_parts: + scl_parts[-1] = scl_parts[-1].rstrip() + scl_parts.append("\n") + elif tag == "Access": + scope = elem.get("Scope") + access_str = f"/*_ERR_Scope_{scope}_*/" # Placeholder + + # --- Variables --- + if scope in [ + "GlobalVariable", "LocalVariable", "TempVariable", "InOutVariable", + "InputVariable", "OutputVariable", "ConstantVariable", + "GlobalConstant", "LocalConstant" # Añadir constantes simbólicas + ]: + symbol_elem = elem.xpath("./st:Symbol", namespaces=ns) + if symbol_elem: + components = symbol_elem[0].xpath("./st:Component", namespaces=ns) + symbol_text_parts = [] + for i, comp in enumerate(components): + name = comp.get("Name", "_ERR_COMP_") + if i > 0: symbol_text_parts.append(".") + + # Check for HasQuotes attribute (adjust namespace if needed) + # El atributo está en el Component o en el Access padre? 
Probar ambos + has_quotes_comp = comp.get("HasQuotes", "false").lower() == "true" # Check directly on Component + has_quotes_access = False + access_parent = comp.xpath("ancestor::st:Access[1]", namespaces=ns) # Get immediate Access parent + if access_parent: + has_quotes_attr = access_parent[0].xpath("./st:BooleanAttribute[@Name='HasQuotes']/text()", namespaces=ns) + has_quotes_access = has_quotes_attr and has_quotes_attr[0].lower() == 'true' + + has_quotes = has_quotes_comp or has_quotes_access + is_temp = name.startswith("#") + + # Apply quotes based on HasQuotes or if it's the first component and not temp + if has_quotes or (i == 0 and not is_temp and '"' not in name): # Avoid double quotes + symbol_text_parts.append(f'"{name}"') + else: + symbol_text_parts.append(name) + + # --- Array Index Access --- + index_access_nodes = comp.xpath("./st:Access", namespaces=ns) + if index_access_nodes: + # Llamada recursiva para cada índice + indices_text = [reconstruct_scl_from_tokens(idx_node) for idx_node in index_access_nodes] + # Limpiar saltos de línea dentro de los corchetes + indices_cleaned = [idx.replace('\n', '').strip() for idx in indices_text] + symbol_text_parts.append(f"[{','.join(indices_cleaned)}]") + + access_str = "".join(symbol_text_parts) + else: + access_str = f"/*_ERR_NO_SYMBOL_IN_{scope}_*/" + + # --- Constantes Literales --- + elif scope == "LiteralConstant": + constant_elem = elem.xpath("./st:Constant", namespaces=ns) + if constant_elem: + val_elem = constant_elem[0].xpath("./st:ConstantValue/text()", namespaces=ns) + type_elem = constant_elem[0].xpath("./st:ConstantType/text()", namespaces=ns) + const_type = type_elem[0].strip().lower() if type_elem and type_elem[0] is not None else "" + const_val = val_elem[0].strip() if val_elem and val_elem[0] is not None else "_ERR_CONSTVAL_" + + # Formatear según tipo + if const_type == "bool": access_str = const_val.upper() + elif const_type.lower() == "string": + replaced_val = const_val.replace("'", "''") + access_str = f"'{replaced_val}'" + elif const_type.lower() == "char": + replaced_val = const_val.replace("'", "''") + access_str = f"'{replaced_val}'" + elif const_type == "wstring": + replaced_val = const_val.replace("'", "''") + access_str = f"WSTRING#'{replaced_val}'" + elif const_type == "wchar": + replaced_val = const_val.replace("'", "''") + access_str = f"WCHAR#'{replaced_val}'" + elif const_type == "time": access_str = f"T#{const_val}" + elif const_type == "ltime": access_str = f"LT#{const_val}" + elif const_type == "s5time": access_str = f"S5T#{const_val}" + elif const_type == "date": access_str = f"D#{const_val}" + elif const_type == "dtl": access_str = f"DTL#{const_val}" + elif const_type == "dt": access_str = f"DT#{const_val}" + elif const_type == "tod": access_str = f"TOD#{const_val}" + elif const_type in ["int", "dint", "sint", "usint", "uint", "udint", "real", "lreal", "word", "dword", "byte"]: + # Añadir .0 para reales si no tienen decimal + if const_type in ["real", "lreal"] and '.' not in const_val and 'e' not in const_val.lower(): + access_str = f"{const_val}.0" + else: + access_str = const_val + else: # Otros tipos (LWORD, etc.) o desconocidos + access_str = const_val + else: + access_str = "/*_ERR_NOCONST_*/" + + # --- Llamadas a Funciones/Bloques (Scope=Call) --- + elif scope == "Call": + call_info_node = elem.xpath("./st:CallInfo", namespaces=ns) + if call_info_node: + ci = call_info_node[0] + call_name = ci.get("Name", "_ERR_CALLNAME_") + call_type = ci.get("BlockType") # FB, FC, etc. 
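+ # Illustrative outcome (names assumed): a CallInfo with BlockType="FB" and an
+ # Instance component "TON_DB_1" is rendered below as "TON_DB_1"(IN := x, PT := T#5s);
+ # with BlockType="FC" the block name itself is used, e.g. "Scale"(value := raw).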
+ + # Parámetros (están como Access o Token dentro de CallInfo/Parameter) + params = ci.xpath("./st:Parameter", namespaces=ns) + param_parts = [] + for p in params: + p_name = p.get("Name", "_ERR_PARAMNAME_") + # El valor del parámetro está dentro del nodo Parameter + p_value_node = p.xpath("./st:Access | ./st:Token", namespaces=ns) # Buscar Access o Token + p_value_scl = "" + if p_value_node: + p_value_scl = reconstruct_scl_from_tokens(p) # Parsear el contenido del parámetro + p_value_scl = p_value_scl.replace('\n', '').strip() # Limpiar SCL resultante + param_parts.append(f"{p_name} := {p_value_scl}") + + # Manejar FB vs FC + if call_type == "FB": + instance_node = ci.xpath("./st:Instance/st:Component/@Name", namespaces=ns) + if instance_node: + instance_name = f'"{instance_node[0]}"' + access_str = f"{instance_name}({', '.join(param_parts)})" + else: # FB sin instancia? Podría ser STAT + access_str = f'"{call_name}"({", ".join(param_parts)}) (* FB sin instancia explícita? *)' + elif call_type == "FC": + access_str = f'"{call_name}"({", ".join(param_parts)})' + else: # Otros tipos de llamada + access_str = f'"{call_name}"({", ".join(param_parts)}) (* Tipo: {call_type} *)' + else: + access_str = "/*_ERR_NO_CALLINFO_*/" + + # Añadir más scopes si son necesarios (e.g., Address, Label, Reference) + + scl_parts.append(access_str) + + elif tag == "Comment" or tag == "LineComment": + # Usar get_multilingual_text del parser_utils + comment_text = get_multilingual_text(elem) + if tag == "Comment": + scl_parts.append(f"(* {comment_text} *)") + else: + scl_parts.append(f"// {comment_text}") + # Ignorar otros tipos de nodos si no son relevantes para el SCL + + full_scl = "".join(scl_parts) + + # --- Re-indentación Simple --- + output_lines = [] + indent_level = 0 + indent_str = " " # Dos espacios + for line in full_scl.splitlines(): + trimmed_line = line.strip() + if not trimmed_line: + # Mantener líneas vacías? Opcional. + # output_lines.append("") + continue + + # Reducir indentación ANTES de imprimir para END, ELSE, etc. + if trimmed_line.upper().startswith(("END_", "UNTIL", "}")) or \ + trimmed_line.upper() in ["ELSE", "ELSIF"]: + indent_level = max(0, indent_level - 1) + + output_lines.append(indent_str * indent_level + trimmed_line) + + # Aumentar indentación DESPUÉS de imprimir para IF, FOR, etc. + # Ser más específico con las palabras clave que aumentan indentación + # Usar .upper() para ignorar mayúsculas/minúsculas + line_upper = trimmed_line.upper() + if line_upper.endswith(("THEN", "DO", "OF", "{")) or \ + line_upper.startswith(("IF ", "FOR ", "WHILE ", "CASE ", "REPEAT", "STRUCT")) or \ + line_upper == "ELSE": + # Excepción: No indentar después de ELSE IF + if not (line_upper == "ELSE" and "IF" in output_lines[-1].upper()): + indent_level += 1 + + return "\n".join(output_lines) + + +def parse_scl_network(network_element): + """ + Parsea una red SCL extrayendo el código fuente reconstruido. + Devuelve un diccionario representando la red para el JSON. 
+ """ + network_id = network_element.get("ID", "UnknownSCL_ID") + network_lang = "SCL" # Sabemos que es SCL + + # Buscar NetworkSource y luego StructuredText + network_source_node = network_element.xpath(".//flg:NetworkSource", namespaces=ns) + structured_text_node = None + if network_source_node: + structured_text_node_list = network_source_node[0].xpath("./st:StructuredText", namespaces=ns) + if structured_text_node_list: + structured_text_node = structured_text_node_list[0] + + reconstructed_scl = "// SCL extraction failed: StructuredText node not found.\n" + if structured_text_node is not None: + reconstructed_scl = reconstruct_scl_from_tokens(structured_text_node) + + # Crear la estructura de datos para la red + parsed_network_data = { + "id": network_id, + "language": network_lang, + "logic": [ # SCL se guarda como un único bloque lógico + { + "instruction_uid": f"SCL_{network_id}", # UID sintético + "type": "RAW_SCL_CHUNK", # Tipo especial para SCL crudo + "scl": reconstructed_scl, # El código SCL reconstruido + } + ], + # No añadimos error aquí, reconstruct_scl_from_tokens ya incluye comentarios de error + } + return parsed_network_data + +# --- Función de Información del Parser --- +def get_parser_info(): + """Devuelve la información para este parser.""" + return { + 'language': ['SCL'], # Lista de lenguajes soportados + 'parser_func': parse_scl_network # Función a llamar + } \ No newline at end of file diff --git a/ToUpload/parsers/parse_stl.py b/ToUpload/parsers/parse_stl.py new file mode 100644 index 0000000..c5c3f94 --- /dev/null +++ b/ToUpload/parsers/parse_stl.py @@ -0,0 +1,278 @@ +# ToUpload/parsers/parse_stl.py +# -*- coding: utf-8 -*- +from lxml import etree + +# Importar desde las utilidades del parser +from .parser_utils import ns # Solo necesitamos los namespaces aquí + +# --- Funciones Auxiliares de Reconstrucción STL (Adaptadas de x1) --- + +def get_access_text_stl(access_element): + """Reconstruye una representación textual simple de un Access en STL.""" + if access_element is None: return "_ERR_ACCESS_" + scope = access_element.get("Scope") + + # Símbolo (Variable, Constante Simbólica) + symbol_elem = access_element.xpath("./stl:Symbol", namespaces=ns) + if symbol_elem: + components = symbol_elem[0].xpath("./stl:Component", namespaces=ns) + parts = [] + for i, comp in enumerate(components): + name = comp.get("Name", "_ERR_COMP_") + # Comprobar HasQuotes (en Access padre?) 
+ has_quotes_elem = comp.xpath("ancestor::stl:Access/stl:BooleanAttribute[@Name='HasQuotes']/text()", namespaces=ns) + has_quotes = has_quotes_elem and has_quotes_elem[0].lower() == "true" + is_temp = name.startswith("#") + + if i > 0: parts.append(".") + # Aplicar comillas + if has_quotes or (i == 0 and not is_temp and '"' not in name): + parts.append(f'"{name}"') + else: + parts.append(name) + # Índices de Array + index_access = comp.xpath("./stl:Access", namespaces=ns) + if index_access: + indices = [get_access_text_stl(ia) for ia in index_access] + parts.append(f"[{','.join(indices)}]") + return "".join(parts) + + # Constante Literal + constant_elem = access_element.xpath("./stl:Constant", namespaces=ns) + if constant_elem: + val_elem = constant_elem[0].xpath("./stl:ConstantValue/text()", namespaces=ns) + type_elem = constant_elem[0].xpath("./stl:ConstantType/text()", namespaces=ns) + const_type = (type_elem[0].strip().lower() if type_elem and type_elem[0] is not None else "") + const_val = (val_elem[0].strip() if val_elem and val_elem[0] is not None else "_ERR_CONST_") + + # Añadir prefijos estándar STL + if const_type == "time": return f"T#{const_val}" + if const_type == "s5time": return f"S5T#{const_val}" + if const_type == "date": return f"D#{const_val}" + if const_type == "dt": return f"DT#{const_val}" + if const_type == "time_of_day" or const_type=="tod": return f"TOD#{const_val}" + if const_type.lower() == "string": + replaced_val = const_val.replace("'", "''") + access_str = f"'{replaced_val}'" + if const_type.lower() == "char": + replaced_val = const_val.replace("'", "''") + access_str = f"'{replaced_val}'" + if const_type == "wstring": + replaced_val = const_val.replace("'", "''") + access_str = f"WSTRING#'{replaced_val}'" + if const_type == "wchar": + replaced_val = const_val.replace("'", "''") + access_str = f"WCHAR#'{replaced_val}'" # Añadir más si es necesario (WSTRING#, BYTE#, WORD#...) + if const_type == "byte" and const_val.startswith("16#"): return f"B#{const_val}" # Formato B#16#FF + if const_type == "word" and const_val.startswith("16#"): return f"W#{const_val}" + if const_type == "dword" and const_val.startswith("16#"): return f"DW#{const_val}" + # Real con punto decimal + if const_type == "real" and '.' not in const_val and 'e' not in const_val.lower(): return f"{const_val}.0" + return const_val # Valor por defecto + + # Etiqueta + label_elem = access_element.xpath("./stl:Label", namespaces=ns) + if label_elem: + return label_elem[0].get("Name", "_ERR_LABEL_") + + # Acceso Indirecto (Punteros) + indirect_elem = access_element.xpath("./stl:Indirect", namespaces=ns) + if indirect_elem: + reg = indirect_elem[0].get("Register", "AR?") # AR1, AR2 + offset_str = indirect_elem[0].get("BitOffset", "0") + area = indirect_elem[0].get("Area", "DB") # DB, DI, L, etc. + width = indirect_elem[0].get("Width", "X") # Bit, Byte, Word, Double + try: + bit_offset = int(offset_str) + byte_offset = bit_offset // 8 + bit_in_byte = bit_offset % 8 + p_format_offset = f"P#{byte_offset}.{bit_in_byte}" + except ValueError: + p_format_offset = "P#?.?" 
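+ # Worked example (illustrative): BitOffset="83" gives byte 10, bit 3, so a Word
+ # access to the DB area via AR1 is returned as DBW[AR1,P#10.3] by the mapping below.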
+ width_map = {"Bit": "X", "Byte": "B", "Word": "W", "Double": "D", "Long": "D"} + width_char = width_map.get(width, width[0] if width else "?") + return f"{area}{width_char}[{reg},{p_format_offset}]" + + # Dirección Absoluta (I, Q, M, PI, PQ, T, C, DBX, DIX, L) + address_elem = access_element.xpath("./stl:Address", namespaces=ns) + if address_elem: + area = address_elem[0].get("Area", "??") # Input, Output, Memory, DB, DI, Local, Timer, Counter... + bit_offset_str = address_elem[0].get("BitOffset", "0") + addr_type_str = address_elem[0].get("Type", "Bool") # Bool, Byte, Word, DWord, Int, DInt, Real... + try: + bit_offset = int(bit_offset_str) + byte_offset = bit_offset // 8 + bit_in_byte = bit_offset % 8 + # Determinar ancho (X, B, W, D) + addr_width = "X" # Default bit + if addr_type_str in ["Byte", "SInt", "USInt"]: addr_width = "B" + elif addr_type_str in ["Word", "Int", "UInt"]: addr_width = "W" + elif addr_type_str in ["DWord", "DInt", "UDInt", "Real", "Time", "DT", "TOD"]: addr_width = "D" + elif addr_type_str in ["LReal", "LTime", "LWord", "LInt", "ULInt"]: addr_width = "D" # L se maneja como D en direccionamiento base? O usar L? Chequear estándar. STL clásico no tenía L. + # Mapear Área XML a Área STL + area_map = {"Input": "I", "Output": "Q", "Memory": "M", + "PeripheryInput": "PI", "PeripheryOutput": "PQ", + "DB": "DB", "DI": "DI", "Local": "L", + "Timer": "T", "Counter": "C"} + stl_area = area_map.get(area, area) + + if stl_area in ["DB", "DI"]: + block_num = address_elem[0].get("BlockNumber") # Para DB10.DBX0.0 + if block_num: + return f"{stl_area}{block_num}.{stl_area}{addr_width}{byte_offset}.{bit_in_byte}" + else: # Para acceso con registro DB/DI (DBX, DIW, etc.) + return f"{stl_area}{addr_width}{byte_offset}.{bit_in_byte}" + elif stl_area in ["T", "C"]: + return f"{stl_area}{byte_offset}" # T 5, C 10 (offset es el número) + else: # I, Q, M, L, PI, PQ + return f"{stl_area}{addr_width}{byte_offset}.{bit_in_byte}" # M10.1, IW0, QB5, etc. + + except ValueError: + return f"{area}?{bit_offset_str}?" + + # CallInfo (para CALL FC10, CALL FB20, DB10) + call_info_elem = access_element.xpath("./stl:CallInfo", namespaces=ns) + if call_info_elem: + name = call_info_elem[0].get("Name", "_ERR_CALL_") + btype = call_info_elem[0].get("BlockType", "FC") # FC, FB, DB + instance_node = call_info_elem[0].xpath("./stl:Instance/stl:Component/@Name", namespaces=ns) + if btype == "FB" and instance_node: + # Para CALL FB, el operando es el DB de instancia + db_name_raw = instance_node[0] + return f'"{db_name_raw}"' if '"' not in db_name_raw else db_name_raw + elif btype == "DB": + return f'DB "{name}"' # O solo DB name? ej. DB10 + else: # FC + return f'{btype} "{name}"' # FC "Nombre" + + return f"_{scope}_?" # Fallback + + +def get_comment_text_stl(comment_element): + """Extrae texto de un LineComment o Comment para STL.""" + if comment_element is None: return "" + # STL Comments suelen tener directamente + text_nodes = comment_element.xpath("./stl:Text/text()", namespaces=ns) + if text_nodes: + return text_nodes[0].strip() + return "" # Vacío si no hay + +def reconstruct_stl_from_statementlist(statement_list_node): + """Reconstruye el código STL como una cadena de texto desde .""" + if statement_list_node is None: + return "// Error: StatementList node not found.\n" + stl_lines = [] + statements = statement_list_node.xpath("./stl:StlStatement", namespaces=ns) + + for stmt in statements: + line_parts = [] + inline_comment = "" # Comentarios en la misma línea + + # 1. 
Comentarios iniciales (línea completa //) + initial_comments = stmt.xpath("child::stl:Comment[not(@Inserted='true')] | child::stl:LineComment[not(@Inserted='true')]", namespaces=ns) + for comm in initial_comments: + comment_text = get_comment_text_stl(comm) + if comment_text: + for comment_line in comment_text.splitlines(): + stl_lines.append(f"// {comment_line}") + + # 2. Etiqueta (Label) + label_decl = stmt.xpath("./stl:LabelDeclaration", namespaces=ns) + label_str = "" + if label_decl: + label_name = label_decl[0].xpath("./stl:Label/@Name", namespaces=ns) + if label_name: + label_str = f"{label_name[0]}:" + # Comentarios después de la etiqueta (inline) + label_comments = label_decl[0].xpath("./stl:Comment[@Inserted='true'] | ./stl:LineComment[@Inserted='true']", namespaces=ns) + for lcomm in label_comments: + inline_comment += f" // {get_comment_text_stl(lcomm)}" + if label_str: + line_parts.append(label_str) + + # 3. Instrucción (StlToken) + instruction_token = stmt.xpath("./stl:StlToken", namespaces=ns) + instruction_str = "" + if instruction_token: + token_text = instruction_token[0].get("Text", "_ERR_TOKEN_") + if token_text == "EMPTY_LINE": + stl_lines.append("") # Línea vacía + continue # Saltar resto del statement + elif token_text == "COMMENT": # Marcador de línea de comentario completo + # Ya manejado por initial_comments? Verificar XML. Si no, extraer comentario aquí. + pass # Asumir manejado antes + else: + instruction_str = token_text + # Comentarios asociados al token (inline) + token_comments = instruction_token[0].xpath("./stl:Comment[@Inserted='true'] | ./stl:LineComment[@Inserted='true']", namespaces=ns) + for tcomm in token_comments: + inline_comment += f" // {get_comment_text_stl(tcomm)}" + if instruction_str: + # Añadir tabulación si hay etiqueta + line_parts.append("\t" + instruction_str if label_str else instruction_str) + + # 4. Operando (Access) + access_elem = stmt.xpath("./stl:Access", namespaces=ns) + access_str = "" + if access_elem: + access_text = get_access_text_stl(access_elem[0]) + access_str = access_text + # Comentarios dentro del Access (inline) + access_comments = access_elem[0].xpath("child::stl:Comment[@Inserted='true'] | child::stl:LineComment[@Inserted='true']", namespaces=ns) + for acc_comm in access_comments: + inline_comment += f" // {get_comment_text_stl(acc_comm)}" + if access_str: + line_parts.append(access_str) + + # Construir línea final + current_line = " ".join(lp for lp in line_parts if lp) # Unir partes con espacio + if inline_comment: + current_line += f"\t{inline_comment.strip()}" # Añadir comentario con tab + + if current_line.strip(): # Añadir solo si no está vacía después de todo + stl_lines.append(current_line.rstrip()) # Quitar espacios finales + + return "\n".join(stl_lines) + + +def parse_stl_network(network_element): + """ + Parsea una red STL extrayendo el código fuente reconstruido. + Devuelve un diccionario representando la red para el JSON. 
+ """ + network_id = network_element.get("ID", "UnknownSTL_ID") + network_lang = "STL" + + # Buscar NetworkSource y luego StatementList + network_source_node = network_element.xpath(".//flg:NetworkSource", namespaces=ns) + statement_list_node = None + if network_source_node: + statement_list_node_list = network_source_node[0].xpath("./stl:StatementList", namespaces=ns) + if statement_list_node_list: + statement_list_node = statement_list_node_list[0] + + reconstructed_stl = "// STL extraction failed: StatementList node not found.\n" + if statement_list_node is not None: + reconstructed_stl = reconstruct_stl_from_statementlist(statement_list_node) + + # Crear la estructura de datos para la red + parsed_network_data = { + "id": network_id, + "language": network_lang, + "logic": [ # STL se guarda como un único bloque lógico + { + "instruction_uid": f"STL_{network_id}", # UID sintético + "type": "RAW_STL_CHUNK", # Tipo especial para STL crudo + "stl": reconstructed_stl, # El código STL reconstruido + } + ], + } + return parsed_network_data + +# --- Función de Información del Parser --- +def get_parser_info(): + """Devuelve la información para este parser.""" + return { + 'language': ['STL'], # Lenguaje soportado + 'parser_func': parse_stl_network # Función a llamar + } \ No newline at end of file diff --git a/ToUpload/parsers/parser_utils.py b/ToUpload/parsers/parser_utils.py new file mode 100644 index 0000000..64d748d --- /dev/null +++ b/ToUpload/parsers/parser_utils.py @@ -0,0 +1,478 @@ +# ToUpload/parsers/parser_utils.py +# -*- coding: utf-8 -*- +from lxml import etree +import traceback + +# --- Namespaces (Común para muchos parsers) --- +ns = { + "iface": "http://www.siemens.com/automation/Openness/SW/Interface/v5", + "flg": "http://www.siemens.com/automation/Openness/SW/NetworkSource/FlgNet/v4", + "st": "http://www.siemens.com/automation/Openness/SW/NetworkSource/StructuredText/v3", + "stl": "http://www.siemens.com/automation/Openness/SW/NetworkSource/StatementList/v4", +} + +# --- Funciones Comunes de Extracción de Texto y Nodos --- + + +def get_multilingual_text(element, default_lang="en-US", fallback_lang="it-IT"): + """Extrae texto multilingüe de un elemento XML.""" + if element is None: + return "" + try: + # Intenta buscar el idioma por defecto + xpath_expr_default = f".//iface:MultilingualTextItem[iface:AttributeList/iface:Culture='{default_lang}']/iface:AttributeList/iface:Text" + text_items_default = element.xpath(xpath_expr_default, namespaces=ns) + if text_items_default and text_items_default[0].text is not None: + return text_items_default[0].text.strip() + + # Intenta buscar el idioma de fallback + xpath_expr_fallback = f".//iface:MultilingualTextItem[iface:AttributeList/iface:Culture='{fallback_lang}']/iface:AttributeList/iface:Text" + text_items_fallback = element.xpath(xpath_expr_fallback, namespaces=ns) + if text_items_fallback and text_items_fallback[0].text is not None: + return text_items_fallback[0].text.strip() + + # Si no encuentra ninguno, toma el primer texto que encuentre + xpath_expr_any = ".//iface:MultilingualTextItem/iface:AttributeList/iface:Text" + text_items_any = element.xpath(xpath_expr_any, namespaces=ns) + if text_items_any and text_items_any[0].text is not None: + return text_items_any[0].text.strip() + + # Fallback si MultilingualText está vacío o tiene una estructura inesperada + return "" + except Exception as e: + print(f"Advertencia: Error extrayendo MultilingualText: {e}") + # traceback.print_exc() # Descomentar para más detalles del error + 
return "" + + +def get_symbol_name(symbol_element): + """Obtiene el nombre completo de un símbolo desde un elemento .""" + # Adaptado para usar namespace flg + if symbol_element is None: + return None + try: + # Asume que Component está dentro de Symbol y ambos están en el namespace flg + components = symbol_element.xpath("./flg:Component/@Name", namespaces=ns) + # Formatear correctamente con comillas dobles si es necesario (ej. DBs) + return ( + ".".join( + f'"{c}"' if not c.startswith("#") and '"' not in c else c + for c in components + ) + if components + else None + ) + except Exception as e: + print(f"Advertencia: Excepción en get_symbol_name: {e}") + return None + + +def parse_access(access_element): + """Parsea un nodo devolviendo un diccionario con su información.""" + # Adaptado para usar namespace flg + if access_element is None: + return None + uid = access_element.get("UId") + scope = access_element.get("Scope") + info = {"uid": uid, "scope": scope, "type": "unknown"} + + # Buscar Symbol o Constant usando el namespace flg + symbol = access_element.xpath("./flg:Symbol", namespaces=ns) + constant = access_element.xpath("./flg:Constant", namespaces=ns) + + if symbol: + info["type"] = "variable" + # Llamar a get_symbol_name que ahora espera flg:Symbol + info["name"] = get_symbol_name(symbol[0]) + if info["name"] is None: + info["type"] = "error_parsing_symbol" + print(f"Error: No se pudo parsear nombre símbolo Access UID={uid}") + # Intentar extraer texto directamente como fallback muy básico + raw_text = "".join(symbol[0].xpath(".//text()")).strip() + info["name"] = ( + f'"_ERR_PARSING_{raw_text[:20]}"' + if raw_text + else f'"_ERR_PARSING_EMPTY_SYMBOL_ACCESS_{uid}"' + ) + # return info # Podríamos devolver el error aquí + elif constant: + info["type"] = "constant" + # Buscar ConstantType y ConstantValue usando el namespace flg + const_type_elem = constant[0].xpath("./flg:ConstantType", namespaces=ns) + const_val_elem = constant[0].xpath("./flg:ConstantValue", namespaces=ns) + + # Extraer texto + info["datatype"] = ( + const_type_elem[0].text.strip() + if const_type_elem and const_type_elem[0].text is not None + else "Unknown" + ) + value_str = ( + const_val_elem[0].text.strip() + if const_val_elem and const_val_elem[0].text is not None + else None + ) + + if value_str is None: + info["type"] = "error_parsing_constant" + info["value"] = None + print(f"Error: Constante sin valor Access UID={uid}") + # return info + + # Inferir tipo si es Unknown (igual que antes) + if info["datatype"] == "Unknown" and value_str: + val_lower = value_str.lower() + if val_lower in ["true", "false"]: + info["datatype"] = "Bool" + elif value_str.isdigit() or ( + value_str.startswith("-") and value_str[1:].isdigit() + ): + info["datatype"] = "Int" # O DInt? Int es más seguro + elif "." in value_str: + try: + float(value_str) + info["datatype"] = "Real" # O LReal? Real es más seguro + except ValueError: + pass # Podría ser string con punto + elif "#" in value_str: + # Inferir tipo desde prefijo (T#, DT#, '...', etc.) + parts = value_str.split("#", 1) + prefix = parts[0].upper() + if prefix == "T": + info["datatype"] = "Time" + elif prefix == "LT": + info["datatype"] = "LTime" + elif prefix == "S5T": + info["datatype"] = "S5Time" + elif prefix == "D": + info["datatype"] = "Date" + elif prefix == "DT": + info["datatype"] = "DT" + elif prefix == "DTL": + info["datatype"] = "DTL" + elif prefix == "TOD": + info["datatype"] = "Time_Of_Day" + # Añadir más prefijos si es necesario (WSTRING#, STRING#, etc.) 
+ elif value_str.startswith("'") and value_str.endswith("'"): + info["datatype"] = "String" # O Char? String es más probable + else: + info["datatype"] = ( + "TypedConstant" # Genérico si no se reconoce prefijo + ) + + elif value_str.startswith("'") and value_str.endswith("'"): + info["datatype"] = "String" # O Char? + + info["value"] = value_str # Guardar valor original + # Intentar conversión numérica/booleana (igual que antes) + dtype_lower = info["datatype"].lower() + # Quitar prefijo y comillas para la conversión + val_str_processed = value_str + if isinstance(value_str, str): + if "#" in value_str: + val_str_processed = value_str.split("#", 1)[-1] + if ( + val_str_processed.startswith("'") + and val_str_processed.endswith("'") + and len(val_str_processed) > 1 + ): + val_str_processed = val_str_processed[1:-1] + try: + if dtype_lower in [ + "int", + "dint", + "udint", + "sint", + "usint", + "lint", + "ulint", + "word", + "dword", + "lword", + "byte", + ]: + info["value"] = int(val_str_processed) + elif dtype_lower == "bool": + info["value"] = ( + val_str_processed.lower() == "true" or val_str_processed == "1" + ) + elif dtype_lower in ["real", "lreal"]: + info["value"] = float(val_str_processed) + # Mantener string para otros tipos (Time, Date, String, Char, TypedConstant) + except (ValueError, TypeError) as e: + # Permitir que el valor sea un string si la conversión falla (podría ser una constante simbólica) + # print(f"Advertencia: No se pudo convertir valor constante '{val_str_processed}' a {dtype_lower} UID={uid}. Manteniendo string. Error: {e}") + info["value"] = value_str # Mantener string original + + else: + info["type"] = "unknown_structure" + print(f"Advertencia: Access UID={uid} no es Symbol ni Constant.") + # return info + + # Verificar nombre faltante después de intentar parsear + if info["type"] == "variable" and info.get("name") is None: + print(f"Error Interno: parse_access var sin nombre UID {uid}.") + info["type"] = "error_no_name" + # return info + + return info + + +def parse_part(part_element): + """Parsea un nodo de LAD/FBD.""" + # Asume que Part está en namespace flg + if part_element is None: + return None + uid = part_element.get("UId") + name = part_element.get("Name") + if not uid or not name: + print( + f"Error: Part sin UID o Name: {etree.tostring(part_element, encoding='unicode')}" + ) + return None + + template_values = {} + try: + # TemplateValue parece NO tener namespace flg + for tv in part_element.xpath("./TemplateValue"): + tv_name = tv.get("Name") + tv_type = tv.get("Type") + if tv_name and tv_type: + template_values[tv_name] = tv_type + except Exception as e: + print(f"Advertencia: Error extrayendo TemplateValues Part UID={uid}: {e}") + + negated_pins = {} + try: + # Negated parece NO tener namespace flg + for negated_elem in part_element.xpath("./Negated"): + negated_pin_name = negated_elem.get("Name") + if negated_pin_name: + negated_pins[negated_pin_name] = True + except Exception as e: + print(f"Advertencia: Error extrayendo Negated Pins Part UID={uid}: {e}") + + return { + "uid": uid, + "type": name, # El 'type' de la instrucción (e.g., 'Add', 'Contact') + "template_values": template_values, + "negated_pins": negated_pins, + } + + +def parse_call(call_element): + """Parsea un nodo de LAD/FBD.""" + # Asume que Call está en namespace flg + if call_element is None: + return None + uid = call_element.get("UId") + if not uid: + print( + f"Error: Call encontrado sin UID: {etree.tostring(call_element, encoding='unicode')}" + ) + return None + + # << 
CORRECCIÓN: CallInfo y sus hijos están en el namespace por defecto (flg) >> + call_info_elem = call_element.xpath("./flg:CallInfo", namespaces=ns) + if not call_info_elem: + print(f"Error: Call UID {uid} sin elemento flg:CallInfo.") + # Intentar sin namespace como fallback por si acaso + call_info_elem_no_ns = call_element.xpath("./CallInfo") + if not call_info_elem_no_ns: + print( + f"Error: Call UID {uid} sin elemento CallInfo (probado sin NS tambien)." + ) + return { + "uid": uid, + "type": "Call_error", + "error": "Missing CallInfo", + } # Devolver error + else: + # Si se encontró sin NS, usar ese (menos probable pero posible) + print(f"Advertencia: Call UID {uid} encontró CallInfo SIN namespace.") + call_info = call_info_elem_no_ns[0] + else: + call_info = call_info_elem[0] # Usar el encontrado con namespace + + block_name = call_info.get("Name") + block_type = call_info.get("BlockType") # FC, FB + if not block_name or not block_type: + print(f"Error: CallInfo para UID {uid} sin Name o BlockType.") + return { + "uid": uid, + "type": "Call_error", + "error": "Missing Name or BlockType in CallInfo", + } + + instance_name = None + instance_scope = None + # Buscar Instance y Component (que también deberían estar en namespace flg) + # Solo relevante si es FB + if block_type == "FB": + instance_elem_list = call_info.xpath("./flg:Instance", namespaces=ns) + if instance_elem_list: + instance_elem = instance_elem_list[0] + instance_scope = instance_elem.get("Scope") # GlobalDB, LocalVariable, etc. + # Buscar Component dentro de Instance + component_elem_list = instance_elem.xpath("./flg:Component", namespaces=ns) + if component_elem_list: + component_elem = component_elem_list[0] + db_name_raw = component_elem.get("Name") + if db_name_raw: + # Asegurar comillas dobles para nombres de DB + instance_name = ( + f'"{db_name_raw}"' + if not db_name_raw.startswith('"') + else db_name_raw + ) + else: + print( + f"Advertencia: en FB Call UID {uid} sin 'Name'." + ) + else: + print( + f"Advertencia: No se encontró en FB Call UID {uid}." + ) + else: + print( + f"Advertencia: FB Call '{block_name}' UID {uid} sin . ¿Llamada a multi-instancia STAT?" + ) + # Aquí podríamos intentar buscar si el scope del Call es LocalVariable para inferir STAT + call_scope = call_element.get("Scope") # Scope del mismo + if call_scope == "LocalVariable": + # Si la llamada es local y no tiene , probablemente es una multi-instancia STAT + instance_name = f'"{block_name}"' # Usar el nombre del bloque como nombre de instancia STAT (convención común) + instance_scope = "Static" # Marcar como estático + print( + f"INFO: Asumiendo instancia STAT '{instance_name}' para FB Call UID {uid}." + ) + # else: # Error si es Global y no tiene Instance? Depende de la semántica deseada. + # print(f"Error: FB Call '{block_name}' UID {uid} no es STAT y no tiene .") + # return {"uid": uid, "type": "Call_error", "error": "FB Call sin datos de instancia"} + + # El 'type' aquí es genérico 'Call', la distinción FC/FB se hace con block_type + call_data = { + "uid": uid, + "type": "Call", + "block_name": block_name, + "block_type": block_type, # FC o FB + } + if instance_name: + call_data["instance_db"] = instance_name # Nombre formateado SCL + if instance_scope: + call_data["instance_scope"] = instance_scope # Static, GlobalDB, etc. + + return call_data + + +def parse_interface_members(member_elements): + """ + Parsea recursivamente una lista de elementos de una interfaz o estructura. 
+ Maneja miembros simples, structs anidados y arrays con valores iniciales. + Usa el namespace 'iface'. + """ + members_data = [] + if not member_elements: + return members_data + + for member in member_elements: + member_name = member.get("Name") + member_dtype_raw = member.get( + "Datatype" + ) # Puede tener comillas o ser Array[...] of "..." + member_version = member.get("Version") # v1.0 etc. + member_remanence = member.get("Remanence", "NonRetain") + member_accessibility = member.get("Accessibility", "Public") + + if not member_name or not member_dtype_raw: + print( + "Advertencia: Miembro sin nombre o tipo de dato encontrado. Saltando." + ) + continue + + # Combinar tipo y versión si existe versión separada + member_dtype = ( + f"{member_dtype_raw}:v{member_version}" + if member_version + else member_dtype_raw + ) + + member_info = { + "name": member_name, + "datatype": member_dtype, # Guardar el tipo original (puede tener comillas, versión) + "remanence": member_remanence, + "accessibility": member_accessibility, + "start_value": None, + "comment": None, + "children": [], # Para Structs + "array_elements": {}, # Para Arrays + } + + # Comentario del miembro + comment_node = member.xpath("./iface:Comment", namespaces=ns) + if comment_node: + # Comentario está dentro de Comment/MultiLanguageText + member_info["comment"] = get_multilingual_text(comment_node[0]) + + # Valor inicial + start_value_node = member.xpath("./iface:StartValue", namespaces=ns) + if start_value_node: + # Puede ser un nombre de constante o un valor literal + constant_name = start_value_node[0].get("ConstantName") + member_info["start_value"] = ( + constant_name + if constant_name + else ( + start_value_node[0].text + if start_value_node[0].text is not None + else "" + ) + ) + # No intentar convertir aquí, se hará en x3 según el tipo de dato + + # --- Structs Anidados --- + # Los miembros de un struct están dentro de Sections/Section/Member + nested_sections = member.xpath( + "./iface:Sections/iface:Section[@Name='None']/iface:Member", namespaces=ns + ) # Sección sin nombre específico + if nested_sections: + # Llamada recursiva + member_info["children"] = parse_interface_members(nested_sections) + + # --- Arrays --- + # Buscar elementos para valores iniciales de array + if isinstance(member_dtype, str) and member_dtype.lower().startswith("array["): + subelements = member.xpath("./iface:Subelement", namespaces=ns) + for sub in subelements: + path = sub.get("Path") # Path es el índice: '0', '1', '0,0', etc. + sub_start_value_node = sub.xpath("./iface:StartValue", namespaces=ns) + if path and sub_start_value_node: + constant_name = sub_start_value_node[0].get("ConstantName") + value = ( + constant_name + if constant_name + else ( + sub_start_value_node[0].text + if sub_start_value_node[0].text is not None + else "" + ) + ) + member_info["array_elements"][path] = value + # Parsear comentario del subelemento si es necesario + sub_comment_node = sub.xpath("./iface:Comment", namespaces=ns) + if path and sub_comment_node: + sub_comment_text = get_multilingual_text(sub_comment_node[0]) + # ¿Cómo guardar comentario de subelemento? 
Podría ser un dict en array_elements + if isinstance(member_info["array_elements"].get(path), dict): + member_info["array_elements"][path][ + "comment" + ] = sub_comment_text + else: # Si solo estaba el valor, convertir a dict + current_val = member_info["array_elements"].get(path) + member_info["array_elements"][path] = { + "value": current_val, + "comment": sub_comment_text, + } + + members_data.append(member_info) + return members_data diff --git a/ToUpload/processors/process_call.py b/ToUpload/processors/process_call.py index c3a554e..7902697 100644 --- a/ToUpload/processors/process_call.py +++ b/ToUpload/processors/process_call.py @@ -11,17 +11,14 @@ SCL_SUFFIX = "_sympy_processed" def process_call(instruction, network_id, sympy_map, symbol_manager: SymbolManager, data): instr_uid = instruction["instruction_uid"] - # Get original type before potential suffix/error was added by x1 or previous passes - # This requires storing the original type perhaps, or removing known suffixes - # Let's assume 'block_type' (FC/FB) and 'block_name' are correct from x1 + instr_type_original = instruction.get("type", "") # Tipo antes de añadir sufijo + if instr_type_original.endswith(SCL_SUFFIX) or "_error" in instr_type_original: + return False + block_name = instruction.get("block_name", f"UnknownCall_{instr_uid}") block_type = instruction.get("block_type") # FC, FB instance_db = instruction.get("instance_db") # Nombre del DB de instancia (para FB) - # Check if already processed - if instruction.get("type", "").endswith(SCL_SUFFIX) or "_error" in instruction.get("type", ""): - return False - # Formatear nombres SCL (para la llamada final) block_name_scl = format_variable_name(block_name) instance_db_scl = format_variable_name(instance_db) if instance_db else None @@ -36,140 +33,91 @@ def process_call(instruction, network_id, sympy_map, symbol_manager: SymbolManag # --- Procesar Parámetros de Entrada --- scl_call_params = [] - processed_inputs = {"en"} # Track processed pins to avoid duplicates if 'en' is also listed elsewhere + processed_inputs = {"en"} dependencies_resolved = True - # Iterar sobre las entradas que x1 debería haber poblado - # Ordenar por nombre de pin para consistencia en la llamada SCL + # Ordenar para consistencia input_pin_names = sorted(instruction.get("inputs", {}).keys()) for pin_name in input_pin_names: - if pin_name not in processed_inputs: # Skip 'en' if already handled + if pin_name not in processed_inputs: source_info = instruction["inputs"][pin_name] - - # Get the representation of the source (SymPy, constant, or SCL string) + # Obtener la representación de la fuente (puede ser SymPy o Constante/String) source_sympy_or_const = get_sympy_representation(source_info, network_id, sympy_map, symbol_manager) if source_sympy_or_const is None: # print(f"DEBUG Call {instr_uid}: Input param '{pin_name}' dependency not ready.") dependencies_resolved = False - break # Exit if one dependency is not ready + break # Salir si una dependencia no está lista - # Convert the expression/constant to SCL for the call - # Simplification of inputs is generally not needed here, convert directly + # Convertir la expresión/constante a SCL para la llamada + # Simplificar ANTES de convertir? Probablemente no necesario para parámetros de entrada + # a menos que queramos optimizar el valor pasado. Por ahora, convertir directo. 
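            # Editor's note -- illustrative sketch only, not part of the patch. Assuming
            # format_variable_name() and sympy_expr_to_scl() pass these simple values
            # through unchanged, a call whose JSON "inputs" look roughly like
            #     {"start":    {"type": "variable", "name": '"Motor_Start"'},
            #      "setpoint": {"type": "constant", "value": 5}}
            # would make this loop (pins are visited in sorted order) build
            #     scl_call_params == ['setpoint := 5', 'start := "Motor_Start"']
            # so the final call would read e.g.  "MyFC"(setpoint := 5, start := "Motor_Start");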
param_scl_value = sympy_expr_to_scl(source_sympy_or_const, symbol_manager) - # Parameter pin name needs formatting for SCL + # El nombre del pin SÍ necesita formateo pin_name_scl = format_variable_name(pin_name) - - # Special check for DB_ANY or ANY_POINTER - pass name directly without := - # We need the original parameter type info for this, which is not in the simplified JSON. - # WORKAROUND: Check if param_scl_value looks like a DB name ("DB_NAME") - # This is heuristic and might be wrong. Ideally, x1 should pass type info. - # For now, we assume standard 'Param := Value' syntax. - # if param_scl_value.startswith('"') and param_scl_value.endswith('"') and block_type == "FC": # Heuristic for DB_ANY? - # scl_call_params.append(f"{pin_name_scl} := {param_scl_value}") # Still use := for clarity? TIA might infer - # else: scl_call_params.append(f"{pin_name_scl} := {param_scl_value}") - processed_inputs.add(pin_name) if not dependencies_resolved: return False - # --- Construcción de la Llamada SCL (con parámetros) --- + # --- Construcción de la Llamada SCL (similar a antes) --- scl_call_body = "" - param_string = ", ".join(scl_call_params) # Join parameters with commas + param_string = ", ".join(scl_call_params) if block_type == "FB": if not instance_db_scl: print(f"Error: Call FB '{block_name_scl}' (UID {instr_uid}) sin instancia.") instruction["scl"] = f"// ERROR: FB Call {block_name_scl} sin instancia" - instruction["type"] = f"Call_FB_error" # Mark with error - return True # Processed (with error) - # FB Call: InstanceName(Param1 := Value1, Param2 := Value2); + instruction["type"] = f"Call_FB_error" + return True scl_call_body = f"{instance_db_scl}({param_string});" elif block_type == "FC": - # FC Call: BlockName(Param1 := Value1, Param2 := Value2); scl_call_body = f"{block_name_scl}({param_string});" else: print(f"Advertencia: Tipo de bloque no soportado para Call UID {instr_uid}: {block_type}") scl_call_body = f"// ERROR: Call a bloque tipo '{block_type}' no soportado: {block_name_scl}" - # Mark instruction type with error - instruction["type"] = f"Call_{block_type or 'Unknown'}_error" # Add specific type if known - + instruction["type"] = f"Call_{block_type}_error" # Marcar como error # --- Aplicar Condición EN (usando la expresión SymPy EN) --- scl_final = "" if sympy_en_expr != sympy.true: - # Simplify the EN condition before converting to SCL + # Simplificar la condición EN ANTES de convertirla a SCL try: #simplified_en_expr = sympy.simplify_logic(sympy_en_expr, force=True) simplified_en_expr = sympy.logic.boolalg.to_dnf(sympy_en_expr, simplify=True) except Exception as e: - print(f"Error simplifying EN for Call {instr_uid} ({block_name_scl}): {e}") + print(f"Error simplifying EN for Call {instr_uid}: {e}") simplified_en_expr = sympy_en_expr # Fallback en_condition_scl = sympy_expr_to_scl(simplified_en_expr, symbol_manager) - # Avoid IF TRUE/FALSE blocks - if en_condition_scl == "TRUE": - scl_final = scl_call_body - elif en_condition_scl == "FALSE": - scl_final = f"// Call {block_name_scl} (UID {instr_uid}) condition simplified to FALSE." - # Also update type to avoid further processing? 
- # instruction["type"] = f"Call_{block_type}{SCL_SUFFIX}_Optimized" - else: - # Indent the call body within the IF block - indented_call = "\n".join([f" {line}" for line in scl_call_body.splitlines()]) - scl_final = f"IF {en_condition_scl} THEN\n{indented_call}\nEND_IF;" + indented_call = "\n".join([f" {line}" for line in scl_call_body.splitlines()]) + scl_final = f"IF {en_condition_scl} THEN\n{indented_call}\nEND_IF;" else: - # No IF needed if EN is always TRUE scl_final = scl_call_body # --- Actualizar Instrucción y Mapa SymPy --- instruction["scl"] = scl_final # Guardar el SCL final generado + instruction["type"] = (f"Call_{block_type}{SCL_SUFFIX}" if "_error" not in instruction["type"] else instruction["type"]) - # Update instruction type to mark as processed (unless already marked as error) - if "_error" not in instruction.get("type", ""): - instruction["type"] = f"Call_{block_type}{SCL_SUFFIX}" - - # Propagar el estado ENO (es la expresión SymPy de EN) + # Actualizar sympy_map con el estado ENO (es la expresión SymPy de EN) map_key_eno = (network_id, instr_uid, "eno") sympy_map[map_key_eno] = sympy_en_expr # Guardar la expresión SymPy para ENO - # --- Propagar Valores de Salida (Importante pero complejo) --- - # Esto requiere conocer la interfaz del bloque llamado (que no tenemos aquí directamente) - # O asumir convenciones estándar (ej. FCs tienen Ret_Val, FBs tienen outputs en su instancia) - - # Heurística simple: Si es un FC, intentar propagar Ret_Val si existe en outputs - # Si es un FB, las salidas se acceden a través de la instancia (e.g., "MyInstance".Output1) - # Por ahora, dejaremos la propagación de salidas más avanzada para una mejora futura - # o requerirá pasar información de la interfaz del bloque llamado. - - # Ejemplo básico (necesita mejorar): + # Propagar valores de salida (requiere info de interfaz o heurística) + # Si se sabe que hay una salida 'MyOutput', se podría añadir su SCL al mapa + # Ejemplo MUY simplificado: # for pin_name, dest_list in instruction.get("outputs", {}).items(): # if pin_name != 'eno' and dest_list: # Asumir que hay un destino # map_key_out = (network_id, instr_uid, pin_name) - # pin_name_scl = format_variable_name(pin_name) # if block_type == "FB" and instance_db_scl: - # # Salida de FB: "Instancia".NombrePin - # output_scl_access = f"{instance_db_scl}.{pin_name_scl}" - # # Podríamos guardar el string SCL o crear/obtener un Symbol - # sympy_out_symbol = symbol_manager.get_symbol(output_scl_access) - # sympy_map[map_key_out] = sympy_out_symbol if sympy_out_symbol else output_scl_access # Prefiere Symbol - # elif block_type == "FC": - # # Salida de FC: Requiere asignar a una variable (temporal o de interfaz) - # # Esto se complica porque el destino está en 'dest_list' - # if len(dest_list) == 1 and dest_list[0].get("type") == "variable": - # target_var_name = format_variable_name(dest_list[0].get("name")) - # # Guardar el nombre del destino SCL que contendrá el valor - # sympy_map[map_key_out] = target_var_name - # # Necesitaríamos modificar scl_final para incluir la asignación: - # # target_var_name := FC_Call(...); (requiere reestructurar la generación SCL) - # else: - # # Múltiples destinos o destino no variable es complejo para FC outputs - # sympy_map[map_key_out] = f"/* TODO: Assign FC output {pin_name_scl} */" - + # sympy_map[map_key_out] = f"{instance_db_scl}.{format_variable_name(pin_name)}" # Guardar el *string* de acceso SCL + # # Para FCs es más complejo, necesitaría asignación explícita a temp + # # else: # FC output -> 
necesita temp var + # # temp_var = generate_temp_var_name(...) + # # sympy_map[map_key_out] = temp_var return True @@ -177,8 +125,7 @@ def process_call(instruction, network_id, sympy_map, symbol_manager: SymbolManag # --- Processor Information Function --- def get_processor_info(): """Devuelve la información para las llamadas a FC y FB.""" - # Asegurarse que los type_name coincidan con los usados en x1 y x2 return [ - {'type_name': 'call_fc', 'processor_func': process_call, 'priority': 6}, # Prioridad alta - {'type_name': 'call_fb', 'processor_func': process_call, 'priority': 6} # Prioridad alta + {'type_name': 'call_fc', 'processor_func': process_call, 'priority': 6}, + {'type_name': 'call_fb', 'processor_func': process_call, 'priority': 6} ] \ No newline at end of file diff --git a/ToUpload/x0_main.py b/ToUpload/x0_main.py index 950a973..d8f081b 100644 --- a/ToUpload/x0_main.py +++ b/ToUpload/x0_main.py @@ -3,255 +3,163 @@ import subprocess import os import sys import locale -import glob # <--- Importar glob para buscar archivos +import glob - -# (Función get_console_encoding y variable CONSOLE_ENCODING como en la respuesta anterior) +# (Función get_console_encoding y variable CONSOLE_ENCODING como antes) def get_console_encoding(): """Obtiene la codificación preferida de la consola, con fallback.""" try: return locale.getpreferredencoding(False) except Exception: - return "cp1252" - + # Fallback común en Windows si falla getpreferredencoding + return "cp1252" # O prueba con 'utf-8' si cp1252 da problemas CONSOLE_ENCODING = get_console_encoding() -# Descomenta la siguiente línea si quieres ver la codificación detectada: # print(f"Detected console encoding: {CONSOLE_ENCODING}") - -# (Función run_script como en la respuesta anterior, usando CONSOLE_ENCODING) +# (Función run_script como antes, usando CONSOLE_ENCODING) def run_script(script_name, xml_arg): """Runs a given script with the specified XML file argument.""" - script_path = os.path.join(os.path.dirname(__file__), script_name) - command = [sys.executable, script_path, xml_arg] + # Asegurarse que la ruta al script sea absoluta o relativa al script actual + script_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), script_name) + # Usar la ruta absoluta al ejecutable de Python actual + python_executable = sys.executable + command = [python_executable, script_path, xml_arg] # Usar la ruta absoluta de python print(f"\n--- Running {script_name} with argument: {xml_arg} ---") try: + # Ejecutar el proceso hijo result = subprocess.run( command, - check=True, - capture_output=True, - text=True, - encoding=CONSOLE_ENCODING, - errors="replace", - ) # 'replace' para evitar errores + check=True, # Lanza excepción si el script falla (return code != 0) + capture_output=True,# Captura stdout y stderr + text=True, # Decodifica stdout/stderr como texto + encoding=CONSOLE_ENCODING, # Usa la codificación detectada + errors='replace' # Reemplaza caracteres no decodificables + ) + + # Imprimir stdout y stderr si no están vacíos + stdout_clean = result.stdout.strip() if result.stdout else "" + stderr_clean = result.stderr.strip() if result.stderr else "" - # Imprimir stdout y stderr - # Eliminar saltos de línea extra al final si existen - stdout_clean = result.stdout.strip() - stderr_clean = result.stderr.strip() if stdout_clean: print(stdout_clean) if stderr_clean: - print("--- Stderr ---") - print(stderr_clean) - print("--------------") + # Imprimir stderr claramente para errores del script hijo + print(f"--- Stderr ({script_name}) ---", 
file=sys.stderr) # Imprimir en stderr + print(stderr_clean, file=sys.stderr) + print("--------------------------", file=sys.stderr) + print(f"--- {script_name} finished successfully ---") - return True + return True # Indicar éxito + except FileNotFoundError: - print(f"Error: Script '{script_path}' not found.") + # Error si el script python o el ejecutable no se encuentran + print(f"Error: Script '{script_path}' or Python executable '{python_executable}' not found.", file=sys.stderr) return False except subprocess.CalledProcessError as e: - print(f"Error running {script_name}:") - print(f"Return code: {e.returncode}") - stdout_decoded = ( - e.stdout.decode(CONSOLE_ENCODING, errors="replace").strip() - if isinstance(e.stdout, bytes) - else (e.stdout or "").strip() - ) - stderr_decoded = ( - e.stderr.decode(CONSOLE_ENCODING, errors="replace").strip() - if isinstance(e.stderr, bytes) - else (e.stderr or "").strip() - ) + # Error si el script hijo devuelve un código de error (ej., sys.exit(1)) + print(f"Error running {script_name}: Script returned non-zero exit code {e.returncode}.", file=sys.stderr) + + # Decodificar e imprimir stdout/stderr del proceso fallido + stdout_decoded = e.stdout.strip() if e.stdout else "" + stderr_decoded = e.stderr.strip() if e.stderr else "" + if stdout_decoded: - print("--- Stdout ---") - print(stdout_decoded) + print(f"--- Stdout ({script_name}) ---", file=sys.stderr) + print(stdout_decoded, file=sys.stderr) if stderr_decoded: - print("--- Stderr ---") - print(stderr_decoded) - print("--------------") - return False + print(f"--- Stderr ({script_name}) ---", file=sys.stderr) + print(stderr_decoded, file=sys.stderr) + print("--------------------------", file=sys.stderr) + return False # Indicar fallo except Exception as e: - print(f"An unexpected error occurred while running {script_name}: {e}") - return False + # Otros errores inesperados + print(f"An unexpected error occurred while running {script_name}: {e}", file=sys.stderr) + # Imprimir traceback para depuración + import traceback + traceback.print_exc(file=sys.stderr) + return False # Indicar fallo -# --- NUEVA FUNCIÓN PARA SELECCIONAR ARCHIVO --- -def select_xml_file(): - """Busca archivos .xml, los lista y pide al usuario que elija uno.""" - print("No XML file specified. Searching for XML files in current directory...") - # Buscar archivos .xml en el directorio actual (.) - xml_files = sorted(glob.glob("*.xml")) # sorted para orden alfabético - - if not xml_files: - print("Error: No .xml files found in the current directory.") - sys.exit(1) - - print("\nAvailable XML files:") - for i, filename in enumerate(xml_files, start=1): - print(f" {i}: {filename}") - - while True: - try: - choice = input( - f"Enter the number of the file to process (1-{len(xml_files)}): " - ) - choice_num = int(choice) - if 1 <= choice_num <= len(xml_files): - selected_file = xml_files[choice_num - 1] - print(f"Selected: {selected_file}") - return selected_file - else: - print("Invalid choice. Please enter a number from the list.") - except ValueError: - print("Invalid input. 
Please enter a number.") - except EOFError: # Manejar si la entrada se cierra inesperadamente - print("\nSelection cancelled.") - sys.exit(1) - - -# --- FIN NUEVA FUNCIÓN --- - +# --- NO SE NECESITA select_xml_file() si procesamos todos --- if __name__ == "__main__": - # Imports necesarios para esta sección - import os - import sys - import glob # Asegúrate de que glob esté importado al principio del archivo - + # --- PARTE 1: BUSCAR ARCHIVOS --- # Directorio base donde buscar los archivos XML (relativo al script) base_search_dir = "XML Project" - script_dir = os.path.dirname(__file__) # Directorio donde está x0_main.py + # Obtener la ruta absoluta del directorio donde está x0_main.py + script_dir = os.path.dirname(os.path.abspath(__file__)) xml_project_dir = os.path.join(script_dir, base_search_dir) print(f"Buscando archivos XML recursivamente en: '{xml_project_dir}'") # Verificar si el directorio 'XML Project' existe if not os.path.isdir(xml_project_dir): - print( - f"Error: El directorio '{xml_project_dir}' no existe o no es un directorio." - ) - print( - "Por favor, crea el directorio 'XML Project' en la misma carpeta que este script y coloca tus archivos XML dentro." - ) - sys.exit(1) + print(f"Error: El directorio '{xml_project_dir}' no existe o no es un directorio.", file=sys.stderr) + print("Por favor, crea el directorio 'XML Project' en la misma carpeta que este script y coloca tus archivos XML dentro.") + sys.exit(1) # Salir con error - # Buscar todos los archivos .xml recursivamente dentro de xml_project_dir - # Usamos os.path.join para construir la ruta de búsqueda correctamente - # y '**/*.xml' para la recursividad con glob + # Buscar todos los archivos .xml recursivamente search_pattern = os.path.join(xml_project_dir, "**", "*.xml") xml_files_found = glob.glob(search_pattern, recursive=True) if not xml_files_found: - print( - f"No se encontraron archivos XML en '{xml_project_dir}' o sus subdirectorios." 
- ) - sys.exit(0) # Salir limpiamente si no hay archivos + print(f"No se encontraron archivos XML en '{xml_project_dir}' o sus subdirectorios.") + sys.exit(0) # Salir limpiamente si no hay archivos print(f"Se encontraron {len(xml_files_found)} archivos XML para procesar:") - # Ordenar para un procesamiento predecible (opcional) - xml_files_found.sort() + xml_files_found.sort() # Ordenar para consistencia for xml_file in xml_files_found: - # Imprimir la ruta relativa desde el directorio del script para claridad print(f" - {os.path.relpath(xml_file, script_dir)}") - # Scripts a ejecutar en secuencia (asegúrate que los nombres son correctos) + # --- PARTE 2: PROCESAR CADA ARCHIVO --- + # Scripts a ejecutar en secuencia script1 = "x1_to_json.py" script2 = "x2_process.py" script3 = "x3_generate_scl.py" - # Procesar cada archivo encontrado processed_count = 0 failed_count = 0 - for xml_filepath in xml_files_found: - print( - f"\n--- Iniciando pipeline para: {os.path.relpath(xml_filepath, script_dir)} ---" - ) - # Usar la ruta absoluta para evitar problemas si los scripts cambian de directorio + # Procesar cada archivo encontrado en el bucle + for xml_filepath in xml_files_found: + relative_path = os.path.relpath(xml_filepath, script_dir) + print(f"\n--- Iniciando pipeline para: {relative_path} ---") + + # Usar la ruta absoluta para los scripts hijos absolute_xml_filepath = os.path.abspath(xml_filepath) - # Ejecutar los scripts en secuencia para el archivo actual - # La función run_script ya está definida en tu script x0_main.py + # Ejecutar los scripts en secuencia success = True if not run_script(script1, absolute_xml_filepath): - print( - f"\nPipeline falló en el script '{script1}' para el archivo: {os.path.relpath(xml_filepath, script_dir)}" - ) + print(f"\nPipeline falló en el script '{script1}' para el archivo: {relative_path}", file=sys.stderr) success = False elif not run_script(script2, absolute_xml_filepath): - print( - f"\nPipeline falló en el script '{script2}' para el archivo: {os.path.relpath(xml_filepath, script_dir)}" - ) + print(f"\nPipeline falló en el script '{script2}' para el archivo: {relative_path}", file=sys.stderr) success = False elif not run_script(script3, absolute_xml_filepath): - print( - f"\nPipeline falló en el script '{script3}' para el archivo: {os.path.relpath(xml_filepath, script_dir)}" - ) + print(f"\nPipeline falló en el script '{script3}' para el archivo: {relative_path}", file=sys.stderr) success = False + # Actualizar contadores y mostrar estado if success: - print( - f"--- Pipeline completado exitosamente para: {os.path.relpath(xml_filepath, script_dir)} ---" - ) + print(f"--- Pipeline completado exitosamente para: {relative_path} ---") processed_count += 1 else: failed_count += 1 - print( - f"--- Pipeline falló para: {os.path.relpath(xml_filepath, script_dir)} ---" - ) + print(f"--- Pipeline falló para: {relative_path} ---", file=sys.stderr) # Indicar fallo + # --- PARTE 3: RESUMEN FINAL --- print("\n--- Resumen Final del Procesamiento ---") print(f"Total de archivos XML encontrados: {len(xml_files_found)}") - print( - f"Archivos procesados exitosamente por el pipeline completo: {processed_count}" - ) + print(f"Archivos procesados exitosamente por el pipeline completo: {processed_count}") print(f"Archivos que fallaron en algún punto del pipeline: {failed_count}") print("---------------------------------------") - xml_filename = None - # Comprobar si se pasó un argumento de línea de comandos - # sys.argv[0] es el nombre del script, sys.argv[1] sería el 
primer argumento - if len(sys.argv) > 1: - # Si hay argumentos, usar argparse para parsearlo (permite -h, etc.) - parser = argparse.ArgumentParser( - description="Run the Simatic XML processing pipeline." - ) - parser.add_argument( - "xml_file", - # Ya no necesitamos nargs='?' ni default aquí porque sabemos que hay un argumento - help="Path to the XML file to process.", - ) - # Parsear solo los argumentos conocidos, ignorar extras si los hubiera - args, unknown = parser.parse_known_args() - xml_filename = args.xml_file - print(f"XML file specified via argument: {xml_filename}") - else: - # Si no hay argumentos, llamar a la función interactiva - xml_filename = select_xml_file() - - # --- El resto del script continúa igual, usando xml_filename --- - - # Verificar si el archivo XML de entrada (seleccionado o pasado) existe - if not os.path.exists(xml_filename): - print(f"Error: Selected or specified XML file not found: {xml_filename}") + # Salir con código 0 si todo fue bien, 1 si hubo fallos + if failed_count > 0: sys.exit(1) - - print(f"\nStarting pipeline for: {xml_filename}") - - # Run scripts sequentially (asegúrate que los nombres son correctos) - script1 = "x1_to_json.py" - script2 = "x2_process.py" - script3 = "x3_generate_scl.py" - - if run_script(script1, xml_filename): - if run_script(script2, xml_filename): - if run_script(script3, xml_filename): - print("\nPipeline completed successfully.") - else: - print("\nPipeline failed at script:", script3) - else: - print("\nPipeline failed at script:", script2) else: - print("\nPipeline failed at script:", script1) + sys.exit(0) + +# --- FIN: Se elimina la lógica redundante que venía después del bucle --- \ No newline at end of file diff --git a/ToUpload/x1_to_json.py b/ToUpload/x1_to_json.py index 71ac19b..0a52174 100644 --- a/ToUpload/x1_to_json.py +++ b/ToUpload/x1_to_json.py @@ -1,1554 +1,489 @@ +# ToUpload/x1_to_json.py # -*- coding: utf-8 -*- import json import argparse import os -import re -from lxml import etree +import sys import traceback -from collections import defaultdict +import importlib +from lxml import etree +from collections import defaultdict # Puede ser necesario si load_parsers la usa +import copy # Puede ser necesario si load_parsers la usa -# --- Namespaces --- -# Se añade el namespace 'st' para Structured Text -ns = { - "iface": "http://www.siemens.com/automation/Openness/SW/Interface/v5", - "flg": "http://www.siemens.com/automation/Openness/SW/NetworkSource/FlgNet/v4", - "st": "http://www.siemens.com/automation/Openness/SW/NetworkSource/StructuredText/v3", - "stl": "http://www.siemens.com/automation/Openness/SW/NetworkSource/StatementList/v4", -} +# Importar funciones comunes y namespaces desde el nuevo módulo de utils +try: + from parsers.parser_utils import ns, get_multilingual_text, parse_interface_members +except ImportError as e: + print( + f"Error crítico: No se pudieron importar funciones desde parsers.parser_utils: {e}" + ) + print( + "Asegúrate de que el directorio 'parsers' y 'parsers/parser_utils.py' existen y son correctos." 
+ ) + sys.exit(1) -# --- Helper Functions --- -def get_multilingual_text(element, default_lang="en-US", fallback_lang="it-IT"): - # (Sin cambios respecto a la versión anterior) - if element is None: - return "" - try: - xpath_expr = ( - f".//*[local-name()='MultilingualTextItem'][*[local-name()='AttributeList']/*[local-name()='Culture' and text()='{default_lang}']]" - f"/*[local-name()='AttributeList']/*[local-name()='Text']" - ) - text_items = element.xpath(xpath_expr) - if text_items and text_items[0].text is not None: - return text_items[0].text.strip() - xpath_expr = ( - f".//*[local-name()='MultilingualTextItem'][*[local-name()='AttributeList']/*[local-name()='Culture' and text()='{fallback_lang}']]" - f"/*[local-name()='AttributeList']/*[local-name()='Text']" - ) - text_items = element.xpath(xpath_expr) - if text_items and text_items[0].text is not None: - return text_items[0].text.strip() - xpath_expr = f".//*[local-name()='MultilingualTextItem']/*[local-name()='AttributeList']/*[local-name()='Text']" - text_items = element.xpath(xpath_expr) - if text_items and text_items[0].text is not None: - return text_items[0].text.strip() - return "" - except Exception as e: - print(f"Advertencia: Error extrayendo MultilingualText: {e}") - return "" +# --- Cargador Dinámico de Parsers --- +def load_parsers(parsers_dir="parsers"): + """ + Escanea el directorio de parsers, importa módulos y construye + un mapa de lenguaje a función de parseo. + """ + parser_map = {} + # Verificar si el directorio existe + script_dir = os.path.dirname(__file__) + parsers_dir_path = os.path.join(script_dir, parsers_dir) + if not os.path.isdir(parsers_dir_path): + print(f"Error: Directorio de parsers no encontrado: '{parsers_dir_path}'") + return parser_map # Devuelve mapa vacío + print(f"Cargando parsers desde: '{parsers_dir_path}'") + parsers_package = os.path.basename(parsers_dir) -def get_symbol_name(symbol_element): - # (Sin cambios respecto a la versión anterior) - if symbol_element is None: - return None - try: - components = symbol_element.xpath("./*[local-name()='Component']/@Name") - return ".".join(f'"{c}"' for c in components) if components else None - except Exception as e: - print(f"Advertencia: Excepción en get_symbol_name: {e}") - return None - - -def parse_access(access_element): - # (Sin cambios respecto a la versión anterior) - if access_element is None: - return None - uid = access_element.get("UId") - scope = access_element.get("Scope") - info = {"uid": uid, "scope": scope, "type": "unknown"} - symbol = access_element.xpath("./*[local-name()='Symbol']") - constant = access_element.xpath("./*[local-name()='Constant']") - if symbol: - info["type"] = "variable" - info["name"] = get_symbol_name(symbol[0]) - if info["name"] is None: - info["type"] = "error_parsing_symbol" - print(f"Error: No se pudo parsear nombre símbolo Access UID={uid}") - return info - elif constant: - info["type"] = "constant" - const_type_elem = constant[0].xpath("./*[local-name()='ConstantType']") - const_val_elem = constant[0].xpath("./*[local-name()='ConstantValue']") - info["datatype"] = ( - const_type_elem[0].text - if const_type_elem and const_type_elem[0].text is not None - else "Unknown" - ) - value_str = ( - const_val_elem[0].text - if const_val_elem and const_val_elem[0].text is not None - else None - ) - if value_str is None: - info["type"] = "error_parsing_constant" - info["value"] = None - print(f"Error: Constante sin valor Access UID={uid}") - return info - if info["datatype"] == "Unknown": - val_lower = 
value_str.lower() - if val_lower in ["true", "false"]: - info["datatype"] = "Bool" - elif value_str.isdigit() or ( - value_str.startswith("-") and value_str[1:].isdigit() - ): - info["datatype"] = "Int" - elif "." in value_str: - try: - float(value_str) - info["datatype"] = "Real" - except ValueError: - pass - elif "#" in value_str: - info["datatype"] = "TypedConstant" - info["value"] = value_str - dtype_lower = info["datatype"].lower() - val_str_processed = value_str.split("#")[-1] if "#" in value_str else value_str - try: - if dtype_lower in [ - "int", - "dint", - "udint", - "sint", - "usint", - "lint", - "ulint", - "word", - "dword", - "lword", - "byte", - ]: - info["value"] = int(val_str_processed) - elif dtype_lower == "bool": - info["value"] = ( - val_str_processed.lower() == "true" or val_str_processed == "1" - ) - elif dtype_lower in ["real", "lreal"]: - info["value"] = float(val_str_processed) - elif dtype_lower == "typedconstant": - info["value"] = value_str - except (ValueError, TypeError) as e: - print( - f"Advertencia: No se pudo convertir valor '{val_str_processed}' a {dtype_lower} UID={uid}. Error: {e}" + for filename in os.listdir(parsers_dir_path): + # Buscar archivos que empiecen con 'parse_' y terminen en '.py' + # Excluir '__init__.py' y 'parser_utils.py' + if ( + filename.startswith("parse_") + and filename.endswith(".py") + and filename not in ["__init__.py", "parser_utils.py"] + ): + module_name_rel = filename[:-3] # Nombre sin .py (e.g., parse_lad_fbd) + full_module_name = ( + f"{parsers_package}.{module_name_rel}" # e.g., parsers.parse_lad_fbd ) - info["value"] = value_str - else: - info["type"] = "unknown_structure" - print(f"Advertencia: Access UID={uid} no es Symbol ni Constant.") - return info - if info["type"] == "variable" and info.get("name") is None: - print(f"Error Interno: parse_access var sin nombre UID {uid}.") - info["type"] = "error_no_name" - return info - return info + try: + # Importar el módulo dinámicamente + module = importlib.import_module(full_module_name) + # Verificar si el módulo tiene la función get_parser_info + if hasattr(module, "get_parser_info") and callable( + module.get_parser_info + ): + parser_info = module.get_parser_info() + # Esperamos un diccionario con 'language' (lista) y 'parser_func' + if ( + isinstance(parser_info, dict) + and "language" in parser_info + and "parser_func" in parser_info + ): + languages = parser_info["language"] + parser_func = parser_info["parser_func"] -def parse_part(part_element): - # (Sin cambios respecto a la versión anterior) - if part_element is None: - return None - uid = part_element.get("UId") - name = part_element.get("Name") - if not uid or not name: - print( - f"Error: Part sin UID o Name: {etree.tostring(part_element, encoding='unicode')}" - ) - return None - template_values = {} - try: - for tv in part_element.xpath("./*[local-name()='TemplateValue']"): - tv_name = tv.get("Name") - tv_type = tv.get("Type") - if tv_name and tv_type: - template_values[tv_name] = tv_type - except Exception as e: - print(f"Advertencia: Error extrayendo TemplateValues Part UID={uid}: {e}") - negated_pins = {} - try: - for negated_elem in part_element.xpath("./*[local-name()='Negated']"): - negated_pin_name = negated_elem.get("Name") - if negated_pin_name: - negated_pins[negated_pin_name] = True - except Exception as e: - print(f"Advertencia: Error extrayendo Negated Pins Part UID={uid}: {e}") - return { - "uid": uid, - "type": name, - "template_values": template_values, - "negated_pins": negated_pins, - } - - 
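# Editor's note: a minimal, hypothetical sketch of the module convention that the new
# load_parsers() above relies on. Every parsers/parse_*.py file is expected to expose
# get_parser_info(); the names below are illustrative only, the real parse_lad_fbd.py /
# parse_scl.py / parse_stl.py added by this patch define their own functions.

def parse_example_network(network_element):
    """Parse one CompileUnit of the supported language and return its dict representation."""
    return {"id": network_element.get("ID"), "title": "", "logic": []}


def get_parser_info():
    # 'language' lists the ProgrammingLanguage values this module handles
    # (load_parsers() upper-cases each entry before adding it to parser_map);
    # 'parser_func' is the callable invoked for each matching network.
    return {"language": ["ExampleLang"], "parser_func": parse_example_network}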
-def parse_call(call_element): - # (Mantiene la corrección para DB de instancia) - if call_element is None: - return None - uid = call_element.get("UId") - if not uid: - print( - f"Error: Call encontrado sin UID: {etree.tostring(call_element, encoding='unicode')}" - ) - return None - call_info_elem = call_element.xpath("./*[local-name()='CallInfo']") - if not call_info_elem: - print(f"Error: Call UID {uid} sin elemento CallInfo.") - return None - call_info = call_info_elem[0] - block_name = call_info.get("Name") - block_type = call_info.get("BlockType") - instance_name = None - instance_scope = None - if not block_name or not block_type: - print(f"Error: CallInfo para UID {uid} sin Name o BlockType.") - return None - if block_type == "FB": - instance_elem_list = call_info.xpath("./*[local-name()='Instance']") - if instance_elem_list: - instance_elem = instance_elem_list[0] - instance_scope = instance_elem.get("Scope") - component_elem_list = instance_elem.xpath( - "./*[local-name()='Component']" - ) # Busca Component directo - if component_elem_list: - component_elem = component_elem_list[0] - db_name_raw = component_elem.get("Name") - if db_name_raw: - instance_name = f'"{db_name_raw}"' # Añade comillas + if isinstance(languages, list) and callable(parser_func): + # Añadir la función al mapa para cada lenguaje que soporta + for lang in languages: + lang_upper = lang.upper() # Usar mayúsculas como clave + if lang_upper in parser_map: + print( + f" Advertencia: Parser para '{lang_upper}' en {full_module_name} sobrescribe definición anterior." + ) + parser_map[lang_upper] = parser_func + print( + f" - Cargado parser para '{lang_upper}' desde {module_name_rel}.py" + ) + else: + print( + f" Advertencia: Formato inválido en get_parser_info de {full_module_name} (language debe ser lista, parser_func callable)." + ) + else: + print( + f" Advertencia: get_parser_info en {full_module_name} no devolvió el diccionario esperado." + ) else: print( - f"Advertencia: dentro de para FB Call UID {uid} no tiene atributo 'Name'." - ) - else: - print( - f"Advertencia: No se encontró dentro de para FB Call UID {uid}. No se pudo obtener el nombre del DB." - ) - else: - print( - f"Advertencia: FB Call '{block_name}' UID {uid} no tiene elemento ." - ) - call_data = { - "uid": uid, - "type": "Call", - "block_name": block_name, - "block_type": block_type, - } - if instance_name: - call_data["instance_db"] = instance_name - if instance_scope: - call_data["instance_scope"] = instance_scope - return call_data - - -# SCL (Structured Text) Parser - - -def reconstruct_scl_from_tokens(st_node): - """ - Reconstruye SCL desde , mejorando el manejo de - variables, constantes literales, tokens básicos, espacios y saltos de línea. 
- """ - if st_node is None: - return "// Error: StructuredText node not found.\n" - - scl_parts = [] - children = st_node.xpath("./st:*", namespaces=ns) - - for elem in children: - tag = etree.QName(elem.tag).localname - - if tag == "Token": - scl_parts.append(elem.get("Text", "")) - elif tag == "Blank": - # Añadir espacios simples, evitar múltiples si ya hay uno antes/después - if not scl_parts or not scl_parts[-1].endswith(" "): - scl_parts.append(" " * int(elem.get("Num", 1))) - elif int(elem.get("Num", 1)) > 1: # Añadir extras si son más de 1 - scl_parts.append(" " * (int(elem.get("Num", 1)) - 1)) - elif tag == "NewLine": - # Limpiar espacios antes del salto de línea real - if scl_parts: - scl_parts[-1] = scl_parts[-1].rstrip() - scl_parts.append("\n") - elif tag == "Access": - scope = elem.get("Scope") - access_str = f"/*_ERR_Scope_{scope}_*/" # Fallback más informativo - - if scope in [ - "GlobalVariable", - "LocalVariable", - "TempVariable", - "InOutVariable", - "InputVariable", - "OutputVariable", - "ConstantVariable", - ]: # Tipos comunes de variables - symbol_elem = elem.xpath("./st:Symbol", namespaces=ns) - if symbol_elem: - components = symbol_elem[0].xpath("./st:Component", namespaces=ns) - symbol_text_parts = [] - for i, comp in enumerate(components): - name = comp.get("Name", "_ERR_COMP_") - # Añadir punto si no es el primer componente - if i > 0: - symbol_text_parts.append(".") - - # Reconstrucción de comillas (heurística) - has_quotes_elem = comp.xpath( - "../st:BooleanAttribute[@Name='HasQuotes']/text()", - namespaces=ns, - ) - has_quotes = ( - has_quotes_elem and has_quotes_elem[0].lower() == "true" - ) - is_temp = name.startswith("#") - - if has_quotes or ( - i == 0 and not is_temp - ): # Comillas si HasQuotes o primer componente (no temp) - symbol_text_parts.append(f'"{name}"') - else: - symbol_text_parts.append(name) - - # Manejar índices de array (RECURSIVO) - index_access = comp.xpath("./st:Access", namespaces=ns) - if index_access: - # Llama recursivamente para obtener el texto de cada índice - indices_text = [ - reconstruct_scl_from_tokens(idx_node) - for idx_node in index_access - ] - symbol_text_parts.append(f"[{','.join(indices_text)}]") - - access_str = "".join(symbol_text_parts) - - elif scope == "LiteralConstant": - constant_elem = elem.xpath("./st:Constant", namespaces=ns) - if constant_elem: - val_elem = constant_elem[0].xpath( - "./st:ConstantValue/text()", namespaces=ns - ) - type_elem = constant_elem[0].xpath( - "./st:ConstantType/text()", namespaces=ns - ) - const_type = type_elem[0] if type_elem else "" - const_val = val_elem[0] if val_elem else "_ERR_CONSTVAL_" - - # **CORRECCIÓN CLAVE**: Usar el valor extraído - access_str = const_val - - # Opcional: añadir prefijos T#, L#, etc. si es necesario - # if const_type == "Time": access_str = f"T#{const_val}" - # elif const_type == "LTime": access_str = f"LT#{const_val}" - # ... otros tipos ... 
- else: - access_str = "/*_ERR_NOCONST_*/" - # --- Añadir más manejo de scopes aquí si es necesario --- - # elif scope == "Call": access_str = reconstruct_call(elem) - # elif scope == "Expression": access_str = reconstruct_expression(elem) - - scl_parts.append(access_str) - - elif tag == "Comment" or tag == "LineComment": - comment_text = "".join(elem.xpath(".//text()")).strip() - if tag == "Comment": - scl_parts.append(f"(* {comment_text} *)") - else: - scl_parts.append(f"// {comment_text}") - # else: Ignorar otros nodos - - # Unir partes, limpiar espacios extra alrededor de operadores y saltos de línea - full_scl = "".join(scl_parts) - - # Re-indentar líneas después de IF/THEN, etc. (Simplificado) - output_lines = [] - indent_level = 0 - for line in full_scl.split("\n"): - line = line.strip() - if not line: - continue # Saltar líneas vacías - - # Reducir indentación antes de procesar END_IF, ELSE, etc. (simplificado) - if line.startswith( - ("END_IF", "END_WHILE", "END_FOR", "END_CASE", "ELSE", "ELSIF") - ): - indent_level = max(0, indent_level - 1) - - output_lines.append(" " * indent_level + line) # Aplicar indentación - - # Aumentar indentación después de IF, WHILE, FOR, CASE, ELSE, ELSIF (simplificado) - if ( - line.endswith("THEN") - or line.endswith("DO") - or line.endswith("OF") - or line == "ELSE" - ): - indent_level += 1 - # Nota: Esto no maneja bloques BEGIN/END dentro de SCL - - return "\n".join(output_lines) - - -# STL (Statement List) Parser - - -def get_access_text(access_element): - """Reconstruye una representación textual simple de un Access en STL.""" - if access_element is None: - return "_ERR_ACCESS_" - scope = access_element.get("Scope") - - # Intenta reconstruir el símbolo - # CORREGIDO: Añadido namespaces=ns - symbol_elem = access_element.xpath("./stl:Symbol", namespaces=ns) - if symbol_elem: - # CORREGIDO: Añadido namespaces=ns - components = symbol_elem[0].xpath("./stl:Component", namespaces=ns) - parts = [] - for comp in components: - name = comp.get("Name", "_ERR_COMP_") - # CORREGIDO: Añadido namespaces=ns - has_quotes_elem = comp.xpath( - "../stl:BooleanAttribute[@Name='HasQuotes']/text()", namespaces=ns - ) - has_quotes = has_quotes_elem and has_quotes_elem[0].lower() == "true" - - # Usar nombre tal cual por ahora - parts.append(name) - - # Añadir índices si existen - # CORREGIDO: Añadido namespaces=ns - index_access = comp.xpath("./stl:Access", namespaces=ns) - if index_access: - indices = [get_access_text(ia) for ia in index_access] - parts.append(f"[{','.join(indices)}]") - - return ".".join(parts) - - # Intenta reconstruir constante - # CORREGIDO: Añadido namespaces=ns - constant_elem = access_element.xpath("./stl:Constant", namespaces=ns) - if constant_elem: - # CORREGIDO: Añadido namespaces=ns - val_elem = constant_elem[0].xpath("./stl:ConstantValue/text()", namespaces=ns) - type_elem = constant_elem[0].xpath( - "./stl:ConstantType/text()", namespaces=ns - ) # Obtener tipo para mejor formato - const_type = type_elem[0] if type_elem else "" - const_val = val_elem[0] if val_elem else "_ERR_CONST_" - # Añadir prefijo de tipo si es necesario (ej. 
T# , L#) - Simplificado - if const_type == "Time": - return f"T#{const_val}" - if const_type == "ARef": - return f"{const_val}" # No necesita prefijo - # Añadir más tipos si es necesario - return const_val # Valor directo para otros tipos - - # Intenta reconstruir etiqueta - # CORREGIDO: Añadido namespaces=ns - label_elem = access_element.xpath("./stl:Label", namespaces=ns) - if label_elem: - name = label_elem[0].get("Name", "_ERR_LABEL_") - return name - - # Intenta reconstruir acceso indirecto (simplificado) - # CORREGIDO: Añadido namespaces=ns - indirect_elem = access_element.xpath("./stl:Indirect", namespaces=ns) - if indirect_elem: - reg = indirect_elem[0].get("Register", "AR?") - offset_str = indirect_elem[0].get("BitOffset", "0") - area = indirect_elem[0].get("Area", "DB") - width = indirect_elem[0].get("Width", "X") - - # Convertir BitOffset a formato P#Byte.Bit - try: - bit_offset = int(offset_str) - byte_offset = bit_offset // 8 - bit_in_byte = bit_offset % 8 - p_format_offset = f"P#{byte_offset}.{bit_in_byte}" - except ValueError: - p_format_offset = "P#?.?" - - # Formatear ancho - width_map = {"Bit": "X", "Byte": "B", "Word": "W", "Double": "D"} - width_char = width_map.get( - width, width[0] if width else "?" - ) # Usa primera letra si no mapeado - - return f"{area}{width_char}[{reg},{p_format_offset}]" - - # Intenta reconstruir dirección absoluta - # CORREGIDO: Añadido namespaces=ns - address_elem = access_element.xpath("./stl:Address", namespaces=ns) - if address_elem: - area = address_elem[0].get("Area", "??") - bit_offset_str = address_elem[0].get("BitOffset", "0") - addr_type_str = address_elem[0].get("Type", "Bool") # Obtener tipo para ancho - try: - bit_offset = int(bit_offset_str) - byte_offset = bit_offset // 8 - bit_in_byte = bit_offset % 8 - # Determinar ancho basado en tipo (simplificación) - addr_width = "X" # Default a Bit - if addr_type_str == "Byte": - addr_width = "B" - elif addr_type_str == "Word": - addr_width = "W" - elif addr_type_str in ["DWord", "DInt"]: - addr_width = "D" - # Añadir más tipos si es necesario (Real, etc.) - - # Mapear Area para STL estándar - area_map = { - "Input": "I", - "Output": "Q", - "Memory": "M", - "PeripheryInput": "PI", - "PeripheryOutput": "PQ", - "DB": "DB", - "DI": "DI", - "Local": "L", # L no siempre válido aquí - "Timer": "T", - "Counter": "C", - } - stl_area = area_map.get(area, area) - - # Manejar DB/DI que necesitan número de bloque - if stl_area in ["DB", "DI"]: - block_num = address_elem[0].get("BlockNumber") - if block_num: - return f"{stl_area}{block_num}.{stl_area}{addr_width}{byte_offset}.{bit_in_byte}" # Ej: DB1.DBX0.1 - else: # Acceso con registro DB/DI - return f"{stl_area}{addr_width}{byte_offset}.{bit_in_byte}" # Ej: DBX0.1 - elif stl_area in ["T", "C"]: - return f"{stl_area}{byte_offset}" # Los timers/contadores solo usan el número - else: # I, Q, M, L, PI, PQ - return f"{stl_area}{addr_width}{byte_offset}.{bit_in_byte}" # Ej: M10.1, I0.0 - - except ValueError: - return f"{area}?{bit_offset_str}?" - - return f"_{scope}_?" 
# Fallback - - -def get_comment_text(comment_element): - """Extrae texto de un LineComment o Comment.""" - if comment_element is None: - return "" - # Usar get_multilingual_text si los comentarios son multilingües - # Si no, extraer texto directamente - ml_texts = comment_element.xpath( - ".//mlt:MultilingualTextItem/mlt:AttributeList/mlt:Text/text()", - namespaces={ - "mlt": "http://www.siemens.com/automation/Openness/SW/Interface/v5" - }, - ) # Asumiendo ns - if ml_texts: - # Podrías intentar obtener un idioma específico o simplemente el primero - return ml_texts[0].strip() if ml_texts else "" - - # Fallback a texto directo si no hay estructura multilingüe - text_nodes = comment_element.xpath("./text()") - return "".join(text_nodes).strip() - - -def reconstruct_stl_from_statementlist(statement_list_node): - """Reconstruye el código STL como una cadena de texto desde .""" - if statement_list_node is None: - return "// Error: StatementList node not found.\n" - - stl_lines = [] - # CORREGIDO: Añadido namespaces=ns - statements = statement_list_node.xpath("./stl:StlStatement", namespaces=ns) - - for stmt in statements: - line_parts = [] - line_comment = "" # Comentario al final de la línea - - # 1. Comentarios al inicio de la línea (como líneas separadas //) - # CORREGIDO: Añadido namespaces=ns - initial_comments = stmt.xpath( - "child::stl:Comment | child::stl:LineComment", namespaces=ns - ) - for comm in initial_comments: - comment_text = get_comment_text(comm) - if comment_text: - # Dividir comentarios multilínea en varias líneas // - for comment_line in comment_text.splitlines(): - stl_lines.append(f"// {comment_line}") - - # 2. Etiqueta (si existe) - # CORREGIDO: Añadido namespaces=ns - label_decl = stmt.xpath("./stl:LabelDeclaration", namespaces=ns) - label_str = "" - if label_decl: - # CORREGIDO: Añadido namespaces=ns - label_name_nodes = label_decl[0].xpath("./stl:Label/@Name", namespaces=ns) - if label_name_nodes: - label_str = f"{label_name_nodes[0]}:" - # Buscar comentarios DENTRO de LabelDeclaration pero después de Label - # CORREGIDO: Añadido namespaces=ns - label_comments = label_decl[0].xpath( - "./stl:Comment | ./stl:LineComment", namespaces=ns - ) - for lcomm in label_comments: - comment_text = get_comment_text(lcomm) - if comment_text: - line_comment += ( - f" // {comment_text}" # Añadir al comentario de línea - ) - - # 3. Token de Instrucción STL - # CORREGIDO: Añadido namespaces=ns - instruction_token = stmt.xpath("./stl:StlToken", namespaces=ns) - instruction_str = "" - if instruction_token: - token_text = instruction_token[0].get("Text", "_ERR_TOKEN_") - instruction_str = token_text - # Comentarios asociados directamente al token - # CORREGIDO: Añadido namespaces=ns - token_comments = instruction_token[0].xpath( - "./stl:Comment | ./stl:LineComment", namespaces=ns - ) - for tcomm in token_comments: - comment_text = get_comment_text(tcomm) - if comment_text: - line_comment += ( - f" // {comment_text}" # Añadir al comentario de línea + f" Advertencia: Módulo {module_name_rel}.py no tiene la función 'get_parser_info'." ) - # 4. 
Acceso/Operando STL - # CORREGIDO: Añadido namespaces=ns - access_elem = stmt.xpath("./stl:Access", namespaces=ns) - access_str = "" - if access_elem: - access_text = get_access_text(access_elem[0]) - access_str = access_text - # Comentarios DENTRO del Access (pueden ser de línea o bloque) - # CORREGIDO: Añadido namespaces=ns - access_comments = access_elem[0].xpath( - "child::stl:LineComment | child::stl:Comment", namespaces=ns - ) - for acc_comm in access_comments: - comment_text = get_comment_text(acc_comm) - if comment_text: - line_comment += ( - f" // {comment_text}" # Añadir al comentario de línea - ) + except ImportError as e: + print(f"Error importando {full_module_name}: {e}") + except Exception as e: + print(f"Error procesando {full_module_name}: {e}") + traceback.print_exc() - # Construir la línea: Etiqueta (si hay) + Tab + Instrucción + Espacio + Operando (si hay) + Comentario(s) - current_line = "" - if label_str: - current_line += label_str - if instruction_str: - if current_line: # Si ya había etiqueta, añadir tabulador - current_line += "\t" - current_line += instruction_str - if access_str: - if current_line: # Si ya había algo, añadir espacio - current_line += " " - current_line += access_str - if line_comment: - # Añadir espacio antes del comentario si hay código en la línea - if current_line.strip(): - current_line += f" {line_comment}" - else: # Si la línea estaba vacía (solo comentarios iniciales), poner el comentario de línea - current_line = line_comment - - # Añadir la línea construida solo si no está vacía - if current_line.strip(): - stl_lines.append(current_line.rstrip()) # Eliminar espacios finales - - return "\n".join(stl_lines) + print(f"\nTotal de lenguajes con parser cargado: {len(parser_map)}") + print(f"Lenguajes soportados: {list(parser_map.keys())}") + return parser_map -# DB Parser - - -def parse_interface_members(member_elements): - """ - Parsea recursivamente una lista de elementos de una interfaz o estructura. - Maneja miembros simples, structs anidados y arrays con valores iniciales. - """ - members_data = [] - if not member_elements: - return members_data - - for member in member_elements: - member_name = member.get("Name") - member_dtype = member.get("Datatype") - member_remanence = member.get("Remanence", "NonRetain") # Default si no existe - member_accessibility = member.get("Accessibility", "Public") # Default - - if not member_name or not member_dtype: - print( - f"Advertencia: Miembro sin nombre o tipo de dato encontrado. Saltando." 
- ) - continue - - member_info = { - "name": member_name, - "datatype": member_dtype, - "remanence": member_remanence, - "accessibility": member_accessibility, - "start_value": None, # Para valores simples o structs/arrays inicializados globalmente - "comment": None, - "children": [], # Para structs - "array_elements": {}, # Para arrays (índice -> valor) - } - - # Extraer comentario del miembro - # Usar namespace iface - comment_node = member.xpath("./iface:Comment", namespaces=ns) - if comment_node: - # Llama a get_multilingual_text que ya maneja el namespace iface internamente - member_info["comment"] = get_multilingual_text(comment_node[0]) - - # Extraer valor inicial (para tipos simples) - # Usar namespace iface - start_value_node = member.xpath("./iface:StartValue", namespaces=ns) - if start_value_node: - constant_name = start_value_node[0].get("ConstantName") - if constant_name: - member_info["start_value"] = constant_name - else: - member_info["start_value"] = ( - start_value_node[0].text - if start_value_node[0].text is not None - else "" - ) - - # --- Manejar Structs Anidados --- - # Usar namespace iface - nested_sections = member.xpath( - "./iface:Sections/iface:Section/iface:Member", namespaces=ns - ) - if nested_sections: - member_info["children"] = parse_interface_members( - nested_sections - ) # Llamada recursiva - - # --- Manejar Arrays --- - if member_dtype.lower().startswith("array["): - # Usar namespace iface - subelements = member.xpath("./iface:Subelement", namespaces=ns) - for sub in subelements: - path = sub.get("Path") - # Usar namespace iface - sub_start_value_node = sub.xpath("./iface:StartValue", namespaces=ns) - if path and sub_start_value_node: - constant_name = sub_start_value_node[0].get("ConstantName") - value = ( - constant_name - if constant_name - else ( - sub_start_value_node[0].text - if sub_start_value_node[0].text is not None - else "" - ) - ) - member_info["array_elements"][path] = value - else: - # Usar namespace iface - sub_comment_node = sub.xpath("./iface:Comment", namespaces=ns) - if path and sub_comment_node: - # member_info["array_comments"][path] = get_multilingual_text(sub_comment_node[0]) - pass - - members_data.append(member_info) - - return members_data - - -# --- Main Parsing Function --- - - -def parse_network(network_element): - """ - Parsea una red, extrae lógica y añade conexiones EN implícitas. - Maneja wires con múltiples destinos. 
(Función original adaptada para namespaces) - """ - if network_element is None: - return { - "id": "ERROR", - "title": "Invalid Network Element", - "comment": "", - "logic": [], - "error": "Input element was None", - } - - network_id = network_element.get("ID") - - # Extracción Título/Comentario (usar namespace iface para MultilingualText) - title_element = network_element.xpath( - ".//iface:MultilingualText[@CompositionName='Title']", namespaces=ns - ) - network_title = ( - get_multilingual_text(title_element[0]) - if title_element - else f"Network {network_id}" - ) - # Asume que el comentario está en ObjectList dentro de CompileUnit - comment_element = network_element.xpath( - "./*[local-name()='ObjectList']/*[local-name()='MultilingualText'][@CompositionName='Comment']" - ) - network_comment = ( - get_multilingual_text(comment_element[0]) if comment_element else "" - ) - - # Buscar FlgNet usando namespace flg - flgnet_list = network_element.xpath(".//flg:FlgNet", namespaces=ns) - if not flgnet_list: - return { - "id": network_id, - "title": network_title, - "comment": network_comment, - "logic": [], - "language": "Unknown", - "error": "FlgNet not found", - } - flgnet = flgnet_list[0] - - # 1. Parsear Access, Parts y Calls (llaman a funciones que ya usan ns) - access_map = { - acc_info["uid"]: acc_info - for acc in flgnet.xpath(".//flg:Access", namespaces=ns) # Usa ns - if (acc_info := parse_access(acc)) and acc_info["type"] != "unknown" - } - parts_and_calls_map = {} - # Usa ns - instruction_elements = flgnet.xpath(".//flg:Part | .//flg:Call", namespaces=ns) - for element in instruction_elements: - parsed_info = None - tag_name = etree.QName(element.tag).localname - if tag_name == "Part": - parsed_info = parse_part(element) - elif tag_name == "Call": - parsed_info = parse_call(element) - if parsed_info and "uid" in parsed_info: - parts_and_calls_map[parsed_info["uid"]] = parsed_info - else: - print( - f"Advertencia: Se ignoró un Part/Call inválido en la red {network_id}" - ) - - # 2. 
Parsear Wires (con namespaces) - wire_connections = defaultdict(list) - source_connections = defaultdict(list) - eno_outputs = defaultdict(list) - # Cachear QNames con namespace flg - flg_ns_uri = ns["flg"] - qname_powerrail = etree.QName(flg_ns_uri, "Powerrail") - qname_identcon = etree.QName(flg_ns_uri, "IdentCon") - qname_namecon = etree.QName(flg_ns_uri, "NameCon") - # Usa ns - for wire in flgnet.xpath(".//flg:Wire", namespaces=ns): - children = wire.getchildren() - if len(children) < 2: - continue - source_elem = children[0] - source_uid, source_pin = None, None - if source_elem.tag == qname_powerrail: - source_uid, source_pin = "POWERRAIL", "out" - elif source_elem.tag == qname_identcon: - source_uid, source_pin = source_elem.get("UId"), "value" - elif source_elem.tag == qname_namecon: - source_uid, source_pin = source_elem.get("UId"), source_elem.get("Name") - if source_uid is None: - continue - source_info = (source_uid, source_pin) - for dest_elem in children[1:]: - dest_uid, dest_pin = None, None - if dest_elem.tag == qname_identcon: - dest_uid, dest_pin = dest_elem.get("UId"), "value" - elif dest_elem.tag == qname_namecon: - dest_uid, dest_pin = dest_elem.get("UId"), dest_elem.get("Name") - if dest_uid is not None and dest_pin is not None: - dest_key = (dest_uid, dest_pin) - if source_info not in wire_connections[dest_key]: - wire_connections[dest_key].append(source_info) - source_key = (source_uid, source_pin) - dest_info = (dest_uid, dest_pin) - if dest_info not in source_connections[source_key]: - source_connections[source_key].append(dest_info) - if source_pin == "eno" and source_uid in parts_and_calls_map: - if dest_info not in eno_outputs[source_uid]: - eno_outputs[source_uid].append(dest_info) - - # 3. Construcción Lógica Inicial (sin cambios en lógica, pero verificar llamadas) - all_logic_steps = {} - functional_block_types = [ - "Move", - "Add", - "Sub", - "Mul", - "Div", - "Mod", - "Convert", - "Call", - "Se", - "Sd", - "BLKMOV", - "TON", - "TOF", - "TP", - "CTU", - "CTD", - "CTUD", - ] # Añadidos timers/counters SCL - rlo_generators = [ - "Contact", - "O", - "Eq", - "Ne", - "Gt", - "Lt", - "Ge", - "Le", - "And", - "Xor", - "PBox", - "NBox", - "Not", - ] # Añadido Not - for instruction_uid, instruction_info in parts_and_calls_map.items(): - instruction_repr = {"instruction_uid": instruction_uid, **instruction_info} - instruction_repr["inputs"] = {} - instruction_repr["outputs"] = {} - original_type = instruction_info["type"] - current_type = original_type - input_pin_mapping = {} - output_pin_mapping = {} - # --- Manejo Especial Tipos --- - if original_type == "SdCoil": - current_type = "Se" - input_pin_mapping = {"in": "s", "operand": "timer", "value": "tv"} - output_pin_mapping = {"out": "q"} - elif original_type in ["Se", "Sd", "TON", "TOF", "TP"]: - input_pin_mapping = { - "s": "s", - "in": "in", - "tv": "tv", - "pt": "pt", - "r": "r", - "timer": "timer", - } - output_pin_mapping = {"q": "q", "Q": "Q", "rt": "rt", "ET": "ET"} - elif original_type in ["CTU", "CTD", "CTUD"]: - input_pin_mapping = { - "cu": "CU", - "cd": "CD", - "r": "R", - "ld": "LD", - "pv": "PV", - "counter": "counter", - } - output_pin_mapping = {"qu": "QU", "qd": "QD", "cv": "CV"} - instruction_repr["type"] = current_type - possible_input_pins = set( - [ - "en", - "in", - "in1", - "in2", - "s", - "r", - "tv", - "value", - "operand", - "timer", - "bit", - "clk", - "pv", - "cu", - "cd", - "ld", - "pre", - "SRCBLK", - "PT", - ] - ) # Añadido PT - for xml_pin_name in possible_input_pins: - dest_key = 
(instruction_uid, xml_pin_name) - if dest_key in wire_connections: - sources_list = wire_connections[dest_key] - input_sources_repr = [] - for source_uid, source_pin in sources_list: - if source_uid == "POWERRAIL": - input_sources_repr.append({"type": "powerrail"}) - elif source_uid in access_map: - input_sources_repr.append(access_map[source_uid]) - elif source_uid in parts_and_calls_map: - source_instr_info = parts_and_calls_map[source_uid] - source_original_type = source_instr_info["type"] - source_output_mapping = {} - if source_original_type == "SdCoil": - source_output_mapping = {"out": "q"} - elif source_original_type in ["Se", "Sd", "TON", "TOF", "TP"]: - source_output_mapping = { - "q": "q", - "Q": "Q", - "rt": "rt", - "ET": "ET", - } - elif source_original_type in ["CTU", "CTD", "CTUD"]: - source_output_mapping = {"qu": "QU", "qd": "QD", "cv": "CV"} - mapped_source_pin = source_output_mapping.get( - source_pin, source_pin - ) - input_sources_repr.append( - { - "type": "connection", - "source_instruction_type": source_original_type, - "source_instruction_uid": source_uid, - "source_pin": mapped_source_pin, - } - ) - else: - input_sources_repr.append( - {"type": "unknown_source", "uid": source_uid} - ) - json_pin_name = input_pin_mapping.get(xml_pin_name, xml_pin_name) - if len(input_sources_repr) == 1: - instruction_repr["inputs"][json_pin_name] = input_sources_repr[0] - elif len(input_sources_repr) > 1: - instruction_repr["inputs"][json_pin_name] = input_sources_repr - possible_output_pins = set( - [ - "out", - "out1", - "Q", - "q", - "eno", - "RET_VAL", - "DSTBLK", - "rt", - "rtbcd", - "cv", - "cvbcd", - "QU", - "QD", - "ET", - ] - ) # Añadido ET - for xml_pin_name in possible_output_pins: - source_key = (instruction_uid, xml_pin_name) - if source_key in source_connections: - json_pin_name = output_pin_mapping.get(xml_pin_name, xml_pin_name) - if json_pin_name not in instruction_repr["outputs"]: - instruction_repr["outputs"][json_pin_name] = [] - for dest_uid, dest_pin in source_connections[source_key]: - if dest_uid in access_map: - if ( - access_map[dest_uid] - not in instruction_repr["outputs"][json_pin_name] - ): - instruction_repr["outputs"][json_pin_name].append( - access_map[dest_uid] - ) - all_logic_steps[instruction_uid] = instruction_repr - - # 4. 
Inferencia EN (sin cambios en lógica) - processed_blocks_en_inference = set() - something_changed = True - inference_passes = 0 - max_inference_passes = len(all_logic_steps) + 5 - try: - sorted_uids_for_en = sorted( - all_logic_steps.keys(), - key=lambda x: int(x) if x.isdigit() else float("inf"), - ) - except ValueError: - sorted_uids_for_en = sorted(all_logic_steps.keys()) - ordered_logic_list_for_en = [ - all_logic_steps[uid] for uid in sorted_uids_for_en if uid in all_logic_steps - ] - while something_changed and inference_passes < max_inference_passes: - something_changed = False - inference_passes += 1 - for i, instruction in enumerate(ordered_logic_list_for_en): - part_uid = instruction["instruction_uid"] - part_type_original = ( - instruction["type"].replace(SCL_SUFFIX, "").replace("_error", "") - ) # Usa SCL_SUFFIX - if ( - part_type_original in functional_block_types - and "en" not in instruction["inputs"] - and part_uid not in processed_blocks_en_inference - ): - inferred_en_source = None - if i > 0: - for j in range(i - 1, -1, -1): - prev_instr = ordered_logic_list_for_en[j] - prev_uid = prev_instr["instruction_uid"] - prev_type_original = ( - prev_instr["type"] - .replace(SCL_SUFFIX, "") - .replace("_error", "") - ) - if prev_type_original in rlo_generators: - inferred_en_source = { - "type": "connection", - "source_instruction_uid": prev_uid, - "source_instruction_type": prev_type_original, - "source_pin": "out", - } - break - elif prev_type_original in functional_block_types: - source_key_eno = (prev_uid, "eno") - if source_key_eno in source_connections: - inferred_en_source = { - "type": "connection", - "source_instruction_uid": prev_uid, - "source_instruction_type": prev_type_original, - "source_pin": "eno", - } - break - else: - continue - elif prev_type_original in [ - "Coil", - "SCoil", - "RCoil", - "SetCoil", - "ResetCoil", - "SdCoil", - ]: - break - if inferred_en_source: - all_logic_steps[part_uid]["inputs"]["en"] = inferred_en_source - processed_blocks_en_inference.add(part_uid) - something_changed = True - - # 5. Añadir lógica ENO interesante (sin cambios en lógica) - for source_instr_uid, eno_destinations in eno_outputs.items(): - if source_instr_uid not in all_logic_steps: - continue - interesting_eno_logic = [] - for dest_uid, dest_pin in eno_destinations: - is_direct_en_connection = False - if dest_uid in parts_and_calls_map and dest_pin == "en": - try: - source_idx = sorted_uids_for_en.index(source_instr_uid) - dest_idx = sorted_uids_for_en.index(dest_uid) - if ( - dest_idx == source_idx + 1 - and parts_and_calls_map[dest_uid]["type"] - in functional_block_types - ): - is_direct_en_connection = True - except ValueError: - pass - if not is_direct_en_connection: - target_info = {"target_pin": dest_pin} - if dest_uid in parts_and_calls_map: - target_info.update( - { - "target_type": "instruction", - "target_uid": dest_uid, - "target_name": parts_and_calls_map[dest_uid].get( - "name", parts_and_calls_map[dest_uid].get("type") - ), - } - ) - elif dest_uid in access_map: - target_info.update( - { - "target_type": "operand", - "target_details": access_map[dest_uid], - } - ) - else: - target_info.update( - {"target_type": "unknown", "target_uid": dest_uid} - ) - interesting_eno_logic.append(target_info) - if interesting_eno_logic: - all_logic_steps[source_instr_uid]["eno_logic"] = interesting_eno_logic - - # 6. 
Ordenar y Devolver - network_logic_final = [ - all_logic_steps[uid] for uid in sorted_uids_for_en if uid in all_logic_steps - ] - # Determinar lenguaje de la red para devolverlo - network_lang = "Unknown" - if network_element is not None: - attr_list_net = network_element.xpath("./*[local-name()='AttributeList']") - if attr_list_net: - lang_node_net = attr_list_net[0].xpath( - "./*[local-name()='ProgrammingLanguage']/text()" - ) - if lang_node_net: - network_lang = lang_node_net[0].strip() - - return { - "id": network_id, - "title": network_title, - "comment": network_comment, - "language": network_lang, - "logic": network_logic_final, - } - - -def convert_xml_to_json(xml_filepath, json_filepath): +# --- Función Principal de Conversión (Refactorizada) --- +def convert_xml_to_json(xml_filepath, json_filepath, parser_map): + """Convierte XML a JSON usando los parsers cargados dinámicamente.""" print(f"Iniciando conversión de '{xml_filepath}' a '{json_filepath}'...") if not os.path.exists(xml_filepath): print(f"Error Crítico: Archivo XML no encontrado: '{xml_filepath}'") - return + return False # Indicar fallo + try: print("Paso 1: Parseando archivo XML...") + # Usar un parser que quite texto en blanco para simplificar XPath parser = etree.XMLParser(remove_blank_text=True) tree = etree.parse(xml_filepath, parser) root = tree.getroot() print("Paso 1: Parseo XML completado.") - print("Paso 2: Buscando el bloque SW.Blocks.FC, SW.Blocks.FB o SW.Blocks.GlobalDB...") - # --- MODIFICADO: Buscar FC, FB o GlobalDB --- - block_list = root.xpath("//*[local-name()='SW.Blocks.FC' or local-name()='SW.Blocks.FB' or local-name()='SW.Blocks.GlobalDB']") + + # --- Buscar bloque principal (FC, FB, GlobalDB, OB) --- + print("Paso 2: Buscando el bloque SW.Blocks.FC/FB/GlobalDB/OB...") + # Usar local-name() para ignorar namespaces en esta búsqueda inicial + block_list = root.xpath( + "//*[local-name()='SW.Blocks.FC' or local-name()='SW.Blocks.FB' or local-name()='SW.Blocks.GlobalDB' or local-name()='SW.Blocks.OB']" + ) + if ( + not block_list + ): # Intentar con namespace si el anterior falla (menos probable) + ns_doc = { + "doc": "http://www.siemens.com/automation/Openness/SW/Document/v5" + } # Asumiendo este namespace + block_list = root.xpath( + "//doc:SW.Blocks.FC | //doc:SW.Blocks.FB | //doc:SW.Blocks.GlobalDB | //doc:SW.Blocks.OB", + namespaces=ns_doc, + ) + block_type_found = None the_block = None if block_list: the_block = block_list[0] - # Obtener el nombre real de la etiqueta encontrada - block_tag_name = etree.QName(the_block.tag).localname + block_tag_name = etree.QName( + the_block.tag + ).localname # Obtener nombre local sin ns if block_tag_name == "SW.Blocks.FC": block_type_found = "FC" elif block_tag_name == "SW.Blocks.FB": block_type_found = "FB" elif block_tag_name == "SW.Blocks.GlobalDB": - block_type_found = "GlobalDB" # Identificar el tipo DB - print(f"Paso 2: Bloque {block_tag_name} encontrado (ID={the_block.get('ID')}).") + block_type_found = "GlobalDB" + elif block_tag_name == "SW.Blocks.OB": + block_type_found = "OB" + print( + f"Paso 2: Bloque {block_tag_name} (Tipo: {block_type_found}) encontrado (ID={the_block.get('ID')})." 
+ ) else: - # Mensaje de error más específico y añadimos depuración - print("Error Crítico: No se encontró el elemento raíz del bloque (, o ) usando XPath.") - # --- Añadir Debugging --- - print(f"DEBUG: Tag del elemento raíz del XML: {root.tag}") - print(f"DEBUG: Primeros hijos del raíz:") - for i, child in enumerate(root.getchildren()): - if i < 5: # Imprimir solo los primeros 5 para no saturar - print(f"DEBUG: - Hijo {i+1}: {child.tag}") - else: - print("DEBUG: - ... (más hijos)") - break - # --- Fin Debugging --- - return # Salir si no se encuentra el bloque principal + print( + "Error Crítico: No se encontró el elemento raíz del bloque ()." + ) + # Podríamos intentar buscar cualquier SW.Blocks.* como fallback? + any_block = root.xpath("//*[starts-with(local-name(), 'SW.Blocks.')]") + if any_block: + print( + f"Advertencia: Se encontró un bloque genérico: {etree.QName(any_block[0].tag).localname}. Intentando continuar..." + ) + the_block = any_block[0] + block_type_found = "Unknown" # Marcar como desconocido + else: + return False # Fallo si no se encuentra ningún bloque + + # --- Extraer atributos del bloque --- print("Paso 3: Extrayendo atributos del bloque...") - attribute_list_node = the_block.xpath("./*[local-name()='AttributeList']") + # AttributeList generalmente no tiene namespace propio + attribute_list_node = the_block.xpath("./AttributeList") block_name_val, block_number_val, block_lang_val = "Unknown", None, "Unknown" if attribute_list_node: attr_list = attribute_list_node[0] - name_node = attr_list.xpath("./*[local-name()='Name']/text()") + # Name, Number, ProgrammingLanguage están directamente bajo AttributeList + name_node = attr_list.xpath("./Name/text()") block_name_val = name_node[0].strip() if name_node else block_name_val - num_node = attr_list.xpath("./*[local-name()='Number']/text()") + num_node = attr_list.xpath("./Number/text()") try: block_number_val = int(num_node[0]) if num_node else None - except ValueError: - block_number_val = None - lang_node = attr_list.xpath( - "./*[local-name()='ProgrammingLanguage']/text()" + except (ValueError, TypeError): + block_number_val = None # Mantener como None si no es entero + lang_node = attr_list.xpath("./ProgrammingLanguage/text()") + block_lang_val = ( + lang_node[0].strip() + if lang_node + else ("DB" if block_type_found == "GlobalDB" else "Unknown") ) - block_lang_val = lang_node[0].strip() if lang_node else block_lang_val print( - f"Paso 3: Atributos: Nombre='{block_name_val}', Número={block_number_val}, Lenguaje='{block_lang_val}'" + f"Paso 3: Atributos: Nombre='{block_name_val}', Número={block_number_val}, Lenguaje Bloque='{block_lang_val}'" ) else: print( f"Advertencia: No se encontró AttributeList para el bloque {block_type_found}." ) + if block_type_found == "GlobalDB": + block_lang_val = "DB" # Asignar lenguaje DB si es GlobalDB + + # --- Extraer comentario del bloque --- + # ObjectList -> MultilingualText[@CompositionName='Comment'] block_comment_val = "" + # ObjectList tampoco suele tener namespace propio comment_node_list = the_block.xpath( - "./*[local-name()='ObjectList']/*[local-name()='MultilingualText'][@CompositionName='Comment']" + "./ObjectList/MultilingualText[@CompositionName='Comment']" ) if comment_node_list: + # Usar la función de utils que maneja los namespaces internos de MultilingualText block_comment_val = get_multilingual_text(comment_node_list[0]) print(f"Paso 3b: Comentario bloque: '{block_comment_val[:50]}...'") + else: + # Intentar buscar comentario en AttributeList como fallback? 
+ comment_attr_node = the_block.xpath("./AttributeList/Comment") + if comment_attr_node: + block_comment_val = get_multilingual_text(comment_attr_node[0]) + print( + f"Paso 3b (Fallback): Comentario bloque encontrado en AttributeList: '{block_comment_val[:50]}...'" + ) + + # --- Crear diccionario resultado --- result = { "block_name": block_name_val, "block_number": block_number_val, - "language": block_lang_val, + "language": block_lang_val, # Lenguaje general del bloque + "block_type": block_type_found, "block_comment": block_comment_val, "interface": {}, "networks": [], } + + # --- Extraer interfaz --- print("Paso 4: Extrayendo la interfaz del bloque...") - if attribute_list_node: - interface_node_list = attribute_list_node[0].xpath( - ".//*[local-name()='Interface']" - ) - if interface_node_list: - interface_node = interface_node_list[0] - print("Paso 4: Nodo Interface encontrado.") - for section in interface_node.xpath(".//iface:Section", namespaces=ns): - section_name = section.get("Name") - if not section_name: + # Interface está dentro de AttributeList (sin ns propio), pero sus hijos usan 'iface' + interface_node_list = ( + attribute_list_node[0].xpath("./Interface") if attribute_list_node else [] + ) + + if interface_node_list: + interface_node = interface_node_list[0] + print("Paso 4: Nodo Interface encontrado.") + # Sections/Section usan namespace iface + all_sections = interface_node.xpath(".//iface:Section", namespaces=ns) + if all_sections: + processed_sections = set() + for section in all_sections: + section_name = section.get( + "Name" + ) # Input, Output, Static, Temp, etc. + if not section_name or section_name in processed_sections: continue - members = [] - for member in section.xpath("./iface:Member", namespaces=ns): - member_name = member.get("Name") - member_dtype = member.get("Datatype") - if member_name and member_dtype: - members.append( - {"name": member_name, "datatype": member_dtype} - ) - if members: - result["interface"][section_name] = members - if not result["interface"]: - print("Advertencia: Interface sin secciones iface:Section válidas.") + # Los Member dentro de Section usan namespace iface + members_in_section = section.xpath("./iface:Member", namespaces=ns) + if members_in_section: + # Usar la función de utils para parsear miembros + result["interface"][section_name] = parse_interface_members( + members_in_section + ) + processed_sections.add(section_name) else: print( - "Advertencia: No se encontró dentro de ." + "Advertencia: Nodo Interface no contiene secciones ." ) + + if not result["interface"]: + print( + "Advertencia: Interface encontrada pero sin secciones procesables." + ) + else: + # Manejo especial para DB si no hay explícita + if block_type_found == "GlobalDB": + # Buscar directamente la sección Static (que usa namespace iface) + static_members = the_block.xpath( + ".//iface:Section[@Name='Static']/iface:Member", namespaces=ns + ) + if static_members: + print( + "Paso 4: Encontrada sección Static para GlobalDB (sin nodo Interface)." + ) + result["interface"]["Static"] = parse_interface_members( + static_members + ) + else: + print("Advertencia: No se encontró sección 'Static' para GlobalDB.") + else: + print( + f"Advertencia: No se encontró para bloque {block_type_found}." 
+ ) + if not result["interface"]: print("Advertencia: No se pudo extraer información de la interfaz.") - print("Paso 5: Extrayendo y PROCESANDO lógica de redes (CompileUnits)...") + # --- Procesar redes (CompileUnits) --- + print("Paso 5: Buscando y PROCESANDO redes (CompileUnits)...") networks_processed_count = 0 - result["networks"] = [] # Initialize networks list here - object_list_node = the_block.xpath("./*[local-name()='ObjectList']") + result["networks"] = [] + # ObjectList y SW.Blocks.CompileUnit no suelen tener namespace propio + object_list_node = the_block.xpath("./ObjectList") if object_list_node: - compile_units = object_list_node[0].xpath( - "./*[local-name()='SW.Blocks.CompileUnit']" - ) + compile_units = object_list_node[0].xpath("./SW.Blocks.CompileUnit") print( f"Paso 5: Se encontraron {len(compile_units)} elementos SW.Blocks.CompileUnit." ) + # --- BUCLE PRINCIPAL DE PARSEO DE REDES (MODIFICADO) --- for network_elem in compile_units: networks_processed_count += 1 network_id = network_elem.get("ID") if not network_id: - print(" Advertencia: Se encontró CompileUnit sin ID. Saltando.") + print("Advertencia: CompileUnit sin ID, saltando.") continue - # --- Detectar lenguaje de la red --- - attribute_list = network_elem.xpath("./*[local-name()='AttributeList']") - programming_language = "LAD" # Default a LAD si no se especifica - network_source_node = None # Nodo - - if attribute_list: - lang_node = attribute_list[0].xpath( - "./*[local-name()='ProgrammingLanguage']/text()" - ) + # Detectar lenguaje de la RED (puede diferir del lenguaje del bloque) + # AttributeList/ProgrammingLanguage sin namespace + network_lang = "LAD" # Default si no se encuentra + net_attr_list = network_elem.xpath("./AttributeList") + if net_attr_list: + lang_node = net_attr_list[0].xpath("./ProgrammingLanguage/text()") if lang_node: - programming_language = lang_node[0].strip() - # Obtener el nodo NetworkSource para pasarlo a los parsers - network_source_list = attribute_list[0].xpath( - "./*[local-name()='NetworkSource']" - ) - if network_source_list: - network_source_node = network_source_list[0] + network_lang = lang_node[0].strip() print( - f" - Procesando Red ID={network_id}, Lenguaje={programming_language}" + f" - Procesando Red ID={network_id}, Lenguaje Red={network_lang}" ) - # --- Extraer título y comentario (común) --- - title_element = network_elem.xpath( - ".//*[local-name()='MultilingualText'][@CompositionName='Title']" - ) - network_title = ( - get_multilingual_text(title_element[0]) - if title_element - else f"Network {network_id}" - ) - - comment_element = network_elem.xpath( - "./*[local-name()='ObjectList']/*[local-name()='MultilingualText'][@CompositionName='Comment']" - ) - network_comment = ( - get_multilingual_text(comment_element[0]) if comment_element else "" - ) - - # --- Procesar según el lenguaje --- + # --- Llamada al Parser Dinámico --- + parser_func = parser_map.get( + network_lang.upper() + ) # Buscar parser por lenguaje parsed_network_data = None - if programming_language == "SCL": - structured_text_node = ( - network_source_node.xpath("./st:StructuredText", namespaces=ns) - if network_source_node is not None - else None - ) - reconstructed_scl = f"// SCL extraction failed for Network {network_id}: StructuredText node not found.\n" - if structured_text_node: + if parser_func: + try: + # Llamar a la función de parseo específica del lenguaje + # Pasar el elemento XML de la red y los namespaces + parsed_network_data = parser_func( + network_elem + ) # Pasar ns ya no es 
necesario si están en utils + except Exception as e_parse: print( - f" Reconstruyendo SCL desde tokens para red {network_id}..." + f" ERROR durante el parseo de Red {network_id} ({network_lang}): {e_parse}" ) - reconstructed_scl = reconstruct_scl_from_tokens( - structured_text_node[0] - ) - # print(f" ... SCL reconstruido (parcial):\n{reconstructed_scl[:200]}...") # Preview opcional - else: - print( - f" Advertencia: No se encontró nodo para red SCL {network_id}." - ) - - parsed_network_data = { - "id": network_id, - "title": network_title, - "comment": network_comment, - "language": "SCL", - "logic": [ - { - "instruction_uid": f"SCL_{network_id}", # UID inventado - "type": "RAW_SCL_CHUNK", - "scl": reconstructed_scl, - } - ], - } - - # --- NUEVO MANEJO STL --- - elif programming_language == "STL": - statement_list_node = ( - network_source_node.xpath("./stl:StatementList", namespaces=ns) - if network_source_node is not None - else None - ) - - reconstructed_stl = f"// STL extraction failed for Network {network_id}: StatementList node not found.\n" - if statement_list_node: - print( - f" Reconstruyendo STL desde StatementList para red {network_id}..." - ) - # Llama a la nueva función de reconstrucción STL - reconstructed_stl = reconstruct_stl_from_statementlist( - statement_list_node[0] - ) - # print(f" ... STL reconstruido (parcial):\n{reconstructed_stl[:200]}...") # Preview opcional - else: - print( - f" Advertencia: No se encontró nodo para red STL {network_id}." - ) - - # Guardar como un chunk de texto crudo - parsed_network_data = { - "id": network_id, - "title": network_title, - "comment": network_comment, - "language": "STL", # Indicar que es STL - "logic": [ - { - "instruction_uid": f"STL_{network_id}", # UID inventado - "type": "RAW_STL_CHUNK", # Nuevo tipo para identificarlo - "stl": reconstructed_stl, # Guardar el texto reconstruido - } - ], - } - - elif programming_language in ["LAD", "FBD"]: - # Para LAD/FBD, llamar a parse_network (que espera FlgNet dentro de NetworkSource) - # parse_network ya maneja su propio título/comentario si es necesario, pero podemos pasar los extraídos - # Nota: parse_network espera el *CompileUnit* element, no el NetworkSource - parsed_network_data = parse_network(network_elem) - if parsed_network_data: - parsed_network_data["language"] = ( - programming_language # Asegurar que el lenguaje se guarda - ) - if parsed_network_data.get("error"): - print( - f" Error al parsear red {programming_language} ID={network_id}: {parsed_network_data['error']}" - ) - # parsed_network_data = None # Descomentar para omitir redes con error - else: - print( - f" Error: parse_network devolvió None para red {programming_language} ID={network_id}" - ) - - else: - # Manejar otros lenguajes o casos inesperados + traceback.print_exc() + # Crear diccionario de error si el parser falla + parsed_network_data = { + "id": network_id, + "language": network_lang, + "logic": [], + "error": f"Parser failed: {e_parse}", + } + else: # Lenguaje no soportado por ningún parser cargado print( - f" Advertencia: Lenguaje no soportado '{programming_language}' en red ID={network_id}. Creando placeholder." + f" Advertencia: Lenguaje de red '{network_lang}' no soportado por los parsers cargados." 
) parsed_network_data = { "id": network_id, - "title": network_title, - "comment": network_comment, - "language": programming_language, - "logic": [ - { - "instruction_uid": f"UNS_{network_id}", - "type": "UNSUPPORTED_LANG", - "scl": f"// Network {network_id} uses unsupported language: {programming_language}\n", - } - ], + "language": network_lang, + "logic": [], + "error": f"Unsupported language: {network_lang}", } - # Añadir la red procesada (si es válida) al resultado + # --- Añadir Título y Comentario a la Red Parseada --- if parsed_network_data: + # Usar get_multilingual_text de utils + title_element = network_elem.xpath( + ".//iface:MultilingualText[@CompositionName='Title']", + namespaces=ns, + ) + parsed_network_data["title"] = ( + get_multilingual_text(title_element[0]) + if title_element + else f"Network {network_id}" + ) + + # Buscar comentario específico de la red + comment_elem_net = network_elem.xpath( + "./ObjectList/MultilingualText[@CompositionName='Comment']", + namespaces=ns, + ) + if not comment_elem_net: # Fallback + comment_elem_net = network_elem.xpath( + ".//MultilingualText[@CompositionName='Comment']", + namespaces=ns, + ) + + parsed_network_data["comment"] = ( + get_multilingual_text(comment_elem_net[0]) + if comment_elem_net + else "" + ) + + # Añadir la red procesada (o con error) al resultado result["networks"].append(parsed_network_data) - # --- Fin del bucle for network_elem --- + # --- Fin Bucle Redes --- - if networks_processed_count == 0: + if networks_processed_count == 0 and block_type_found != "GlobalDB": print( - "Advertencia: ObjectList no contenía elementos SW.Blocks.CompileUnit." + f"Advertencia: ObjectList para {block_type_found} sin SW.Blocks.CompileUnit." ) + elif block_type_found == "GlobalDB": + print("Paso 5: Saltando búsqueda de CompileUnits para GlobalDB (esperado).") else: - print("Advertencia: No se encontró ObjectList para el bloque.") + print( + f"Advertencia: No se encontró ObjectList para el bloque {block_type_found}." + ) + # --- Escribir JSON --- print("Paso 6: Escribiendo el resultado en el archivo JSON...") + # Validaciones finales opcionales if not result["interface"]: - print("ADVERTENCIA FINAL: 'interface' está vacía.") - if not result["networks"]: - print("ADVERTENCIA FINAL: 'networks' está vacía.") + print("ADVERTENCIA FINAL: 'interface' está vacía en el JSON.") + if not result["networks"] and block_type_found != "GlobalDB": + print("ADVERTENCIA FINAL: 'networks' está vacía en el JSON.") + try: with open(json_filepath, "w", encoding="utf-8") as f: json.dump(result, f, indent=4, ensure_ascii=False) - print("Paso 6: Escritura completada.") - print(f"Conversión finalizada. JSON guardado en: '{json_filepath}'") + print("Paso 6: Escritura JSON completada.") + print( + f"Conversión finalizada. JSON guardado en: '{os.path.relpath(json_filepath)}'" + ) + return True # Indicar éxito + except IOError as e: print( f"Error Crítico: No se pudo escribir JSON en '{json_filepath}'. Error: {e}" ) + return False # Indicar fallo except TypeError as e: - print(f"Error Crítico: Problema al serializar a JSON. Error: {e}") + print( + f"Error Crítico: Problema al serializar a JSON (posiblemente datos no serializables). 
Error: {e}" + ) + # Opcional: Imprimir una versión parcial o depurar 'result' + # print("--- Datos antes de JSON DUMP (parcial) ---") + # try: print(json.dumps({k: v for k, v in result.items() if k != 'networks'}, indent=2)) # Imprimir sin redes + # except: print("No se pudo imprimir datos parciales.") + return False # Indicar fallo + except etree.XMLSyntaxError as e: print( f"Error Crítico: Sintaxis XML inválida en '{xml_filepath}'. Detalles: {e}" ) + return False # Indicar fallo except Exception as e: print(f"Error Crítico: Error inesperado durante la conversión: {e}") - print("--- Traceback ---") traceback.print_exc() - print("--- Fin Traceback ---") + return False # Indicar fallo +# --- Punto de Entrada Principal (__main__) --- if __name__ == "__main__": - # Imports necesarios solo para la ejecución como script principal - import argparse - import os - import sys - - # Configurar ArgumentParser para recibir la ruta del XML obligatoria parser = argparse.ArgumentParser( - description="Convert Simatic XML (LAD/FBD/SCL/STL) to simplified JSON. Expects XML filepath as argument." + description="Convert Simatic XML (LAD/FBD/SCL/STL/OB/DB) to simplified JSON using dynamic parsers." ) parser.add_argument( - "xml_filepath", # Argumento posicional obligatorio + "xml_filepath", help="Path to the input XML file passed from the main script (x0_main.py).", ) - args = parser.parse_args() # Parsea los argumentos de sys.argv + args = parser.parse_args() + xml_input_file = args.xml_filepath - xml_input_file = args.xml_filepath # Obtiene la ruta del argumento - - # Verificar si el archivo de entrada existe (es una buena práctica aunque x0 lo haga) if not os.path.exists(xml_input_file): - print(f"Error Crítico (x1): Archivo XML no encontrado: '{xml_input_file}'") - sys.exit(1) # Salir si el archivo no existe + print( + f"Error Crítico (x1): Archivo XML no encontrado: '{xml_input_file}'", + file=sys.stderr, + ) + sys.exit(1) - # Derivar nombre base para archivo de salida JSON - # El archivo JSON se guardará en el mismo directorio que el XML de entrada + # --- Cargar Parsers Dinámicamente --- + loaded_parsers = load_parsers() + if not loaded_parsers: + print("Error Crítico (x1): No se cargaron parsers. 
Abortando.", file=sys.stderr) + sys.exit(1) + + # Derivar nombre de salida JSON xml_filename_base = os.path.splitext(os.path.basename(xml_input_file))[0] - output_dir = os.path.dirname(xml_input_file) # Directorio del XML de entrada - # Asegurarse de que el directorio de salida exista (aunque debería si el XML existe) + output_dir = os.path.dirname(xml_input_file) + # Asegurarse que el directorio de salida exista (puede ser el mismo que el de entrada) os.makedirs(output_dir, exist_ok=True) json_output_file = os.path.join(output_dir, f"{xml_filename_base}_simplified.json") @@ -1556,13 +491,15 @@ if __name__ == "__main__": f"(x1) Convirtiendo: '{os.path.relpath(xml_input_file)}' -> '{os.path.relpath(json_output_file)}'" ) - # Llamar a la función principal de conversión del script - # Asumiendo que tu función principal se llama convert_xml_to_json(input_path, output_path) - try: - convert_xml_to_json(xml_input_file, json_output_file) - except Exception as e: - print(f"Error Crítico (x1) durante la conversión de '{xml_input_file}': {e}") - import traceback + # Llamar a la función de conversión principal + success = convert_xml_to_json(xml_input_file, json_output_file, loaded_parsers) - traceback.print_exc() - sys.exit(1) # Salir con error si la función principal falla + # Salir con código de error apropiado + if success: + sys.exit(0) # Éxito + else: + print( + f"\nError durante la conversión de '{os.path.relpath(xml_input_file)}'.", + file=sys.stderr, + ) + sys.exit(1) # Fallo diff --git a/ToUpload/x2_process.py b/ToUpload/x2_process.py index a0f3f93..748b417 100644 --- a/ToUpload/x2_process.py +++ b/ToUpload/x2_process.py @@ -18,17 +18,14 @@ from processors.processor_utils import ( from processors.symbol_manager import SymbolManager # Import the manager # --- Constantes y Configuración --- -# SCL_SUFFIX = "_scl" # Old suffix SCL_SUFFIX = "_sympy_processed" # New suffix to indicate processing method GROUPED_COMMENT = "// Logic included in grouped IF" SIMPLIFIED_IF_COMMENT = "// Simplified IF condition by script" # May still be useful -# Global data dictionary (consider passing 'data' as argument if needed elsewhere) -# It's currently used by process_group_ifs implicitly via the outer scope, -# which works but passing it explicitly might be cleaner. +# Global data dictionary data = {} - +# --- (Incluye aquí las funciones process_group_ifs y load_processors SIN CAMBIOS) --- def process_group_ifs(instruction, network_id, sympy_map, symbol_manager, data): """ Busca condiciones (ya procesadas -> tienen expr SymPy en sympy_map) @@ -112,6 +109,9 @@ def process_group_ifs(instruction, network_id, sympy_map, symbol_manager, data): # SCoil/RCoil might also be groupable if their SCL is final assignment "SCoil", "RCoil", + "BLKMOV", # Added BLKMOV + "TON", "TOF", "TP", "Se", "Sd", # Added timers + "CTU", "CTD", "CTUD", # Added counters ] for consumer_instr in network_logic: @@ -135,26 +135,26 @@ def process_group_ifs(instruction, network_id, sympy_map, symbol_manager, data): is_enabled_by_us = True # Check if consumer is groupable AND has its final SCL generated - # The suffix check needs adjustment based on how terminating processors set it. - # Assuming processors like Move, Add, Call, SCoil, RCoil NOW generate final SCL and add a suffix. 
if ( is_enabled_by_us - and consumer_type.endswith(SCL_SUFFIX) # Or a specific "final_scl" suffix + and consumer_type.endswith(SCL_SUFFIX) # Check if processed and consumer_type_original in groupable_types ): consumer_scl = consumer_instr.get("scl", "") - # Extract core SCL (logic is similar, maybe simpler if SCL is cleaner now) + # Extract core SCL core_scl = None if consumer_scl: # If consumer SCL itself is an IF generated by EN, take the body if consumer_scl.strip().startswith("IF"): match = re.search( - r"THEN\s*(.*?)\s*END_IF;", + r"IF\s+.*?THEN\s*(.*?)\s*END_IF;", # More robust regex consumer_scl, re.DOTALL | re.IGNORECASE, ) core_scl = match.group(1).strip() if match else None + # If body contains another IF, maybe don't group? (optional complexity) + # if core_scl and core_scl.strip().startswith("IF"): core_scl = None elif not consumer_scl.strip().startswith( "//" ): # Otherwise, take the whole line if not comment @@ -300,8 +300,7 @@ def load_processors(processors_dir="processors"): # Devolver el mapa (para lookup rápido si es necesario) y la lista ordenada return processor_map, processor_list_sorted - -# --- Bucle Principal de Procesamiento (Modificado para STL) --- +# --- Bucle Principal de Procesamiento (Modificado para STL y tipo de bloque) --- def process_json_to_scl(json_filepath): """ Lee JSON simplificado, aplica procesadores dinámicos (ignorando redes STL y bloques DB), @@ -321,15 +320,14 @@ def process_json_to_scl(json_filepath): traceback.print_exc() return - # --- Obtener lenguaje del bloque principal --- - block_language = data.get("language", "Unknown") - block_type = data.get("block_type", "Unknown") # FC, FB, GlobalDB - print(f"Procesando bloque tipo: {block_type}, Lenguaje principal: {block_language}") + # --- MODIFICADO: Obtener tipo de bloque (FC, FB, GlobalDB, OB) --- + block_type = data.get("block_type", "Unknown") # FC, FB, GlobalDB, OB + print(f"Procesando bloque tipo: {block_type}, Lenguaje principal: {data.get('language', 'Unknown')}") - # --- SI ES UN DB, SALTAR EL PROCESAMIENTO LÓGICO --- - if block_language == "DB": + # --- MODIFICADO: SI ES UN GlobalDB, SALTAR EL PROCESAMIENTO LÓGICO --- + if block_type == "GlobalDB": # <-- Comprobar tipo de bloque print( - "INFO: El bloque es un Data Block (DB). Saltando procesamiento lógico de x2." + "INFO: El bloque es un Data Block (GlobalDB). Saltando procesamiento lógico de x2." ) # Simplemente guardamos una copia (o el mismo archivo si no se requiere sufijo) output_filename = json_filepath.replace( @@ -345,8 +343,8 @@ def process_json_to_scl(json_filepath): traceback.print_exc() return # <<< SALIR TEMPRANO PARA DBs - # --- SI NO ES DB, CONTINUAR CON EL PROCESAMIENTO LÓGICO (FC/FB) --- - print("INFO: El bloque es FC/FB. Iniciando procesamiento lógico...") + # --- SI NO ES DB (FC, FB, OB), CONTINUAR CON EL PROCESAMIENTO LÓGICO --- + print(f"INFO: El bloque es {block_type}. 
Iniciando procesamiento lógico...") # <-- Mensaje actualizado script_dir = os.path.dirname(__file__) processors_dir_path = os.path.join(script_dir, "processors") @@ -391,7 +389,7 @@ def process_json_to_scl(json_filepath): passes = 0 processing_complete = False - print("\n--- Iniciando Bucle de Procesamiento Iterativo (FC/FB) ---") + print(f"\n--- Iniciando Bucle de Procesamiento Iterativo ({block_type}) ---") # <-- Mensaje actualizado while passes < max_passes and not processing_complete: passes += 1 made_change_in_base_pass = False @@ -408,34 +406,44 @@ def process_json_to_scl(json_filepath): func_to_call = processor_info["func"] for network in data.get("networks", []): network_id = network["id"] - network_lang = network.get("language", "LAD") - if network_lang == "STL": - continue # Saltar STL + network_lang = network.get("language", "LAD") # Lenguaje de la red + if network_lang == "STL": # Saltar redes STL + continue access_map = network_access_maps.get(network_id, {}) network_logic = network.get("logic", []) for instruction in network_logic: instr_uid = instruction.get("instruction_uid") - instr_type_original = instruction.get("type", "Unknown") + # Usar el tipo *actual* de la instrucción para el lookup + instr_type_current = instruction.get("type", "Unknown") + + # Saltar si ya está procesado, es error, agrupado, o tipo crudo if ( - instr_type_original.endswith(SCL_SUFFIX) - or "_error" in instr_type_original + instr_type_current.endswith(SCL_SUFFIX) + or "_error" in instr_type_current or instruction.get("grouped", False) - or instr_type_original - in ["RAW_STL_CHUNK", "RAW_SCL_CHUNK", "UNSUPPORTED_LANG"] + or instr_type_current + in ["RAW_STL_CHUNK", "RAW_SCL_CHUNK", "UNSUPPORTED_LANG", "UNSUPPORTED_CONTENT", "PARSING_ERROR"] ): continue - lookup_key = instr_type_original.lower() - effective_type_name = lookup_key - if instr_type_original == "Call": - block_type = instruction.get("block_type", "").upper() - if block_type == "FC": - effective_type_name = "call_fc" - elif block_type == "FB": - effective_type_name = "call_fb" + # El lookup usa el tipo actual (que aún no tiene el sufijo) + lookup_key = instr_type_current.lower() + effective_type_name = lookup_key + + # Mapeo especial para llamadas FC/FB + if instr_type_current == "Call": + call_block_type = instruction.get("block_type", "").upper() + if call_block_type == "FC": + effective_type_name = "call_fc" + elif call_block_type == "FB": + effective_type_name = "call_fb" + # Añadir otros tipos de llamada si es necesario + + # Si el tipo efectivo coincide con el procesador actual if effective_type_name == current_type_name: try: + # Pasar 'data' a la función del procesador changed = func_to_call( instruction, network_id, sympy_map, symbol_manager, data ) @@ -444,22 +452,24 @@ def process_json_to_scl(json_filepath): num_sympy_processed_this_pass += 1 except Exception as e: print( - f"ERROR(SymPy Base) al procesar {instr_type_original} UID {instr_uid}: {e}" + f"ERROR(SymPy Base) al procesar {instr_type_current} UID {instr_uid}: {e}" ) traceback.print_exc() instruction["scl"] = ( f"// ERROR en SymPy procesador base: {e}" ) - instruction["type"] = instr_type_original + "_error" - made_change_in_base_pass = True + # Añadir sufijo de error al tipo actual + instruction["type"] = instr_type_current + "_error" + made_change_in_base_pass = True # Se hizo un cambio (marcar como error) print( f" -> {num_sympy_processed_this_pass} instrucciones (no STL) procesadas con SymPy." 
) + # --- FASE 2: Agrupación IF (Ignorando STL) --- if ( made_change_in_base_pass or passes == 1 - ): # Ejecutar siempre en el primer pase + ): # Ejecutar siempre en el primer pase o si hubo cambios print(f" Fase 2 (Agrupación IF con Simplificación):") num_grouped_this_pass = 0 # Resetear contador para el pase for network in data.get("networks", []): @@ -468,19 +478,30 @@ def process_json_to_scl(json_filepath): if network_lang == "STL": continue # Saltar STL network_logic = network.get("logic", []) - for instruction in network_logic: - try: - group_changed = process_group_ifs( - instruction, network_id, sympy_map, symbol_manager, data - ) - if group_changed: - made_change_in_group_pass = True - num_grouped_this_pass += 1 - except Exception as e: - print( - f"ERROR(GroupLoop) al intentar agrupar desde UID {instruction.get('instruction_uid')}: {e}" - ) - traceback.print_exc() + # Iterar en orden por UID puede ser más estable para agrupación + uids_in_network = sorted([instr.get("instruction_uid", "Z") for instr in network_logic if instr.get("instruction_uid")]) + for uid_to_process in uids_in_network: + instruction = next((instr for instr in network_logic if instr.get("instruction_uid") == uid_to_process), None) + if not instruction: continue + + # Saltar si ya está agrupada, es error, etc. + if instruction.get("grouped") or "_error" in instruction.get("type", ""): + continue + # La agrupación sólo aplica a instrucciones que generan condiciones booleanas + # y que ya fueron procesadas (tienen el sufijo) + if instruction.get("type", "").endswith(SCL_SUFFIX): + try: + group_changed = process_group_ifs( + instruction, network_id, sympy_map, symbol_manager, data + ) + if group_changed: + made_change_in_group_pass = True + num_grouped_this_pass += 1 + except Exception as e: + print( + f"ERROR(GroupLoop) al intentar agrupar desde UID {instruction.get('instruction_uid')}: {e}" + ) + traceback.print_exc() print( f" -> {num_grouped_this_pass} agrupaciones realizadas (en redes no STL)." 
) @@ -503,14 +524,16 @@ def process_json_to_scl(json_filepath): # --- FIN BUCLE ITERATIVO --- # --- Verificación Final (Ajustada para RAW_STL_CHUNK) --- - print("\n--- Verificación Final de Instrucciones No Procesadas (FC/FB) ---") + print(f"\n--- Verificación Final de Instrucciones No Procesadas ({block_type}) ---") # <-- Mensaje actualizado unprocessed_count = 0 unprocessed_details = [] ignored_types = [ "raw_scl_chunk", "unsupported_lang", "raw_stl_chunk", - ] # Añadido raw_stl_chunk + "unsupported_content", # Añadido de x1 + "parsing_error", # Añadido de x1 + ] for network in data.get("networks", []): network_id = network.get("id", "Unknown ID") network_title = network.get("title", f"Network {network_id}") @@ -547,7 +570,7 @@ def process_json_to_scl(json_filepath): output_filename = json_filepath.replace( "_simplified.json", "_simplified_processed.json" ) - print(f"\nGuardando JSON procesado (FC/FB) en: {output_filename}") + print(f"\nGuardando JSON procesado ({block_type}) en: {output_filename}") # <-- Mensaje actualizado try: with open(output_filename, "w", encoding="utf-8") as f: json.dump(data, f, indent=4, ensure_ascii=False) @@ -557,7 +580,7 @@ def process_json_to_scl(json_filepath): traceback.print_exc() -# --- Ejecución (sin cambios) --- +# --- Ejecución (sin cambios en esta parte) --- if __name__ == "__main__": # Imports necesarios solo para la ejecución como script principal import argparse @@ -577,12 +600,10 @@ if __name__ == "__main__": source_xml_file = args.source_xml_filepath # Obtiene la ruta del XML original # Verificar si el archivo XML original existe (como referencia, útil para depuración) - # No es estrictamente necesario para la lógica aquí, pero ayuda a confirmar if not os.path.exists(source_xml_file): print( f"Advertencia (x2): Archivo XML original no encontrado: '{source_xml_file}', pero se intentará encontrar el JSON correspondiente." ) - # No salir necesariamente, pero es bueno saberlo. 
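
# --- Illustrative sketch: final verification sweep over the processed JSON ---
# A compact sketch of the check described above, assuming the JSON layout
# produced by x1 (networks -> logic -> instructions carrying a "type" field).
# Function and variable names here are illustrative only.
SCL_SUFFIX = "_sympy_processed"
IGNORED_TYPES = {"raw_scl_chunk", "raw_stl_chunk", "unsupported_lang",
                 "unsupported_content", "parsing_error"}

def find_unprocessed(data):
    """Collect (network_id, instruction_uid, type) for instructions that were
    neither processed (suffix), grouped, nor deliberately ignored."""
    leftovers = []
    for network in data.get("networks", []):
        if network.get("language") == "STL":  # STL chunks pass through untouched
            continue
        for instr in network.get("logic", []):
            itype = instr.get("type", "")
            if itype.endswith(SCL_SUFFIX) or instr.get("grouped", False):
                continue
            if itype.lower() in IGNORED_TYPES:
                continue
            leftovers.append((network.get("id"),
                              instr.get("instruction_uid"), itype))
    return leftovers
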
# Derivar nombre del archivo JSON de entrada (_simplified.json) xml_filename_base = os.path.splitext(os.path.basename(source_xml_file))[0] @@ -610,14 +631,13 @@ if __name__ == "__main__": sys.exit(1) # Salir si el archivo necesario no está else: # Llamar a la función principal de procesamiento del script - # Asumiendo que tu función principal se llama process_json_to_scl(input_json_path) try: process_json_to_scl(input_json_file) except Exception as e: print( f"Error Crítico (x2) durante el procesamiento de '{input_json_file}': {e}" ) - import traceback + import traceback # Asegurar que traceback está importado traceback.print_exc() - sys.exit(1) # Salir con error si la función principal falla + sys.exit(1) # Salir con error si la función principal falla \ No newline at end of file diff --git a/ToUpload/x3_generate_scl.py b/ToUpload/x3_generate_scl.py index e90d74e..e20b9d2 100644 --- a/ToUpload/x3_generate_scl.py +++ b/ToUpload/x3_generate_scl.py @@ -46,24 +46,23 @@ except ImportError: # para formatear valores iniciales def format_scl_start_value(value, datatype): """Formatea un valor para la inicialización SCL según el tipo.""" + # Add initial debug print + # print(f"DEBUG format_scl_start_value: value='{value}', datatype='{datatype}'") if value is None: - return None + return None # Retornar None si no hay valor datatype_lower = datatype.lower() if datatype else "" value_str = str(value) - if "bool" in datatype_lower: - return "TRUE" if value_str.lower() == "true" else "FALSE" - elif "string" in datatype_lower: - escaped_value = value_str.replace("'", "''") - if escaped_value.startswith("'") and escaped_value.endswith("'"): - escaped_value = escaped_value[1:-1] - return f"'{escaped_value}'" - elif "char" in datatype_lower: # Añadido Char - escaped_value = value_str.replace("'", "''") - if escaped_value.startswith("'") and escaped_value.endswith("'"): - escaped_value = escaped_value[1:-1] - return f"'{escaped_value}'" - elif any( + # Intentar quitar comillas si existen (para manejar "TRUE" vs TRUE) + if value_str.startswith('"') and value_str.endswith('"') and len(value_str) > 1: + value_str_unquoted = value_str[1:-1] + elif value_str.startswith("'") and value_str.endswith("'") and len(value_str) > 1: + value_str_unquoted = value_str[1:-1] + else: + value_str_unquoted = value_str + + # --- Integer-like types --- + if any( t in datatype_lower for t in [ "int", @@ -79,72 +78,169 @@ def format_scl_start_value(value, datatype): "udint", "ulint", ] - ): # Ampliado + ): try: - return str(int(value_str)) + # Intentar convertir el valor (sin comillas) a entero + return str(int(value_str_unquoted)) except ValueError: - if re.match(r"^[a-zA-Z_][a-zA-Z0-9_]*$", value_str): - return value_str - return f"'{value_str}'" # O como string si no es entero ni símbolo + # Si no es un entero válido, podría ser una constante simbólica + if re.match(r"^[a-zA-Z_][a-zA-Z0-9_]*$", value_str_unquoted): + return value_str_unquoted # Devolver como símbolo + + # --- Fallback for non-integer, non-symbol --- + print( + f"DEBUG format_scl_start_value: Fallback for int-like. 
value_str_unquoted='{repr(value_str_unquoted)}', datatype='{datatype}'" + ) # More debug + # MODIFIED FALLBACK: Escape newlines and use repr() for safety before formatting + try: + # Escape backslashes and single quotes properly for SCL string literal + escaped_for_scl = value_str_unquoted.replace("\\", "\\\\").replace( + "'", "''" + ) + # Remove potential newlines that break Python f-string; SCL strings usually don't span lines implicitly + escaped_for_scl = escaped_for_scl.replace("\n", "").replace("\r", "") + # Format as SCL string literal + formatted_scl_string = f"'{escaped_for_scl}'" + print( + f"DEBUG format_scl_start_value: Fallback result='{formatted_scl_string}'" + ) + return formatted_scl_string + except Exception as format_exc: + print( + f"ERROR format_scl_start_value: Exception during fallback formatting: {format_exc}" + ) + return f"'ERROR_FORMATTING_{value_str_unquoted[:20]}'" # Return an error string + + # --- Other types (Bool, Real, String, Char, Time, Date, etc.) --- + elif "bool" in datatype_lower: + # Comparar sin importar mayúsculas/minúsculas y sin comillas + return "TRUE" if value_str_unquoted.lower() == "true" else "FALSE" + elif "string" in datatype_lower: + # Usar el valor sin comillas originales y escapar las internas + escaped_value = value_str_unquoted.replace("'", "''") + return f"'{escaped_value}'" + elif "char" in datatype_lower: + # Usar el valor sin comillas originales y escapar las internas + escaped_value = value_str_unquoted.replace("'", "''") + # SCL usa comillas simples para Char. Asegurar que sea un solo caracter si es posible? + # Por ahora, solo formatear. Longitud se verifica en TIA. + return f"'{escaped_value}'" elif "real" in datatype_lower or "lreal" in datatype_lower: try: - f_val = float(value_str) + # Intentar convertir a float + f_val = float(value_str_unquoted) s_val = str(f_val) + # Asegurar que tenga punto decimal si es entero if "." not in s_val and "e" not in s_val.lower(): s_val += ".0" return s_val except ValueError: - if re.match(r"^[a-zA-Z_][a-zA-Z0-9_]*$", value_str): - return value_str - return f"'{value_str}'" - elif "time" in datatype_lower: # Añadido Time, S5Time, LTime - # Quitar T#, LT#, S5T# si existen + # Podría ser constante simbólica + if re.match(r"^[a-zA-Z_][a-zA-Z0-9_]*$", value_str_unquoted): + return value_str_unquoted + print( + f"Advertencia: Valor '{value_str}' no reconocido como real o símbolo para tipo {datatype}. Devolviendo como string." 
+ ) + # Use the robust fallback formatting here too + escaped_for_scl = ( + value_str_unquoted.replace("\\", "\\\\") + .replace("'", "''") + .replace("\n", "") + .replace("\r", "") + ) + return f"'{escaped_for_scl}'" + elif "time" in datatype_lower: + # Quitar prefijos y añadir el correcto según el tipo específico prefix = "" - if value_str.upper().startswith("T#"): + val_to_use = value_str_unquoted # Usar valor sin comillas + if val_to_use.upper().startswith("T#"): prefix = "T#" - value_str = value_str[2:] - elif value_str.upper().startswith("LT#"): + val_to_use = val_to_use[2:] + elif val_to_use.upper().startswith("LT#"): prefix = "LT#" - value_str = value_str[3:] - elif value_str.upper().startswith("S5T#"): + val_to_use = val_to_use[3:] + elif val_to_use.upper().startswith("S5T#"): prefix = "S5T#" - value_str = value_str[4:] - # Devolver con el prefijo correcto o T# por defecto si no había - if prefix: - return f"{prefix}{value_str}" - elif "s5time" in datatype_lower: - return f"S5T#{value_str}" + val_to_use = val_to_use[4:] + + if "s5time" in datatype_lower: + return f"S5T#{val_to_use}" elif "ltime" in datatype_lower: - return f"LT#{value_str}" + return f"LT#{val_to_use}" else: - return f"T#{value_str}" # Default a TIME - elif "date" in datatype_lower: # Añadido Date, DT, TOD - if value_str.upper().startswith("D#"): - return value_str - elif "dt" in datatype_lower or "date_and_time" in datatype_lower: - if value_str.upper().startswith("DT#"): - return value_str - else: - return f"DT#{value_str}" # Añadir prefijo DT# + return f"T#{val_to_use}" # Default a TIME + elif "date" in datatype_lower: + val_to_use = value_str_unquoted + # Handle DTL first as it's longer + if "dtl" in datatype_lower or "date_and_time" in datatype_lower: + prefix = "DTL#" if val_to_use.upper().startswith("DTL#") else "DTL#" + val_to_use = ( + val_to_use[4:] if val_to_use.upper().startswith("DTL#") else val_to_use + ) + return f"{prefix}{val_to_use}" + elif "dt" in datatype_lower: + prefix = "DT#" if val_to_use.upper().startswith("DT#") else "DT#" + val_to_use = ( + val_to_use[3:] if val_to_use.upper().startswith("DT#") else val_to_use + ) + return f"{prefix}{val_to_use}" elif "tod" in datatype_lower or "time_of_day" in datatype_lower: - if value_str.upper().startswith("TOD#"): - return value_str - else: - return f"TOD#{value_str}" # Añadir prefijo TOD# - else: - return f"D#{value_str}" # Default a Date - # Fallback genérico + prefix = "TOD#" if val_to_use.upper().startswith("TOD#") else "TOD#" + val_to_use = ( + val_to_use[4:] if val_to_use.upper().startswith("TOD#") else val_to_use + ) + return f"{prefix}{val_to_use}" + else: # Default a Date D# + prefix = "D#" if val_to_use.upper().startswith("D#") else "D#" + val_to_use = ( + val_to_use[2:] if val_to_use.upper().startswith("D#") else val_to_use + ) + return f"{prefix}{val_to_use}" + + # --- Fallback for completely unknown types or complex structures --- else: + # Si es un nombre válido (posiblemente UDT, constante global, etc.), devolverlo tal cual + # Ajustar regex para permitir más caracteres si es necesario if re.match( - r'^[a-zA-Z_][a-zA-Z0-9_."#\[\]]+$', value_str - ): # Permitir más caracteres en símbolos/tipos - # Si es un UDT o Struct complejo, podría venir con comillas, quitarlas - if value_str.startswith('"') and value_str.endswith('"'): + r'^[a-zA-Z_#"][a-zA-Z0-9_."#\[\]%]+$', value_str + ): # Permitir % para accesos tipo %DB1.DBD0 + # Quitar comillas externas si es un UDT o struct complejo + if ( + value_str.startswith('"') + and 
value_str.endswith('"') + and len(value_str) > 1 + ): return value_str[1:-1] + # Mantener comillas si es acceso a DB ("DB_Name".Var) + if '"' in value_str and "." in value_str and value_str.count('"') == 2: + return value_str + # Si no tiene comillas y es un nombre simple o acceso #temp o %I0.0 etc + if not value_str.startswith('"') and not value_str.startswith("'"): + # Formatear nombres simples, pero dejar accesos % y # tal cual + if value_str.startswith("#") or value_str.startswith("%"): + return value_str + else: + # return format_variable_name(value_str) # Evitar formatear aquí, puede ser una constante + return value_str # Return as is if it looks symbolic + # Devolver el valor original si tiene comillas internas o estructura compleja no manejada arriba return value_str else: - escaped_value = value_str.replace("'", "''") - return f"'{escaped_value}'" + # Si no parece un nombre/símbolo/acceso, tratarlo como string (último recurso) + print( + f"DEBUG format_scl_start_value: Fallback final. value_str_unquoted='{repr(value_str_unquoted)}', datatype='{datatype}'" + ) + # Use the robust fallback formatting + escaped_for_scl = ( + value_str_unquoted.replace("\\", "\\\\") + .replace("'", "''") + .replace("\n", "") + .replace("\r", "") + ) + return f"'{escaped_for_scl}'" + + +# ... (generate_scl_declarations and generate_scl function remain the same as the previous version) ... +# --- (Incluye aquí las funciones generate_scl_declarations y generate_scl SIN CAMBIOS respecto a la respuesta anterior) --- # --- NUEVA FUNCIÓN RECURSIVA para generar declaraciones SCL (VAR/STRUCT/ARRAY) --- @@ -155,87 +251,132 @@ def generate_scl_declarations(variables, indent_level=1): for var in variables: var_name_scl = format_variable_name(var.get("name")) var_dtype_raw = var.get("datatype", "VARIANT") - # Limpiar comillas de tipos de datos UDT ("MyType" -> MyType) - var_dtype = ( - var_dtype_raw.strip('"') - if var_dtype_raw.startswith('"') and var_dtype_raw.endswith('"') - else var_dtype_raw - ) - var_comment = var.get("comment") start_value = var.get("start_value") children = var.get("children") # Para structs array_elements = var.get("array_elements") # Para arrays - # Manejar tipos de datos Array especiales - array_match = re.match(r"(Array\[.*\]\s+of\s+)(.*)", var_dtype, re.IGNORECASE) - base_type_for_init = var_dtype - declaration_dtype = var_dtype - if array_match: - array_prefix = array_match.group(1) - base_type_raw = array_match.group(2).strip() - # Limpiar comillas del tipo base del array - base_type_for_init = ( - base_type_raw.strip('"') - if base_type_raw.startswith('"') and base_type_raw.endswith('"') - else base_type_raw + # Limpiar comillas del tipo de dato si es UDT/String/etc. + var_dtype_cleaned = var_dtype_raw + if isinstance(var_dtype_raw, str): + if var_dtype_raw.startswith('"') and var_dtype_raw.endswith('"'): + var_dtype_cleaned = var_dtype_raw[1:-1] + # Manejar caso 'Array [...] 
of "MyUDT"' + array_match = re.match( + r'(Array\[.*\]\s+of\s+)"(.*)"', var_dtype_raw, re.IGNORECASE ) - declaration_dtype = ( - f'{array_prefix}"{base_type_for_init}"' - if '"' not in base_type_raw - else f"{array_prefix}{base_type_raw}" - ) # Reconstruir con comillas si es UDT + if array_match: + var_dtype_cleaned = f"{array_match.group(1)}{array_match.group(2)}" # Quitar comillas del tipo base - # Reconstruir declaración con comillas si es UDT y no array - elif ( - not array_match and var_dtype != base_type_for_init - ): # Es un tipo que necesita comillas (UDT) - declaration_dtype = f'"{var_dtype}"' + # Determinar tipo base para inicialización (importante para arrays) + base_type_for_init = var_dtype_cleaned + array_prefix_for_decl = "" + if var_dtype_cleaned.lower().startswith("array["): + match = re.match( + r"(Array\[.*\]\s+of\s+)(.*)", var_dtype_cleaned, re.IGNORECASE + ) + if match: + array_prefix_for_decl = match.group(1) + base_type_for_init = match.group(2).strip() + + # Construir tipo de dato para la declaración SCL + declaration_dtype = var_dtype_raw # Usar el raw por defecto + # Si es UDT o tipo complejo que requiere comillas y no es array simple + if base_type_for_init != var_dtype_cleaned and not array_prefix_for_decl: + # Poner comillas si no las tiene ya el tipo base + if not base_type_for_init.startswith('"'): + declaration_dtype = f'"{base_type_for_init}"' + else: + declaration_dtype = base_type_for_init # Ya tiene comillas + # Si es array de UDT/complejo, reconstruir con comillas en el tipo base + elif array_prefix_for_decl and base_type_for_init != var_dtype_cleaned: + if not base_type_for_init.startswith('"'): + declaration_dtype = f'{array_prefix_for_decl}"{base_type_for_init}"' + else: + declaration_dtype = f"{array_prefix_for_decl}{base_type_for_init}" declaration_line = f"{indent}{var_name_scl} : {declaration_dtype}" - init_value = None + init_value_scl = None # ---- Arrays ---- if array_elements: - # Ordenar índices (asumiendo que son numéricos) + # Ordenar índices (asumiendo que son numéricos '0', '1', ...) try: - sorted_indices = sorted(array_elements.keys(), key=int) + # Extraer números de los índices string + indices_numeric = {int(k): v for k, v in array_elements.items()} + sorted_indices = sorted(indices_numeric.keys()) + # Mapear de nuevo a string para buscar valor + sorted_indices_str = [str(k) for k in sorted_indices] except ValueError: - sorted_indices = sorted( - array_elements.keys() - ) # Fallback a orden alfabético + # Fallback a orden alfabético si los índices no son números + print( + f"Advertencia: Índices de array no numéricos para '{var_name_scl}'. Usando orden alfabético." + ) + sorted_indices_str = sorted(array_elements.keys()) - init_values = [ - format_scl_start_value(array_elements[idx], base_type_for_init) - for idx in sorted_indices - ] + init_values = [] + for idx_str in sorted_indices_str: + try: + formatted_val = format_scl_start_value( + array_elements[idx_str], base_type_for_init + ) + init_values.append(formatted_val) + except Exception as e_fmt: + print( + f"ERROR: Falló formateo para índice {idx_str} de array '{var_name_scl}'. Valor: {array_elements[idx_str]}. 
Error: {e_fmt}" + ) + init_values.append(f"/*ERR_FMT_{idx_str}*/") # Placeholder de error + + # Filtrar Nones que pueden venir de format_scl_start_value si el valor era None valid_inits = [v for v in init_values if v is not None] if valid_inits: - init_value = f"[{', '.join(valid_inits)}]" + # Si todos los valores son iguales y es un array grande, podríamos usar notación x(value) + # Simplificación: por ahora, listar todos + init_value_scl = f"[{', '.join(valid_inits)}]" + elif array_elements: # Si había elementos pero todos formatearon a None + print( + f"Advertencia: Todos los valores iniciales para array '{var_name_scl}' son None o inválidos." + ) # ---- Structs ---- elif children: - # No añadir comentario // Struct aquí, es redundante - scl_lines.append(declaration_line) # Añadir línea de declaración base + # El valor inicial de un struct se maneja recursivamente dentro + # Añadir comentario? Puede ser redundante. + scl_lines.append( + declaration_line + ) # Añadir línea de declaración base STRUCT scl_lines.append(f"{indent}STRUCT") + # Llamada recursiva para los miembros internos scl_lines.extend(generate_scl_declarations(children, indent_level + 1)) scl_lines.append(f"{indent}END_STRUCT;") - if var_comment: + if var_comment: # Comentario después de END_STRUCT scl_lines.append(f"{indent}// {var_comment}") - scl_lines.append("") # Línea extra - continue # Saltar resto para Struct + scl_lines.append("") # Línea extra para legibilidad + continue # Saltar el resto de la lógica para este struct # ---- Tipos Simples ---- else: if start_value is not None: - init_value = format_scl_start_value(start_value, var_dtype) + try: + init_value_scl = format_scl_start_value( + start_value, base_type_for_init + ) # Usar tipo base + except Exception as e_fmt_simple: + print( + f"ERROR: Falló formateo para valor simple de '{var_name_scl}'. Valor: {start_value}. Error: {e_fmt_simple}" + ) + init_value_scl = f"/*ERR_FMT_SIMPLE*/" # Placeholder + + # Añadir inicialización si existe y no es None + if init_value_scl is not None: + declaration_line += f" := {init_value_scl}" - # Añadir inicialización si existe - if init_value: - declaration_line += f" := {init_value}" declaration_line += ";" + + # Añadir comentario si existe if var_comment: declaration_line += f" // {var_comment}" + scl_lines.append(declaration_line) return scl_lines @@ -243,7 +384,7 @@ def generate_scl_declarations(variables, indent_level=1): # --- Función Principal de Generación SCL --- def generate_scl(processed_json_filepath, output_scl_filepath): - """Genera un archivo SCL a partir del JSON procesado (FC/FB o DB).""" + """Genera un archivo SCL a partir del JSON procesado (FC/FB/OB o DB).""" # Actualizado if not os.path.exists(processed_json_filepath): print( @@ -263,33 +404,41 @@ def generate_scl(processed_json_filepath, output_scl_filepath): # --- Extracción de Información del Bloque (Común) --- block_name = data.get("block_name", "UnknownBlock") block_number = data.get("block_number") - block_lang_original = data.get("language", "Unknown") # Será "DB" para Data Blocks - block_type = data.get("block_type", "Unknown") # FC, FB, GlobalDB + # block_lang_original = data.get("language", "Unknown") # Lenguaje original (SCL, LAD, DB...) 
+ block_type = data.get( + "block_type", "Unknown" + ) # Tipo de bloque (FC, FB, GlobalDB, OB) <-- Usar este block_comment = data.get("block_comment", "") scl_block_name = format_variable_name(block_name) # Nombre SCL seguro print( - f"Generando SCL para: {block_type} '{scl_block_name}' (Original: {block_name}, Lang: {block_lang_original})" + f"Generando SCL para: {block_type} '{scl_block_name}' (Original: {block_name})" # Quitado lenguaje original del log ) scl_output = [] - # --- GENERACIÓN PARA DATA BLOCK (DB) --- - if block_lang_original == "DB": + # --- MODIFICADO: GENERACIÓN PARA DATA BLOCK (GlobalDB) --- + if block_type == "GlobalDB": # <-- Comprobar tipo de bloque print("Modo de generación: DATA_BLOCK") scl_output.append(f"// Block Type: {block_type}") scl_output.append(f"// Block Name (Original): {block_name}") if block_number: scl_output.append(f"// Block Number: {block_number}") if block_comment: - scl_output.append(f"// Block Comment: {block_comment}") + # Dividir comentarios largos en múltiples líneas + comment_lines = block_comment.splitlines() + scl_output.append(f"// Block Comment:") + for line in comment_lines: + scl_output.append(f"// {line}") scl_output.append("") scl_output.append(f'DATA_BLOCK "{scl_block_name}"') scl_output.append("{ S7_Optimized_Access := 'TRUE' }") # Asumir optimizado scl_output.append("VERSION : 0.1") scl_output.append("") interface_data = data.get("interface", {}) + # En DBs, la sección relevante suele ser 'Static' static_vars = interface_data.get("Static", []) if static_vars: scl_output.append("VAR") + # Usar la función recursiva para generar declaraciones scl_output.extend(generate_scl_declarations(static_vars, indent_level=1)) scl_output.append("END_VAR") scl_output.append("") @@ -297,182 +446,288 @@ def generate_scl(processed_json_filepath, output_scl_filepath): print( "Advertencia: No se encontró sección 'Static' o está vacía en la interfaz del DB." ) + # Añadir bloque VAR vacío si no hay variables scl_output.append("VAR") scl_output.append("END_VAR") scl_output.append("") scl_output.append("BEGIN") - scl_output.append("") + scl_output.append( + " // Los Data Blocks no tienen código ejecutable en BEGIN/END" + ) scl_output.append("END_DATA_BLOCK") - # --- GENERACIÓN PARA FUNCTION BLOCK / FUNCTION (FC/FB) --- + # --- MODIFICADO: GENERACIÓN PARA FC/FB/OB --- else: - print("Modo de generación: FUNCTION_BLOCK / FUNCTION") - scl_block_keyword = "FUNCTION_BLOCK" if block_type == "FB" else "FUNCTION" + # Determinar palabra clave SCL + scl_block_keyword = "FUNCTION_BLOCK" # Default + if block_type == "FC": + scl_block_keyword = "FUNCTION" + elif block_type == "OB": + scl_block_keyword = "ORGANIZATION_BLOCK" + elif block_type == "FB": + scl_block_keyword = "FUNCTION_BLOCK" + else: # Fallback + print( + f"Advertencia: Tipo de bloque desconocido '{block_type}', usando FUNCTION_BLOCK." + ) + scl_block_keyword = "FUNCTION_BLOCK" # O quizás lanzar error? 
+ + print(f"Modo de generación: {scl_block_keyword}") + # Cabecera del Bloque scl_output.append(f"// Block Type: {block_type}") scl_output.append(f"// Block Name (Original): {block_name}") if block_number: scl_output.append(f"// Block Number: {block_number}") - scl_output.append(f"// Original Language: {block_lang_original}") + # Indicar lenguaje original de las redes si es relevante + original_net_langs = set( + n.get("language", "Unknown") for n in data.get("networks", []) + ) + scl_output.append( + f"// Original Network Languages: {', '.join(l for l in original_net_langs if l != 'Unknown')}" + ) if block_comment: - scl_output.append(f"// Block Comment: {block_comment}") + comment_lines = block_comment.splitlines() + scl_output.append(f"// Block Comment:") + for line in comment_lines: + scl_output.append(f"// {line}") scl_output.append("") - # Manejar tipo de retorno para FUNCTION + + # Manejar tipo de retorno para FUNCTION (FC) return_type = "Void" # Default interface_data = data.get("interface", {}) if scl_block_keyword == "FUNCTION" and interface_data.get("Return"): - return_member = interface_data["Return"][ - 0 - ] # Asumir un solo valor de retorno + # Asumir un solo valor de retorno + return_member = interface_data["Return"][0] return_type_raw = return_member.get("datatype", "Void") + # Limpiar comillas si es UDT/String return_type = ( - return_type_raw.strip('"') - if return_type_raw.startswith('"') and return_type_raw.endswith('"') + return_type_raw[1:-1] + if isinstance(return_type_raw, str) + and return_type_raw.startswith('"') + and return_type_raw.endswith('"') else return_type_raw ) - # Añadir comillas si es UDT - if return_type != return_type_raw: + # Añadir comillas si es UDT y no las tenía + if ( + return_type != return_type_raw + and not return_type_raw.lower().startswith("array") + ): return_type = f'"{return_type}"' + else: # Mantener raw si es tipo básico o ya tenía comillas + return_type = return_type_raw - scl_output.append( - f'{scl_block_keyword} "{scl_block_name}" : {return_type}' - if scl_block_keyword == "FUNCTION" - else f'{scl_block_keyword} "{scl_block_name}"' - ) - scl_output.append("{ S7_Optimized_Access := 'TRUE' }") + # Línea de declaración del bloque + if scl_block_keyword == "FUNCTION": + scl_output.append(f'{scl_block_keyword} "{scl_block_name}" : {return_type}') + else: # FB y OB + scl_output.append(f'{scl_block_keyword} "{scl_block_name}"') + + # Atributos y versión + scl_output.append("{ S7_Optimized_Access := 'TRUE' }") # Asumir optimizado scl_output.append("VERSION : 0.1") scl_output.append("") - # Declaraciones de Interfaz FC/FB - section_order = [ - "Input", - "Output", - "InOut", - "Static", - "Temp", - "Constant", - ] # Return ya está en cabecera - declared_temps = set() + # Declaraciones de Interfaz (Input, Output, InOut, Static, Temp, Constant) + # Orden estándar SCL + section_order = ["Input", "Output", "InOut", "Static", "Temp", "Constant"] + declared_temps = set() # Para rastrear temps ya declaradas + has_declarations = False + for section_name in section_order: vars_in_section = interface_data.get(section_name, []) if vars_in_section: + has_declarations = True + # Mapeo de nombres de sección JSON a palabras clave SCL VAR_ scl_section_keyword = f"VAR_{section_name.upper()}" if section_name == "Static": - scl_section_keyword = "VAR_STAT" + scl_section_keyword = "VAR_STAT" # Para FBs if section_name == "Temp": scl_section_keyword = "VAR_TEMP" if section_name == "Constant": - scl_section_keyword = "CONSTANT" + scl_section_keyword = 
"CONSTANT" # CONSTANT no usa VAR_ + scl_output.append(scl_section_keyword) + # Usar la función recursiva para generar declaraciones scl_output.extend( generate_scl_declarations(vars_in_section, indent_level=1) ) + # Añadir END_VAR (o END_CONSTANT) + scl_output.append( + "END_VAR" if section_name != "Constant" else "END_CONSTANT" + ) + scl_output.append("") # Línea en blanco + + # Guardar nombres de Temp declarados explícitamente if section_name == "Temp": declared_temps.update( format_variable_name(v.get("name")) for v in vars_in_section if v.get("name") ) - scl_output.append("END_VAR") - scl_output.append("") - - # Declaraciones VAR_TEMP adicionales detectadas - temp_vars = set() + # Declaraciones VAR_TEMP adicionales (auto-detectadas) + # Buscar variables que empiecen con #_temp_ en el SCL generado + temp_vars_detected = set() + # Patrón para encontrar #variable o "#variable" temp_pattern = re.compile( - r'"?#(_temp_[a-zA-Z0-9_]+)"?|"?(_temp_[a-zA-Z0-9_]+)"?' - ) + r'"?(#\w+)"?' + ) # Busca # seguido de caracteres alfanuméricos + for network in data.get("networks", []): for instruction in network.get("logic", []): + # Revisar el SCL final y el SCL de actualización de memoria si existe scl_code = instruction.get("scl", "") - edge_update_code = instruction.get("_edge_mem_update_scl", "") + edge_update_code = instruction.get( + "_edge_mem_update_scl", "" + ) # Para flancos code_to_scan = ( (scl_code if scl_code else "") + "\n" + (edge_update_code if edge_update_code else "") ) + if code_to_scan: + # Usar findall para encontrar todas las ocurrencias found_temps = temp_pattern.findall(code_to_scan) - for temp_tuple in found_temps: - temp_name = next((t for t in temp_tuple if t), None) + for temp_name in found_temps: + # findall devuelve el grupo capturado (#...) if temp_name: - temp_vars.add( - "#" + temp_name - if not temp_name.startswith("#") - else temp_name - ) - additional_temps = sorted(list(temp_vars - declared_temps)) + temp_vars_detected.add(temp_name) + + # Filtrar las que ya estaban declaradas + additional_temps = sorted(list(temp_vars_detected - declared_temps)) + if additional_temps: - if not interface_data.get("Temp"): + print(f"INFO: Detectadas {len(additional_temps)} VAR_TEMP adicionales.") + # Si no se declaró la sección Temp antes, añadirla ahora + if "Temp" not in interface_data or not interface_data["Temp"]: scl_output.append("VAR_TEMP") - for var_name in additional_temps: - scl_name = format_variable_name(var_name) - inferred_type = "Bool" # Asumir Bool + + for temp_name in additional_temps: + # Formatear por si acaso, aunque el patrón ya debería dar #nombre + scl_name = format_variable_name(temp_name) + # Inferir tipo (Bool es lo más común para temporales internos) + # Se podría mejorar si el nombre da pistas (ej. 
_temp_r para Real) + inferred_type = "Bool" # Asumir Bool por defecto scl_output.append( f" {scl_name} : {inferred_type}; // Auto-generated temporary" ) - if not interface_data.get("Temp"): + + # Si abrimos la sección aquí, cerrarla + if "Temp" not in interface_data or not interface_data["Temp"]: scl_output.append("END_VAR") scl_output.append("") - # Cuerpo del Bloque FC/FB + # --- Cuerpo del Bloque (BEGIN...END) --- scl_output.append("BEGIN") scl_output.append("") - # Iterar por redes y lógica (como antes, incluyendo manejo STL Markdown) + # Iterar por redes y lógica (incluyendo manejo STL/SCL crudo) for i, network in enumerate(data.get("networks", [])): - network_title = network.get("title", f'Network {network.get("id")}') + network_title = network.get( + "title", f'Network {network.get("id", i+1)}' + ) # Usar i+1 si falta ID network_comment = network.get("comment", "") - network_lang = network.get("language", "LAD") + network_lang = network.get("language", "LAD") # Lenguaje original de la red scl_output.append( f" // Network {i+1}: {network_title} (Original Language: {network_lang})" ) if network_comment: + # Indentar comentarios de red for line in network_comment.splitlines(): - scl_output.append(f" // {line}") - scl_output.append("") + scl_output.append(f" // {line}") + scl_output.append("") # Línea en blanco antes del código de red + network_has_code = False + logic_in_network = network.get("logic", []) + + if not logic_in_network: + scl_output.append(f" // Network {i+1} has no logic elements.") + scl_output.append("") + continue + + # --- Manejo Especial Redes STL --- if network_lang == "STL": - network_has_code = True - if ( - network.get("logic") - and network["logic"][0].get("type") == "RAW_STL_CHUNK" - ): - raw_stl_code = network["logic"][0].get( + # Asumir que la lógica STL está en el primer elemento como RAW_STL_CHUNK + if logic_in_network[0].get("type") == "RAW_STL_CHUNK": + network_has_code = True + raw_stl_code = logic_in_network[0].get( "stl", "// ERROR: STL code missing" ) - scl_output.append(f" {'//'} ```STL") + # Incrustar STL como comentario multi-línea o delimitado + scl_output.append(f" // --- BEGIN STL Network {i+1} ---") + # Comentar cada línea STL for stl_line in raw_stl_code.splitlines(): - scl_output.append(f" {stl_line}") - scl_output.append(f" {'//'} ```") + scl_output.append(f" // {stl_line}") + scl_output.append(f" // --- END STL Network {i+1} ---") + scl_output.append("") # Línea en blanco después else: - scl_output.append(" // ERROR: Contenido STL inesperado.") - else: # LAD, FBD, SCL, etc. - for instruction in network.get("logic", []): + scl_output.append( + f" // ERROR: Contenido STL inesperado en Network {i+1}." 
+ ) + scl_output.append("") + + # --- Manejo Redes SCL/LAD/FBD procesadas --- + else: + # Iterar por las instrucciones procesadas + for instruction in logic_in_network: instruction_type = instruction.get("type", "") scl_code = instruction.get("scl", "") is_grouped = instruction.get("grouped", False) + + # Saltar instrucciones agrupadas (su lógica está en el IF) if is_grouped: continue + + # Incluir SCL si la instrucción fue procesada o es un chunk crudo/error/placeholder if ( instruction_type.endswith(SCL_SUFFIX) - or instruction_type in ["RAW_SCL_CHUNK", "UNSUPPORTED_LANG"] + or instruction_type + in [ + "RAW_SCL_CHUNK", + "UNSUPPORTED_LANG", + "UNSUPPORTED_CONTENT", + "PARSING_ERROR", + ] + or "_error" in instruction_type # Incluir errores comentados ) and scl_code: + + # Comprobar si el SCL es solo un comentario (a menos que sea un bloque IF) is_only_comment = all( line.strip().startswith("//") for line in scl_code.splitlines() if line.strip() ) is_if_block = scl_code.strip().startswith("IF") - if not is_only_comment or is_if_block: + + # Añadir el SCL indentado si no es solo un comentario (o si es un IF/Error) + if ( + not is_only_comment + or is_if_block + or "_error" in instruction_type + or instruction_type + in [ + "UNSUPPORTED_LANG", + "UNSUPPORTED_CONTENT", + "PARSING_ERROR", + ] + ): network_has_code = True for line in scl_code.splitlines(): - scl_output.append(f" {line}") - if network_has_code: + scl_output.append(f" {line}") # Indentar código + # Añadir línea en blanco después de cada bloque SCL para legibilidad + scl_output.append("") + + # Si la red no produjo código SCL imprimible (ej. solo lógica interna) + if ( + not network_has_code and network_lang != "STL" + ): # No añadir para STL ya comentado + scl_output.append( + f" // Network {i+1} did not produce printable SCL code." + ) scl_output.append("") - else: - scl_output.append(f" // Network did not produce printable SCL code.") - scl_output.append("") - # Fin del bloque FC/FB - scl_output.append(f"END_{scl_block_keyword}") + + # Fin del bloque FC/FB/OB + scl_output.append(f"END_{scl_block_keyword}") # <-- Usar keyword determinada # --- Escritura del Archivo SCL (Común) --- print(f"Escribiendo archivo SCL en: {output_scl_filepath}") @@ -492,7 +747,7 @@ if __name__ == "__main__": import argparse import os import sys - import traceback # Asegurarse que traceback está importado si se usa en generate_scl + import traceback # Asegurarse que traceback está importado # Configurar ArgumentParser para recibir la ruta del XML original obligatoria parser = argparse.ArgumentParser( @@ -511,7 +766,6 @@ if __name__ == "__main__": print( f"Advertencia (x3): Archivo XML original no encontrado: '{source_xml_file}', pero se intentará encontrar el JSON procesado." ) - # No salir necesariamente. 
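
# --- Illustrative sketch (not part of the patch) ---
# The emission filter used when walking network logic above, reduced to a
# single predicate: skip grouped instructions, keep processed (*_sympy_processed),
# raw and error chunks, and drop comment-only SCL unless it is an IF block or an
# error marker. SCL_SUFFIX and the type names follow the code; the rest is a
# simplified assumption.
SCL_SUFFIX = "_sympy_processed"
RAW_TYPES = {"RAW_SCL_CHUNK", "UNSUPPORTED_LANG", "UNSUPPORTED_CONTENT", "PARSING_ERROR"}

def should_emit(instruction):
    itype = instruction.get("type", "")
    scl = instruction.get("scl", "")
    if instruction.get("grouped") or not scl:
        return False                    # its logic was folded into a grouping IF
    if not (itype.endswith(SCL_SUFFIX) or itype in RAW_TYPES or "_error" in itype):
        return False
    only_comments = all(l.strip().startswith("//")
                        for l in scl.splitlines() if l.strip())
    return (not only_comments) or scl.strip().startswith("IF") or "_error" in itype

print(should_emit({"type": "Move_sympy_processed", "scl": '"Out" := "In";'}))            # True
print(should_emit({"type": "Contact_sympy_processed", "scl": "// folded", "grouped": True}))  # False
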
# Derivar nombres de archivos de entrada (JSON procesado) y salida (SCL) xml_filename_base = os.path.splitext(os.path.basename(source_xml_file))[0] @@ -521,8 +775,9 @@ if __name__ == "__main__": input_json_file = os.path.join( base_dir, f"{xml_filename_base}_simplified_processed.json" ) + # Cambiar extensión de salida a .scl output_scl_file = os.path.join( - base_dir, f"{xml_filename_base}_simplified_processed.scl" + base_dir, f"{xml_filename_base}_generated.scl" # Cambiado nombre de salida ) print( @@ -540,13 +795,13 @@ if __name__ == "__main__": sys.exit(1) # Salir si el archivo necesario no está else: # Llamar a la función principal de generación SCL del script - # Asumiendo que tu función principal se llama generate_scl(input_json_path, output_scl_path) try: generate_scl(input_json_file, output_scl_file) + sys.exit(0) # Salir con éxito explícitamente except Exception as e: print( f"Error Crítico (x3) durante la generación de SCL desde '{input_json_file}': {e}" ) - # traceback ya debería estar importado si generate_scl lo necesita + # traceback ya debería estar importado traceback.print_exc() sys.exit(1) # Salir con error si la función principal falla diff --git a/create_processor_files.py b/create_processor_files.py deleted file mode 100644 index 93fb2a6..0000000 --- a/create_processor_files.py +++ /dev/null @@ -1,144 +0,0 @@ -# -*- coding: utf-8 -*- -import os -import sys -import re -import argparse - -# Directorio donde se crearán los archivos de procesador -PROCESSORS_DIR = "processors" - -# Cabecera estándar para añadir a cada nuevo archivo -FILE_HEADER = """# -*- coding: utf-8 -*- - -# TODO: Import necessary functions from processor_utils -# Example: from .processor_utils import get_scl_representation, format_variable_name -# Or: import processors.processor_utils as utils - -# TODO: Define constants if needed (e.g., SCL_SUFFIX) or import them -SCL_SUFFIX = "_scl" - -# --- Function code starts --- -""" - -# Pie de página estándar con la función get_processor_info de plantilla -def get_file_footer(func_name): - """Generates the standard footer with a placeholder get_processor_info.""" - type_name_guess = func_name.replace('process_', '') - return f""" -# --- Function code ends --- - -# --- Processor Information Function --- -def get_processor_info(): - \"\"\"Returns the type name and processing function for this module.\"\"\" - # TODO: Adjust the type_name if needed (e.g., call, edge_detector, comparison, math). - # TODO: Return a list if this module handles multiple types (e.g., PBox/NBox, FC/FB). - type_name = "{type_name_guess}" # Basic guess - return {{'type_name': type_name, 'processor_func': {func_name}}} -""" - -def extract_and_create_processors(source_py_file): - """ - Extracts top-level functions starting with 'process_' from the source file - and creates individual processor files in the PROCESSORS_DIR, copying - the entire function body until the next top-level definition. 
- """ - if not os.path.exists(source_py_file): - print(f"Error: Source file not found: '{source_py_file}'") - return - - print(f"Reading source file: '{source_py_file}'") - try: - with open(source_py_file, 'r', encoding='utf-8') as f: - lines = f.readlines() - except Exception as e: - print(f"Error reading source file: {e}") - return - - os.makedirs(PROCESSORS_DIR, exist_ok=True) - print(f"Ensuring '{PROCESSORS_DIR}' directory exists.") - print("Searching for processor functions (def process_...):") - - processor_functions = [] # Store tuples of (name, start_line_index, end_line_index) - current_func_start = -1 - current_func_name = None - - # Pattern to find ANY top-level function definition - any_func_def_pattern = re.compile(r"^def\s+(\w+)\s*\(") - # Pattern specific to processor functions - process_func_def_pattern = re.compile(r"^def\s+(process_\w+)\s*\(") - - # First pass: Identify start and end lines of all top-level functions - for i, line in enumerate(lines): - match = any_func_def_pattern.match(line) - if match: - # Found a new top-level function definition - if current_func_name is not None: - # Mark the end of the *previous* function - # Only add if it was a 'process_' function - if current_func_name.startswith("process_"): - processor_functions.append((current_func_name, current_func_start, i)) - - # Start tracking the new function - current_func_name = match.group(1) - current_func_start = i - - # Add the last function found in the file (if it was a process_ function) - if current_func_name is not None and current_func_name.startswith("process_"): - processor_functions.append((current_func_name, current_func_start, len(lines))) - - # Second pass: Create files using the identified line ranges - processor_count = 0 - if not processor_functions: - print("\nWarning: No functions starting with 'process_' found at the top level.") - return - - print(f"Found {len(processor_functions)} potential processor functions.") - - for func_name, start_idx, end_idx in processor_functions: - print(f" - Processing: {func_name} (lines {start_idx+1}-{end_idx})") - func_lines = lines[start_idx:end_idx] # Extract lines for this function - # Remove trailing blank lines from the extracted block, often happens before next def - while func_lines and func_lines[-1].strip() == "": - func_lines.pop() - - create_processor_file(func_name, func_lines) - processor_count += 1 - - print(f"\nFinished processing. Attempted to create/check {processor_count} processor files in '{PROCESSORS_DIR}'.") - - -def create_processor_file(func_name, func_lines): - """Creates the individual processor file if it doesn't exist.""" - target_filename = f"{func_name}.py" - target_filepath = os.path.join(PROCESSORS_DIR, target_filename) - - if os.path.exists(target_filepath): - print(f" * Skipping: '{target_filename}' already exists.") - return - - print(f" * Creating: '{target_filename}'...") - try: - with open(target_filepath, 'w', encoding='utf-8') as f: - f.write(FILE_HEADER) - # Write the function lines, ensuring consistent newline endings - for line in func_lines: - f.write(line.rstrip() + '\n') - f.write(get_file_footer(func_name)) - except Exception as e: - print(f" Error writing file '{target_filename}': {e}") - - -if __name__ == "__main__": - parser = argparse.ArgumentParser( - description="Extracts 'process_*' functions from a source Python file " - "and creates individual processor files." 
- ) - parser.add_argument( - "source_file", - default="x2_process.py", # Valor por defecto - nargs='?', # Hacerlo opcional para que use el default - help="Path to the source Python file (default: x2_process.py)" - ) - args = parser.parse_args() - - extract_and_create_processors(args.source_file) \ No newline at end of file diff --git a/parsers/__init__.py b/parsers/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/parsers/parse_lad_fbd.py b/parsers/parse_lad_fbd.py new file mode 100644 index 0000000..f3e7ad9 --- /dev/null +++ b/parsers/parse_lad_fbd.py @@ -0,0 +1,548 @@ +# ToUpload/parsers/parse_lad_fbd.py +# -*- coding: utf-8 -*- +from lxml import etree +from collections import defaultdict +import copy +import traceback + +# Importar desde las utilidades del parser +from .parser_utils import ( + ns, + parse_access, + parse_part, + parse_call, + get_multilingual_text, +) + +# Sufijo usado en x2 para identificar instrucciones procesadas (útil para EN/ENO) +SCL_SUFFIX = "_sympy_processed" # Asumimos que este es el sufijo de x2 + + +def parse_lad_fbd_network(network_element): + """ + Parsea una red LAD/FBD/GRAPH, extrae lógica y añade conexiones EN/ENO implícitas. + Devuelve un diccionario representando la red para el JSON. + """ + if network_element is None: + return { + "id": "ERROR", + "title": "Invalid Network Element", + "logic": [], + "error": "Input element was None", + } + + network_id = network_element.get("ID") + # Usar get_multilingual_text de utils + title_element = network_element.xpath( + ".//iface:MultilingualText[@CompositionName='Title']", namespaces=ns + ) + network_title = ( + get_multilingual_text(title_element[0]) + if title_element + else f"Network {network_id}" + ) + comment_element = network_element.xpath( + "./ObjectList/MultilingualText[@CompositionName='Comment']", namespaces=ns + ) # OJO: Path relativo a CompileUnit? + if not comment_element: # Intentar path alternativo si el anterior falla + comment_element = network_element.xpath( + ".//MultilingualText[@CompositionName='Comment']", namespaces=ns + ) # Más genérico dentro de la red + network_comment = ( + get_multilingual_text(comment_element[0]) if comment_element else "" + ) + + # --- Determinar Lenguaje (ya que este parser maneja varios) --- + network_lang = "Unknown" + attr_list_net = network_element.xpath("./AttributeList") + if attr_list_net: + lang_node_net = attr_list_net[0].xpath("./ProgrammingLanguage/text()") + if lang_node_net: + network_lang = lang_node_net[0].strip() + + # --- Buscar FlgNet --- + # Buscar NetworkSource y luego FlgNet (ambos usan namespace flg) + network_source_node = network_element.xpath(".//flg:NetworkSource", namespaces=ns) + flgnet = None + if network_source_node: + flgnet_list = network_source_node[0].xpath("./flg:FlgNet", namespaces=ns) + if flgnet_list: + flgnet = flgnet_list[0] + else: # Intentar buscar FlgNet directamente si no hay NetworkSource + flgnet_list = network_element.xpath(".//flg:FlgNet", namespaces=ns) + if flgnet_list: + flgnet = flgnet_list[0] + + if flgnet is None: + return { + "id": network_id, + "title": network_title, + "comment": network_comment, + "language": network_lang, + "logic": [], + "error": "FlgNet not found inside NetworkSource or CompileUnit", + } + + # 1. 
Parse Access, Parts, Calls (usan utils) + access_map = {} + # Corregir XPath para buscar Access dentro de FlgNet/Parts + for acc in flgnet.xpath(".//flg:Parts/flg:Access", namespaces=ns): + acc_info = parse_access(acc) + if acc_info and acc_info.get("uid") and "error" not in acc_info.get("type", ""): + access_map[acc_info["uid"]] = acc_info + elif acc_info: + print( + f"Advertencia: Ignorando Access inválido o con error UID={acc_info.get('uid')} en red {network_id}" + ) + + parts_and_calls_map = {} + # Corregir XPath para buscar Part y Call dentro de FlgNet/Parts + instruction_elements = flgnet.xpath( + ".//flg:Parts/flg:Part | .//flg:Parts/flg:Call", namespaces=ns + ) + for element in instruction_elements: + parsed_info = None + tag_name = etree.QName(element.tag).localname + if tag_name == "Part": + parsed_info = parse_part(element) # Usa utils + elif tag_name == "Call": + parsed_info = parse_call(element) # Usa utils + + if ( + parsed_info + and parsed_info.get("uid") + and "error" not in parsed_info.get("type", "") + ): + parts_and_calls_map[parsed_info["uid"]] = parsed_info + elif parsed_info: + # Si parse_call/parse_part devolvió error, lo guardamos para tener el UID + print( + f"Advertencia: {tag_name} con error UID={parsed_info.get('uid')} en red {network_id}. Error: {parsed_info.get('error')}" + ) + parts_and_calls_map[parsed_info["uid"]] = ( + parsed_info # Guardar aunque tenga error + ) + + # 2. Parse Wires (lógica compleja, mantener aquí) + wire_connections = defaultdict(list) # destination -> [source1, source2] + source_connections = defaultdict(list) # source -> [dest1, dest2] + eno_outputs = defaultdict(list) + qname_powerrail = etree.QName(ns["flg"], "Powerrail") + qname_identcon = etree.QName( + ns["flg"], "IdentCon" + ) # Conexión a/desde Access (variable/constante) + qname_namecon = etree.QName( + ns["flg"], "NameCon" + ) # Conexión a/desde Part/Call (pin con nombre) + qname_openbranch = etree.QName( + ns["flg"], "Openbranch" + ) # Rama abierta (normalmente ignorada o tratada como TRUE?) + qname_opencon = etree.QName( + ns["flg"], "OpenCon" + ) # Conexión abierta (pin no conectado) + + # Corregir XPath para buscar Wire dentro de FlgNet/Wires + for wire in flgnet.xpath(".//flg:Wires/flg:Wire", namespaces=ns): + children = wire.getchildren() + if len(children) < 2: + continue # Necesita al menos origen y destino + + source_elem = children[0] + source_uid, source_pin = None, None + + # Determinar origen + if source_elem.tag == qname_powerrail: + source_uid, source_pin = "POWERRAIL", "out" + elif source_elem.tag == qname_identcon: # Origen es una variable/constante + source_uid = source_elem.get("UId") + source_pin = "value" # Salida implícita de un Access + elif source_elem.tag == qname_namecon: # Origen es pin de instrucción + source_uid = source_elem.get("UId") + source_pin = source_elem.get("Name") + elif source_elem.tag == qname_openbranch: + # ¿Cómo manejar OpenBranch como fuente? Podría ser TRUE o una condición OR implícita + source_uid = "OPENBRANCH_" + wire.get( + "UId", "Unknown" + ) # UID único para la rama + source_pin = "out" + print( + f"Advertencia: OpenBranch encontrado como fuente en Wire UID={wire.get('UId')} (Red {network_id}). Tratando como fuente especial." 
+ ) + # No lo añadimos a parts_and_calls_map, get_sympy_representation necesitará manejarlo + # Ignorar OpenCon como fuente (no tiene sentido) + if source_uid is None or source_pin is None: + # print(f"Advertencia: Fuente de wire inválida o no soportada: {source_elem.tag} en Wire UID={wire.get('UId')}") + continue + + source_info = (source_uid, source_pin) + + # Procesar destinos + for dest_elem in children[1:]: + dest_uid, dest_pin = None, None + + if ( + dest_elem.tag == qname_identcon + ): # Destino es una variable/constante (asignación) + dest_uid = dest_elem.get("UId") + dest_pin = "value" # Entrada implícita de un Access + elif dest_elem.tag == qname_namecon: # Destino es pin de instrucción + dest_uid = dest_elem.get("UId") + dest_pin = dest_elem.get("Name") + # Ignorar Powerrail, OpenBranch, OpenCon como destinos válidos de conexión lógica principal + + if dest_uid is not None and dest_pin is not None: + dest_key = (dest_uid, dest_pin) + if source_info not in wire_connections[dest_key]: + wire_connections[dest_key].append(source_info) + + # Mapa inverso: source -> list of destinations + source_key = (source_uid, source_pin) + dest_info = (dest_uid, dest_pin) + if dest_info not in source_connections[source_key]: + source_connections[source_key].append(dest_info) + + # Trackear salidas ENO específicamente si la fuente es una instrucción + if source_pin == "eno" and source_uid in parts_and_calls_map: + if dest_info not in eno_outputs[source_uid]: + eno_outputs[source_uid].append(dest_info) + + # 3. Build Initial Logic Structure (incorporando errores) + all_logic_steps = {} + # Lista de tipos funcionales (usados para inferencia EN) + # Estos son los tipos *originales* de las instrucciones + functional_block_types = [ + "Move", + "Add", + "Sub", + "Mul", + "Div", + "Mod", + "Convert", + "Call", # Call ya está aquí + "TON", + "TOF", + "TP", + "CTU", + "CTD", + "CTUD", + "BLKMOV", # Añadidos + "Se", + "Sd", # Estos son tipos LAD que se mapearán a timers SCL + ] + # Lista de generadores RLO (usados para inferencia EN) + rlo_generators = [ + "Contact", + "O", + "Eq", + "Ne", + "Gt", + "Lt", + "Ge", + "Le", + "And", + "Xor", + "PBox", + "NBox", + "Not", + ] + + # Iterar sobre UIDs válidos (los que se pudieron parsear, aunque sea con error) + valid_instruction_uids = list(parts_and_calls_map.keys()) + + for instruction_uid in valid_instruction_uids: + instruction_info = parts_and_calls_map[instruction_uid] + # Hacer copia profunda para no modificar el mapa original + instruction_repr = copy.deepcopy(instruction_info) + instruction_repr["instruction_uid"] = instruction_uid # Asegurar UID + instruction_repr["inputs"] = {} + instruction_repr["outputs"] = {} + + # Si la instrucción ya tuvo un error de parseo, añadirlo aquí + if "error" in instruction_info: + instruction_repr["parsing_error"] = instruction_info["error"] + # No intentar poblar inputs/outputs si el parseo base falló + all_logic_steps[instruction_uid] = instruction_repr + continue + + original_type = instruction_repr.get("type", "") # Tipo de la instrucción + + # --- Poblar Entradas --- + # Lista base de pines posibles (podría obtenerse de XSDs o dinámicamente) + possible_input_pins = set(["en", "in", "in1", "in2", "pre"]) + # Añadir pines dinámicamente basados en el tipo de instrucción + if original_type in ["Contact", "Coil", "SCoil", "RCoil", "SdCoil"]: + possible_input_pins.add("operand") + elif original_type in [ + "Add", + "Sub", + "Mul", + "Div", + "Mod", + "Eq", + "Ne", + "Gt", + "Lt", + "Ge", + "Le", + ]: + 
possible_input_pins.update(["in1", "in2"]) + elif original_type in ["TON", "TOF", "TP"]: + possible_input_pins.update(["IN", "PT"]) # Pines SCL + elif original_type in ["Se", "Sd"]: + possible_input_pins.update(["s", "tv", "timer"]) # Pines LAD + elif original_type in ["CTU", "CTD", "CTUD"]: + possible_input_pins.update(["CU", "CD", "R", "LD", "PV"]) # Pines SCL/LAD + elif original_type in ["PBox", "NBox"]: + possible_input_pins.update( + ["bit", "clk", "in"] + ) # PBox/NBox usa 'in' y 'bit' + elif original_type == "BLKMOV": + possible_input_pins.add("SRCBLK") + elif original_type == "Move": + possible_input_pins.add("in") + elif original_type == "Convert": + possible_input_pins.add("in") + elif original_type == "Call": + # Para Calls, los nombres de los parámetros reales se definen en el XML + # El Xpath busca Parameter DENTRO de CallInfo, que está DENTRO de Call + call_xml_element_list = flgnet.xpath( + f".//flg:Parts/flg:Call[@UId='{instruction_uid}']", namespaces=ns + ) + if call_xml_element_list: + call_xml_element = call_xml_element_list[0] + call_info_node_list = call_xml_element.xpath( + "./flg:CallInfo", namespaces=ns + ) + if call_info_node_list: + call_param_names = call_info_node_list[0].xpath( + "./flg:Parameter/@Name", namespaces=ns + ) + possible_input_pins.update(call_param_names) + # print(f"DEBUG Call UID={instruction_uid}: Params={call_param_names}") + else: # Fallback si no hay namespace (menos probable) + call_info_node_list_no_ns = call_xml_element.xpath("./CallInfo") + if call_info_node_list_no_ns: + possible_input_pins.update( + call_info_node_list_no_ns[0].xpath("./Parameter/@Name") + ) + + # Iterar sobre pines posibles y buscar conexiones + for pin_name in possible_input_pins: + dest_key = (instruction_uid, pin_name) + if dest_key in wire_connections: + sources_list = wire_connections[dest_key] + input_sources_repr = [] + for source_uid, source_pin in sources_list: + source_repr = None + if source_uid == "POWERRAIL": + source_repr = {"type": "powerrail"} + elif source_uid.startswith("OPENBRANCH_"): + source_repr = { + "type": "openbranch", + "uid": source_uid, + } # Fuente especial + elif source_uid in access_map: + source_repr = copy.deepcopy(access_map[source_uid]) + elif source_uid in parts_and_calls_map: + source_instr_info = parts_and_calls_map[source_uid] + source_repr = { + "type": "connection", + "source_instruction_type": source_instr_info.get( + "type", "Unknown" + ), # Usar tipo base + "source_instruction_uid": source_uid, + "source_pin": source_pin, + } + else: + # Fuente desconocida (ni Access, ni Part/Call válido) + print( + f"Advertencia: Fuente desconocida UID={source_uid} conectada a {instruction_uid}.{pin_name}" + ) + source_repr = {"type": "unknown_source", "uid": source_uid} + input_sources_repr.append(source_repr) + + # Guardar la representación de la entrada (lista o dict) + instruction_repr["inputs"][pin_name] = ( + input_sources_repr[0] + if len(input_sources_repr) == 1 + else input_sources_repr + ) + + # --- Poblar Salidas (simplificado: solo conexiones a Access) --- + possible_output_pins = set( + [ + "out", + "out1", + "Q", + "q", + "eno", + "RET_VAL", + "DSTBLK", + "rt", + "cv", + "QU", + "QD", + "ET", # Añadir pines de salida estándar SCL + ] + ) + if original_type == "BLKMOV": + possible_output_pins.add("DSTBLK") + if ( + original_type == "Call" + ): # Para Calls, las salidas dependen del bloque llamado + call_xml_element_list = flgnet.xpath( + f".//flg:Parts/flg:Call[@UId='{instruction_uid}']", namespaces=ns + ) + if 
call_xml_element_list: + call_info_node_list = call_xml_element_list[0].xpath( + "./flg:CallInfo", namespaces=ns + ) + if call_info_node_list: + # Buscar parámetros con Section="Output" o "InOut" o "Return" + output_param_names = call_info_node_list[0].xpath( + "./flg:Parameter[@Section='Output' or @Section='InOut' or @Section='Return']/@Name", + namespaces=ns, + ) + possible_output_pins.update(output_param_names) + + for pin_name in possible_output_pins: + source_key = (instruction_uid, pin_name) + if source_key in source_connections: + if pin_name not in instruction_repr["outputs"]: + instruction_repr["outputs"][pin_name] = [] + for dest_uid, dest_pin in source_connections[source_key]: + if ( + dest_uid in access_map + ): # Solo registrar si va a una variable/constante + dest_operand_copy = copy.deepcopy(access_map[dest_uid]) + if ( + dest_operand_copy + not in instruction_repr["outputs"][pin_name] + ): + instruction_repr["outputs"][pin_name].append( + dest_operand_copy + ) + + all_logic_steps[instruction_uid] = instruction_repr + + # 4. Inferencia EN (modificado para usar tipos originales) + processed_blocks_en_inference = set() + try: + # Ordenar UIDs numéricamente si es posible + sorted_uids_for_en = sorted( + all_logic_steps.keys(), + key=lambda x: ( + int(x) if isinstance(x, str) and x.isdigit() else float("inf") + ), + ) + except ValueError: + sorted_uids_for_en = sorted(all_logic_steps.keys()) # Fallback sort + + ordered_logic_list_for_en = [ + all_logic_steps[uid] for uid in sorted_uids_for_en if uid in all_logic_steps + ] + + for i, instruction in enumerate(ordered_logic_list_for_en): + part_uid = instruction["instruction_uid"] + # Usar el tipo original para la lógica de inferencia + part_type_original = ( + instruction.get("type", "").replace(SCL_SUFFIX, "").replace("_error", "") + ) + + # Inferencia solo para tipos funcionales que no tengan EN explícito + if ( + part_type_original in functional_block_types + and "en" not in instruction.get("inputs", {}) + and part_uid not in processed_blocks_en_inference + and "error" not in part_type_original + ): # No inferir para errores + + inferred_en_source = None + # Buscar hacia atrás en la lista ordenada + if i > 0: + for j in range(i - 1, -1, -1): + prev_instr = ordered_logic_list_for_en[j] + if "error" in prev_instr.get("type", ""): + continue # Saltar errores previos + + prev_uid = prev_instr["instruction_uid"] + prev_type_original = ( + prev_instr.get("type", "") + .replace(SCL_SUFFIX, "") + .replace("_error", "") + ) + + if prev_type_original in rlo_generators: # Fuente RLO encontrada + inferred_en_source = { + "type": "connection", + "source_instruction_uid": prev_uid, + "source_instruction_type": prev_type_original, # Tipo original + "source_pin": "out", + } + break # Detener búsqueda + elif ( + prev_type_original in functional_block_types + ): # Bloque funcional previo + # Comprobar si este bloque tiene salida ENO conectada + if (prev_uid, "eno") in source_connections: + inferred_en_source = { + "type": "connection", + "source_instruction_uid": prev_uid, + "source_instruction_type": prev_type_original, # Tipo original + "source_pin": "eno", + } + # Si no tiene ENO conectado, el flujo RLO se detiene aquí + break # Detener búsqueda + elif prev_type_original in [ + "Coil", + "SCoil", + "RCoil", + "SdCoil", + "SetCoil", + "ResetCoil", + ]: + # Bobinas terminan el flujo RLO + break # Detener búsqueda + + # Si no se encontró fuente, conectar a PowerRail + if inferred_en_source is None: + inferred_en_source = {"type": "powerrail"} + 
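
# --- Illustrative sketch (not part of the patch) ---
# The implicit-EN inference above as a pure function: walk backwards from a
# functional block until an RLO generator (use its 'out'), a functional block
# with a wired ENO (use its 'eno'), or a coil (stop) is found; otherwise fall
# back to the power rail. The type sets are trimmed versions of the lists used
# in the parser.
RLO = {"Contact", "Eq", "And", "Or", "PBox", "NBox"}
FUNCTIONAL = {"Move", "Add", "Call", "TON", "CTU"}
COILS = {"Coil", "SCoil", "RCoil"}

def infer_en(ordered, index, eno_wired):
    for prev in reversed(ordered[:index]):
        t, uid = prev["type"], prev["instruction_uid"]
        if t in RLO:
            return {"type": "connection", "source_instruction_uid": uid, "source_pin": "out"}
        if t in FUNCTIONAL:
            if uid in eno_wired:
                return {"type": "connection", "source_instruction_uid": uid, "source_pin": "eno"}
            break                       # RLO chain is broken by a block without ENO
        if t in COILS:
            break                       # coils terminate the rung
    return {"type": "powerrail"}

chain = [{"instruction_uid": "1", "type": "Contact"},
         {"instruction_uid": "2", "type": "Move"}]
print(infer_en(chain, 1, eno_wired=set()))   # Move.en inferred from Contact.out
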
+ # Actualizar la instrucción EN el diccionario principal + if part_uid in all_logic_steps: + # Asegurar que inputs exista + if "inputs" not in all_logic_steps[part_uid]: + all_logic_steps[part_uid]["inputs"] = {} + all_logic_steps[part_uid]["inputs"]["en"] = inferred_en_source + processed_blocks_en_inference.add(part_uid) + + # 5. Lógica ENO (añadir destinos ENO si existen) + for source_instr_uid, eno_destinations in eno_outputs.items(): + if source_instr_uid in all_logic_steps and "error" not in all_logic_steps[ + source_instr_uid + ].get("type", ""): + all_logic_steps[source_instr_uid]["eno_destinations"] = eno_destinations + + # 6. Ordenar y Devolver + final_logic_list = [ + all_logic_steps[uid] for uid in sorted_uids_for_en if uid in all_logic_steps + ] + + return { + "id": network_id, + "title": network_title, + "comment": network_comment, + "language": network_lang, # Lenguaje original de la red + "logic": final_logic_list, + # No añadir 'error' aquí a menos que el parseo completo falle + } + + +# --- Función de Información del Parser --- +def get_parser_info(): + """Devuelve la información para este parser.""" + # Este parser maneja LAD, FBD y GRAPH + return { + "language": ["LAD", "FBD", "GRAPH"], # Lista de lenguajes soportados + "parser_func": parse_lad_fbd_network, # Función a llamar + } diff --git a/parsers/parse_scl.py b/parsers/parse_scl.py new file mode 100644 index 0000000..b88e779 --- /dev/null +++ b/parsers/parse_scl.py @@ -0,0 +1,253 @@ +# ToUpload/parsers/parse_scl.py +# -*- coding: utf-8 -*- +from lxml import etree +import re + +# Importar desde las utilidades del parser +from .parser_utils import ns, get_multilingual_text + +def reconstruct_scl_from_tokens(st_node): + """ + Reconstruye SCL desde , mejorando el manejo de + variables, constantes literales, tokens básicos, espacios y saltos de línea. + """ + if st_node is None: + return "// Error: StructuredText node not found.\n" + + scl_parts = [] + # Usar st:* para obtener todos los elementos hijos dentro del namespace st + children = st_node.xpath("./st:*", namespaces=ns) + + for elem in children: + tag = etree.QName(elem.tag).localname + + if tag == "Token": + scl_parts.append(elem.get("Text", "")) + elif tag == "Blank": + # Añadir espacios solo si es necesario o más de uno + num_spaces = int(elem.get("Num", 1)) + if not scl_parts or not scl_parts[-1].endswith(" "): + scl_parts.append(" " * num_spaces) + elif num_spaces > 1: + scl_parts.append(" " * (num_spaces -1)) + + elif tag == "NewLine": + # Quitar espacios finales antes del salto de línea + if scl_parts: + scl_parts[-1] = scl_parts[-1].rstrip() + scl_parts.append("\n") + elif tag == "Access": + scope = elem.get("Scope") + access_str = f"/*_ERR_Scope_{scope}_*/" # Placeholder + + # --- Variables --- + if scope in [ + "GlobalVariable", "LocalVariable", "TempVariable", "InOutVariable", + "InputVariable", "OutputVariable", "ConstantVariable", + "GlobalConstant", "LocalConstant" # Añadir constantes simbólicas + ]: + symbol_elem = elem.xpath("./st:Symbol", namespaces=ns) + if symbol_elem: + components = symbol_elem[0].xpath("./st:Component", namespaces=ns) + symbol_text_parts = [] + for i, comp in enumerate(components): + name = comp.get("Name", "_ERR_COMP_") + if i > 0: symbol_text_parts.append(".") + + # Check for HasQuotes attribute (adjust namespace if needed) + # El atributo está en el Component o en el Access padre? 
Probar ambos + has_quotes_comp = comp.get("HasQuotes", "false").lower() == "true" # Check directly on Component + has_quotes_access = False + access_parent = comp.xpath("ancestor::st:Access[1]", namespaces=ns) # Get immediate Access parent + if access_parent: + has_quotes_attr = access_parent[0].xpath("./st:BooleanAttribute[@Name='HasQuotes']/text()", namespaces=ns) + has_quotes_access = has_quotes_attr and has_quotes_attr[0].lower() == 'true' + + has_quotes = has_quotes_comp or has_quotes_access + is_temp = name.startswith("#") + + # Apply quotes based on HasQuotes or if it's the first component and not temp + if has_quotes or (i == 0 and not is_temp and '"' not in name): # Avoid double quotes + symbol_text_parts.append(f'"{name}"') + else: + symbol_text_parts.append(name) + + # --- Array Index Access --- + index_access_nodes = comp.xpath("./st:Access", namespaces=ns) + if index_access_nodes: + # Llamada recursiva para cada índice + indices_text = [reconstruct_scl_from_tokens(idx_node) for idx_node in index_access_nodes] + # Limpiar saltos de línea dentro de los corchetes + indices_cleaned = [idx.replace('\n', '').strip() for idx in indices_text] + symbol_text_parts.append(f"[{','.join(indices_cleaned)}]") + + access_str = "".join(symbol_text_parts) + else: + access_str = f"/*_ERR_NO_SYMBOL_IN_{scope}_*/" + + # --- Constantes Literales --- + elif scope == "LiteralConstant": + constant_elem = elem.xpath("./st:Constant", namespaces=ns) + if constant_elem: + val_elem = constant_elem[0].xpath("./st:ConstantValue/text()", namespaces=ns) + type_elem = constant_elem[0].xpath("./st:ConstantType/text()", namespaces=ns) + const_type = type_elem[0].strip().lower() if type_elem and type_elem[0] is not None else "" + const_val = val_elem[0].strip() if val_elem and val_elem[0] is not None else "_ERR_CONSTVAL_" + + # Formatear según tipo + if const_type == "bool": access_str = const_val.upper() + elif const_type.lower() == "string": + replaced_val = const_val.replace("'", "''") + access_str = f"'{replaced_val}'" + elif const_type.lower() == "char": + replaced_val = const_val.replace("'", "''") + access_str = f"'{replaced_val}'" + elif const_type == "wstring": + replaced_val = const_val.replace("'", "''") + access_str = f"WSTRING#'{replaced_val}'" + elif const_type == "wchar": + replaced_val = const_val.replace("'", "''") + access_str = f"WCHAR#'{replaced_val}'" + elif const_type == "time": access_str = f"T#{const_val}" + elif const_type == "ltime": access_str = f"LT#{const_val}" + elif const_type == "s5time": access_str = f"S5T#{const_val}" + elif const_type == "date": access_str = f"D#{const_val}" + elif const_type == "dtl": access_str = f"DTL#{const_val}" + elif const_type == "dt": access_str = f"DT#{const_val}" + elif const_type == "tod": access_str = f"TOD#{const_val}" + elif const_type in ["int", "dint", "sint", "usint", "uint", "udint", "real", "lreal", "word", "dword", "byte"]: + # Añadir .0 para reales si no tienen decimal + if const_type in ["real", "lreal"] and '.' not in const_val and 'e' not in const_val.lower(): + access_str = f"{const_val}.0" + else: + access_str = const_val + else: # Otros tipos (LWORD, etc.) o desconocidos + access_str = const_val + else: + access_str = "/*_ERR_NOCONST_*/" + + # --- Llamadas a Funciones/Bloques (Scope=Call) --- + elif scope == "Call": + call_info_node = elem.xpath("./st:CallInfo", namespaces=ns) + if call_info_node: + ci = call_info_node[0] + call_name = ci.get("Name", "_ERR_CALLNAME_") + call_type = ci.get("BlockType") # FB, FC, etc. 
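
# --- Illustrative sketch (not part of the patch) ---
# The LiteralConstant formatting rules above as a small lookup: typed prefixes
# (T#, S5T#, D#, ...), single-quote escaping for strings/chars, and a forced
# ".0" on reals without a decimal point. Simplified: not every constant type
# handled by the parser is covered here.
def sketch_literal(const_type, const_val):
    t = const_type.lower()
    prefixes = {"time": "T#", "ltime": "LT#", "s5time": "S5T#", "date": "D#",
                "dtl": "DTL#", "dt": "DT#", "tod": "TOD#"}
    if t == "bool":
        return const_val.upper()
    if t in ("string", "char"):
        return "'" + const_val.replace("'", "''") + "'"
    if t in ("wstring", "wchar"):
        return f"{t.upper()}#'" + const_val.replace("'", "''") + "'"
    if t in prefixes:
        return prefixes[t] + const_val
    if t in ("real", "lreal") and "." not in const_val and "e" not in const_val.lower():
        return const_val + ".0"
    return const_val

print(sketch_literal("S5Time", "2s"))    # S5T#2s
print(sketch_literal("String", "it's"))  # 'it''s'
print(sketch_literal("Real", "5"))       # 5.0
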
+ + # Parámetros (están como Access o Token dentro de CallInfo/Parameter) + params = ci.xpath("./st:Parameter", namespaces=ns) + param_parts = [] + for p in params: + p_name = p.get("Name", "_ERR_PARAMNAME_") + # El valor del parámetro está dentro del nodo Parameter + p_value_node = p.xpath("./st:Access | ./st:Token", namespaces=ns) # Buscar Access o Token + p_value_scl = "" + if p_value_node: + p_value_scl = reconstruct_scl_from_tokens(p) # Parsear el contenido del parámetro + p_value_scl = p_value_scl.replace('\n', '').strip() # Limpiar SCL resultante + param_parts.append(f"{p_name} := {p_value_scl}") + + # Manejar FB vs FC + if call_type == "FB": + instance_node = ci.xpath("./st:Instance/st:Component/@Name", namespaces=ns) + if instance_node: + instance_name = f'"{instance_node[0]}"' + access_str = f"{instance_name}({', '.join(param_parts)})" + else: # FB sin instancia? Podría ser STAT + access_str = f'"{call_name}"({", ".join(param_parts)}) (* FB sin instancia explícita? *)' + elif call_type == "FC": + access_str = f'"{call_name}"({", ".join(param_parts)})' + else: # Otros tipos de llamada + access_str = f'"{call_name}"({", ".join(param_parts)}) (* Tipo: {call_type} *)' + else: + access_str = "/*_ERR_NO_CALLINFO_*/" + + # Añadir más scopes si son necesarios (e.g., Address, Label, Reference) + + scl_parts.append(access_str) + + elif tag == "Comment" or tag == "LineComment": + # Usar get_multilingual_text del parser_utils + comment_text = get_multilingual_text(elem) + if tag == "Comment": + scl_parts.append(f"(* {comment_text} *)") + else: + scl_parts.append(f"// {comment_text}") + # Ignorar otros tipos de nodos si no son relevantes para el SCL + + full_scl = "".join(scl_parts) + + # --- Re-indentación Simple --- + output_lines = [] + indent_level = 0 + indent_str = " " # Dos espacios + for line in full_scl.splitlines(): + trimmed_line = line.strip() + if not trimmed_line: + # Mantener líneas vacías? Opcional. + # output_lines.append("") + continue + + # Reducir indentación ANTES de imprimir para END, ELSE, etc. + if trimmed_line.upper().startswith(("END_", "UNTIL", "}")) or \ + trimmed_line.upper() in ["ELSE", "ELSIF"]: + indent_level = max(0, indent_level - 1) + + output_lines.append(indent_str * indent_level + trimmed_line) + + # Aumentar indentación DESPUÉS de imprimir para IF, FOR, etc. + # Ser más específico con las palabras clave que aumentan indentación + # Usar .upper() para ignorar mayúsculas/minúsculas + line_upper = trimmed_line.upper() + if line_upper.endswith(("THEN", "DO", "OF", "{")) or \ + line_upper.startswith(("IF ", "FOR ", "WHILE ", "CASE ", "REPEAT", "STRUCT")) or \ + line_upper == "ELSE": + # Excepción: No indentar después de ELSE IF + if not (line_upper == "ELSE" and "IF" in output_lines[-1].upper()): + indent_level += 1 + + return "\n".join(output_lines) + + +def parse_scl_network(network_element): + """ + Parsea una red SCL extrayendo el código fuente reconstruido. + Devuelve un diccionario representando la red para el JSON. 
+ """ + network_id = network_element.get("ID", "UnknownSCL_ID") + network_lang = "SCL" # Sabemos que es SCL + + # Buscar NetworkSource y luego StructuredText + network_source_node = network_element.xpath(".//flg:NetworkSource", namespaces=ns) + structured_text_node = None + if network_source_node: + structured_text_node_list = network_source_node[0].xpath("./st:StructuredText", namespaces=ns) + if structured_text_node_list: + structured_text_node = structured_text_node_list[0] + + reconstructed_scl = "// SCL extraction failed: StructuredText node not found.\n" + if structured_text_node is not None: + reconstructed_scl = reconstruct_scl_from_tokens(structured_text_node) + + # Crear la estructura de datos para la red + parsed_network_data = { + "id": network_id, + "language": network_lang, + "logic": [ # SCL se guarda como un único bloque lógico + { + "instruction_uid": f"SCL_{network_id}", # UID sintético + "type": "RAW_SCL_CHUNK", # Tipo especial para SCL crudo + "scl": reconstructed_scl, # El código SCL reconstruido + } + ], + # No añadimos error aquí, reconstruct_scl_from_tokens ya incluye comentarios de error + } + return parsed_network_data + +# --- Función de Información del Parser --- +def get_parser_info(): + """Devuelve la información para este parser.""" + return { + 'language': ['SCL'], # Lista de lenguajes soportados + 'parser_func': parse_scl_network # Función a llamar + } \ No newline at end of file diff --git a/parsers/parse_stl.py b/parsers/parse_stl.py new file mode 100644 index 0000000..c5c3f94 --- /dev/null +++ b/parsers/parse_stl.py @@ -0,0 +1,278 @@ +# ToUpload/parsers/parse_stl.py +# -*- coding: utf-8 -*- +from lxml import etree + +# Importar desde las utilidades del parser +from .parser_utils import ns # Solo necesitamos los namespaces aquí + +# --- Funciones Auxiliares de Reconstrucción STL (Adaptadas de x1) --- + +def get_access_text_stl(access_element): + """Reconstruye una representación textual simple de un Access en STL.""" + if access_element is None: return "_ERR_ACCESS_" + scope = access_element.get("Scope") + + # Símbolo (Variable, Constante Simbólica) + symbol_elem = access_element.xpath("./stl:Symbol", namespaces=ns) + if symbol_elem: + components = symbol_elem[0].xpath("./stl:Component", namespaces=ns) + parts = [] + for i, comp in enumerate(components): + name = comp.get("Name", "_ERR_COMP_") + # Comprobar HasQuotes (en Access padre?) 
+ has_quotes_elem = comp.xpath("ancestor::stl:Access/stl:BooleanAttribute[@Name='HasQuotes']/text()", namespaces=ns) + has_quotes = has_quotes_elem and has_quotes_elem[0].lower() == "true" + is_temp = name.startswith("#") + + if i > 0: parts.append(".") + # Aplicar comillas + if has_quotes or (i == 0 and not is_temp and '"' not in name): + parts.append(f'"{name}"') + else: + parts.append(name) + # Índices de Array + index_access = comp.xpath("./stl:Access", namespaces=ns) + if index_access: + indices = [get_access_text_stl(ia) for ia in index_access] + parts.append(f"[{','.join(indices)}]") + return "".join(parts) + + # Constante Literal + constant_elem = access_element.xpath("./stl:Constant", namespaces=ns) + if constant_elem: + val_elem = constant_elem[0].xpath("./stl:ConstantValue/text()", namespaces=ns) + type_elem = constant_elem[0].xpath("./stl:ConstantType/text()", namespaces=ns) + const_type = (type_elem[0].strip().lower() if type_elem and type_elem[0] is not None else "") + const_val = (val_elem[0].strip() if val_elem and val_elem[0] is not None else "_ERR_CONST_") + + # Añadir prefijos estándar STL + if const_type == "time": return f"T#{const_val}" + if const_type == "s5time": return f"S5T#{const_val}" + if const_type == "date": return f"D#{const_val}" + if const_type == "dt": return f"DT#{const_val}" + if const_type == "time_of_day" or const_type=="tod": return f"TOD#{const_val}" + if const_type.lower() == "string": + replaced_val = const_val.replace("'", "''") + access_str = f"'{replaced_val}'" + if const_type.lower() == "char": + replaced_val = const_val.replace("'", "''") + access_str = f"'{replaced_val}'" + if const_type == "wstring": + replaced_val = const_val.replace("'", "''") + access_str = f"WSTRING#'{replaced_val}'" + if const_type == "wchar": + replaced_val = const_val.replace("'", "''") + access_str = f"WCHAR#'{replaced_val}'" # Añadir más si es necesario (WSTRING#, BYTE#, WORD#...) + if const_type == "byte" and const_val.startswith("16#"): return f"B#{const_val}" # Formato B#16#FF + if const_type == "word" and const_val.startswith("16#"): return f"W#{const_val}" + if const_type == "dword" and const_val.startswith("16#"): return f"DW#{const_val}" + # Real con punto decimal + if const_type == "real" and '.' not in const_val and 'e' not in const_val.lower(): return f"{const_val}.0" + return const_val # Valor por defecto + + # Etiqueta + label_elem = access_element.xpath("./stl:Label", namespaces=ns) + if label_elem: + return label_elem[0].get("Name", "_ERR_LABEL_") + + # Acceso Indirecto (Punteros) + indirect_elem = access_element.xpath("./stl:Indirect", namespaces=ns) + if indirect_elem: + reg = indirect_elem[0].get("Register", "AR?") # AR1, AR2 + offset_str = indirect_elem[0].get("BitOffset", "0") + area = indirect_elem[0].get("Area", "DB") # DB, DI, L, etc. + width = indirect_elem[0].get("Width", "X") # Bit, Byte, Word, Double + try: + bit_offset = int(offset_str) + byte_offset = bit_offset // 8 + bit_in_byte = bit_offset % 8 + p_format_offset = f"P#{byte_offset}.{bit_in_byte}" + except ValueError: + p_format_offset = "P#?.?" 
+ width_map = {"Bit": "X", "Byte": "B", "Word": "W", "Double": "D", "Long": "D"} + width_char = width_map.get(width, width[0] if width else "?") + return f"{area}{width_char}[{reg},{p_format_offset}]" + + # Dirección Absoluta (I, Q, M, PI, PQ, T, C, DBX, DIX, L) + address_elem = access_element.xpath("./stl:Address", namespaces=ns) + if address_elem: + area = address_elem[0].get("Area", "??") # Input, Output, Memory, DB, DI, Local, Timer, Counter... + bit_offset_str = address_elem[0].get("BitOffset", "0") + addr_type_str = address_elem[0].get("Type", "Bool") # Bool, Byte, Word, DWord, Int, DInt, Real... + try: + bit_offset = int(bit_offset_str) + byte_offset = bit_offset // 8 + bit_in_byte = bit_offset % 8 + # Determinar ancho (X, B, W, D) + addr_width = "X" # Default bit + if addr_type_str in ["Byte", "SInt", "USInt"]: addr_width = "B" + elif addr_type_str in ["Word", "Int", "UInt"]: addr_width = "W" + elif addr_type_str in ["DWord", "DInt", "UDInt", "Real", "Time", "DT", "TOD"]: addr_width = "D" + elif addr_type_str in ["LReal", "LTime", "LWord", "LInt", "ULInt"]: addr_width = "D" # L se maneja como D en direccionamiento base? O usar L? Chequear estándar. STL clásico no tenía L. + # Mapear Área XML a Área STL + area_map = {"Input": "I", "Output": "Q", "Memory": "M", + "PeripheryInput": "PI", "PeripheryOutput": "PQ", + "DB": "DB", "DI": "DI", "Local": "L", + "Timer": "T", "Counter": "C"} + stl_area = area_map.get(area, area) + + if stl_area in ["DB", "DI"]: + block_num = address_elem[0].get("BlockNumber") # Para DB10.DBX0.0 + if block_num: + return f"{stl_area}{block_num}.{stl_area}{addr_width}{byte_offset}.{bit_in_byte}" + else: # Para acceso con registro DB/DI (DBX, DIW, etc.) + return f"{stl_area}{addr_width}{byte_offset}.{bit_in_byte}" + elif stl_area in ["T", "C"]: + return f"{stl_area}{byte_offset}" # T 5, C 10 (offset es el número) + else: # I, Q, M, L, PI, PQ + return f"{stl_area}{addr_width}{byte_offset}.{bit_in_byte}" # M10.1, IW0, QB5, etc. + + except ValueError: + return f"{area}?{bit_offset_str}?" + + # CallInfo (para CALL FC10, CALL FB20, DB10) + call_info_elem = access_element.xpath("./stl:CallInfo", namespaces=ns) + if call_info_elem: + name = call_info_elem[0].get("Name", "_ERR_CALL_") + btype = call_info_elem[0].get("BlockType", "FC") # FC, FB, DB + instance_node = call_info_elem[0].xpath("./stl:Instance/stl:Component/@Name", namespaces=ns) + if btype == "FB" and instance_node: + # Para CALL FB, el operando es el DB de instancia + db_name_raw = instance_node[0] + return f'"{db_name_raw}"' if '"' not in db_name_raw else db_name_raw + elif btype == "DB": + return f'DB "{name}"' # O solo DB name? ej. DB10 + else: # FC + return f'{btype} "{name}"' # FC "Nombre" + + return f"_{scope}_?" # Fallback + + +def get_comment_text_stl(comment_element): + """Extrae texto de un LineComment o Comment para STL.""" + if comment_element is None: return "" + # STL Comments suelen tener directamente + text_nodes = comment_element.xpath("./stl:Text/text()", namespaces=ns) + if text_nodes: + return text_nodes[0].strip() + return "" # Vacío si no hay + +def reconstruct_stl_from_statementlist(statement_list_node): + """Reconstruye el código STL como una cadena de texto desde .""" + if statement_list_node is None: + return "// Error: StatementList node not found.\n" + stl_lines = [] + statements = statement_list_node.xpath("./stl:StlStatement", namespaces=ns) + + for stmt in statements: + line_parts = [] + inline_comment = "" # Comentarios en la misma línea + + # 1. 
Comentarios iniciales (línea completa //) + initial_comments = stmt.xpath("child::stl:Comment[not(@Inserted='true')] | child::stl:LineComment[not(@Inserted='true')]", namespaces=ns) + for comm in initial_comments: + comment_text = get_comment_text_stl(comm) + if comment_text: + for comment_line in comment_text.splitlines(): + stl_lines.append(f"// {comment_line}") + + # 2. Etiqueta (Label) + label_decl = stmt.xpath("./stl:LabelDeclaration", namespaces=ns) + label_str = "" + if label_decl: + label_name = label_decl[0].xpath("./stl:Label/@Name", namespaces=ns) + if label_name: + label_str = f"{label_name[0]}:" + # Comentarios después de la etiqueta (inline) + label_comments = label_decl[0].xpath("./stl:Comment[@Inserted='true'] | ./stl:LineComment[@Inserted='true']", namespaces=ns) + for lcomm in label_comments: + inline_comment += f" // {get_comment_text_stl(lcomm)}" + if label_str: + line_parts.append(label_str) + + # 3. Instrucción (StlToken) + instruction_token = stmt.xpath("./stl:StlToken", namespaces=ns) + instruction_str = "" + if instruction_token: + token_text = instruction_token[0].get("Text", "_ERR_TOKEN_") + if token_text == "EMPTY_LINE": + stl_lines.append("") # Línea vacía + continue # Saltar resto del statement + elif token_text == "COMMENT": # Marcador de línea de comentario completo + # Ya manejado por initial_comments? Verificar XML. Si no, extraer comentario aquí. + pass # Asumir manejado antes + else: + instruction_str = token_text + # Comentarios asociados al token (inline) + token_comments = instruction_token[0].xpath("./stl:Comment[@Inserted='true'] | ./stl:LineComment[@Inserted='true']", namespaces=ns) + for tcomm in token_comments: + inline_comment += f" // {get_comment_text_stl(tcomm)}" + if instruction_str: + # Añadir tabulación si hay etiqueta + line_parts.append("\t" + instruction_str if label_str else instruction_str) + + # 4. Operando (Access) + access_elem = stmt.xpath("./stl:Access", namespaces=ns) + access_str = "" + if access_elem: + access_text = get_access_text_stl(access_elem[0]) + access_str = access_text + # Comentarios dentro del Access (inline) + access_comments = access_elem[0].xpath("child::stl:Comment[@Inserted='true'] | child::stl:LineComment[@Inserted='true']", namespaces=ns) + for acc_comm in access_comments: + inline_comment += f" // {get_comment_text_stl(acc_comm)}" + if access_str: + line_parts.append(access_str) + + # Construir línea final + current_line = " ".join(lp for lp in line_parts if lp) # Unir partes con espacio + if inline_comment: + current_line += f"\t{inline_comment.strip()}" # Añadir comentario con tab + + if current_line.strip(): # Añadir solo si no está vacía después de todo + stl_lines.append(current_line.rstrip()) # Quitar espacios finales + + return "\n".join(stl_lines) + + +def parse_stl_network(network_element): + """ + Parsea una red STL extrayendo el código fuente reconstruido. + Devuelve un diccionario representando la red para el JSON. 
+ """ + network_id = network_element.get("ID", "UnknownSTL_ID") + network_lang = "STL" + + # Buscar NetworkSource y luego StatementList + network_source_node = network_element.xpath(".//flg:NetworkSource", namespaces=ns) + statement_list_node = None + if network_source_node: + statement_list_node_list = network_source_node[0].xpath("./stl:StatementList", namespaces=ns) + if statement_list_node_list: + statement_list_node = statement_list_node_list[0] + + reconstructed_stl = "// STL extraction failed: StatementList node not found.\n" + if statement_list_node is not None: + reconstructed_stl = reconstruct_stl_from_statementlist(statement_list_node) + + # Crear la estructura de datos para la red + parsed_network_data = { + "id": network_id, + "language": network_lang, + "logic": [ # STL se guarda como un único bloque lógico + { + "instruction_uid": f"STL_{network_id}", # UID sintético + "type": "RAW_STL_CHUNK", # Tipo especial para STL crudo + "stl": reconstructed_stl, # El código STL reconstruido + } + ], + } + return parsed_network_data + +# --- Función de Información del Parser --- +def get_parser_info(): + """Devuelve la información para este parser.""" + return { + 'language': ['STL'], # Lenguaje soportado + 'parser_func': parse_stl_network # Función a llamar + } \ No newline at end of file diff --git a/parsers/parser_utils.py b/parsers/parser_utils.py new file mode 100644 index 0000000..88b2fb0 --- /dev/null +++ b/parsers/parser_utils.py @@ -0,0 +1,387 @@ +# ToUpload/parsers/parser_utils.py +# -*- coding: utf-8 -*- +from lxml import etree +import traceback + +# --- Namespaces (Común para muchos parsers) --- +ns = { + "iface": "http://www.siemens.com/automation/Openness/SW/Interface/v5", + "flg": "http://www.siemens.com/automation/Openness/SW/NetworkSource/FlgNet/v4", + "st": "http://www.siemens.com/automation/Openness/SW/NetworkSource/StructuredText/v3", + "stl": "http://www.siemens.com/automation/Openness/SW/NetworkSource/StatementList/v4", +} + +# --- Funciones Comunes de Extracción de Texto y Nodos --- + + +def get_multilingual_text(element, default_lang="en-US", fallback_lang="it-IT"): + """Extrae texto multilingüe de un elemento XML, asegurando devolver siempre string.""" + if element is None: + return "" # Devolver cadena vacía si el elemento es None + try: + # Intenta buscar el idioma por defecto + xpath_expr_default = f".//iface:MultilingualTextItem[iface:AttributeList/iface:Culture='{default_lang}']/iface:AttributeList/iface:Text" + text_items_default = element.xpath(xpath_expr_default, namespaces=ns) + # CORRECCIÓN: Devolver "" si .text es None + if text_items_default and text_items_default[0].text is not None: + return text_items_default[0].text.strip() + # Intentar buscar el idioma de fallback + xpath_expr_fallback = f".//iface:MultilingualTextItem[iface:AttributeList/iface:Culture='{fallback_lang}']/iface:AttributeList/iface:Text" + text_items_fallback = element.xpath(xpath_expr_fallback, namespaces=ns) + # CORRECCIÓN: Devolver "" si .text es None + if text_items_fallback and text_items_fallback[0].text is not None: + return text_items_fallback[0].text.strip() + + # Si no encuentra ninguno, toma el primer texto que encuentre + xpath_expr_any = ".//iface:MultilingualTextItem/iface:AttributeList/iface:Text" + text_items_any = element.xpath(xpath_expr_any, namespaces=ns) + # CORRECCIÓN: Devolver "" si .text es None + if text_items_any and text_items_any[0].text is not None: + return text_items_any[0].text.strip() + + # Fallback final si no se encontró ningún 
MultilingualTextItem con texto + return "" # Asegurar retorno de string vacío + except Exception as e: + print(f"Advertencia: Error extrayendo MultilingualText: {e}") + # traceback.print_exc() # Descomentar para más detalles del error + return "" # Devolver cadena vacía en caso de excepción + + +def get_symbol_name(symbol_element): + """Obtiene el nombre completo de un símbolo desde un elemento .""" + if symbol_element is None: + return None + try: + components = symbol_element.xpath("./flg:Component/@Name", namespaces=ns) + return ( + ".".join( + f'"{c}"' if not c.startswith("#") and '"' not in c else c + for c in components + ) + if components + else None + ) + except Exception as e: + print(f"Advertencia: Excepción en get_symbol_name: {e}") + return None + + +def parse_access(access_element): + """Parsea un nodo devolviendo un diccionario con su información.""" + if access_element is None: + return None + uid = access_element.get("UId") + scope = access_element.get("Scope") + info = {"uid": uid, "scope": scope, "type": "unknown"} + symbol = access_element.xpath("./flg:Symbol", namespaces=ns) + constant = access_element.xpath("./flg:Constant", namespaces=ns) + + if symbol: + info["type"] = "variable" + info["name"] = get_symbol_name(symbol[0]) + if info["name"] is None: + info["type"] = "error_parsing_symbol" + print(f"Error: No se pudo parsear nombre símbolo Access UID={uid}") + raw_text = "".join(symbol[0].xpath(".//text()")).strip() + info["name"] = ( + f'"_ERR_PARSING_{raw_text[:20]}"' + if raw_text + else f'"_ERR_PARSING_EMPTY_SYMBOL_ACCESS_{uid}"' + ) + elif constant: + info["type"] = "constant" + const_type_elem = constant[0].xpath("./flg:ConstantType", namespaces=ns) + const_val_elem = constant[0].xpath("./flg:ConstantValue", namespaces=ns) + info["datatype"] = ( + const_type_elem[0].text.strip() + if const_type_elem and const_type_elem[0].text is not None + else "Unknown" + ) + value_str = ( + const_val_elem[0].text.strip() + if const_val_elem and const_val_elem[0].text is not None + else None + ) + if value_str is None: + info["type"] = "error_parsing_constant" + info["value"] = None + print(f"Error: Constante sin valor Access UID={uid}") + if info["datatype"] == "Unknown" and value_str: + val_lower = value_str.lower() + if val_lower in ["true", "false"]: + info["datatype"] = "Bool" + elif value_str.isdigit() or ( + value_str.startswith("-") and value_str[1:].isdigit() + ): + info["datatype"] = "Int" + elif "." 
in value_str: + try: + float(value_str) + info["datatype"] = "Real" + except ValueError: + pass + elif "#" in value_str: + parts = value_str.split("#", 1) + prefix = parts[0].upper() + if prefix == "T": + info["datatype"] = "Time" + elif prefix == "LT": + info["datatype"] = "LTime" + elif prefix == "S5T": + info["datatype"] = "S5Time" + elif prefix == "D": + info["datatype"] = "Date" + elif prefix == "DT": + info["datatype"] = "DT" + elif prefix == "DTL": + info["datatype"] = "DTL" + elif prefix == "TOD": + info["datatype"] = "Time_Of_Day" + elif value_str.startswith("'") and value_str.endswith("'"): + info["datatype"] = "String" + else: + info["datatype"] = "TypedConstant" + elif value_str.startswith("'") and value_str.endswith("'"): + info["datatype"] = "String" + info["value"] = value_str + dtype_lower = info["datatype"].lower() + val_str_processed = value_str + if isinstance(value_str, str): + if "#" in value_str: + val_str_processed = value_str.split("#", 1)[-1] + if ( + val_str_processed.startswith("'") + and val_str_processed.endswith("'") + and len(val_str_processed) > 1 + ): + val_str_processed = val_str_processed[1:-1] + try: + if dtype_lower in [ + "int", + "dint", + "udint", + "sint", + "usint", + "lint", + "ulint", + "word", + "dword", + "lword", + "byte", + ]: + info["value"] = int(val_str_processed) + elif dtype_lower == "bool": + info["value"] = ( + val_str_processed.lower() == "true" or val_str_processed == "1" + ) + elif dtype_lower in ["real", "lreal"]: + info["value"] = float(val_str_processed) + except (ValueError, TypeError): + info["value"] = value_str + else: + info["type"] = "unknown_structure" + print(f"Advertencia: Access UID={uid} no es Symbol ni Constant.") + if info["type"] == "variable" and info.get("name") is None: + print(f"Error Interno: parse_access var sin nombre UID {uid}.") + info["type"] = "error_no_name" + return info + + +def parse_part(part_element): + """Parsea un nodo de LAD/FBD.""" + if part_element is None: + return None + uid = part_element.get("UId") + name = part_element.get("Name") + if not uid or not name: + print( + f"Error: Part sin UID o Name: {etree.tostring(part_element, encoding='unicode')}" + ) + return None + template_values = {} + negated_pins = {} + try: + for tv in part_element.xpath("./TemplateValue"): + tv_name = tv.get("Name") + tv_type = tv.get("Type") + if tv_name and tv_type: + template_values[tv_name] = tv_type + except Exception as e: + print(f"Advertencia: Error extrayendo TemplateValues Part UID={uid}: {e}") + try: + for negated_elem in part_element.xpath("./Negated"): + negated_pin_name = negated_elem.get("Name") + if negated_pin_name: + negated_pins[negated_pin_name] = True + except Exception as e: + print(f"Advertencia: Error extrayendo Negated Pins Part UID={uid}: {e}") + return { + "uid": uid, + "type": name, + "template_values": template_values, + "negated_pins": negated_pins, + } + + +def parse_call(call_element): + """Parsea un nodo de LAD/FBD.""" + if call_element is None: + return None + uid = call_element.get("UId") + if not uid: + print( + f"Error: Call encontrado sin UID: {etree.tostring(call_element, encoding='unicode')}" + ) + return None + call_info_elem = call_element.xpath("./flg:CallInfo", namespaces=ns) + if not call_info_elem: + call_info_elem_no_ns = call_element.xpath("./CallInfo") + if not call_info_elem_no_ns: + print(f"Error: Call UID {uid} sin elemento CallInfo.") + return {"uid": uid, "type": "Call_error", "error": "Missing CallInfo"} + else: + print(f"Advertencia: Call UID {uid} encontró 
CallInfo SIN namespace.") + call_info = call_info_elem_no_ns[0] + else: + call_info = call_info_elem[0] + block_name = call_info.get("Name") + block_type = call_info.get("BlockType") + if not block_name or not block_type: + print(f"Error: CallInfo para UID {uid} sin Name o BlockType.") + return { + "uid": uid, + "type": "Call_error", + "error": "Missing Name or BlockType in CallInfo", + } + instance_name, instance_scope = None, None + if block_type == "FB": + instance_elem_list = call_info.xpath("./flg:Instance", namespaces=ns) + if instance_elem_list: + instance_elem = instance_elem_list[0] + instance_scope = instance_elem.get("Scope") + component_elem_list = instance_elem.xpath("./flg:Component", namespaces=ns) + if component_elem_list: + component_elem = component_elem_list[0] + db_name_raw = component_elem.get("Name") + if db_name_raw: + instance_name = ( + f'"{db_name_raw}"' + if not db_name_raw.startswith('"') + else db_name_raw + ) + else: + print( + f"Advertencia: en FB Call UID {uid} sin 'Name'." + ) + else: + print( + f"Advertencia: No se encontró en FB Call UID {uid}." + ) + else: + print( + f"Advertencia: FB Call '{block_name}' UID {uid} sin . ¿Llamada a multi-instancia STAT?" + ) + call_scope = call_element.get("Scope") + if call_scope == "LocalVariable": + instance_name = f'"{block_name}"' + instance_scope = "Static" + print( + f"INFO: Asumiendo instancia STAT '{instance_name}' para FB Call UID {uid}." + ) + call_data = { + "uid": uid, + "type": "Call", + "block_name": block_name, + "block_type": block_type, + } + if instance_name: + call_data["instance_db"] = instance_name + if instance_scope: + call_data["instance_scope"] = instance_scope + return call_data + + +def parse_interface_members(member_elements): + """Parsea recursivamente miembros de interfaz/estructura.""" + members_data = [] + if not member_elements: + return members_data + for member in member_elements: + member_name = member.get("Name") + member_dtype_raw = member.get("Datatype") + member_version = member.get("Version") + member_remanence = member.get("Remanence", "NonRetain") + member_accessibility = member.get("Accessibility", "Public") + if not member_name or not member_dtype_raw: + print("Advertencia: Miembro sin nombre o tipo de dato. 
Saltando.") + continue + member_dtype = ( + f"{member_dtype_raw}:v{member_version}" + if member_version + else member_dtype_raw + ) + member_info = { + "name": member_name, + "datatype": member_dtype, + "remanence": member_remanence, + "accessibility": member_accessibility, + "start_value": None, + "comment": None, + "children": [], + "array_elements": {}, + } + comment_node = member.xpath("./iface:Comment", namespaces=ns) + if comment_node: + member_info["comment"] = get_multilingual_text( + comment_node[0] + ) # Usa la función robusta + start_value_node = member.xpath("./iface:StartValue", namespaces=ns) + if start_value_node: + constant_name = start_value_node[0].get("ConstantName") + member_info["start_value"] = ( + constant_name + if constant_name + else ( + start_value_node[0].text + if start_value_node[0].text is not None + else None + ) + ) # Devolver None si está vacío + nested_sections = member.xpath( + "./iface:Sections/iface:Section[@Name='None']/iface:Member", namespaces=ns + ) + if nested_sections: + member_info["children"] = parse_interface_members(nested_sections) + if isinstance(member_dtype, str) and member_dtype.lower().startswith("array["): + subelements = member.xpath("./iface:Subelement", namespaces=ns) + for sub in subelements: + path = sub.get("Path") + sub_start_value_node = sub.xpath("./iface:StartValue", namespaces=ns) + if path and sub_start_value_node: + constant_name = sub_start_value_node[0].get("ConstantName") + value = ( + constant_name + if constant_name + else ( + sub_start_value_node[0].text + if sub_start_value_node[0].text is not None + else None + ) + ) # Devolver None si está vacío + member_info["array_elements"][path] = value + sub_comment_node = sub.xpath("./iface:Comment", namespaces=ns) + if path and sub_comment_node: + sub_comment_text = get_multilingual_text( + sub_comment_node[0] + ) # Usa la función robusta + if isinstance(member_info["array_elements"].get(path), dict): + member_info["array_elements"][path][ + "comment" + ] = sub_comment_text + else: + member_info["array_elements"][path] = { + "value": member_info["array_elements"].get(path), + "comment": sub_comment_text, + } + members_data.append(member_info) + return members_data diff --git a/paste.py b/paste.py index c3a554e..b4067a8 100644 --- a/paste.py +++ b/paste.py @@ -1,184 +1,344 @@ -# processors/process_call.py +# x3_generate_scl.py # -*- coding: utf-8 -*- -import sympy +import json +import os +import re +import argparse +import sys import traceback -# Asumiendo que estas funciones ahora existen y están adaptadas -from .processor_utils import get_sympy_representation, sympy_expr_to_scl, format_variable_name, get_target_scl_name -from .symbol_manager import SymbolManager # Necesitamos pasar el symbol_manager -# Definir sufijo globalmente o importar -SCL_SUFFIX = "_sympy_processed" +# --- Importar Utilidades y Constantes --- +try: + from processors.processor_utils import format_variable_name + SCL_SUFFIX = "_sympy_processed" + GROUPED_COMMENT = "// Logic included in grouped IF" +except ImportError: + print("Advertencia: No se pudo importar 'format_variable_name'. 
Usando fallback.") + def format_variable_name(name): # Fallback + if not name: return "_INVALID_NAME_" + if name.startswith('"') and name.endswith('"'): return name + prefix = "#" if name.startswith("#") else "" + if prefix: name = name[1:] + if name and name[0].isdigit(): name = "_" + name + name = re.sub(r"[^a-zA-Z0-9_]", "_", name) + return prefix + name + SCL_SUFFIX = "_sympy_processed" + GROUPED_COMMENT = "// Logic included in grouped IF" -def process_call(instruction, network_id, sympy_map, symbol_manager: SymbolManager, data): - instr_uid = instruction["instruction_uid"] - # Get original type before potential suffix/error was added by x1 or previous passes - # This requires storing the original type perhaps, or removing known suffixes - # Let's assume 'block_type' (FC/FB) and 'block_name' are correct from x1 - block_name = instruction.get("block_name", f"UnknownCall_{instr_uid}") - block_type = instruction.get("block_type") # FC, FB - instance_db = instruction.get("instance_db") # Nombre del DB de instancia (para FB) - - # Check if already processed - if instruction.get("type", "").endswith(SCL_SUFFIX) or "_error" in instruction.get("type", ""): - return False - - # Formatear nombres SCL (para la llamada final) - block_name_scl = format_variable_name(block_name) - instance_db_scl = format_variable_name(instance_db) if instance_db else None - - # --- Manejo de EN --- - en_input = instruction["inputs"].get("en") - sympy_en_expr = get_sympy_representation(en_input, network_id, sympy_map, symbol_manager) if en_input else sympy.true - - if sympy_en_expr is None: - # print(f"DEBUG Call {instr_uid}: EN dependency not ready.") - return False # Dependencia EN no resuelta - - # --- Procesar Parámetros de Entrada --- - scl_call_params = [] - processed_inputs = {"en"} # Track processed pins to avoid duplicates if 'en' is also listed elsewhere - dependencies_resolved = True - - # Iterar sobre las entradas que x1 debería haber poblado - # Ordenar por nombre de pin para consistencia en la llamada SCL - input_pin_names = sorted(instruction.get("inputs", {}).keys()) - - for pin_name in input_pin_names: - if pin_name not in processed_inputs: # Skip 'en' if already handled - source_info = instruction["inputs"][pin_name] - - # Get the representation of the source (SymPy, constant, or SCL string) - source_sympy_or_const = get_sympy_representation(source_info, network_id, sympy_map, symbol_manager) - - if source_sympy_or_const is None: - # print(f"DEBUG Call {instr_uid}: Input param '{pin_name}' dependency not ready.") - dependencies_resolved = False - break # Exit if one dependency is not ready - - # Convert the expression/constant to SCL for the call - # Simplification of inputs is generally not needed here, convert directly - param_scl_value = sympy_expr_to_scl(source_sympy_or_const, symbol_manager) - - # Parameter pin name needs formatting for SCL - pin_name_scl = format_variable_name(pin_name) - - # Special check for DB_ANY or ANY_POINTER - pass name directly without := - # We need the original parameter type info for this, which is not in the simplified JSON. - # WORKAROUND: Check if param_scl_value looks like a DB name ("DB_NAME") - # This is heuristic and might be wrong. Ideally, x1 should pass type info. - # For now, we assume standard 'Param := Value' syntax. - # if param_scl_value.startswith('"') and param_scl_value.endswith('"') and block_type == "FC": # Heuristic for DB_ANY? - # scl_call_params.append(f"{pin_name_scl} := {param_scl_value}") # Still use := for clarity? 
TIA might infer - # else: - scl_call_params.append(f"{pin_name_scl} := {param_scl_value}") - - processed_inputs.add(pin_name) - - if not dependencies_resolved: - return False - - # --- Construcción de la Llamada SCL (con parámetros) --- - scl_call_body = "" - param_string = ", ".join(scl_call_params) # Join parameters with commas - - if block_type == "FB": - if not instance_db_scl: - print(f"Error: Call FB '{block_name_scl}' (UID {instr_uid}) sin instancia.") - instruction["scl"] = f"// ERROR: FB Call {block_name_scl} sin instancia" - instruction["type"] = f"Call_FB_error" # Mark with error - return True # Processed (with error) - # FB Call: InstanceName(Param1 := Value1, Param2 := Value2); - scl_call_body = f"{instance_db_scl}({param_string});" - elif block_type == "FC": - # FC Call: BlockName(Param1 := Value1, Param2 := Value2); - scl_call_body = f"{block_name_scl}({param_string});" - else: - print(f"Advertencia: Tipo de bloque no soportado para Call UID {instr_uid}: {block_type}") - scl_call_body = f"// ERROR: Call a bloque tipo '{block_type}' no soportado: {block_name_scl}" - # Mark instruction type with error - instruction["type"] = f"Call_{block_type or 'Unknown'}_error" # Add specific type if known - - - # --- Aplicar Condición EN (usando la expresión SymPy EN) --- - scl_final = "" - if sympy_en_expr != sympy.true: - # Simplify the EN condition before converting to SCL +# --- Función format_scl_start_value (SIN CAMBIOS) --- +def format_scl_start_value(value, datatype): + if value is None: return None + datatype_lower = datatype.lower() if datatype else "" + value_str = str(value); value_str_unquoted = value_str + if value_str.startswith('"') and value_str.endswith('"') and len(value_str) > 1: value_str_unquoted = value_str[1:-1] + elif value_str.startswith("'") and value_str.endswith("'") and len(value_str) > 1: value_str_unquoted = value_str[1:-1] + if any(t in datatype_lower for t in ["int","byte","word","dint","dword","lint","lword","sint","usint","uint","udint","ulint"]): + try: return str(int(value_str_unquoted)) + except ValueError: + if re.match(r"^[a-zA-Z_][a-zA-Z0-9_]*$", value_str_unquoted): return value_str_unquoted + escaped_for_scl = value_str_unquoted.replace("\\", "\\\\").replace("'", "''").replace("\n", "").replace("\r", ""); return f"'{escaped_for_scl}'" + elif "bool" in datatype_lower: return "TRUE" if value_str_unquoted.lower() == "true" else "FALSE" + elif "string" in datatype_lower: escaped_value = value_str_unquoted.replace("'", "''"); return f"'{escaped_value}'" + elif "char" in datatype_lower: escaped_value = value_str_unquoted.replace("'", "''"); return f"'{escaped_value}'" + elif "real" in datatype_lower or "lreal" in datatype_lower: try: - #simplified_en_expr = sympy.simplify_logic(sympy_en_expr, force=True) - simplified_en_expr = sympy.logic.boolalg.to_dnf(sympy_en_expr, simplify=True) - except Exception as e: - print(f"Error simplifying EN for Call {instr_uid} ({block_name_scl}): {e}") - simplified_en_expr = sympy_en_expr # Fallback - en_condition_scl = sympy_expr_to_scl(simplified_en_expr, symbol_manager) - - # Avoid IF TRUE/FALSE blocks - if en_condition_scl == "TRUE": - scl_final = scl_call_body - elif en_condition_scl == "FALSE": - scl_final = f"// Call {block_name_scl} (UID {instr_uid}) condition simplified to FALSE." - # Also update type to avoid further processing? - # instruction["type"] = f"Call_{block_type}{SCL_SUFFIX}_Optimized" + f_val = float(value_str_unquoted); s_val = str(f_val) + if "." 
not in s_val and "e" not in s_val.lower(): s_val += ".0" + return s_val + except ValueError: + if re.match(r"^[a-zA-Z_][a-zA-Z0-9_]*$", value_str_unquoted): return value_str_unquoted + escaped_for_scl = value_str_unquoted.replace("\\", "\\\\").replace("'", "''").replace("\n", "").replace("\r", ""); return f"'{escaped_for_scl}'" + elif "time" in datatype_lower: + prefix, val_to_use = "", value_str_unquoted + if val_to_use.upper().startswith("T#"): prefix, val_to_use = "T#", val_to_use[2:] + elif val_to_use.upper().startswith("LT#"): prefix, val_to_use = "LT#", val_to_use[3:] + elif val_to_use.upper().startswith("S5T#"): prefix, val_to_use = "S5T#", val_to_use[4:] + if "s5time" in datatype_lower: return f"S5T#{val_to_use}" + elif "ltime" in datatype_lower: return f"LT#{val_to_use}" + else: return f"T#{val_to_use}" + elif "date" in datatype_lower: + val_to_use = value_str_unquoted + if "dtl" in datatype_lower or "date_and_time" in datatype_lower: + prefix = "DTL#" if val_to_use.upper().startswith("DTL#") else "DTL#" + val_to_use = val_to_use[4:] if val_to_use.upper().startswith("DTL#") else val_to_use; return f"{prefix}{val_to_use}" + elif "dt" in datatype_lower: + prefix = "DT#" if val_to_use.upper().startswith("DT#") else "DT#" + val_to_use = val_to_use[3:] if val_to_use.upper().startswith("DT#") else val_to_use; return f"{prefix}{val_to_use}" + elif "tod" in datatype_lower or "time_of_day" in datatype_lower: + prefix = "TOD#" if val_to_use.upper().startswith("TOD#") else "TOD#" + val_to_use = val_to_use[4:] if val_to_use.upper().startswith("TOD#") else val_to_use; return f"{prefix}{val_to_use}" + else: # Default a Date D# + prefix = "D#" if val_to_use.upper().startswith("D#") else "D#" + val_to_use = val_to_use[2:] if val_to_use.upper().startswith("D#") else val_to_use; return f"{prefix}{val_to_use}" + else: # Fallback + if re.match(r'^[a-zA-Z_#"][a-zA-Z0-9_."#\[\]%]+$', value_str): + if value_str.startswith('"') and value_str.endswith('"') and len(value_str) > 1: return value_str[1:-1] + if '"' in value_str and "." 
in value_str and value_str.count('"') == 2: return value_str + if not value_str.startswith('"') and not value_str.startswith("'"): + if value_str.startswith("#") or value_str.startswith("%"): return value_str + else: return value_str + return value_str else: - # Indent the call body within the IF block - indented_call = "\n".join([f" {line}" for line in scl_call_body.splitlines()]) - scl_final = f"IF {en_condition_scl} THEN\n{indented_call}\nEND_IF;" + escaped_for_scl = value_str_unquoted.replace("\\", "\\\\").replace("'", "''").replace("\n", "").replace("\r", ""); return f"'{escaped_for_scl}'" + +# --- Función generate_scl_declarations (SIN CAMBIOS) --- +def generate_scl_declarations(variables, indent_level=1): + scl_lines = [] + indent = " " * indent_level + for var in variables: + var_name_scl = format_variable_name(var.get("name")) + var_dtype_raw = var.get("datatype", "VARIANT") + var_comment = var.get("comment") + start_value = var.get("start_value"); children = var.get("children"); array_elements = var.get("array_elements") + var_dtype_cleaned = var_dtype_raw + if isinstance(var_dtype_raw, str): + if var_dtype_raw.startswith('"') and var_dtype_raw.endswith('"'): var_dtype_cleaned = var_dtype_raw[1:-1] + array_match = re.match(r'(Array\[.*\]\s+of\s+)"(.*)"', var_dtype_raw, re.IGNORECASE) + if array_match: var_dtype_cleaned = f"{array_match.group(1)}{array_match.group(2)}" + base_type_for_init = var_dtype_cleaned; array_prefix_for_decl = "" + if var_dtype_cleaned.lower().startswith("array["): + match = re.match(r"(Array\[.*\]\s+of\s+)(.*)", var_dtype_cleaned, re.IGNORECASE) + if match: array_prefix_for_decl, base_type_for_init = match.group(1), match.group(2).strip() + declaration_dtype = var_dtype_raw + if base_type_for_init != var_dtype_cleaned and not array_prefix_for_decl: + if not base_type_for_init.startswith('"'): declaration_dtype = f'"{base_type_for_init}"' + else: declaration_dtype = base_type_for_init + elif array_prefix_for_decl and base_type_for_init != var_dtype_cleaned: + if not base_type_for_init.startswith('"'): declaration_dtype = f'{array_prefix_for_decl}"{base_type_for_init}"' + else: declaration_dtype = f"{array_prefix_for_decl}{base_type_for_init}" + declaration_line = f"{indent}{var_name_scl} : {declaration_dtype}"; init_value_scl = None + if array_elements: + try: + indices_numeric = {int(k): v for k, v in array_elements.items()} + sorted_indices_str = [str(k) for k in sorted(indices_numeric.keys())] + except ValueError: print(f"Advertencia: Índices de array no numéricos para '{var_name_scl}'. Usando orden alfabético."); sorted_indices_str = sorted(array_elements.keys()) + init_values = [] + for idx_str in sorted_indices_str: + try: formatted_val = format_scl_start_value(array_elements[idx_str], base_type_for_init); init_values.append(formatted_val) + except Exception as e_fmt: print(f"ERROR: Falló formateo para índice {idx_str} de array '{var_name_scl}'. Valor: {array_elements[idx_str]}. 
Error: {e_fmt}"); init_values.append(f"/*ERR_FMT_{idx_str}*/") + valid_inits = [v for v in init_values if v is not None] + if valid_inits: init_value_scl = f"[{', '.join(valid_inits)}]" + elif array_elements: print(f"Advertencia: Todos los valores iniciales para array '{var_name_scl}' son None o inválidos.") + elif children: + scl_lines.append(declaration_line); scl_lines.append(f"{indent}STRUCT") + scl_lines.extend(generate_scl_declarations(children, indent_level + 1)) + scl_lines.append(f"{indent}END_STRUCT;"); + if var_comment: scl_lines.append(f"{indent}// {var_comment}") + scl_lines.append(""); continue + else: + if start_value is not None: + try: init_value_scl = format_scl_start_value(start_value, base_type_for_init) + except Exception as e_fmt_simple: print(f"ERROR: Falló formateo para valor simple de '{var_name_scl}'. Valor: {start_value}. Error: {e_fmt_simple}"); init_value_scl = f"/*ERR_FMT_SIMPLE*/" + if init_value_scl is not None: declaration_line += f" := {init_value_scl}" + declaration_line += ";" + if var_comment: declaration_line += f" // {var_comment}" + scl_lines.append(declaration_line) + return scl_lines + +# --- NUEVAS FUNCIONES para generar Markdown --- +def generate_udt_markdown(data): + """Genera contenido Markdown para un UDT.""" + md_lines = []; udt_name = data.get("block_name", "UnknownUDT"); udt_comment = data.get("block_comment", "") + md_lines.append(f"# UDT: {udt_name}"); md_lines.append("") + if udt_comment: + md_lines.append(f"**Comment:**") + for line in udt_comment.splitlines(): md_lines.append(f"> {line}") + md_lines.append("") + members = data.get("interface", {}).get("None", []) + if members: + md_lines.append("## Members"); md_lines.append("") + md_lines.append("| Name | Datatype | Start Value | Comment |"); md_lines.append("|---|---|---|---|") + md_lines.extend(generate_markdown_member_rows(members)) # Llamada a la función auxiliar + md_lines.append("") + else: md_lines.append("No members found in the UDT interface."); md_lines.append("") + return md_lines + +# --- generate_markdown_member_rows (MODIFICADA) --- +def generate_markdown_member_rows(members, level=0): + """Función auxiliar recursiva para generar filas Markdown para miembros de UDT.""" + md_rows = []; prefix = "    " * level + for member in members: + name = member.get("name", "N/A"); datatype = member.get("datatype", "N/A") + start_value_raw = member.get("start_value") + start_value_fmt = format_scl_start_value(start_value_raw, datatype) if start_value_raw is not None else "" + # CORRECCIÓN: Manejar el caso en que comment sea None + comment_raw = member.get("comment") + comment = comment_raw.replace('|', '\|').replace('\n', ' ') if comment_raw else "" # Usar "" si es None + + md_rows.append(f"| {prefix}`{name}` | `{datatype}` | `{start_value_fmt}` | {comment} |") + children = member.get("children") + if children: md_rows.extend(generate_markdown_member_rows(children, level + 1)) + array_elements = member.get("array_elements") + if array_elements: + base_type_for_init = datatype + if isinstance(datatype, str) and datatype.lower().startswith("array["): + match = re.match(r"(Array\[.*\]\s+of\s+)(.*)", datatype, re.IGNORECASE) + if match: base_type_for_init = match.group(2).strip() + md_rows.append(f"| {prefix}  *(Initial Values)* | | | |") + try: + indices_numeric = {int(k): v for k, v in array_elements.items()} + sorted_indices_str = [str(k) for k in sorted(indices_numeric.keys())] + except ValueError: sorted_indices_str = sorted(array_elements.keys()) + for idx_str in sorted_indices_str: 
+ val_raw = array_elements[idx_str] + val_fmt = format_scl_start_value(val_raw, base_type_for_init) if val_raw is not None else "" + md_rows.append(f"| {prefix}  `[{idx_str}]` | | `{val_fmt}` | |") + return md_rows + +def generate_tag_table_markdown(data): + """Genera contenido Markdown para una tabla de tags.""" + md_lines = []; table_name = data.get("block_name", "UnknownTagTable"); tags = data.get("tags", []) + md_lines.append(f"# Tag Table: {table_name}"); md_lines.append("") + if tags: + md_lines.append("| Name | Datatype | Address | Comment |"); md_lines.append("|---|---|---|---|") + for tag in tags: + name = tag.get("name", "N/A"); datatype = tag.get("datatype", "N/A") + address = tag.get("address", "N/A") or " "; + # CORRECCIÓN: Manejar el caso en que comment sea None + comment_raw = tag.get("comment") + comment = comment_raw.replace('|', '\|').replace('\n', ' ') if comment_raw else "" + md_lines.append(f"| `{name}` | `{datatype}` | `{address}` | {comment} |") + md_lines.append("") + else: md_lines.append("No tags found in this table."); md_lines.append("") + return md_lines + +# --- Función Principal de Generación (SIN CAMBIOS EN LA LÓGICA PRINCIPAL) --- +def generate_scl_or_markdown(processed_json_filepath, output_directory): + """ + Genera un archivo SCL o Markdown a partir del JSON procesado, + eligiendo el formato y la extensión según el tipo de bloque. + """ + if not os.path.exists(processed_json_filepath): print(f"Error: Archivo JSON no encontrado: '{processed_json_filepath}'"); return + print(f"Cargando JSON procesado desde: {processed_json_filepath}") + try: + with open(processed_json_filepath, "r", encoding="utf-8") as f: data = json.load(f) + except Exception as e: print(f"Error al cargar o parsear JSON: {e}"); traceback.print_exc(); return + + block_name = data.get("block_name", "UnknownBlock"); block_number = data.get("block_number") + block_type = data.get("block_type", "Unknown"); block_comment = data.get("block_comment", "") + scl_block_name = format_variable_name(block_name); output_content = []; output_extension = ".scl" + print(f"Generando salida para: {block_type} '{scl_block_name}' (Original: {block_name})") + + if block_type == "PlcUDT": + print(" -> Modo de generación: UDT Markdown"); output_content = generate_udt_markdown(data); output_extension = ".md" + elif block_type == "PlcTagTable": + print(" -> Modo de generación: Tag Table Markdown"); output_content = generate_tag_table_markdown(data); output_extension = ".md" + elif block_type == "GlobalDB": + print(" -> Modo de generación: DATA_BLOCK SCL"); output_extension = ".scl" + # (Lógica SCL DB...) 
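+        # Las líneas siguientes construyen un esqueleto de la forma (ejemplo ilustrativo):
+        #   DATA_BLOCK "NombreDB"
+        #   { S7_Optimized_Access := 'TRUE' }
+        #   VERSION : 0.1
+        #   VAR ... (declaraciones de interface["Static"]) ... END_VAR
+        #   BEGIN  // sin código ejecutable
+        #   END_DATA_BLOCK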
+ output_content.append(f"// Block Type: {block_type}") + if block_name != scl_block_name : output_content.append(f"// Block Name (Original): {block_name}") + if block_number: output_content.append(f"// Block Number: {block_number}") + if block_comment: output_content.append(f"// Block Comment:"); [output_content.append(f"// {line}") for line in block_comment.splitlines()] + output_content.append(""); output_content.append(f'DATA_BLOCK "{scl_block_name}"') + output_content.append("{ S7_Optimized_Access := 'TRUE' }"); output_content.append("VERSION : 0.1"); output_content.append("") + interface_data = data.get("interface", {}); static_vars = interface_data.get("Static", []) + if static_vars: output_content.append("VAR"); output_content.extend(generate_scl_declarations(static_vars, indent_level=1)); output_content.append("END_VAR") + else: print("Advertencia: No se encontró sección 'Static' o está vacía en la interfaz del DB."); output_content.append("VAR\nEND_VAR") + output_content.append(""); output_content.append("BEGIN"); output_content.append(" // Data Blocks have no executable code"); output_content.append("END_DATA_BLOCK") + elif block_type in ["FC", "FB", "OB"]: + print(f" -> Modo de generación: {block_type} SCL"); output_extension = ".scl" + # (Lógica SCL FC/FB/OB...) + scl_block_keyword = "FUNCTION_BLOCK"; + if block_type == "FC": scl_block_keyword = "FUNCTION" + elif block_type == "OB": scl_block_keyword = "ORGANIZATION_BLOCK" + output_content.append(f"// Block Type: {block_type}") + if block_name != scl_block_name : output_content.append(f"// Block Name (Original): {block_name}") + if block_number: output_content.append(f"// Block Number: {block_number}") + original_net_langs = set(n.get("language", "Unknown") for n in data.get("networks", [])) + output_content.append(f"// Original Network Languages: {', '.join(l for l in original_net_langs if l != 'Unknown')}") + if block_comment: output_content.append(f"// Block Comment:"); [output_content.append(f"// {line}") for line in block_comment.splitlines()] + output_content.append("") + return_type = "Void"; interface_data = data.get("interface", {}) + if scl_block_keyword == "FUNCTION" and interface_data.get("Return"): + return_member = interface_data["Return"][0]; return_type_raw = return_member.get("datatype", "Void") + return_type = (return_type_raw[1:-1] if isinstance(return_type_raw, str) and return_type_raw.startswith('"') and return_type_raw.endswith('"') else return_type_raw) + if return_type != return_type_raw and not return_type_raw.lower().startswith("array"): return_type = f'"{return_type}"' + else: return_type = return_type_raw + if scl_block_keyword == "FUNCTION": output_content.append(f'{scl_block_keyword} "{scl_block_name}" : {return_type}') + else: output_content.append(f'{scl_block_keyword} "{scl_block_name}"') + output_content.append("{ S7_Optimized_Access := 'TRUE' }"); output_content.append("VERSION : 0.1"); output_content.append("") + section_order = ["Input", "Output", "InOut", "Static", "Temp", "Constant"] + declared_temps = set(); has_declarations = False + for section_name in section_order: + vars_in_section = interface_data.get(section_name, []) + if vars_in_section: + has_declarations = True; scl_section_keyword = f"VAR_{section_name.upper()}" + if section_name == "Static": scl_section_keyword = "VAR_STAT" + if section_name == "Temp": scl_section_keyword = "VAR_TEMP" + if section_name == "Constant": scl_section_keyword = "CONSTANT" + output_content.append(scl_section_keyword) + 
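+                # Emitir las declaraciones de los miembros de esta sección y cerrarla
+                # (END_VAR, o END_CONSTANT para la sección CONSTANT).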
output_content.extend(generate_scl_declarations(vars_in_section, indent_level=1)) + output_content.append("END_VAR" if section_name != "Constant" else "END_CONSTANT"); output_content.append("") + if section_name == "Temp": declared_temps.update(format_variable_name(v.get("name")) for v in vars_in_section if v.get("name")) + temp_vars_detected = set(); temp_pattern = re.compile(r'"?(#\w+)"?') + for network in data.get("networks", []): + for instruction in network.get("logic", []): + scl_code = instruction.get("scl", ""); edge_update_code = instruction.get("_edge_mem_update_scl", "") + code_to_scan = (scl_code if scl_code else "") + "\n" + (edge_update_code if edge_update_code else "") + if code_to_scan: + found_temps = temp_pattern.findall(code_to_scan) + for temp_name in found_temps: + if temp_name: temp_vars_detected.add(temp_name) + additional_temps = sorted(list(temp_vars_detected - declared_temps)) + if additional_temps: + print(f"INFO: Detectadas {len(additional_temps)} VAR_TEMP adicionales.") + if "Temp" not in interface_data or not interface_data["Temp"]: output_content.append("VAR_TEMP") + for temp_name in additional_temps: scl_name = format_variable_name(temp_name); inferred_type = "Bool"; output_content.append(f" {scl_name} : {inferred_type}; // Auto-generated temporary") + if "Temp" not in interface_data or not interface_data["Temp"]: output_content.append("END_VAR"); output_content.append("") + output_content.append("BEGIN"); output_content.append("") + for i, network in enumerate(data.get("networks", [])): + network_title = network.get("title", f'Network {network.get("id", i+1)}') + network_comment = network.get("comment", ""); network_lang = network.get("language", "LAD") + output_content.append(f" // Network {i+1}: {network_title} (Original Language: {network_lang})") + if network_comment: [output_content.append(f" // {line}") for line in network_comment.splitlines()] + output_content.append(""); network_has_code = False; logic_in_network = network.get("logic", []) + if not logic_in_network: output_content.append(f" // Network {i+1} has no logic elements."); output_content.append(""); continue + if network_lang == "STL": + if logic_in_network[0].get("type") == "RAW_STL_CHUNK": + network_has_code = True; raw_stl_code = logic_in_network[0].get("stl", "// ERROR: STL code missing") + output_content.append(f" // --- BEGIN STL Network {i+1} ---"); [output_content.append(f" // {stl_line}") for stl_line in raw_stl_code.splitlines()]; output_content.append(f" // --- END STL Network {i+1} ---"); output_content.append("") + else: output_content.append(f" // ERROR: Contenido STL inesperado en Network {i+1}."); output_content.append("") + else: # SCL/LAD/FBD + for instruction in logic_in_network: + instruction_type = instruction.get("type", ""); scl_code = instruction.get("scl", ""); is_grouped = instruction.get("grouped", False) + if is_grouped: continue + if (instruction_type.endswith(SCL_SUFFIX) or instruction_type in ["RAW_SCL_CHUNK","UNSUPPORTED_LANG","UNSUPPORTED_CONTENT","PARSING_ERROR"] or "_error" in instruction_type) and scl_code: + is_only_comment = all(line.strip().startswith("//") for line in scl_code.splitlines() if line.strip()) + is_if_block = scl_code.strip().startswith("IF") + if (not is_only_comment or is_if_block or "_error" in instruction_type or instruction_type in ["UNSUPPORTED_LANG","UNSUPPORTED_CONTENT","PARSING_ERROR"]): + network_has_code = True; [output_content.append(f" {line}") for line in scl_code.splitlines()]; output_content.append("") + if not 
network_has_code and network_lang != "STL": output_content.append(f" // Network {i+1} did not produce printable SCL/MD code."); output_content.append("") + output_content.append(f"END_{scl_block_keyword}") + + else: print(f"Error: Tipo de bloque desconocido '{block_type}' en JSON."); return + + # --- Escritura del Archivo --- + output_filename_base = f"{scl_block_name}{output_extension}" + output_filepath = os.path.join(output_directory, output_filename_base) + print(f" -> Escribiendo archivo de salida en: {output_filepath}") + try: + os.makedirs(output_directory, exist_ok=True) + with open(output_filepath, "w", encoding="utf-8") as f: + for line in output_content: f.write(line + "\n") + print(f"Generación de {output_extension.upper()} completada.") + except Exception as e: print(f"Error al escribir el archivo {output_extension.upper()}: {e}"); traceback.print_exc() + +# --- Ejecución (SIN CAMBIOS) --- +if __name__ == "__main__": + parser = argparse.ArgumentParser(description="Generate final SCL or Markdown file from processed JSON.") + parser.add_argument("source_xml_filepath",help="Path to the original source XML file.") + args = parser.parse_args(); source_xml_file = args.source_xml_filepath + if not os.path.exists(source_xml_file): print(f"Advertencia (x3): Archivo XML original no encontrado: '{source_xml_file}'.") + xml_filename_base = os.path.splitext(os.path.basename(source_xml_file))[0] + base_dir = os.path.dirname(source_xml_file) + input_json_file = os.path.join(base_dir, f"{xml_filename_base}_simplified_processed.json") + output_dir = base_dir + print(f"(x3) Generando SCL/MD desde: '{os.path.relpath(input_json_file)}' en directorio: '{os.path.relpath(output_dir)}'") + if not os.path.exists(input_json_file): + print(f"Error Fatal (x3): Archivo JSON procesado no encontrado: '{input_json_file}'") + print(f"Asegúrate de que 'x2_process.py' se ejecutó correctamente para '{os.path.relpath(source_xml_file)}'.") + sys.exit(1) else: - # No IF needed if EN is always TRUE - scl_final = scl_call_body - - # --- Actualizar Instrucción y Mapa SymPy --- - instruction["scl"] = scl_final # Guardar el SCL final generado - - # Update instruction type to mark as processed (unless already marked as error) - if "_error" not in instruction.get("type", ""): - instruction["type"] = f"Call_{block_type}{SCL_SUFFIX}" - - # Propagar el estado ENO (es la expresión SymPy de EN) - map_key_eno = (network_id, instr_uid, "eno") - sympy_map[map_key_eno] = sympy_en_expr # Guardar la expresión SymPy para ENO - - # --- Propagar Valores de Salida (Importante pero complejo) --- - # Esto requiere conocer la interfaz del bloque llamado (que no tenemos aquí directamente) - # O asumir convenciones estándar (ej. FCs tienen Ret_Val, FBs tienen outputs en su instancia) - - # Heurística simple: Si es un FC, intentar propagar Ret_Val si existe en outputs - # Si es un FB, las salidas se acceden a través de la instancia (e.g., "MyInstance".Output1) - # Por ahora, dejaremos la propagación de salidas más avanzada para una mejora futura - # o requerirá pasar información de la interfaz del bloque llamado. 
- - # Ejemplo básico (necesita mejorar): - # for pin_name, dest_list in instruction.get("outputs", {}).items(): - # if pin_name != 'eno' and dest_list: # Asumir que hay un destino - # map_key_out = (network_id, instr_uid, pin_name) - # pin_name_scl = format_variable_name(pin_name) - # if block_type == "FB" and instance_db_scl: - # # Salida de FB: "Instancia".NombrePin - # output_scl_access = f"{instance_db_scl}.{pin_name_scl}" - # # Podríamos guardar el string SCL o crear/obtener un Symbol - # sympy_out_symbol = symbol_manager.get_symbol(output_scl_access) - # sympy_map[map_key_out] = sympy_out_symbol if sympy_out_symbol else output_scl_access # Prefiere Symbol - # elif block_type == "FC": - # # Salida de FC: Requiere asignar a una variable (temporal o de interfaz) - # # Esto se complica porque el destino está en 'dest_list' - # if len(dest_list) == 1 and dest_list[0].get("type") == "variable": - # target_var_name = format_variable_name(dest_list[0].get("name")) - # # Guardar el nombre del destino SCL que contendrá el valor - # sympy_map[map_key_out] = target_var_name - # # Necesitaríamos modificar scl_final para incluir la asignación: - # # target_var_name := FC_Call(...); (requiere reestructurar la generación SCL) - # else: - # # Múltiples destinos o destino no variable es complejo para FC outputs - # sympy_map[map_key_out] = f"/* TODO: Assign FC output {pin_name_scl} */" - - - return True - - -# --- Processor Information Function --- -def get_processor_info(): - """Devuelve la información para las llamadas a FC y FB.""" - # Asegurarse que los type_name coincidan con los usados en x1 y x2 - return [ - {'type_name': 'call_fc', 'processor_func': process_call, 'priority': 6}, # Prioridad alta - {'type_name': 'call_fb', 'processor_func': process_call, 'priority': 6} # Prioridad alta - ] \ No newline at end of file + try: generate_scl_or_markdown(input_json_file, output_dir); sys.exit(0) + except Exception as e: print(f"Error Crítico (x3) durante la generación de SCL/MD desde '{input_json_file}': {e}"); traceback.print_exc(); sys.exit(1) \ No newline at end of file diff --git a/x1_to_json.py b/x1_to_json.py index de1a4fb..2e9f4ca 100644 --- a/x1_to_json.py +++ b/x1_to_json.py @@ -1,1621 +1,422 @@ +# ToUpload/x1_to_json.py # -*- coding: utf-8 -*- import json import argparse import os -import re -from lxml import etree +import sys import traceback +import importlib +from lxml import etree from collections import defaultdict -import copy # Importar copy para deepcopy +import copy -# --- Namespaces --- -ns = { - "iface": "http://www.siemens.com/automation/Openness/SW/Interface/v5", - "flg": "http://www.siemens.com/automation/Openness/SW/NetworkSource/FlgNet/v4", - "st": "http://www.siemens.com/automation/Openness/SW/NetworkSource/StructuredText/v3", - "stl": "http://www.siemens.com/automation/Openness/SW/NetworkSource/StatementList/v4", -} +# Importar funciones comunes y namespaces desde el nuevo módulo de utils +try: + from parsers.parser_utils import ns, get_multilingual_text, parse_interface_members +except ImportError as e: + print( + f"Error crítico: No se pudieron importar funciones desde parsers.parser_utils: {e}" + ) + print( + "Asegúrate de que el directorio 'parsers' y 'parsers/parser_utils.py' existen y son correctos." + ) + sys.exit(1) +# --- NUEVAS FUNCIONES DE PARSEO para UDT y Tag Table --- -# --- (Funciones helper SIN CAMBIOS: get_multilingual_text, get_symbol_name, parse_access, parse_part, parse_call, reconstruct_scl_from_tokens, etc.) 
--- -# --- (Incluye aquí tus funciones helper sin cambios) --- -def get_multilingual_text(element, default_lang="en-US", fallback_lang="it-IT"): - # (Sin cambios respecto a la versión anterior) - if element is None: - return "" - try: - # Intenta buscar el idioma por defecto - xpath_expr_default = f".//iface:MultilingualTextItem[iface:AttributeList/iface:Culture='{default_lang}']/iface:AttributeList/iface:Text" - text_items_default = element.xpath(xpath_expr_default, namespaces=ns) - if text_items_default and text_items_default[0].text is not None: - return text_items_default[0].text.strip() - - # Intenta buscar el idioma de fallback - xpath_expr_fallback = f".//iface:MultilingualTextItem[iface:AttributeList/iface:Culture='{fallback_lang}']/iface:AttributeList/iface:Text" - text_items_fallback = element.xpath(xpath_expr_fallback, namespaces=ns) - if text_items_fallback and text_items_fallback[0].text is not None: - return text_items_fallback[0].text.strip() - - # Si no encuentra ninguno, toma el primer texto que encuentre - xpath_expr_any = ".//iface:MultilingualTextItem/iface:AttributeList/iface:Text" - text_items_any = element.xpath(xpath_expr_any, namespaces=ns) - if text_items_any and text_items_any[0].text is not None: - return text_items_any[0].text.strip() - - # Fallback si MultilingualText está vacío o tiene una estructura inesperada - return "" - except Exception as e: - print(f"Advertencia: Error extrayendo MultilingualText: {e}") - # traceback.print_exc() # Descomentar para más detalles del error - return "" - - -def get_symbol_name(symbol_element): - # Adaptado para usar namespace flg - if symbol_element is None: - return None - try: - # Asume que Component está dentro de Symbol y ambos están en el namespace flg - components = symbol_element.xpath("./flg:Component/@Name", namespaces=ns) - # Formatear correctamente con comillas dobles - return ".".join(f'"{c}"' for c in components) if components else None - except Exception as e: - print(f"Advertencia: Excepción en get_symbol_name: {e}") - return None - - -def parse_access(access_element): - # Adaptado para usar namespace flg - if access_element is None: - return None - uid = access_element.get("UId") - scope = access_element.get("Scope") - info = {"uid": uid, "scope": scope, "type": "unknown"} - - # Buscar Symbol o Constant usando el namespace flg - symbol = access_element.xpath("./flg:Symbol", namespaces=ns) - constant = access_element.xpath("./flg:Constant", namespaces=ns) - - if symbol: - info["type"] = "variable" - # Llamar a get_symbol_name que ahora espera flg:Symbol - info["name"] = get_symbol_name(symbol[0]) - if info["name"] is None: - info["type"] = "error_parsing_symbol" - print(f"Error: No se pudo parsear nombre símbolo Access UID={uid}") - return info - elif constant: - info["type"] = "constant" - # Buscar ConstantType y ConstantValue usando el namespace flg - const_type_elem = constant[0].xpath("./flg:ConstantType", namespaces=ns) - const_val_elem = constant[0].xpath("./flg:ConstantValue", namespaces=ns) - - # Extraer texto - info["datatype"] = ( - const_type_elem[0].text.strip() - if const_type_elem and const_type_elem[0].text is not None - else "Unknown" - ) - value_str = ( - const_val_elem[0].text.strip() - if const_val_elem and const_val_elem[0].text is not None - else None - ) - - if value_str is None: - info["type"] = "error_parsing_constant" - info["value"] = None - print(f"Error: Constante sin valor Access UID={uid}") - return info - - # Inferir tipo si es Unknown (igual que antes) - if info["datatype"] == 
"Unknown": - val_lower = value_str.lower() - if val_lower in ["true", "false"]: - info["datatype"] = "Bool" - elif value_str.isdigit() or ( - value_str.startswith("-") and value_str[1:].isdigit() - ): - info["datatype"] = "Int" - elif "." in value_str: - try: - float(value_str) - info["datatype"] = "Real" - except ValueError: - pass - elif "#" in value_str: - info["datatype"] = "TypedConstant" - - info["value"] = value_str # Guardar valor original - # Intentar conversión numérica/booleana (igual que antes) - dtype_lower = info["datatype"].lower() - val_str_processed = value_str.split("#")[-1] if "#" in value_str else value_str - try: - if dtype_lower in [ - "int", - "dint", - "udint", - "sint", - "usint", - "lint", - "ulint", - "word", - "dword", - "lword", - "byte", - ]: - info["value"] = int(val_str_processed) - elif dtype_lower == "bool": - info["value"] = ( - val_str_processed.lower() == "true" or val_str_processed == "1" - ) - elif dtype_lower in ["real", "lreal"]: - info["value"] = float(val_str_processed) - # Mantener string para TypedConstant y otros - except (ValueError, TypeError) as e: - print( - f"Advertencia: No se pudo convertir valor '{val_str_processed}' a {dtype_lower} UID={uid}. Error: {e}" - ) - info["value"] = value_str # Mantener string original - - else: - info["type"] = "unknown_structure" - print(f"Advertencia: Access UID={uid} no es Symbol ni Constant.") - return info - - if info["type"] == "variable" and info.get("name") is None: - print(f"Error Interno: parse_access var sin nombre UID {uid}.") - info["type"] = "error_no_name" - return info - return info - - -def parse_part(part_element): - # Asume que Part está en namespace flg - if part_element is None: - return None - uid = part_element.get("UId") - name = part_element.get("Name") - if not uid or not name: - print( - f"Error: Part sin UID o Name: {etree.tostring(part_element, encoding='unicode')}" - ) - return None - - template_values = {} - try: - # TemplateValue parece NO tener namespace flg - for tv in part_element.xpath("./TemplateValue"): - tv_name = tv.get("Name") - tv_type = tv.get("Type") - if tv_name and tv_type: - template_values[tv_name] = tv_type - except Exception as e: - print(f"Advertencia: Error extrayendo TemplateValues Part UID={uid}: {e}") - - negated_pins = {} - try: - # Negated parece NO tener namespace flg - for negated_elem in part_element.xpath("./Negated"): - negated_pin_name = negated_elem.get("Name") - if negated_pin_name: - negated_pins[negated_pin_name] = True - except Exception as e: - print(f"Advertencia: Error extrayendo Negated Pins Part UID={uid}: {e}") - - return { - "uid": uid, - "type": name, - "template_values": template_values, - "negated_pins": negated_pins, +def parse_udt(udt_element): + """Parsea un elemento (UDT).""" + print(" -> Detectado: PlcStruct (UDT)") + block_data = { + "block_name": "UnknownUDT", + "block_type": "PlcUDT", # Identificador para x3 + "language": "UDT", # Lenguaje específico + "interface": {}, + "networks": [], # Los UDTs no tienen redes + "block_comment": "", } + # Extraer nombre y comentario del UDT (similar a como se hace con bloques) + attribute_list_node = udt_element.xpath("./AttributeList") + if attribute_list_node: + attr_list = attribute_list_node[0] + name_node = attr_list.xpath("./Name/text()") + block_data["block_name"] = name_node[0].strip() if name_node else "UnknownUDT" + # Comentario del UDT + comment_node_list = udt_element.xpath("./ObjectList/MultilingualText[@CompositionName='Comment']") + if comment_node_list: + 
block_data["block_comment"] = get_multilingual_text(comment_node_list[0]) + else: # Fallback + comment_attr_node = attr_list.xpath("../ObjectList/MultilingualText[@CompositionName='Comment']") # Buscar desde el padre + if comment_attr_node : + block_data["block_comment"] = get_multilingual_text(comment_attr_node[0]) -def parse_call(call_element): - # Asume que Call está en namespace flg - if call_element is None: - return None - uid = call_element.get("UId") - if not uid: - print( - f"Error: Call encontrado sin UID: {etree.tostring(call_element, encoding='unicode')}" - ) - return None - # << CORRECCIÓN: CallInfo y sus hijos están en el namespace por defecto (flg) >> - call_info_elem = call_element.xpath("./flg:CallInfo", namespaces=ns) - if not call_info_elem: - print(f"Error: Call UID {uid} sin elemento flg:CallInfo.") - # Intentar sin namespace como fallback por si acaso - call_info_elem_no_ns = call_element.xpath("./CallInfo") - if not call_info_elem_no_ns: - print( - f"Error: Call UID {uid} sin elemento CallInfo (probado sin NS tambien)." - ) - return None + # Extraer interfaz (miembros) + # La interfaz de un UDT suele estar directamente en
+ interface_node_list = udt_element.xpath( + "./AttributeList/Interface/iface:Sections/iface:Section[@Name='None']", namespaces=ns + ) + if interface_node_list: + section_node = interface_node_list[0] + members_in_section = section_node.xpath("./iface:Member", namespaces=ns) + if members_in_section: + # Usar la función existente para parsear miembros + block_data["interface"]["None"] = parse_interface_members(members_in_section) else: - # Si se encontró sin NS, usar ese (menos probable pero posible) - print(f"Advertencia: Call UID {uid} encontró CallInfo SIN namespace.") - call_info = call_info_elem_no_ns[0] - + print(f"Advertencia: Sección 'None' encontrada en UDT '{block_data['block_name']}' pero sin miembros.") else: - call_info = call_info_elem[0] # Usar el encontrado con namespace + # Intentar buscar interfaz directamente si no está en AttributeList (menos común) + interface_node_direct = udt_element.xpath( + ".//iface:Interface/iface:Sections/iface:Section[@Name='None']", namespaces=ns + ) + if interface_node_direct: + section_node = interface_node_direct[0] + members_in_section = section_node.xpath("./iface:Member", namespaces=ns) + if members_in_section: + block_data["interface"]["None"] = parse_interface_members(members_in_section) + else: + print(f"Advertencia: Sección 'None' encontrada directamente en UDT '{block_data['block_name']}' pero sin miembros.") + else: + print(f"Advertencia: No se encontró la sección 'None' de la interfaz para UDT '{block_data['block_name']}'.") - block_name = call_info.get("Name") - block_type = call_info.get("BlockType") - if not block_name or not block_type: - print(f"Error: CallInfo para UID {uid} sin Name o BlockType.") - return None - instance_name = None - instance_scope = None - # Buscar Instance y Component (que también deberían estar en namespace flg) - if block_type == "FB": - instance_elem_list = call_info.xpath("./flg:Instance", namespaces=ns) - if instance_elem_list: - instance_elem = instance_elem_list[0] - instance_scope = instance_elem.get("Scope") - # Buscar Component dentro de Instance - component_elem_list = instance_elem.xpath("./flg:Component", namespaces=ns) - if component_elem_list: - component_elem = component_elem_list[0] - db_name_raw = component_elem.get("Name") - if db_name_raw: - instance_name = f'"{db_name_raw}"' # Añadir comillas + if not block_data["interface"]: + print(f"Advertencia: No se pudo extraer la interfaz del UDT '{block_data['block_name']}'.") + + return block_data + +def parse_tag_table(tag_table_element): + """Parsea un elemento .""" + print(" -> Detectado: PlcTagTable") + table_data = { + "block_name": "UnknownTagTable", + "block_type": "PlcTagTable", # Identificador para x3 + "language": "TagTable", # Lenguaje específico + "tags": [], + "networks": [], # Las Tag Tables no tienen redes + "block_comment": "", # Las tablas de tags no suelen tener comentario de bloque + } + + # Extraer nombre de la tabla + attribute_list_node = tag_table_element.xpath("./AttributeList") + if attribute_list_node: + name_node = attribute_list_node[0].xpath("./Name/text()") + table_data["block_name"] = name_node[0].strip() if name_node else "UnknownTagTable" + + # Extraer tags + tag_elements = tag_table_element.xpath("./ObjectList/SW.Tags.PlcTag") + print(f" - Encontrados {len(tag_elements)} tags.") + for tag_elem in tag_elements: + tag_info = { + "name": "UnknownTag", + "datatype": "Unknown", + "address": None, + "comment": "" + } + tag_attr_list = tag_elem.xpath("./AttributeList") + if tag_attr_list: + attr_list = 
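# Illustrative sketch (not part of the patch): approximate shape of the
# dictionary parse_udt() above returns for a hypothetical UDT named "MyUdt";
# the member list inside "interface" is produced by parse_interface_members().
example_udt_result = {
    "block_name": "MyUdt",          # hypothetical UDT name
    "block_type": "PlcUDT",         # marker consumed later by x3
    "language": "UDT",
    "interface": {"None": [...]},   # list of member dicts (name, datatype, ...)
    "networks": [],                 # UDTs have no networks
    "block_comment": "",
}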
tag_attr_list[0] + name_node = attr_list.xpath("./Name/text()") + tag_info["name"] = name_node[0].strip() if name_node else "UnknownTag" + dtype_node = attr_list.xpath("./DataTypeName/text()") + tag_info["datatype"] = dtype_node[0].strip() if dtype_node else "Unknown" + addr_node = attr_list.xpath("./LogicalAddress/text()") + tag_info["address"] = addr_node[0].strip() if addr_node else None + + # Extraer comentario del tag + comment_node_list = tag_elem.xpath("./ObjectList/MultilingualText[@CompositionName='Comment']") + if comment_node_list: + tag_info["comment"] = get_multilingual_text(comment_node_list[0]) + + table_data["tags"].append(tag_info) + + return table_data + +# --- Cargador Dinámico de Parsers (sin cambios) --- +def load_parsers(parsers_dir="parsers"): + """ + Escanea el directorio de parsers, importa módulos y construye + un mapa de lenguaje a función de parseo. + """ + parser_map = {} + # Verificar si el directorio existe + script_dir = os.path.dirname(__file__) + parsers_dir_path = os.path.join(script_dir, parsers_dir) + if not os.path.isdir(parsers_dir_path): + print(f"Error: Directorio de parsers no encontrado: '{parsers_dir_path}'") + return parser_map # Devuelve mapa vacío + + print(f"Cargando parsers desde: '{parsers_dir_path}'") + parsers_package = os.path.basename(parsers_dir) + + for filename in os.listdir(parsers_dir_path): + # Buscar archivos que empiecen con 'parse_' y terminen en '.py' + # Excluir '__init__.py' y 'parser_utils.py' + if ( + filename.startswith("parse_") + and filename.endswith(".py") + and filename not in ["__init__.py", "parser_utils.py"] + ): + module_name_rel = filename[:-3] # Nombre sin .py (e.g., parse_lad_fbd) + full_module_name = f"{parsers_package}.{module_name_rel}" # e.g., parsers.parse_lad_fbd + try: + # Importar el módulo dinámicamente + module = importlib.import_module(full_module_name) + + # Verificar si el módulo tiene la función get_parser_info + if hasattr(module, "get_parser_info") and callable( + module.get_parser_info + ): + parser_info = module.get_parser_info() + # Esperamos un diccionario con 'language' (lista) y 'parser_func' + if ( + isinstance(parser_info, dict) + and "language" in parser_info + and "parser_func" in parser_info + ): + languages = parser_info["language"] + parser_func = parser_info["parser_func"] + + if isinstance(languages, list) and callable(parser_func): + # Añadir la función al mapa para cada lenguaje que soporta + for lang in languages: + lang_upper = lang.upper() # Usar mayúsculas como clave + if lang_upper in parser_map: + print( + f" Advertencia: Parser para '{lang_upper}' en {full_module_name} sobrescribe definición anterior." + ) + parser_map[lang_upper] = parser_func + print( + f" - Cargado parser para '{lang_upper}' desde {module_name_rel}.py" + ) + else: + print( + f" Advertencia: Formato inválido en get_parser_info de {full_module_name} (language debe ser lista, parser_func callable)." + ) + else: + print( + f" Advertencia: get_parser_info en {full_module_name} no devolvió el diccionario esperado." + ) else: print( - f"Advertencia: en FB Call UID {uid} sin 'Name'." - ) - else: - print( - f"Advertencia: No se encontró en FB Call UID {uid}." 
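# Illustrative sketch (not part of the patch): a minimal, hypothetical export
# fragment that parse_tag_table() above would accept; real TIA Portal exports
# carry additional attributes and elements.
from lxml import etree

_example_tag_table_xml = (
    '<SW.Tags.PlcTagTable ID="2">'
    '<AttributeList><Name>Default tag table</Name></AttributeList>'
    '<ObjectList>'
    '<SW.Tags.PlcTag ID="3">'
    '<AttributeList>'
    '<Name>Motor_Run</Name>'
    '<DataTypeName>Bool</DataTypeName>'
    '<LogicalAddress>%Q0.0</LogicalAddress>'
    '</AttributeList>'
    '</SW.Tags.PlcTag>'
    '</ObjectList>'
    '</SW.Tags.PlcTagTable>'
)
# parse_tag_table(etree.fromstring(_example_tag_table_xml)) would yield:
# {"block_name": "Default tag table", "block_type": "PlcTagTable",
#  "language": "TagTable", "networks": [], "block_comment": "",
#  "tags": [{"name": "Motor_Run", "datatype": "Bool",
#            "address": "%Q0.0", "comment": ""}]}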
- ) - else: - print(f"Advertencia: FB Call '{block_name}' UID {uid} sin .") - - call_data = { - "uid": uid, - "type": "Call", - "block_name": block_name, - "block_type": block_type, - } - if instance_name: - call_data["instance_db"] = instance_name - if instance_scope: - call_data["instance_scope"] = instance_scope - return call_data - - -def reconstruct_scl_from_tokens(st_node): - """ - Reconstruye SCL desde , mejorando el manejo de - variables, constantes literales, tokens básicos, espacios y saltos de línea. - """ - if st_node is None: - return "// Error: StructuredText node not found.\n" - - scl_parts = [] - children = st_node.xpath("./st:*", namespaces=ns) - - for elem in children: - tag = etree.QName(elem.tag).localname - - if tag == "Token": - scl_parts.append(elem.get("Text", "")) - elif tag == "Blank": - if not scl_parts or not scl_parts[-1].endswith(" "): - scl_parts.append(" " * int(elem.get("Num", 1))) - elif int(elem.get("Num", 1)) > 1: - scl_parts.append(" " * (int(elem.get("Num", 1)) - 1)) - elif tag == "NewLine": - if scl_parts: - scl_parts[-1] = scl_parts[-1].rstrip() - scl_parts.append("\n") - elif tag == "Access": - scope = elem.get("Scope") - access_str = f"/*_ERR_Scope_{scope}_*/" - - if scope in [ - "GlobalVariable", - "LocalVariable", - "TempVariable", - "InOutVariable", - "InputVariable", - "OutputVariable", - "ConstantVariable", - ]: - symbol_elem = elem.xpath("./st:Symbol", namespaces=ns) - if symbol_elem: - components = symbol_elem[0].xpath("./st:Component", namespaces=ns) - symbol_text_parts = [] - for i, comp in enumerate(components): - name = comp.get("Name", "_ERR_COMP_") - if i > 0: - symbol_text_parts.append(".") - - # Check for HasQuotes attribute (adjust namespace if needed) - has_quotes_elem = comp.xpath( - "ancestor::st:Access/st:BooleanAttribute[@Name='HasQuotes']/text()", - namespaces=ns, - ) # Check attribute on Access parent - # print(f"DEBUG HasQuotes check for {name}: {has_quotes_elem}") # Debug - has_quotes = ( - has_quotes_elem and has_quotes_elem[0].lower() == "true" - ) - - is_temp = name.startswith("#") - - # Apply quotes based on HasQuotes or if it's the first component and not temp - if has_quotes or ( - i == 0 and not is_temp and '"' not in name - ): # Avoid double quotes - symbol_text_parts.append(f'"{name}"') - else: - symbol_text_parts.append(name) - - index_access = comp.xpath("./st:Access", namespaces=ns) - if index_access: - indices_text = [ - reconstruct_scl_from_tokens(idx_node) - for idx_node in index_access - ] - symbol_text_parts.append(f"[{','.join(indices_text)}]") - - access_str = "".join(symbol_text_parts) - - elif scope == "LiteralConstant": - constant_elem = elem.xpath("./st:Constant", namespaces=ns) - if constant_elem: - val_elem = constant_elem[0].xpath( - "./st:ConstantValue/text()", namespaces=ns - ) - type_elem = constant_elem[0].xpath( - "./st:ConstantType/text()", namespaces=ns - ) - const_type = ( - type_elem[0].strip() - if type_elem and type_elem[0] is not None - else "" - ) - const_val = ( - val_elem[0].strip() - if val_elem and val_elem[0] is not None - else "_ERR_CONSTVAL_" + f" Advertencia: Módulo {module_name_rel}.py no tiene la función 'get_parser_info'." 
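# Illustrative sketch (not part of the patch): the contract load_parsers() above
# expects from every parsers/parse_*.py module -- a get_parser_info() function
# returning a 'language' list and a 'parser_func' callable. The module and
# function names below are hypothetical.

# parsers/parse_example.py
def parse_example_network(network_element):
    """Minimal network parser returning the structure the converter expects."""
    return {"id": network_element.get("ID"), "language": "EXAMPLE", "logic": []}

def get_parser_info():
    return {"language": ["EXAMPLE"], "parser_func": parse_example_network}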
) - # Format based on type - if const_type.lower() == "bool": - access_str = const_val.upper() - elif const_type.lower() == "string": - replaced_val = const_val.replace("'", "''") - access_str = f"'{replaced_val}'" - elif const_type.lower() == "char": - replaced_val = const_val.replace("'", "''") - access_str = f"'{replaced_val}'" - elif const_type.lower() == "time": - access_str = f"T#{const_val}" - elif const_type.lower() == "ltime": - access_str = f"LT#{const_val}" - elif const_type.lower() == "s5time": - access_str = f"S5T#{const_val}" - elif const_type.lower() == "date": - access_str = f"D#{const_val}" - elif const_type.lower() == "dtl": - access_str = f"DTL#{const_val}" - elif const_type.lower() == "dt": - access_str = f"DT#{const_val}" - elif const_type.lower() == "tod": - access_str = f"TOD#{const_val}" - else: - access_str = const_val # For Int, Real, etc. - - else: - access_str = "/*_ERR_NOCONST_*/" - # Add more scope handling if needed - - scl_parts.append(access_str) - - elif tag == "Comment" or tag == "LineComment": - # Corrected comment extraction using get_multilingual_text - comment_text = get_multilingual_text( - elem - ) # Pass the or element itself - if tag == "Comment": - scl_parts.append(f"(* {comment_text} *)") - else: - scl_parts.append(f"// {comment_text}") - - full_scl = "".join(scl_parts) - - # Re-indentation (simple approach) - output_lines = [] - indent_level = 0 - indent_str = " " # Two spaces per indent level - for line in full_scl.split("\n"): - trimmed_line = line.strip() - if not trimmed_line: - # output_lines.append("") # Keep empty lines for spacing? Optional. - continue - - # Adjust indent before processing line - if trimmed_line.startswith(("END_", "UNTIL", "ELSE", "ELSIF")): - indent_level = max(0, indent_level - 1) - - output_lines.append(indent_str * indent_level + trimmed_line) - - # Adjust indent after processing line - if ( - trimmed_line.endswith(("THEN", "DO", "OF")) - or trimmed_line == "ELSE" - or trimmed_line.startswith("FOR") - or trimmed_line.startswith("WHILE") - or trimmed_line.startswith("CASE") - or trimmed_line.startswith("REPEAT") - ): - indent_level += 1 - # Handle BEGIN for block structures if necessary (more complex) - - return "\n".join(output_lines) - - -# STL Parser (using namespace stl) -def get_access_text(access_element): - """Reconstruye una representación textual simple de un Access en STL.""" - if access_element is None: - return "_ERR_ACCESS_" - scope = access_element.get("Scope") - - symbol_elem = access_element.xpath("./stl:Symbol", namespaces=ns) - if symbol_elem: - components = symbol_elem[0].xpath("./stl:Component", namespaces=ns) - parts = [] - for i, comp in enumerate(components): - name = comp.get("Name", "_ERR_COMP_") - # Check for HasQuotes attribute (usually on Access, check parent?) 
- has_quotes_elem = comp.xpath( - "ancestor::stl:Access/stl:BooleanAttribute[@Name='HasQuotes']/text()", - namespaces=ns, - ) - has_quotes = has_quotes_elem and has_quotes_elem[0].lower() == "true" - is_temp = name.startswith("#") - - if i > 0: - parts.append(".") # Add dot separator - - if has_quotes or (i == 0 and not is_temp and '"' not in name): - parts.append(f'"{name}"') - else: - parts.append(name) - - index_access = comp.xpath("./stl:Access", namespaces=ns) - if index_access: - indices = [get_access_text(ia) for ia in index_access] - parts.append(f"[{','.join(indices)}]") - return "".join(parts) - - constant_elem = access_element.xpath("./stl:Constant", namespaces=ns) - if constant_elem: - val_elem = constant_elem[0].xpath("./stl:ConstantValue/text()", namespaces=ns) - type_elem = constant_elem[0].xpath("./stl:ConstantType/text()", namespaces=ns) - const_type = ( - type_elem[0].strip() if type_elem and type_elem[0] is not None else "" - ) - const_val = ( - val_elem[0].strip() - if val_elem and val_elem[0] is not None - else "_ERR_CONST_" - ) - - if const_type.lower() == "time": - return f"T#{const_val}" - if const_type.lower() == "s5time": - return f"S5T#{const_val}" - if const_type.lower() == "date": - return f"D#{const_val}" - if const_type.lower() == "dt": - return f"DT#{const_val}" # Added DT - # Add more type prefixes if needed (LTIME, TOD, DTL...) - return const_val - - label_elem = access_element.xpath("./stl:Label", namespaces=ns) - if label_elem: - return label_elem[0].get("Name", "_ERR_LABEL_") - - indirect_elem = access_element.xpath("./stl:Indirect", namespaces=ns) - if indirect_elem: - reg = indirect_elem[0].get("Register", "AR?") - offset_str = indirect_elem[0].get("BitOffset", "0") - area = indirect_elem[0].get("Area", "DB") - width = indirect_elem[0].get("Width", "X") - try: - bit_offset = int(offset_str) - byte_offset = bit_offset // 8 - bit_in_byte = bit_offset % 8 - p_format_offset = f"P#{byte_offset}.{bit_in_byte}" - except ValueError: - p_format_offset = "P#?.?" - width_map = { - "Bit": "X", - "Byte": "B", - "Word": "W", - "Double": "D", - "Long": "D", - } # Added Long->D - width_char = width_map.get(width, width[0] if width else "?") - return f"{area}{width_char}[{reg},{p_format_offset}]" - - address_elem = access_element.xpath("./stl:Address", namespaces=ns) - if address_elem: - area = address_elem[0].get("Area", "??") - bit_offset_str = address_elem[0].get("BitOffset", "0") - addr_type_str = address_elem[0].get("Type", "Bool") - try: - bit_offset = int(bit_offset_str) - byte_offset = bit_offset // 8 - bit_in_byte = bit_offset % 8 - addr_width = "X" # Default - if addr_type_str == "Byte": - addr_width = "B" - elif addr_type_str == "Word": - addr_width = "W" - elif addr_type_str in ["DWord", "DInt", "Real", "Time", "DT"]: - addr_width = "D" # Added types - elif addr_type_str in ["LReal", "LTime", "LWord", "LInt", "ULInt"]: - addr_width = "L" # Handle 64-bit? Assume L? Needs check. - area_map = { - "Input": "I", - "Output": "Q", - "Memory": "M", - "PeripheryInput": "PI", - "PeripheryOutput": "PQ", - "DB": "DB", - "DI": "DI", - "Local": "L", - "Timer": "T", - "Counter": "C", - } - stl_area = area_map.get(area, area) - - if stl_area in ["DB", "DI"]: - block_num = address_elem[0].get("BlockNumber") - if block_num: - return f"{stl_area}{block_num}.{stl_area}{addr_width}{byte_offset}.{bit_in_byte}" - else: - return f"{stl_area}{addr_width}{byte_offset}.{bit_in_byte}" # Register access DBX, DIX etc. 
- elif stl_area in ["T", "C"]: - return f"{stl_area}{byte_offset}" # T 5, C 10 - else: - return ( - f"{stl_area}{addr_width}{byte_offset}.{bit_in_byte}" # M10.1, I0.0 - ) - - except ValueError: - return f"{area}?{bit_offset_str}?" - - call_info_elem = access_element.xpath("./stl:CallInfo", namespaces=ns) - if call_info_elem: # Handle Call as operand (e.g., CALL FC10) - name = call_info_elem[0].get("Name", "_ERR_CALL_") - btype = call_info_elem[0].get("BlockType", "FC") - instance_node = call_info_elem[0].xpath( - "./stl:Instance/stl:Component/@Name", namespaces=ns - ) - if btype == "FB" and instance_node: - return f'"{instance_node[0]}"' # Return DB name for FB call operand - else: - return f'{btype} "{name}"' # Return FC "Name" or similar - - return f"_{scope}_?" # Fallback - - -def get_comment_text_stl(comment_element): - """Extrae texto de un LineComment o Comment para STL.""" - if comment_element is None: - return "" - # STL Comments are directly under the element, not usually Multilingual - text_nodes = comment_element.xpath("./stl:Text/text()", namespaces=ns) - if text_nodes: - return text_nodes[0].strip() - # Fallback if structure is different - # return "".join(comment_element.xpath(".//text()")).strip() - return "" # Return empty if no found - - -def reconstruct_stl_from_statementlist(statement_list_node): - """Reconstruye el código STL como una cadena de texto desde .""" - if statement_list_node is None: - return "// Error: StatementList node not found.\n" - stl_lines = [] - statements = statement_list_node.xpath("./stl:StlStatement", namespaces=ns) - - for stmt in statements: - line_parts = [] - inline_comment = "" # Comments after code on the same line - - # 1. Initial Comments (full line //) - initial_comments = stmt.xpath( - "child::stl:Comment[not(@Inserted='true')] | child::stl:LineComment[not(@Inserted='true')]", - namespaces=ns, - ) - for comm in initial_comments: - comment_text = get_comment_text_stl(comm) - if comment_text: - for comment_line in comment_text.splitlines(): - stl_lines.append( - f"// {comment_line}" - ) # Add as separate comment lines - - # 2. Label - label_decl = stmt.xpath("./stl:LabelDeclaration", namespaces=ns) - label_str = "" - if label_decl: - label_name = label_decl[0].xpath("./stl:Label/@Name", namespaces=ns) - if label_name: - label_str = f"{label_name[0]}:" - # Get comments after label but before instruction - label_comments = label_decl[0].xpath( - "./stl:Comment[@Inserted='true'] | ./stl:LineComment[@Inserted='true']", - namespaces=ns, - ) - for lcomm in label_comments: - inline_comment += f" // {get_comment_text_stl(lcomm)}" - - if label_str: - line_parts.append(label_str) - - # 3. Instruction Token - instruction_token = stmt.xpath("./stl:StlToken", namespaces=ns) - instruction_str = "" - if instruction_token: - token_text = instruction_token[0].get("Text", "_ERR_TOKEN_") - # Check if it's an empty line marker - if token_text == "EMPTY_LINE": - stl_lines.append("") # Add an empty line - continue # Skip rest of processing for this statement - elif token_text == "COMMENT": # Handle full-line comment marker if needed - pass # Already handled by initial comments? Check XML example. 
- else: - instruction_str = token_text - - # Comments directly associated with the token - token_comments = instruction_token[0].xpath( - "./stl:Comment[@Inserted='true'] | ./stl:LineComment[@Inserted='true']", - namespaces=ns, - ) - for tcomm in token_comments: - inline_comment += f" // {get_comment_text_stl(tcomm)}" - - if instruction_str: - # Add tab if label exists - line_parts.append("\t" + instruction_str if label_str else instruction_str) - - # 4. Access/Operand - access_elem = stmt.xpath("./stl:Access", namespaces=ns) - access_str = "" - if access_elem: - access_text = get_access_text(access_elem[0]) - access_str = access_text - # Comments inside Access (can be block or line) - access_comments = access_elem[0].xpath( - "child::stl:Comment[@Inserted='true'] | child::stl:LineComment[@Inserted='true']", - namespaces=ns, - ) - for acc_comm in access_comments: - inline_comment += f" // {get_comment_text_stl(acc_comm)}" - - if access_str: - line_parts.append(access_str) - - # Build the line - current_line = " ".join(line_parts) # Join parts with space - if inline_comment: - current_line += f"\t{inline_comment.strip()}" # Add comment with tab - - if current_line.strip(): - stl_lines.append(current_line.rstrip()) - - return "\n".join(stl_lines) - - -# DB Parser (using namespace iface) -def parse_interface_members(member_elements): - """ - Parsea recursivamente una lista de elementos de una interfaz o estructura. - Maneja miembros simples, structs anidados y arrays con valores iniciales. - """ - members_data = [] - if not member_elements: - return members_data - - for member in member_elements: - member_name = member.get("Name") - member_dtype = member.get("Datatype") - member_remanence = member.get("Remanence", "NonRetain") - member_accessibility = member.get("Accessibility", "Public") - - if not member_name or not member_dtype: - print( - "Advertencia: Miembro sin nombre o tipo de dato encontrado. Saltando." - ) - continue - - member_info = { - "name": member_name, - "datatype": member_dtype, - "remanence": member_remanence, - "accessibility": member_accessibility, - "start_value": None, - "comment": None, - "children": [], - "array_elements": {}, - } - - comment_node = member.xpath("./iface:Comment", namespaces=ns) - if comment_node: - member_info["comment"] = get_multilingual_text(comment_node[0]) - - start_value_node = member.xpath("./iface:StartValue", namespaces=ns) - if start_value_node: - constant_name = start_value_node[0].get("ConstantName") - member_info["start_value"] = ( - constant_name - if constant_name - else ( - start_value_node[0].text - if start_value_node[0].text is not None - else "" - ) - ) - - # --- Structs Anidados --- - nested_sections = member.xpath( - "./iface:Sections/iface:Section/iface:Member", namespaces=ns - ) - if nested_sections: - member_info["children"] = parse_interface_members(nested_sections) - - # --- Arrays --- - if isinstance(member_dtype, str) and member_dtype.lower().startswith("array["): - subelements = member.xpath("./iface:Subelement", namespaces=ns) - for sub in subelements: - path = sub.get("Path") # Path is usually the index '0', '1', ... 
- sub_start_value_node = sub.xpath("./iface:StartValue", namespaces=ns) - if path and sub_start_value_node: - constant_name = sub_start_value_node[0].get("ConstantName") - value = ( - constant_name - if constant_name - else ( - sub_start_value_node[0].text - if sub_start_value_node[0].text is not None - else "" - ) - ) - member_info["array_elements"][path] = value - # Optionally parse subelement comments if needed - - members_data.append(member_info) - return members_data - - -# --- Main Network Parsing Function --- -def parse_network(network_element): - """ - Parsea una red LAD/FBD, extrae lógica y añade conexiones EN implícitas. - Devuelve None o un diccionario con 'error' si falla FlgNet. - """ - if network_element is None: - return { - "id": "ERROR", - "title": "Invalid Network Element", - "logic": [], - "error": "Input element was None", - } - - network_id = network_element.get("ID") - title_element = network_element.xpath( - ".//iface:MultilingualText[@CompositionName='Title']", namespaces=ns - ) - network_title = ( - get_multilingual_text(title_element[0]) - if title_element - else f"Network {network_id}" - ) - comment_element = network_element.xpath( - "./ObjectList/MultilingualText[@CompositionName='Comment']", namespaces=ns - ) # Corrected path? - network_comment = ( - get_multilingual_text(comment_element[0]) if comment_element else "" - ) - - # Buscar NetworkSource y luego FlgNet (ambos usan namespace flg) - network_source_node = network_element.xpath(".//flg:NetworkSource", namespaces=ns) - if not network_source_node: - # Try finding FlgNet directly under CompileUnit if NetworkSource is missing (less common) - flgnet_list = network_element.xpath(".//flg:FlgNet", namespaces=ns) - if not flgnet_list: - return { - "id": network_id, - "title": network_title, - "comment": network_comment, - "logic": [], - "error": "NetworkSource/FlgNet not found", - } - else: - flgnet = flgnet_list[0] - else: - flgnet_list = network_source_node[0].xpath("./flg:FlgNet", namespaces=ns) - if not flgnet_list: - return { - "id": network_id, - "title": network_title, - "comment": network_comment, - "logic": [], - "error": "FlgNet not found inside NetworkSource", - } - else: - flgnet = flgnet_list[0] - - # 1. Parse Access, Parts, Calls (use namespace flg) - access_map = { - acc_info["uid"]: acc_info - for acc in flgnet.xpath(".//flg:Access", namespaces=ns) - if (acc_info := parse_access(acc)) and acc_info["type"] != "unknown" - } - parts_and_calls_map = {} - instruction_elements = flgnet.xpath(".//flg:Part | .//flg:Call", namespaces=ns) - for element in instruction_elements: - parsed_info = None - tag_name = etree.QName(element.tag).localname - if tag_name == "Part": - parsed_info = parse_part(element) - elif tag_name == "Call": - parsed_info = parse_call(element) # parse_call ahora busca flg:CallInfo - - if parsed_info and "uid" in parsed_info: - # Verifica si parse_call tuvo éxito (si no, devuelve None) - if tag_name == "Call" and parsed_info is None: - print( - f"Advertencia: Falló el parseo de Call UID={element.get('UId')}. Ignorando." - ) - continue # Saltar esta instrucción si parse_call falló - parts_and_calls_map[parsed_info["uid"]] = parsed_info - elif tag_name == "Call" and parsed_info is None: - # Si parse_call devolvió None directamente - print( - f"Advertencia: Part/Call inválido ignorado en red {network_id} (UID={element.get('UId')})" - ) - - # 2. 
Parse Wires (use namespace flg) - wire_connections = defaultdict(list) - source_connections = defaultdict(list) - eno_outputs = defaultdict(list) - qname_powerrail = etree.QName(ns["flg"], "Powerrail") - qname_identcon = etree.QName(ns["flg"], "IdentCon") - qname_namecon = etree.QName(ns["flg"], "NameCon") - for wire in flgnet.xpath(".//flg:Wire", namespaces=ns): - children = wire.getchildren() - if len(children) < 2: - continue - source_elem = children[0] - source_uid, source_pin = None, None - if source_elem.tag == qname_powerrail: - source_uid, source_pin = "POWERRAIL", "out" - elif source_elem.tag == qname_identcon: - source_uid, source_pin = ( - source_elem.get("UId"), - "value", - ) # IdentCon usually represents an Access node output - elif source_elem.tag == qname_namecon: - source_uid, source_pin = source_elem.get("UId"), source_elem.get("Name") - if source_uid is None: - continue - - source_info = (source_uid, source_pin) - for dest_elem in children[1:]: - dest_uid, dest_pin = None, None - # Destination can also be IdentCon (Access node input) or NameCon (Instruction pin input) - if dest_elem.tag == qname_identcon: - dest_uid, dest_pin = ( - dest_elem.get("UId"), - "value", - ) # Input to an Access node? Unlikely. Usually NameCon. Let's assume NameCon primarily for destination. - elif dest_elem.tag == qname_namecon: - dest_uid, dest_pin = dest_elem.get("UId"), dest_elem.get("Name") - - if dest_uid is not None and dest_pin is not None: - dest_key = (dest_uid, dest_pin) - # Check if dest_uid is an instruction or an access node - # This logic seems okay, maps source to destination key - if source_info not in wire_connections[dest_key]: - wire_connections[dest_key].append(source_info) - - # Build reverse map: source -> list of destinations - source_key = (source_uid, source_pin) - dest_info = (dest_uid, dest_pin) - if dest_info not in source_connections[source_key]: - source_connections[source_key].append(dest_info) - - # Track ENO outputs specifically - if source_pin == "eno" and source_uid in parts_and_calls_map: - if dest_info not in eno_outputs[source_uid]: - eno_outputs[source_uid].append(dest_info) - - # 3. 
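# Illustrative sketch (not part of the patch): the wire maps built above are
# keyed by (uid, pin-name) tuples. A hypothetical contact UID "21" feeding a
# coil UID "23" would be recorded (plain dicts here, defaultdicts in the code) as:
wire_connections = {("23", "in"): [("21", "out")]}     # destination -> sources
source_connections = {("21", "out"): [("23", "in")]}   # source -> destinations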
Build Initial Logic Structure - all_logic_steps = {} - SCL_SUFFIX = "_sympy_processed" # Define suffix - functional_block_types = [ - "Move", - "Add", - "Sub", - "Mul", - "Div", - "Mod", - "Convert", - "Call", - "Se", - "Sd", - "BLKMOV", - "TON", - "TOF", - "TP", - "CTU", - "CTD", - "CTUD", - ] - rlo_generators = [ - "Contact", - "O", - "Eq", - "Ne", - "Gt", - "Lt", - "Ge", - "Le", - "And", - "Xor", - "PBox", - "NBox", - "Not", - ] - - # --- CORRECCIÓN: Iterar sobre los UIDs que SÍ están en parts_and_calls_map --- - # --- Esto evita procesar UIDs de Calls que fallaron en parse_call --- - valid_instruction_uids = list(parts_and_calls_map.keys()) - - for instruction_uid in valid_instruction_uids: - instruction_info = parts_and_calls_map[instruction_uid] - # Make a deep copy to avoid modifying the original map entry - instruction_repr = copy.deepcopy(instruction_info) - instruction_repr["instruction_uid"] = instruction_uid # Ensure UID is present - instruction_repr["inputs"] = {} - instruction_repr["outputs"] = {} - - original_type = instruction_repr["type"] # Type from parse_part/parse_call - current_type = original_type - input_pin_mapping = {} - output_pin_mapping = {} - - # Base set of possible pins - can be expanded - possible_input_pins = set(["en", "in", "in1", "in2", "pre"]) - - # Dynamically add pins based on instruction type (simplified list) - if original_type in ["Contact", "Coil", "SCoil", "RCoil"]: - possible_input_pins.add("operand") - elif original_type in [ - "Add", - "Sub", - "Mul", - "Div", - "Mod", - "Eq", - "Ne", - "Gt", - "Lt", - "Ge", - "Le", - ]: - possible_input_pins.update(["in1", "in2"]) - elif original_type in ["TON", "TOF", "TP", "Se", "Sd", "SdCoil"]: - possible_input_pins.update( - ["s", "tv", "r", "timer", "pt", "value", "operand"] - ) - elif original_type in ["CTU", "CTD", "CTUD"]: - possible_input_pins.update(["cu", "cd", "r", "ld", "pv", "counter"]) - elif original_type in ["PBox", "NBox"]: - possible_input_pins.update(["bit", "clk"]) - elif original_type == "BLKMOV": - possible_input_pins.add("SRCBLK") - - # Special Handling for Call Parameters - elif original_type == "Call": - # Find the original XML element for this call using the correct namespace - call_xml_element_list = flgnet.xpath( - f".//flg:Call[@UId='{instruction_uid}']", namespaces=ns - ) - if call_xml_element_list: - call_xml_element = call_xml_element_list[0] - # --- USAR flg:CallInfo y flg:Parameter --- - call_info_node_list = call_xml_element.xpath( - "./flg:CallInfo", namespaces=ns - ) - if call_info_node_list: - call_info_node = call_info_node_list[0] - call_param_names = call_info_node.xpath( - "./flg:Parameter/@Name", namespaces=ns - ) - if call_param_names: - possible_input_pins.update(call_param_names) - # print(f"DEBUG Call UID={instruction_uid}: Found params: {call_param_names}. Possible pins now: {possible_input_pins}") - else: - print( - f"Advertencia: Call UID={instruction_uid}: No tags found under ." - ) - else: - # Try without namespace as fallback? Unlikely needed if flg is default - call_info_node_list_no_ns = call_xml_element.xpath("./CallInfo") - if call_info_node_list_no_ns: - print( - f"Advertencia: Call UID={instruction_uid}: Found WITHOUT namespace." - ) - call_param_names_no_ns = call_info_node_list_no_ns[0].xpath( - "./Parameter/@Name" - ) - if call_param_names_no_ns: - possible_input_pins.update(call_param_names_no_ns) - print( - f"DEBUG Call UID={instruction_uid} (no NS): Found params: {call_param_names_no_ns}. 
Possible pins now: {possible_input_pins}" - ) - else: - print( - f"Advertencia: Call UID={instruction_uid}: No tags found under (no NS)." - ) - - else: - print( - f"Error: Call UID={instruction_uid}: No (or CallInfo) element found." - ) - else: - print( - f"Error: No se pudo encontrar el elemento para UID={instruction_uid} en el XPath." - ) - - # Populate Inputs from Wire Connections - for pin_name in possible_input_pins: - dest_key = (instruction_uid, pin_name) - if dest_key in wire_connections: - sources_list = wire_connections[dest_key] - input_sources_repr = [] - # print(f"DEBUG Wire Input: Instr={instruction_uid}, Pin={pin_name}, Sources={sources_list}") # Debug - for source_uid, source_pin in sources_list: - if source_uid == "POWERRAIL": - input_sources_repr.append({"type": "powerrail"}) - elif source_uid in access_map: - input_sources_repr.append(copy.deepcopy(access_map[source_uid])) - elif ( - source_uid in parts_and_calls_map - ): # Check if source is a valid instruction - source_instr_info = parts_and_calls_map[source_uid] - input_sources_repr.append( - { - "type": "connection", - "source_instruction_type": source_instr_info["type"], - "source_instruction_uid": source_uid, - "source_pin": source_pin, # Use the actual source pin name - } - ) - else: - # Source UID not found in instructions or access nodes - print( - f"Advertencia: Fuente desconocida UID={source_uid} conectada a {instruction_uid}.{pin_name}" - ) - input_sources_repr.append( - {"type": "unknown_source", "uid": source_uid} - ) - - # Apply input pin mapping if needed (e.g. SdCoil) - json_pin_name = input_pin_mapping.get(pin_name, pin_name) - instruction_repr["inputs"][json_pin_name] = ( - input_sources_repr[0] - if len(input_sources_repr) == 1 - else input_sources_repr - ) - # print(f"DEBUG Populated Input: Instr={instruction_uid}, Pin={json_pin_name}, Value={instruction_repr['inputs'][json_pin_name]}") # Debug - - # Populate Outputs (Simplified - just record direct variable assignments) - possible_output_pins = set( - [ - "out", - "out1", - "Q", - "q", - "eno", - "RET_VAL", - "DSTBLK", - "rt", - "cv", - "QU", - "QD", - "ET", - ] - ) - if original_type == "BLKMOV": - possible_output_pins.add("DSTBLK") - - for pin_name in possible_output_pins: - source_key = (instruction_uid, pin_name) - if source_key in source_connections: - json_pin_name = output_pin_mapping.get(pin_name, pin_name) - if json_pin_name not in instruction_repr["outputs"]: - instruction_repr["outputs"][json_pin_name] = [] - for dest_uid, dest_pin in source_connections[source_key]: - if ( - dest_uid in access_map - ): # Only track connections to variables/constants - dest_operand_copy = copy.deepcopy(access_map[dest_uid]) - if ( - dest_operand_copy - not in instruction_repr["outputs"][json_pin_name] - ): - instruction_repr["outputs"][json_pin_name].append( - dest_operand_copy - ) - - all_logic_steps[instruction_uid] = instruction_repr - - # 4. 
EN Inference (Simplified logic as before) - # --- (Esta sección puede permanecer igual, opera sobre all_logic_steps) --- - processed_blocks_en_inference = set() - try: - sorted_uids_for_en = sorted( - all_logic_steps.keys(), - key=lambda x: int(x) if x.isdigit() else float("inf"), - ) - except ValueError: - sorted_uids_for_en = sorted(all_logic_steps.keys()) # Fallback sort - - ordered_logic_list_for_en = [ - all_logic_steps[uid] for uid in sorted_uids_for_en if uid in all_logic_steps - ] - - for i, instruction in enumerate(ordered_logic_list_for_en): - part_uid = instruction["instruction_uid"] - # Leer el tipo actual de la instrucción ya parseada - part_type_original = ( - instruction.get("type", "").replace(SCL_SUFFIX, "").replace("_error", "") - ) - # La lógica de inferencia EN no cambia - if ( - part_type_original in functional_block_types - and "en" not in instruction["inputs"] - and part_uid not in processed_blocks_en_inference - ): - inferred_en_source = None - if i > 0: - # Look backwards - for j in range(i - 1, -1, -1): - prev_instr = ordered_logic_list_for_en[j] - prev_uid = prev_instr["instruction_uid"] - prev_type_original = ( - prev_instr.get("type", "") - .replace(SCL_SUFFIX, "") - .replace("_error", "") - ) - if prev_type_original in rlo_generators: # Found RLO source - inferred_en_source = { - "type": "connection", - "source_instruction_uid": prev_uid, - "source_instruction_type": prev_type_original, - "source_pin": "out", - } - break - elif ( - prev_type_original in functional_block_types - ): # Found block with potential ENO - if (prev_uid, "eno") in source_connections: - inferred_en_source = { - "type": "connection", - "source_instruction_uid": prev_uid, - "source_instruction_type": prev_type_original, - "source_pin": "eno", - } - break # Stop searching - elif prev_type_original in [ - "Coil", - "SCoil", - "RCoil", - "SetCoil", - "ResetCoil", - "SdCoil", - ]: - break # Coils terminate flow - - if inferred_en_source is None: - inferred_en_source = {"type": "powerrail"} - - # Update the instruction in the main dictionary - if part_uid in all_logic_steps: - all_logic_steps[part_uid]["inputs"]["en"] = inferred_en_source - processed_blocks_en_inference.add(part_uid) - - # 5. ENO Logic (Simplified as before) - # --- (Esta sección puede permanecer igual) --- - for source_instr_uid, eno_destinations in eno_outputs.items(): - if source_instr_uid not in all_logic_steps: - continue - all_logic_steps[source_instr_uid]["eno_destinations"] = eno_destinations - - # 6. 
Order and Return - # --- (Esta sección puede permanecer igual) --- - final_logic_list = [ - all_logic_steps[uid] for uid in sorted_uids_for_en if uid in all_logic_steps - ] - network_lang = "Unknown" - if network_element is not None: - attr_list_net = network_element.xpath("./AttributeList") - if attr_list_net: - lang_node_net = attr_list_net[0].xpath("./ProgrammingLanguage/text()") - if lang_node_net: - network_lang = lang_node_net[0].strip() - - return { - "id": network_id, - "title": network_title, - "comment": network_comment, - "language": network_lang, - "logic": final_logic_list, - } - - -# --- Main Conversion Function (convert_xml_to_json) --- -# --- (Mantén la función convert_xml_to_json como antes, --- -# --- asegurándote de que llama a la versión actualizada de parse_network) --- -def convert_xml_to_json(xml_filepath, json_filepath): + except ImportError as e: + print(f"Error importando {full_module_name}: {e}") + except Exception as e: + print(f"Error procesando {full_module_name}: {e}") + traceback.print_exc() + + print(f"\nTotal de lenguajes con parser cargado: {len(parser_map)}") + print(f"Lenguajes soportados: {list(parser_map.keys())}") + return parser_map + +# --- Función Principal de Conversión (MODIFICADA) --- +def convert_xml_to_json(xml_filepath, json_filepath, parser_map): + """Convierte XML a JSON, detectando tipo de bloque (FC/FB/OB/DB/UDT/TagTable).""" print(f"Iniciando conversión de '{xml_filepath}' a '{json_filepath}'...") if not os.path.exists(xml_filepath): print(f"Error Crítico: Archivo XML no encontrado: '{xml_filepath}'") - return + return False # Indicar fallo + try: print("Paso 1: Parseando archivo XML...") - parser = etree.XMLParser(remove_blank_text=True) + parser = etree.XMLParser(remove_blank_text=True, recover=True) # recover=True puede ayudar tree = etree.parse(xml_filepath, parser) root = tree.getroot() print("Paso 1: Parseo XML completado.") - # --- Buscar bloque principal (FC, FB, GlobalDB, OB) --- - print( - "Paso 2: Buscando el bloque SW.Blocks.FC, SW.Blocks.FB, SW.Blocks.GlobalDB o SW.Blocks.OB..." - ) - block_list = root.xpath( - "//*[local-name()='SW.Blocks.FC' or local-name()='SW.Blocks.FB' or local-name()='SW.Blocks.GlobalDB' or local-name()='SW.Blocks.OB']" - ) + result = None # Inicializar resultado - block_type_found = None - the_block = None + # --- Detección del tipo de bloque/objeto principal --- + print("Paso 2: Detectando tipo de objeto principal...") - if block_list: - the_block = block_list[0] - block_tag_name = etree.QName(the_block.tag).localname - if block_tag_name == "SW.Blocks.FC": - block_type_found = "FC" - elif block_tag_name == "SW.Blocks.FB": - block_type_found = "FB" - elif block_tag_name == "SW.Blocks.GlobalDB": - block_type_found = "GlobalDB" - elif block_tag_name == "SW.Blocks.OB": - block_type_found = "OB" - print( - f"Paso 2: Bloque {block_tag_name} encontrado (ID={the_block.get('ID')})." + # Buscar UDT + udt_element = root.find(".//SW.Types.PlcStruct", namespaces=root.nsmap) + if udt_element is not None: + result = parse_udt(udt_element) + + # Buscar Tag Table si no es UDT + if result is None: + tag_table_element = root.find(".//SW.Tags.PlcTagTable", namespaces=root.nsmap) + if tag_table_element is not None: + result = parse_tag_table(tag_table_element) + + # Buscar bloque FC/FB/OB/GlobalDB si no es UDT ni Tag Table + if result is None: + print("Paso 2: No es UDT ni Tag Table. 
Buscando SW.Blocks.* ...") + # Usar local-name() para ignorar namespaces en esta búsqueda inicial + block_list = root.xpath( + "//*[local-name()='SW.Blocks.FC' or local-name()='SW.Blocks.FB' or local-name()='SW.Blocks.GlobalDB' or local-name()='SW.Blocks.OB']" ) - else: - print( - "Error Crítico: No se encontró el elemento raíz del bloque (, , o ) usando XPath." - ) - # ... (debug info) ... - return + # (Resto de la lógica de detección de bloques FC/FB/OB/DB como estaba antes...) + block_type_found = None + the_block = None - # --- Extraer atributos del bloque --- - print("Paso 3: Extrayendo atributos del bloque...") - # AttributeList no parece tener namespace en los ejemplos - attribute_list_node = the_block.xpath("./AttributeList") - block_name_val, block_number_val, block_lang_val = "Unknown", None, "Unknown" - if attribute_list_node: - attr_list = attribute_list_node[0] - # Name, Number, ProgrammingLanguage no parecen tener namespace - name_node = attr_list.xpath("./Name/text()") - block_name_val = name_node[0].strip() if name_node else block_name_val - num_node = attr_list.xpath("./Number/text()") - try: - block_number_val = int(num_node[0]) if num_node else None - except ValueError: - block_number_val = None - lang_node = attr_list.xpath("./ProgrammingLanguage/text()") - block_lang_val = ( - lang_node[0].strip() - if lang_node - else ("DB" if block_type_found == "GlobalDB" else "Unknown") - ) - print( - f"Paso 3: Atributos: Nombre='{block_name_val}', Número={block_number_val}, Lenguaje='{block_lang_val}'" - ) - else: - print( - f"Advertencia: No se encontró AttributeList para el bloque {block_type_found}." - ) - if block_type_found == "GlobalDB": - block_lang_val = "DB" - - # --- Extraer comentario del bloque --- - # ObjectList y MultilingualText no parecen tener namespace - block_comment_val = "" - comment_node_list = the_block.xpath( - "./ObjectList/MultilingualText[@CompositionName='Comment']" - ) - if comment_node_list: - block_comment_val = get_multilingual_text( - comment_node_list[0] - ) # Usa namespaces iface internamente - print(f"Paso 3b: Comentario bloque: '{block_comment_val[:50]}...'") - - # --- Crear diccionario resultado --- - result = { - "block_name": block_name_val, - "block_number": block_number_val, - "language": block_lang_val, - "block_type": block_type_found, - "block_comment": block_comment_val, - "interface": {}, - "networks": [], - } - - # --- Extraer interfaz --- - print("Paso 4: Extrayendo la interfaz del bloque...") - # Interface está dentro de AttributeList, no tiene namespace. Sections/Member sí usan iface. 
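# Illustrative sketch (not part of the patch): if an export declares a default
# namespace, a namespace-agnostic lookup that mirrors the local-name() XPath
# already used above for SW.Blocks.* could serve as a fallback for the
# UDT / tag-table detection in Paso 2:
udt_candidates = root.xpath("//*[local-name()='SW.Types.PlcStruct']")
udt_element = udt_candidates[0] if udt_candidates else None
tag_table_candidates = root.xpath("//*[local-name()='SW.Tags.PlcTagTable']")
tag_table_element = tag_table_candidates[0] if tag_table_candidates else None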
- interface_node_list = ( - attribute_list_node[0].xpath("./Interface") if attribute_list_node else [] - ) - - if interface_node_list: - interface_node = interface_node_list[0] - print("Paso 4: Nodo Interface encontrado.") - # Sections/Section/Member usan namespace iface - all_sections = interface_node.xpath(".//iface:Section", namespaces=ns) - if all_sections: - for section in all_sections: - section_name = section.get("Name") - if not section_name: - continue - members_in_section = section.xpath("./iface:Member", namespaces=ns) - if members_in_section: - result["interface"][section_name] = parse_interface_members( - members_in_section - ) + if block_list: + the_block = block_list[0] + block_tag_name = etree.QName(the_block.tag).localname + if block_tag_name == "SW.Blocks.FC": block_type_found = "FC" + elif block_tag_name == "SW.Blocks.FB": block_type_found = "FB" + elif block_tag_name == "SW.Blocks.GlobalDB": block_type_found = "GlobalDB" + elif block_tag_name == "SW.Blocks.OB": block_type_found = "OB" + print(f"Paso 2b: Bloque {block_tag_name} (Tipo: {block_type_found}) encontrado (ID={the_block.get('ID')}).") else: - print( - "Advertencia: Nodo Interface no contiene secciones ." - ) + print("Error Crítico: No se encontró el elemento raíz del bloque () ni UDT ni Tag Table.") + return False # Fallo si no se encuentra ningún objeto principal - if not result["interface"]: - print( - "Advertencia: Interface encontrada pero sin secciones procesables." - ) - else: # Manejo especial para DB si no hay - if block_type_found == "GlobalDB": - static_members = the_block.xpath( - ".//iface:Section[@Name='Static']/iface:Member", namespaces=ns - ) - if static_members: - print("Paso 4: Encontrada sección Static para GlobalDB.") - result["interface"]["Static"] = parse_interface_members( - static_members - ) + # --- Si es FC/FB/OB/DB, continuar con el parseo original --- + if the_block is not None: + print("Paso 3: Extrayendo atributos del bloque...") + # (Extracción de atributos Name, Number, Language como antes...) + attribute_list_node = the_block.xpath("./AttributeList") + block_name_val, block_number_val, block_lang_val = "Unknown", None, "Unknown" + if attribute_list_node: + attr_list = attribute_list_node[0] + name_node = attr_list.xpath("./Name/text()") + block_name_val = name_node[0].strip() if name_node else block_name_val + num_node = attr_list.xpath("./Number/text()") + try: block_number_val = int(num_node[0]) if num_node else None + except (ValueError, TypeError): block_number_val = None + lang_node = attr_list.xpath("./ProgrammingLanguage/text()") + block_lang_val = (lang_node[0].strip() if lang_node else ("DB" if block_type_found == "GlobalDB" else "Unknown")) + print(f"Paso 3: Atributos: Nombre='{block_name_val}', Número={block_number_val}, Lenguaje Bloque='{block_lang_val}'") else: - print("Advertencia: No se encontró sección 'Static' para GlobalDB.") - else: - print( - f"Advertencia: No se encontró para bloque {block_type_found}." - ) + print(f"Advertencia: No se encontró AttributeList para el bloque {block_type_found}.") + if block_type_found == "GlobalDB": block_lang_val = "DB" - if not result["interface"]: - print("Advertencia: No se pudo extraer información de la interfaz.") + # (Extracción de comentario como antes...) 
+ block_comment_val = "" + comment_node_list = the_block.xpath("./ObjectList/MultilingualText[@CompositionName='Comment']") + if comment_node_list: block_comment_val = get_multilingual_text(comment_node_list[0]) + else: # Fallback + comment_attr_node = the_block.xpath("./AttributeList/Comment") # Buscar desde AttributeList + if comment_attr_node : block_comment_val = get_multilingual_text(comment_attr_node[0]) - # --- Procesar redes (CompileUnits) --- - print("Paso 5: Extrayendo y PROCESANDO lógica de redes (CompileUnits)...") - networks_processed_count = 0 - result["networks"] = [] - # ObjectList no parece tener namespace, SW.Blocks.CompileUnit tampoco - object_list_node = the_block.xpath("./ObjectList") + print(f"Paso 3b: Comentario bloque: '{block_comment_val[:50]}...'") - if object_list_node: - compile_units = object_list_node[0].xpath("./SW.Blocks.CompileUnit") - print( - f"Paso 5: Se encontraron {len(compile_units)} elementos SW.Blocks.CompileUnit." - ) + # Crear diccionario resultado + result = { + "block_name": block_name_val, + "block_number": block_number_val, + "language": block_lang_val, + "block_type": block_type_found, + "block_comment": block_comment_val, + "interface": {}, + "networks": [], # Inicializar networks aquí + } - for network_elem in compile_units: - networks_processed_count += 1 - network_id = network_elem.get("ID") - if not network_id: - continue + # (Extracción de interfaz como antes...) + print("Paso 4: Extrayendo la interfaz del bloque...") + interface_node_list = attribute_list_node[0].xpath("./Interface") if attribute_list_node else [] + if interface_node_list: + interface_node = interface_node_list[0] + all_sections = interface_node.xpath(".//iface:Section", namespaces=ns) + if all_sections: + processed_sections = set() + for section in all_sections: + section_name = section.get("Name") + if not section_name or section_name in processed_sections: continue + members_in_section = section.xpath("./iface:Member", namespaces=ns) + if members_in_section: + result["interface"][section_name] = parse_interface_members(members_in_section) + processed_sections.add(section_name) + else: print("Advertencia: Nodo Interface no contiene secciones .") + if not result["interface"]: print("Advertencia: Interface encontrada pero sin secciones procesables.") + elif block_type_found == "GlobalDB": + static_members = the_block.xpath(".//iface:Section[@Name='Static']/iface:Member", namespaces=ns) + if static_members: + print("Paso 4: Encontrada sección Static para GlobalDB (sin nodo Interface).") + result["interface"]["Static"] = parse_interface_members(static_members) + else: print("Advertencia: No se encontró sección 'Static' para GlobalDB.") + else: print(f"Advertencia: No se encontró para bloque {block_type_found}.") + if not result["interface"]: print("Advertencia: No se pudo extraer información de la interfaz.") - # Detectar lenguaje de la red (AttributeList/ProgrammingLanguage sin namespace) - attribute_list = network_elem.xpath("./AttributeList") - programming_language = "LAD" # Default - if attribute_list: - lang_node = attribute_list[0].xpath("./ProgrammingLanguage/text()") - if lang_node: - programming_language = lang_node[0].strip() + # (Procesamiento de redes como antes, SOLO si NO es GlobalDB) + if block_type_found != "GlobalDB": + print("Paso 5: Buscando y PROCESANDO redes (CompileUnits)...") + networks_processed_count = 0 + result["networks"] = [] + object_list_node = the_block.xpath("./ObjectList") + if object_list_node: + compile_units = 
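# Illustrative sketch (not part of the patch): shape of one entry inside
# result["interface"][section_name] built in Paso 4 above, assuming the
# parser_utils.py version of parse_interface_members() keeps the same member
# fields as the implementation removed from this file.
example_member = {
    "name": "Counter",            # hypothetical member
    "datatype": "Int",
    "remanence": "NonRetain",
    "accessibility": "Public",
    "start_value": "0",
    "comment": None,
    "children": [],               # nested struct members, parsed recursively
    "array_elements": {},         # "index" -> initial value for Array[...] members
}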
object_list_node[0].xpath("./SW.Blocks.CompileUnit") + print(f"Paso 5: Se encontraron {len(compile_units)} elementos SW.Blocks.CompileUnit.") - print( - f" - Procesando Red ID={network_id}, Lenguaje={programming_language}" - ) + # Bucle de parseo de redes (igual que antes) + for network_elem in compile_units: + networks_processed_count += 1 + network_id = network_elem.get("ID") + if not network_id: continue + network_lang = "LAD" + net_attr_list = network_elem.xpath("./AttributeList") + if net_attr_list: + lang_node = net_attr_list[0].xpath("./ProgrammingLanguage/text()") + if lang_node: network_lang = lang_node[0].strip() + print(f" - Procesando Red ID={network_id}, Lenguaje Red={network_lang}") + parser_func = parser_map.get(network_lang.upper()) + parsed_network_data = None + if parser_func: + try: + parsed_network_data = parser_func(network_elem) + except Exception as e_parse: + print(f" ERROR durante el parseo de Red {network_id} ({network_lang}): {e_parse}") + traceback.print_exc() + parsed_network_data = {"id": network_id, "language": network_lang, "logic": [], "error": f"Parser failed: {e_parse}"} + else: + print(f" Advertencia: Lenguaje de red '{network_lang}' no soportado.") + parsed_network_data = {"id": network_id, "language": network_lang, "logic": [], "error": f"Unsupported language: {network_lang}"} - # Procesar según lenguaje - parsed_network_data = None - if programming_language in ["LAD", "FBD", "GRAPH"]: - # Llamar a parse_network (que ahora maneja errores de FlgNet) - parsed_network_data = parse_network(network_elem) - if parsed_network_data and not parsed_network_data.get("error"): - parsed_network_data["language"] = programming_language - elif parsed_network_data and parsed_network_data.get("error"): - print( - f" Error parseando Red {network_id}: {parsed_network_data['error']}" - ) - # Mantener la red con el error para x2/x3 - parsed_network_data["language"] = ( - programming_language # Asegurar lenguaje - ) - else: # parse_network devolvió None (error inesperado) - print( - f" Error fatal: parse_network devolvió None para Red {network_id}" - ) - parsed_network_data = { - "id": network_id, - "language": programming_language, - "logic": [], - "error": "parse_network failed", - } + if parsed_network_data: + title_element = network_elem.xpath(".//iface:MultilingualText[@CompositionName='Title']",namespaces=ns) + parsed_network_data["title"] = (get_multilingual_text(title_element[0]) if title_element else f"Network {network_id}") + comment_elem_net = network_elem.xpath("./ObjectList/MultilingualText[@CompositionName='Comment']", namespaces=ns) + if not comment_elem_net: comment_elem_net = network_elem.xpath(".//MultilingualText[@CompositionName='Comment']", namespaces=ns) # Fallback + parsed_network_data["comment"] = (get_multilingual_text(comment_elem_net[0]) if comment_elem_net else "") + result["networks"].append(parsed_network_data) - elif programming_language == "SCL": - network_source_node = network_elem.xpath( - ".//flg:NetworkSource", namespaces=ns - ) # NetworkSource sí usa flg - structured_text_node = ( - network_source_node[0].xpath( - "./st:StructuredText", namespaces=ns - ) - if network_source_node - else None - ) - reconstructed_scl = f"// SCL extraction failed: Node not found.\n" - if structured_text_node: - reconstructed_scl = reconstruct_scl_from_tokens( - structured_text_node[0] - ) - parsed_network_data = { - "id": network_id, - "language": "SCL", - "logic": [ - { - "instruction_uid": f"SCL_{network_id}", - "type": "RAW_SCL_CHUNK", - "scl": 
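# Illustrative sketch (not part of the patch): the per-network dispatch above,
# restated as a standalone helper to make the degradation path explicit --
# unsupported languages and parser exceptions both yield a network entry that
# still carries an "error" key for the later stages to report.
def dispatch_network(network_elem, network_id, network_lang, parser_map):
    parser_func = parser_map.get(network_lang.upper())
    if parser_func is None:
        return {"id": network_id, "language": network_lang, "logic": [],
                "error": f"Unsupported language: {network_lang}"}
    try:
        return parser_func(network_elem)
    except Exception as exc:
        return {"id": network_id, "language": network_lang, "logic": [],
                "error": f"Parser failed: {exc}"}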
reconstructed_scl, - } - ], - } + if networks_processed_count == 0: print(f"Advertencia: ObjectList para {block_type_found} sin SW.Blocks.CompileUnit.") + else: print(f"Advertencia: No se encontró ObjectList para el bloque {block_type_found}.") + else: # Es GlobalDB + print("Paso 5: Saltando procesamiento de redes para GlobalDB.") - elif programming_language == "STL": - network_source_node = network_elem.xpath( - ".//flg:NetworkSource", namespaces=ns - ) - statement_list_node = ( - network_source_node[0].xpath( - "./stl:StatementList", namespaces=ns - ) - if network_source_node - else None - ) - reconstructed_stl = f"// STL extraction failed: Node not found.\n" - if statement_list_node: - reconstructed_stl = reconstruct_stl_from_statementlist( - statement_list_node[0] - ) - parsed_network_data = { - "id": network_id, - "language": "STL", - "logic": [ - { - "instruction_uid": f"STL_{network_id}", - "type": "RAW_STL_CHUNK", - "stl": reconstructed_stl, - } - ], - } - else: # Lenguaje no soportado - parsed_network_data = { - "id": network_id, - "language": programming_language, - "logic": [ - { - "instruction_uid": f"UNS_{network_id}", - "type": "UNSUPPORTED_LANG", - "info": f"Language {programming_language} not supported", - } - ], - "error": "Unsupported language", - } + # --- Escritura del JSON (si se encontró un objeto) --- + if result: + print("Paso 6: Escribiendo el resultado en el archivo JSON...") + # Validaciones finales + if result.get("block_type") not in ["PlcUDT", "PlcTagTable"] and not result["interface"]: + print("ADVERTENCIA FINAL: 'interface' está vacía en el JSON.") + if result.get("block_type") not in ["PlcUDT", "PlcTagTable", "GlobalDB"] and not result["networks"]: + print("ADVERTENCIA FINAL: 'networks' está vacía en el JSON.") - # Añadir título y comentario a la red parseada - if parsed_network_data: - title_element = network_elem.xpath( - ".//iface:MultilingualText[@CompositionName='Title']", - namespaces=ns, - ) - parsed_network_data["title"] = ( - get_multilingual_text(title_element[0]) - if title_element - else f"Network {network_id}" - ) - comment_element = network_elem.xpath( - "./ObjectList/MultilingualText[@CompositionName='Comment']", - namespaces=ns, - ) # Path relativo a CompileUnit - parsed_network_data["comment"] = ( - get_multilingual_text(comment_element[0]) - if comment_element - else "" - ) - result["networks"].append(parsed_network_data) + try: + with open(json_filepath, "w", encoding="utf-8") as f: + json.dump(result, f, indent=4, ensure_ascii=False) + print("Paso 6: Escritura JSON completada.") + print(f"Conversión finalizada. JSON guardado en: '{os.path.relpath(json_filepath)}'") + return True # Indicar éxito - if networks_processed_count == 0 and block_type_found != "GlobalDB": - print( - f"Advertencia: ObjectList para {block_type_found} sin SW.Blocks.CompileUnit." - ) - elif block_type_found == "GlobalDB": - print("Paso 5: Saltando búsqueda de CompileUnits para GlobalDB.") + except IOError as e: print(f"Error Crítico: No se pudo escribir JSON en '{json_filepath}'. Error: {e}"); return False + except TypeError as e: print(f"Error Crítico: Problema al serializar a JSON. Error: {e}"); return False else: - print( - f"Advertencia: No se encontró ObjectList para el bloque {block_type_found}." - ) + print("Error Crítico: No se pudo determinar el tipo de objeto principal en el XML.") + return False - # --- Escribir JSON --- - print("Paso 6: Escribiendo el resultado en el archivo JSON...") - # ... (resto del código de escritura y manejo de errores igual) ... 
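# Illustrative sketch (not part of the patch): programmatic use of the new x1
# entry points, equivalent to what the __main__ block below does; the file
# names are hypothetical.
parsers = load_parsers()                                  # language -> parser function
ok = convert_xml_to_json("FC_Example.xml",                # hypothetical input export
                         "FC_Example_simplified.json",    # derived output path
                         parsers)
if not ok:
    raise SystemExit(1)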
- if not result["interface"]: - print("ADVERTENCIA FINAL: 'interface' está vacía.") - if not result["networks"] and block_type_found != "GlobalDB": - print("ADVERTENCIA FINAL: 'networks' está vacía.") - try: - with open(json_filepath, "w", encoding="utf-8") as f: - json.dump(result, f, indent=4, ensure_ascii=False) - print("Paso 6: Escritura completada.") - print(f"Conversión finalizada. JSON guardado en: '{json_filepath}'") - except IOError as e: - print( - f"Error Crítico: No se pudo escribir JSON en '{json_filepath}'. Error: {e}" - ) - except TypeError as e: - print(f"Error Crítico: Problema al serializar a JSON. Error: {e}") - # ... (debug de serialización) ... except etree.XMLSyntaxError as e: - print( - f"Error Crítico: Sintaxis XML inválida en '{xml_filepath}'. Detalles: {e}" - ) + print(f"Error Crítico: Sintaxis XML inválida en '{xml_filepath}'. Detalles: {e}") + return False # Indicar fallo except Exception as e: print(f"Error Crítico: Error inesperado durante la conversión: {e}") traceback.print_exc() + return False # Indicar fallo - +# --- Punto de Entrada Principal (__main__) --- if __name__ == "__main__": - # --- (La sección __main__ permanece igual que en la respuesta anterior) --- - import argparse - import os - import sys - import traceback - parser = argparse.ArgumentParser( - description="Convert Simatic XML (LAD/FBD/SCL/STL/OB/DB) to simplified JSON. Expects XML filepath as argument." + description="Convert Simatic XML (FC/FB/OB/DB/UDT/TagTable) to simplified JSON using dynamic parsers." # Actualizado ) parser.add_argument( "xml_filepath", @@ -1625,28 +426,30 @@ if __name__ == "__main__": xml_input_file = args.xml_filepath if not os.path.exists(xml_input_file): - print( - f"Error Crítico (x1): Archivo XML no encontrado: '{xml_input_file}'", - file=sys.stderr, - ) + print(f"Error Crítico (x1): Archivo XML no encontrado: '{xml_input_file}'", file=sys.stderr) sys.exit(1) + # --- Cargar Parsers Dinámicamente --- + loaded_parsers = load_parsers() # Carga parsers LAD/FBD/STL/SCL + if not loaded_parsers: + # Continuar incluso sin parsers de red, ya que podríamos estar parseando UDT/TagTable + print("Advertencia (x1): No se cargaron parsers de red. 
Se continuará para UDT/TagTable/DB.") + #sys.exit(1) # Ya no salimos si no hay parsers de red + + # Derivar nombre de salida JSON xml_filename_base = os.path.splitext(os.path.basename(xml_input_file))[0] output_dir = os.path.dirname(xml_input_file) os.makedirs(output_dir, exist_ok=True) json_output_file = os.path.join(output_dir, f"{xml_filename_base}_simplified.json") - print( - f"(x1) Convirtiendo: '{os.path.relpath(xml_input_file)}' -> '{os.path.relpath(json_output_file)}'" - ) + print(f"(x1) Convirtiendo: '{os.path.relpath(xml_input_file)}' -> '{os.path.relpath(json_output_file)}'") - try: - convert_xml_to_json(xml_input_file, json_output_file) - sys.exit(0) # Éxito - except Exception as e: - print( - f"Error Crítico (x1) durante la conversión de '{xml_input_file}': {e}", - file=sys.stderr, - ) - traceback.print_exc(file=sys.stderr) - sys.exit(1) # Fallo + # Llamar a la función de conversión principal + success = convert_xml_to_json(xml_input_file, json_output_file, loaded_parsers) + + # Salir con código de error apropiado + if success: + sys.exit(0) # Éxito + else: + print(f"\nError durante la conversión de '{os.path.relpath(xml_input_file)}'.", file=sys.stderr) + sys.exit(1) # Fallo \ No newline at end of file diff --git a/x2_process.py b/x2_process.py index 748b417..19d5202 100644 --- a/x2_process.py +++ b/x2_process.py @@ -7,25 +7,25 @@ import traceback import re import importlib import sys -import sympy # Import sympy +import sympy # Import sympy # Import necessary components from processors directory from processors.processor_utils import ( - format_variable_name, # Keep if used outside processors - sympy_expr_to_scl, # Needed for IF grouping and maybe others + format_variable_name, # Keep if used outside processors + sympy_expr_to_scl, # Needed for IF grouping and maybe others # get_target_scl_name might be used here? Unlikely. ) -from processors.symbol_manager import SymbolManager # Import the manager +from processors.symbol_manager import SymbolManager # Import the manager # --- Constantes y Configuración --- -SCL_SUFFIX = "_sympy_processed" # New suffix to indicate processing method +SCL_SUFFIX = "_sympy_processed" GROUPED_COMMENT = "// Logic included in grouped IF" -SIMPLIFIED_IF_COMMENT = "// Simplified IF condition by script" # May still be useful +SIMPLIFIED_IF_COMMENT = "// Simplified IF condition by script" # Global data dictionary data = {} -# --- (Incluye aquí las funciones process_group_ifs y load_processors SIN CAMBIOS) --- +# --- (process_group_ifs y load_processors SIN CAMBIOS) --- def process_group_ifs(instruction, network_id, sympy_map, symbol_manager, data): """ Busca condiciones (ya procesadas -> tienen expr SymPy en sympy_map) @@ -203,19 +203,18 @@ def process_group_ifs(instruction, network_id, sympy_map, symbol_manager, data): return made_change - def load_processors(processors_dir="processors"): """ Escanea el directorio, importa módulos, construye el mapa y una lista ordenada por prioridad. 
""" processor_map = {} - processor_list_unsorted = [] # Lista para guardar (priority, type_name, func) - default_priority = 10 # Prioridad si no se define en get_processor_info + processor_list_unsorted = [] # Lista para guardar (priority, type_name, func) + default_priority = 10 # Prioridad si no se define en get_processor_info if not os.path.isdir(processors_dir): print(f"Error: Directorio de procesadores no encontrado: '{processors_dir}'") - return processor_map, [] # Devuelve mapa vacío y lista vacía + return processor_map, [] # Devuelve mapa vacío y lista vacía print(f"Cargando procesadores desde: '{processors_dir}'") processors_package = os.path.basename(processors_dir) @@ -300,10 +299,11 @@ def load_processors(processors_dir="processors"): # Devolver el mapa (para lookup rápido si es necesario) y la lista ordenada return processor_map, processor_list_sorted -# --- Bucle Principal de Procesamiento (Modificado para STL y tipo de bloque) --- + +# --- Bucle Principal de Procesamiento (MODIFICADO) --- def process_json_to_scl(json_filepath): """ - Lee JSON simplificado, aplica procesadores dinámicos (ignorando redes STL y bloques DB), + Lee JSON simplificado, aplica procesadores dinámicos (ignorando STL, UDT, TagTable, DB), y guarda JSON procesado. """ global data @@ -320,32 +320,30 @@ def process_json_to_scl(json_filepath): traceback.print_exc() return - # --- MODIFICADO: Obtener tipo de bloque (FC, FB, GlobalDB, OB) --- - block_type = data.get("block_type", "Unknown") # FC, FB, GlobalDB, OB - print(f"Procesando bloque tipo: {block_type}, Lenguaje principal: {data.get('language', 'Unknown')}") + # --- MODIFICADO: Obtener tipo de bloque (FC, FB, GlobalDB, OB, PlcUDT, PlcTagTable) --- + block_type = data.get("block_type", "Unknown") + print(f"Procesando bloque tipo: {block_type}") - # --- MODIFICADO: SI ES UN GlobalDB, SALTAR EL PROCESAMIENTO LÓGICO --- - if block_type == "GlobalDB": # <-- Comprobar tipo de bloque - print( - "INFO: El bloque es un Data Block (GlobalDB). Saltando procesamiento lógico de x2." - ) - # Simplemente guardamos una copia (o el mismo archivo si no se requiere sufijo) + # --- MODIFICADO: SALTAR PROCESAMIENTO PARA DB, UDT, TAG TABLE --- + if block_type in ["GlobalDB", "PlcUDT", "PlcTagTable"]: # <-- Comprobar tipos a saltar + print(f"INFO: El bloque es {block_type}. Saltando procesamiento lógico de x2.") output_filename = json_filepath.replace( "_simplified.json", "_simplified_processed.json" ) - print(f"Guardando JSON de DB (sin cambios lógicos) en: {output_filename}") + print(f"Guardando JSON de {block_type} (sin cambios lógicos) en: {output_filename}") try: with open(output_filename, "w", encoding="utf-8") as f: json.dump(data, f, indent=4, ensure_ascii=False) - print("Guardado de DB completado.") + print(f"Guardado de {block_type} completado.") except Exception as e: - print(f"Error Crítico al guardar JSON del DB: {e}") + print(f"Error Crítico al guardar JSON de {block_type}: {e}") traceback.print_exc() - return # <<< SALIR TEMPRANO PARA DBs + return # <<< SALIR TEMPRANO PARA DB/UDT/TAG TABLE - # --- SI NO ES DB (FC, FB, OB), CONTINUAR CON EL PROCESAMIENTO LÓGICO --- - print(f"INFO: El bloque es {block_type}. Iniciando procesamiento lógico...") # <-- Mensaje actualizado + # --- SI NO ES DB/UDT/TAG TABLE (FC, FB, OB), CONTINUAR CON EL PROCESAMIENTO LÓGICO --- + print(f"INFO: El bloque es {block_type}. 
Iniciando procesamiento lógico...") + # (Carga de procesadores y mapas de acceso SIN CAMBIOS) script_dir = os.path.dirname(__file__) processors_dir_path = os.path.join(script_dir, "processors") processor_map, sorted_processors = load_processors(processors_dir_path) @@ -354,42 +352,30 @@ def process_json_to_scl(json_filepath): return network_access_maps = {} - # Crear mapas de acceso por red (copiado/adaptado de versión anterior) for network in data.get("networks", []): net_id = network["id"] current_access_map = {} for instr in network.get("logic", []): for _, source in instr.get("inputs", {}).items(): - sources_to_check = ( - source - if isinstance(source, list) - else ([source] if isinstance(source, dict) else []) - ) + sources_to_check = (source if isinstance(source, list) else ([source] if isinstance(source, dict) else [])) for src in sources_to_check: - if ( - isinstance(src, dict) - and src.get("uid") - and src.get("type") in ["variable", "constant"] - ): + if (isinstance(src, dict) and src.get("uid") and src.get("type") in ["variable", "constant"]): current_access_map[src["uid"]] = src for _, dest_list in instr.get("outputs", {}).items(): if isinstance(dest_list, list): for dest in dest_list: - if ( - isinstance(dest, dict) - and dest.get("uid") - and dest.get("type") in ["variable", "constant"] - ): + if (isinstance(dest, dict) and dest.get("uid") and dest.get("type") in ["variable", "constant"]): current_access_map[dest["uid"]] = dest network_access_maps[net_id] = current_access_map + # (Inicialización de SymbolManager y bucle iterativo SIN CAMBIOS) symbol_manager = SymbolManager() sympy_map = {} max_passes = 30 passes = 0 processing_complete = False - print(f"\n--- Iniciando Bucle de Procesamiento Iterativo ({block_type}) ---") # <-- Mensaje actualizado + print(f"\n--- Iniciando Bucle de Procesamiento Iterativo ({block_type}) ---") while passes < max_passes and not processing_complete: passes += 1 made_change_in_base_pass = False @@ -398,246 +384,140 @@ def process_json_to_scl(json_filepath): num_sympy_processed_this_pass = 0 num_grouped_this_pass = 0 - # --- FASE 1: Procesadores Base (Ignorando STL) --- + # FASE 1: Procesadores Base (Ignorando STL) print(f" Fase 1 (SymPy Base - Orden por Prioridad):") - num_sympy_processed_this_pass = 0 # Resetear contador para el pase + num_sympy_processed_this_pass = 0 for processor_info in sorted_processors: current_type_name = processor_info["type_name"] func_to_call = processor_info["func"] for network in data.get("networks", []): network_id = network["id"] - network_lang = network.get("language", "LAD") # Lenguaje de la red - if network_lang == "STL": # Saltar redes STL - continue + network_lang = network.get("language", "LAD") + if network_lang == "STL": continue access_map = network_access_maps.get(network_id, {}) network_logic = network.get("logic", []) for instruction in network_logic: instr_uid = instruction.get("instruction_uid") - # Usar el tipo *actual* de la instrucción para el lookup instr_type_current = instruction.get("type", "Unknown") - # Saltar si ya está procesado, es error, agrupado, o tipo crudo - if ( - instr_type_current.endswith(SCL_SUFFIX) - or "_error" in instr_type_current - or instruction.get("grouped", False) - or instr_type_current - in ["RAW_STL_CHUNK", "RAW_SCL_CHUNK", "UNSUPPORTED_LANG", "UNSUPPORTED_CONTENT", "PARSING_ERROR"] - ): + if (instr_type_current.endswith(SCL_SUFFIX) or "_error" in instr_type_current or instruction.get("grouped", False) or + instr_type_current in ["RAW_STL_CHUNK", "RAW_SCL_CHUNK", 
"UNSUPPORTED_LANG", "UNSUPPORTED_CONTENT", "PARSING_ERROR"]): continue - # El lookup usa el tipo actual (que aún no tiene el sufijo) lookup_key = instr_type_current.lower() effective_type_name = lookup_key - - # Mapeo especial para llamadas FC/FB if instr_type_current == "Call": call_block_type = instruction.get("block_type", "").upper() - if call_block_type == "FC": - effective_type_name = "call_fc" - elif call_block_type == "FB": - effective_type_name = "call_fb" - # Añadir otros tipos de llamada si es necesario + if call_block_type == "FC": effective_type_name = "call_fc" + elif call_block_type == "FB": effective_type_name = "call_fb" - # Si el tipo efectivo coincide con el procesador actual if effective_type_name == current_type_name: try: - # Pasar 'data' a la función del procesador - changed = func_to_call( - instruction, network_id, sympy_map, symbol_manager, data - ) + changed = func_to_call(instruction, network_id, sympy_map, symbol_manager, data) if changed: made_change_in_base_pass = True num_sympy_processed_this_pass += 1 except Exception as e: - print( - f"ERROR(SymPy Base) al procesar {instr_type_current} UID {instr_uid}: {e}" - ) + print(f"ERROR(SymPy Base) al procesar {instr_type_current} UID {instr_uid}: {e}") traceback.print_exc() - instruction["scl"] = ( - f"// ERROR en SymPy procesador base: {e}" - ) - # Añadir sufijo de error al tipo actual + instruction["scl"] = f"// ERROR en SymPy procesador base: {e}" instruction["type"] = instr_type_current + "_error" - made_change_in_base_pass = True # Se hizo un cambio (marcar como error) - print( - f" -> {num_sympy_processed_this_pass} instrucciones (no STL) procesadas con SymPy." - ) + made_change_in_base_pass = True + print(f" -> {num_sympy_processed_this_pass} instrucciones (no STL) procesadas con SymPy.") - - # --- FASE 2: Agrupación IF (Ignorando STL) --- - if ( - made_change_in_base_pass or passes == 1 - ): # Ejecutar siempre en el primer pase o si hubo cambios + # FASE 2: Agrupación IF (Ignorando STL) + if made_change_in_base_pass or passes == 1: print(f" Fase 2 (Agrupación IF con Simplificación):") - num_grouped_this_pass = 0 # Resetear contador para el pase + num_grouped_this_pass = 0 for network in data.get("networks", []): network_id = network["id"] network_lang = network.get("language", "LAD") - if network_lang == "STL": - continue # Saltar STL + if network_lang == "STL": continue network_logic = network.get("logic", []) - # Iterar en orden por UID puede ser más estable para agrupación uids_in_network = sorted([instr.get("instruction_uid", "Z") for instr in network_logic if instr.get("instruction_uid")]) for uid_to_process in uids_in_network: instruction = next((instr for instr in network_logic if instr.get("instruction_uid") == uid_to_process), None) if not instruction: continue - - # Saltar si ya está agrupada, es error, etc. 
- if instruction.get("grouped") or "_error" in instruction.get("type", ""): - continue - # La agrupación sólo aplica a instrucciones que generan condiciones booleanas - # y que ya fueron procesadas (tienen el sufijo) + if instruction.get("grouped") or "_error" in instruction.get("type", ""): continue if instruction.get("type", "").endswith(SCL_SUFFIX): try: - group_changed = process_group_ifs( - instruction, network_id, sympy_map, symbol_manager, data - ) + group_changed = process_group_ifs(instruction, network_id, sympy_map, symbol_manager, data) if group_changed: made_change_in_group_pass = True num_grouped_this_pass += 1 except Exception as e: - print( - f"ERROR(GroupLoop) al intentar agrupar desde UID {instruction.get('instruction_uid')}: {e}" - ) + print(f"ERROR(GroupLoop) al intentar agrupar desde UID {instruction.get('instruction_uid')}: {e}") traceback.print_exc() - print( - f" -> {num_grouped_this_pass} agrupaciones realizadas (en redes no STL)." - ) + print(f" -> {num_grouped_this_pass} agrupaciones realizadas (en redes no STL).") - # --- Comprobar si se completó el procesamiento --- + # Comprobar si se completó if not made_change_in_base_pass and not made_change_in_group_pass: - print( - f"\n--- No se hicieron más cambios en el pase {passes}. Proceso iterativo completado. ---" - ) + print(f"\n--- No se hicieron más cambios en el pase {passes}. Proceso iterativo completado. ---") processing_complete = True else: - print( - f"--- Fin Pase {passes}: {num_sympy_processed_this_pass} proc SymPy, {num_grouped_this_pass} agrup. Continuando..." - ) - - # --- Comprobar límite de pases --- + print(f"--- Fin Pase {passes}: {num_sympy_processed_this_pass} proc SymPy, {num_grouped_this_pass} agrup. Continuando...") if passes == max_passes and not processing_complete: print(f"\n--- ADVERTENCIA: Límite de {max_passes} pases alcanzado...") # --- FIN BUCLE ITERATIVO --- - # --- Verificación Final (Ajustada para RAW_STL_CHUNK) --- - print(f"\n--- Verificación Final de Instrucciones No Procesadas ({block_type}) ---") # <-- Mensaje actualizado + # (Verificación Final y Guardado JSON SIN CAMBIOS) + print(f"\n--- Verificación Final de Instrucciones No Procesadas ({block_type}) ---") unprocessed_count = 0 unprocessed_details = [] - ignored_types = [ - "raw_scl_chunk", - "unsupported_lang", - "raw_stl_chunk", - "unsupported_content", # Añadido de x1 - "parsing_error", # Añadido de x1 - ] + ignored_types = ["raw_scl_chunk", "unsupported_lang", "raw_stl_chunk", "unsupported_content", "parsing_error"] for network in data.get("networks", []): network_id = network.get("id", "Unknown ID") network_title = network.get("title", f"Network {network_id}") network_lang = network.get("language", "LAD") - if network_lang == "STL": - continue # No verificar redes STL + if network_lang == "STL": continue for instruction in network.get("logic", []): instr_uid = instruction.get("instruction_uid", "Unknown UID") instr_type = instruction.get("type", "Unknown Type") is_grouped = instruction.get("grouped", False) - if ( - not instr_type.endswith(SCL_SUFFIX) - and "_error" not in instr_type - and not is_grouped - and instr_type.lower() not in ignored_types - ): + if (not instr_type.endswith(SCL_SUFFIX) and "_error" not in instr_type and not is_grouped and instr_type.lower() not in ignored_types): unprocessed_count += 1 - unprocessed_details.append( - f" - Red '{network_title}' (ID: {network_id}, Lang: {network_lang}), " - f"Instrucción UID: {instr_uid}, Tipo: '{instr_type}'" - ) + unprocessed_details.append(f" - Red 
'{network_title}' (ID: {network_id}, Lang: {network_lang}), Instrucción UID: {instr_uid}, Tipo: '{instr_type}'") if unprocessed_count > 0: - print( - f"ADVERTENCIA: Se encontraron {unprocessed_count} instrucciones (no STL) que parecen no haber sido procesadas:" - ) - for detail in unprocessed_details: - print(detail) - else: - print( - "INFO: Todas las instrucciones relevantes (no STL) parecen haber sido procesadas o agrupadas." - ) + print(f"ADVERTENCIA: Se encontraron {unprocessed_count} instrucciones (no STL) que parecen no haber sido procesadas:") + for detail in unprocessed_details: print(detail) + else: print("INFO: Todas las instrucciones relevantes (no STL) parecen haber sido procesadas o agrupadas.") - # --- Guardar JSON Final --- - output_filename = json_filepath.replace( - "_simplified.json", "_simplified_processed.json" - ) - print(f"\nGuardando JSON procesado ({block_type}) en: {output_filename}") # <-- Mensaje actualizado + output_filename = json_filepath.replace("_simplified.json", "_simplified_processed.json") + print(f"\nGuardando JSON procesado ({block_type}) en: {output_filename}") try: - with open(output_filename, "w", encoding="utf-8") as f: - json.dump(data, f, indent=4, ensure_ascii=False) + with open(output_filename, "w", encoding="utf-8") as f: json.dump(data, f, indent=4, ensure_ascii=False) print("Guardado completado.") - except Exception as e: - print(f"Error Crítico al guardar JSON procesado: {e}") - traceback.print_exc() + except Exception as e: print(f"Error Crítico al guardar JSON procesado: {e}"); traceback.print_exc() -# --- Ejecución (sin cambios en esta parte) --- +# --- Ejecución (SIN CAMBIOS) --- if __name__ == "__main__": - # Imports necesarios solo para la ejecución como script principal - import argparse - import os - import sys + parser = argparse.ArgumentParser(description="Process simplified JSON (_simplified.json) to embed SCL logic (SymPy version). Expects original XML filepath as argument.") + parser.add_argument("source_xml_filepath", help="Path to the original source XML file (passed from x0_main.py, used to derive JSON input name).") + args = parser.parse_args() + source_xml_file = args.source_xml_filepath - # Configurar ArgumentParser para recibir la ruta del XML original obligatoria - parser = argparse.ArgumentParser( - description="Process simplified JSON (_simplified.json) to embed SCL logic (SymPy version). Expects original XML filepath as argument." - ) - parser.add_argument( - "source_xml_filepath", # Argumento posicional obligatorio - help="Path to the original source XML file (passed from x0_main.py, used to derive JSON input name).", - ) - args = parser.parse_args() # Parsea los argumentos de sys.argv - - source_xml_file = args.source_xml_filepath # Obtiene la ruta del XML original - - # Verificar si el archivo XML original existe (como referencia, útil para depuración) if not os.path.exists(source_xml_file): - print( - f"Advertencia (x2): Archivo XML original no encontrado: '{source_xml_file}', pero se intentará encontrar el JSON correspondiente." 
- ) + print(f"Advertencia (x2): Archivo XML original no encontrado: '{source_xml_file}', pero se intentará encontrar el JSON correspondiente.") - # Derivar nombre del archivo JSON de entrada (_simplified.json) xml_filename_base = os.path.splitext(os.path.basename(source_xml_file))[0] - # Asumir que el JSON simplificado está en el mismo directorio que el XML original - input_dir = os.path.dirname(source_xml_file) # Directorio del XML original + input_dir = os.path.dirname(source_xml_file) input_json_file = os.path.join(input_dir, f"{xml_filename_base}_simplified.json") + output_json_file = os.path.join(input_dir, f"{xml_filename_base}_simplified_processed.json") - # Determinar el nombre esperado del archivo JSON procesado de salida - output_json_file = os.path.join( - input_dir, f"{xml_filename_base}_simplified_processed.json" - ) + print(f"(x2) Procesando: '{os.path.relpath(input_json_file)}' -> '{os.path.relpath(output_json_file)}'") - print( - f"(x2) Procesando: '{os.path.relpath(input_json_file)}' -> '{os.path.relpath(output_json_file)}'" - ) - - # Verificar si el archivo JSON de entrada (_simplified.json) EXISTE antes de procesar if not os.path.exists(input_json_file): - print( - f"Error Fatal (x2): El archivo de entrada JSON simplificado no existe: '{input_json_file}'" - ) - print( - f"Asegúrate de que 'x1_to_json.py' se ejecutó correctamente para '{os.path.relpath(source_xml_file)}'." - ) - sys.exit(1) # Salir si el archivo necesario no está + print(f"Error Fatal (x2): El archivo de entrada JSON simplificado no existe: '{input_json_file}'") + print(f"Asegúrate de que 'x1_to_json.py' se ejecutó correctamente para '{os.path.relpath(source_xml_file)}'.") + sys.exit(1) else: - # Llamar a la función principal de procesamiento del script try: process_json_to_scl(input_json_file) except Exception as e: - print( - f"Error Crítico (x2) durante el procesamiento de '{input_json_file}': {e}" - ) - import traceback # Asegurar que traceback está importado - + print(f"Error Crítico (x2) durante el procesamiento de '{input_json_file}': {e}") + import traceback traceback.print_exc() - sys.exit(1) # Salir con error si la función principal falla \ No newline at end of file + sys.exit(1) \ No newline at end of file diff --git a/x3_generate_scl.py b/x3_generate_scl.py index e20b9d2..5c3290a 100644 --- a/x3_generate_scl.py +++ b/x3_generate_scl.py @@ -381,11 +381,103 @@ def generate_scl_declarations(variables, indent_level=1): return scl_lines +# --- NUEVAS FUNCIONES para generar Markdown --- +def generate_udt_markdown(data): + """Genera contenido Markdown para un UDT.""" + md_lines = [] + udt_name = data.get("block_name", "UnknownUDT") + udt_comment = data.get("block_comment", "") + md_lines.append(f"# UDT: {udt_name}") + md_lines.append("") + if udt_comment: + md_lines.append(f"**Comment:**") + for line in udt_comment.splitlines(): + md_lines.append(f"> {line}") + md_lines.append("") -# --- Función Principal de Generación SCL --- -def generate_scl(processed_json_filepath, output_scl_filepath): - """Genera un archivo SCL a partir del JSON procesado (FC/FB/OB o DB).""" # Actualizado + # Extraer miembros (asumiendo que están en interface['None']) + members = data.get("interface", {}).get("None", []) + if members: + md_lines.append("## Members") + md_lines.append("") + md_lines.append("| Name | Datatype | Start Value | Comment |") + md_lines.append("|---|---|---|---|") + # Usar una función auxiliar recursiva para manejar structs anidados + md_lines.extend(generate_markdown_member_rows(members)) + 
md_lines.append("") + else: + md_lines.append("No members found in the UDT interface.") + md_lines.append("") + return md_lines + +# --- generate_markdown_member_rows (MODIFICADA) --- +def generate_markdown_member_rows(members, level=0): + """Función auxiliar recursiva para generar filas Markdown para miembros de UDT.""" + md_rows = []; prefix = "    " * level + for member in members: + name = member.get("name", "N/A"); datatype = member.get("datatype", "N/A") + start_value_raw = member.get("start_value") + start_value_fmt = format_scl_start_value(start_value_raw, datatype) if start_value_raw is not None else "" + # CORRECCIÓN: Manejar el caso en que comment sea None + comment_raw = member.get("comment") + comment = comment_raw.replace('|', '\|').replace('\n', ' ') if comment_raw else "" # Usar "" si es None + + md_rows.append(f"| {prefix}`{name}` | `{datatype}` | `{start_value_fmt}` | {comment} |") + children = member.get("children") + if children: md_rows.extend(generate_markdown_member_rows(children, level + 1)) + array_elements = member.get("array_elements") + if array_elements: + base_type_for_init = datatype + if isinstance(datatype, str) and datatype.lower().startswith("array["): + match = re.match(r"(Array\[.*\]\s+of\s+)(.*)", datatype, re.IGNORECASE) + if match: base_type_for_init = match.group(2).strip() + md_rows.append(f"| {prefix}  *(Initial Values)* | | | |") + try: + indices_numeric = {int(k): v for k, v in array_elements.items()} + sorted_indices_str = [str(k) for k in sorted(indices_numeric.keys())] + except ValueError: sorted_indices_str = sorted(array_elements.keys()) + for idx_str in sorted_indices_str: + val_raw = array_elements[idx_str] + val_fmt = format_scl_start_value(val_raw, base_type_for_init) if val_raw is not None else "" + md_rows.append(f"| {prefix}  `[{idx_str}]` | | `{val_fmt}` | |") + return md_rows + +def generate_tag_table_markdown(data): + """Genera contenido Markdown para una tabla de tags.""" + md_lines = [] + table_name = data.get("block_name", "UnknownTagTable") + tags = data.get("tags", []) + + md_lines.append(f"# Tag Table: {table_name}") + md_lines.append("") + + if tags: + md_lines.append("| Name | Datatype | Address | Comment |") + md_lines.append("|---|---|---|---|") + for tag in tags: + name = tag.get("name", "N/A") + datatype = tag.get("datatype", "N/A") + address = tag.get("address", "N/A") or " " # Evitar None en la tabla + comment = ( + tag.get("comment", "").replace("|", "\|").replace("\n", " ") + ) # Escapar pipes + + md_lines.append(f"| `{name}` | `{datatype}` | `{address}` | {comment} |") + md_lines.append("") + else: + md_lines.append("No tags found in this table.") + md_lines.append("") + + return md_lines + + +# --- Función Principal de Generación (MODIFICADA) --- +def generate_scl_or_markdown(processed_json_filepath, output_directory): + """ + Genera un archivo SCL o Markdown a partir del JSON procesado, + eligiendo el formato y la extensión según el tipo de bloque. + """ if not os.path.exists(processed_json_filepath): print( f"Error: Archivo JSON procesado no encontrado en '{processed_json_filepath}'" @@ -401,106 +493,98 @@ def generate_scl(processed_json_filepath, output_scl_filepath): traceback.print_exc() return - # --- Extracción de Información del Bloque (Común) --- + # --- Extracción de Información y Determinación de Tipo --- block_name = data.get("block_name", "UnknownBlock") block_number = data.get("block_number") - # block_lang_original = data.get("language", "Unknown") # Lenguaje original (SCL, LAD, DB...) 
block_type = data.get( "block_type", "Unknown" - ) # Tipo de bloque (FC, FB, GlobalDB, OB) <-- Usar este + ) # FC, FB, OB, GlobalDB, PlcUDT, PlcTagTable block_comment = data.get("block_comment", "") - scl_block_name = format_variable_name(block_name) # Nombre SCL seguro - print( - f"Generando SCL para: {block_type} '{scl_block_name}' (Original: {block_name})" # Quitado lenguaje original del log - ) - scl_output = [] + scl_block_name = format_variable_name(block_name) + output_content = [] + output_extension = ".scl" # Default - # --- MODIFICADO: GENERACIÓN PARA DATA BLOCK (GlobalDB) --- - if block_type == "GlobalDB": # <-- Comprobar tipo de bloque - print("Modo de generación: DATA_BLOCK") - scl_output.append(f"// Block Type: {block_type}") - scl_output.append(f"// Block Name (Original): {block_name}") + print( + f"Generando salida para: {block_type} '{scl_block_name}' (Original: {block_name})" + ) + + # --- Selección del Generador y Extensión --- + if block_type == "PlcUDT": + print(" -> Modo de generación: UDT Markdown") + output_content = generate_udt_markdown(data) + output_extension = ".md" + elif block_type == "PlcTagTable": + print(" -> Modo de generación: Tag Table Markdown") + output_content = generate_tag_table_markdown(data) + output_extension = ".md" + elif block_type == "GlobalDB": + print(" -> Modo de generación: DATA_BLOCK SCL") + output_extension = ".scl" + # (Lógica de generación SCL para DB como estaba antes) + output_content.append(f"// Block Type: {block_type}") + if block_name != scl_block_name: + output_content.append(f"// Block Name (Original): {block_name}") if block_number: - scl_output.append(f"// Block Number: {block_number}") + output_content.append(f"// Block Number: {block_number}") if block_comment: - # Dividir comentarios largos en múltiples líneas - comment_lines = block_comment.splitlines() - scl_output.append(f"// Block Comment:") - for line in comment_lines: - scl_output.append(f"// {line}") - scl_output.append("") - scl_output.append(f'DATA_BLOCK "{scl_block_name}"') - scl_output.append("{ S7_Optimized_Access := 'TRUE' }") # Asumir optimizado - scl_output.append("VERSION : 0.1") - scl_output.append("") + output_content.append(f"// Block Comment:") + for line in block_comment.splitlines(): + output_content.append(f"// {line}") + output_content.append("") + output_content.append(f'DATA_BLOCK "{scl_block_name}"') + output_content.append("{ S7_Optimized_Access := 'TRUE' }") + output_content.append("VERSION : 0.1") + output_content.append("") interface_data = data.get("interface", {}) - # En DBs, la sección relevante suele ser 'Static' static_vars = interface_data.get("Static", []) if static_vars: - scl_output.append("VAR") - # Usar la función recursiva para generar declaraciones - scl_output.extend(generate_scl_declarations(static_vars, indent_level=1)) - scl_output.append("END_VAR") - scl_output.append("") + output_content.append("VAR") + output_content.extend( + generate_scl_declarations(static_vars, indent_level=1) + ) + output_content.append("END_VAR") else: print( "Advertencia: No se encontró sección 'Static' o está vacía en la interfaz del DB." 
) - # Añadir bloque VAR vacío si no hay variables - scl_output.append("VAR") - scl_output.append("END_VAR") - scl_output.append("") - scl_output.append("BEGIN") - scl_output.append( - " // Los Data Blocks no tienen código ejecutable en BEGIN/END" - ) - scl_output.append("END_DATA_BLOCK") + output_content.append("VAR\nEND_VAR") # Añadir vacío + output_content.append("") + output_content.append("BEGIN") + output_content.append(" // Data Blocks have no executable code") + output_content.append("END_DATA_BLOCK") - # --- MODIFICADO: GENERACIÓN PARA FC/FB/OB --- - else: - # Determinar palabra clave SCL - scl_block_keyword = "FUNCTION_BLOCK" # Default + elif block_type in ["FC", "FB", "OB"]: + print(f" -> Modo de generación: {block_type} SCL") + output_extension = ".scl" + # (Lógica de generación SCL para FC/FB/OB como estaba antes) + scl_block_keyword = "FUNCTION_BLOCK" if block_type == "FC": scl_block_keyword = "FUNCTION" elif block_type == "OB": scl_block_keyword = "ORGANIZATION_BLOCK" - elif block_type == "FB": - scl_block_keyword = "FUNCTION_BLOCK" - else: # Fallback - print( - f"Advertencia: Tipo de bloque desconocido '{block_type}', usando FUNCTION_BLOCK." - ) - scl_block_keyword = "FUNCTION_BLOCK" # O quizás lanzar error? - print(f"Modo de generación: {scl_block_keyword}") - - # Cabecera del Bloque - scl_output.append(f"// Block Type: {block_type}") - scl_output.append(f"// Block Name (Original): {block_name}") + output_content.append(f"// Block Type: {block_type}") + if block_name != scl_block_name: + output_content.append(f"// Block Name (Original): {block_name}") if block_number: - scl_output.append(f"// Block Number: {block_number}") - # Indicar lenguaje original de las redes si es relevante + output_content.append(f"// Block Number: {block_number}") original_net_langs = set( n.get("language", "Unknown") for n in data.get("networks", []) ) - scl_output.append( + output_content.append( f"// Original Network Languages: {', '.join(l for l in original_net_langs if l != 'Unknown')}" ) if block_comment: - comment_lines = block_comment.splitlines() - scl_output.append(f"// Block Comment:") - for line in comment_lines: - scl_output.append(f"// {line}") - scl_output.append("") + output_content.append(f"// Block Comment:") + for line in block_comment.splitlines(): + output_content.append(f"// {line}") + output_content.append("") - # Manejar tipo de retorno para FUNCTION (FC) - return_type = "Void" # Default + return_type = "Void" interface_data = data.get("interface", {}) if scl_block_keyword == "FUNCTION" and interface_data.get("Return"): - # Asumir un solo valor de retorno return_member = interface_data["Return"][0] return_type_raw = return_member.get("datatype", "Void") - # Limpiar comillas si es UDT/String return_type = ( return_type_raw[1:-1] if isinstance(return_type_raw, str) @@ -508,177 +592,127 @@ def generate_scl(processed_json_filepath, output_scl_filepath): and return_type_raw.endswith('"') else return_type_raw ) - # Añadir comillas si es UDT y no las tenía if ( return_type != return_type_raw and not return_type_raw.lower().startswith("array") ): return_type = f'"{return_type}"' - else: # Mantener raw si es tipo básico o ya tenía comillas + else: return_type = return_type_raw - - # Línea de declaración del bloque if scl_block_keyword == "FUNCTION": - scl_output.append(f'{scl_block_keyword} "{scl_block_name}" : {return_type}') - else: # FB y OB - scl_output.append(f'{scl_block_keyword} "{scl_block_name}"') + output_content.append( + f'{scl_block_keyword} "{scl_block_name}" : 
{return_type}' + ) + else: + output_content.append(f'{scl_block_keyword} "{scl_block_name}"') - # Atributos y versión - scl_output.append("{ S7_Optimized_Access := 'TRUE' }") # Asumir optimizado - scl_output.append("VERSION : 0.1") - scl_output.append("") + output_content.append("{ S7_Optimized_Access := 'TRUE' }") + output_content.append("VERSION : 0.1") + output_content.append("") - # Declaraciones de Interfaz (Input, Output, InOut, Static, Temp, Constant) - # Orden estándar SCL section_order = ["Input", "Output", "InOut", "Static", "Temp", "Constant"] - declared_temps = set() # Para rastrear temps ya declaradas + declared_temps = set() has_declarations = False - for section_name in section_order: vars_in_section = interface_data.get(section_name, []) if vars_in_section: has_declarations = True - # Mapeo de nombres de sección JSON a palabras clave SCL VAR_ scl_section_keyword = f"VAR_{section_name.upper()}" if section_name == "Static": - scl_section_keyword = "VAR_STAT" # Para FBs + scl_section_keyword = "VAR_STAT" if section_name == "Temp": scl_section_keyword = "VAR_TEMP" if section_name == "Constant": - scl_section_keyword = "CONSTANT" # CONSTANT no usa VAR_ - - scl_output.append(scl_section_keyword) - # Usar la función recursiva para generar declaraciones - scl_output.extend( + scl_section_keyword = "CONSTANT" + output_content.append(scl_section_keyword) + output_content.extend( generate_scl_declarations(vars_in_section, indent_level=1) ) - # Añadir END_VAR (o END_CONSTANT) - scl_output.append( + output_content.append( "END_VAR" if section_name != "Constant" else "END_CONSTANT" ) - scl_output.append("") # Línea en blanco - - # Guardar nombres de Temp declarados explícitamente + output_content.append("") if section_name == "Temp": declared_temps.update( format_variable_name(v.get("name")) for v in vars_in_section if v.get("name") ) - # Declaraciones VAR_TEMP adicionales (auto-detectadas) - # Buscar variables que empiecen con #_temp_ en el SCL generado - temp_vars_detected = set() - # Patrón para encontrar #variable o "#variable" - temp_pattern = re.compile( - r'"?(#\w+)"?' - ) # Busca # seguido de caracteres alfanuméricos + temp_vars_detected = set() + temp_pattern = re.compile(r'"?(#\w+)"?') for network in data.get("networks", []): for instruction in network.get("logic", []): - # Revisar el SCL final y el SCL de actualización de memoria si existe scl_code = instruction.get("scl", "") - edge_update_code = instruction.get( - "_edge_mem_update_scl", "" - ) # Para flancos + edge_update_code = instruction.get("_edge_mem_update_scl", "") code_to_scan = ( (scl_code if scl_code else "") + "\n" + (edge_update_code if edge_update_code else "") ) - if code_to_scan: - # Usar findall para encontrar todas las ocurrencias found_temps = temp_pattern.findall(code_to_scan) for temp_name in found_temps: - # findall devuelve el grupo capturado (#...) 
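# --- Editorial sketch (not part of the patch): the temp-detection pattern in isolation ---
# Same regex as above, applied to an invented SCL fragment; any #name found here that was
# not declared in VAR_TEMP gets an auto-generated Bool declaration.
import re
temp_pattern = re.compile(r'"?(#\w+)"?')
scl_fragment = 'IF #_temp_cond_1 AND "Start" THEN\n    #_temp_out := TRUE;\nEND_IF;'
print(sorted(set(temp_pattern.findall(scl_fragment))))
# -> ['#_temp_cond_1', '#_temp_out']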
if temp_name: temp_vars_detected.add(temp_name) - - # Filtrar las que ya estaban declaradas additional_temps = sorted(list(temp_vars_detected - declared_temps)) - if additional_temps: print(f"INFO: Detectadas {len(additional_temps)} VAR_TEMP adicionales.") - # Si no se declaró la sección Temp antes, añadirla ahora if "Temp" not in interface_data or not interface_data["Temp"]: - scl_output.append("VAR_TEMP") - + output_content.append("VAR_TEMP") for temp_name in additional_temps: - # Formatear por si acaso, aunque el patrón ya debería dar #nombre scl_name = format_variable_name(temp_name) - # Inferir tipo (Bool es lo más común para temporales internos) - # Se podría mejorar si el nombre da pistas (ej. _temp_r para Real) - inferred_type = "Bool" # Asumir Bool por defecto - scl_output.append( + inferred_type = "Bool" + output_content.append( f" {scl_name} : {inferred_type}; // Auto-generated temporary" ) - - # Si abrimos la sección aquí, cerrarla if "Temp" not in interface_data or not interface_data["Temp"]: - scl_output.append("END_VAR") - scl_output.append("") + output_content.append("END_VAR") + output_content.append("") - # --- Cuerpo del Bloque (BEGIN...END) --- - scl_output.append("BEGIN") - scl_output.append("") - # Iterar por redes y lógica (incluyendo manejo STL/SCL crudo) + output_content.append("BEGIN") + output_content.append("") for i, network in enumerate(data.get("networks", [])): - network_title = network.get( - "title", f'Network {network.get("id", i+1)}' - ) # Usar i+1 si falta ID + network_title = network.get("title", f'Network {network.get("id", i+1)}') network_comment = network.get("comment", "") - network_lang = network.get("language", "LAD") # Lenguaje original de la red - scl_output.append( + network_lang = network.get("language", "LAD") + output_content.append( f" // Network {i+1}: {network_title} (Original Language: {network_lang})" ) if network_comment: - # Indentar comentarios de red for line in network_comment.splitlines(): - scl_output.append(f" // {line}") - scl_output.append("") # Línea en blanco antes del código de red - + output_content.append(f" // {line}") + output_content.append("") network_has_code = False logic_in_network = network.get("logic", []) - if not logic_in_network: - scl_output.append(f" // Network {i+1} has no logic elements.") - scl_output.append("") + output_content.append(f" // Network {i+1} has no logic elements.") + output_content.append("") continue - # --- Manejo Especial Redes STL --- if network_lang == "STL": - # Asumir que la lógica STL está en el primer elemento como RAW_STL_CHUNK if logic_in_network[0].get("type") == "RAW_STL_CHUNK": network_has_code = True raw_stl_code = logic_in_network[0].get( "stl", "// ERROR: STL code missing" ) - # Incrustar STL como comentario multi-línea o delimitado - scl_output.append(f" // --- BEGIN STL Network {i+1} ---") - # Comentar cada línea STL + output_content.append(f" // --- BEGIN STL Network {i+1} ---") for stl_line in raw_stl_code.splitlines(): - scl_output.append(f" // {stl_line}") - scl_output.append(f" // --- END STL Network {i+1} ---") - scl_output.append("") # Línea en blanco después + output_content.append(f" // {stl_line}") + output_content.append(f" // --- END STL Network {i+1} ---") + output_content.append("") else: - scl_output.append( + output_content.append( f" // ERROR: Contenido STL inesperado en Network {i+1}." 
) - scl_output.append("") - - # --- Manejo Redes SCL/LAD/FBD procesadas --- - else: - # Iterar por las instrucciones procesadas + output_content.append("") + else: # SCL/LAD/FBD for instruction in logic_in_network: instruction_type = instruction.get("type", "") scl_code = instruction.get("scl", "") is_grouped = instruction.get("grouped", False) - - # Saltar instrucciones agrupadas (su lógica está en el IF) if is_grouped: continue - - # Incluir SCL si la instrucción fue procesada o es un chunk crudo/error/placeholder if ( instruction_type.endswith(SCL_SUFFIX) or instruction_type @@ -688,18 +722,14 @@ def generate_scl(processed_json_filepath, output_scl_filepath): "UNSUPPORTED_CONTENT", "PARSING_ERROR", ] - or "_error" in instruction_type # Incluir errores comentados + or "_error" in instruction_type ) and scl_code: - - # Comprobar si el SCL es solo un comentario (a menos que sea un bloque IF) is_only_comment = all( line.strip().startswith("//") for line in scl_code.splitlines() if line.strip() ) is_if_block = scl_code.strip().startswith("IF") - - # Añadir el SCL indentado si no es solo un comentario (o si es un IF/Error) if ( not is_only_comment or is_if_block @@ -713,78 +743,72 @@ def generate_scl(processed_json_filepath, output_scl_filepath): ): network_has_code = True for line in scl_code.splitlines(): - scl_output.append(f" {line}") # Indentar código - # Añadir línea en blanco después de cada bloque SCL para legibilidad - scl_output.append("") - - # Si la red no produjo código SCL imprimible (ej. solo lógica interna) - if ( - not network_has_code and network_lang != "STL" - ): # No añadir para STL ya comentado - scl_output.append( - f" // Network {i+1} did not produce printable SCL code." + output_content.append(f" {line}") + output_content.append("") + if not network_has_code and network_lang != "STL": + output_content.append( + f" // Network {i+1} did not produce printable SCL/MD code." ) - scl_output.append("") + output_content.append("") + output_content.append(f"END_{scl_block_keyword}") - # Fin del bloque FC/FB/OB - scl_output.append(f"END_{scl_block_keyword}") # <-- Usar keyword determinada + else: # Tipo desconocido + print( + f"Error: Tipo de bloque desconocido '{block_type}' encontrado en JSON. No se generará archivo." 
+ ) + return - # --- Escritura del Archivo SCL (Común) --- - print(f"Escribiendo archivo SCL en: {output_scl_filepath}") + # --- Escritura del Archivo de Salida (.scl o .md) --- + # Construir nombre de archivo de salida + output_filename_base = ( + f"{scl_block_name}{output_extension}" # Usar nombre SCL seguro + ) + output_filepath = os.path.join(output_directory, output_filename_base) + + print(f" -> Escribiendo archivo de salida en: {output_filepath}") try: - with open(output_scl_filepath, "w", encoding="utf-8") as f: - for line in scl_output: + # Crear directorio si no existe + os.makedirs(output_directory, exist_ok=True) + with open(output_filepath, "w", encoding="utf-8") as f: + for line in output_content: f.write(line + "\n") - print("Generación de SCL completada.") + print(f"Generación de {output_extension.upper()} completada.") except Exception as e: - print(f"Error al escribir el archivo SCL: {e}") + print(f"Error al escribir el archivo {output_extension.upper()}: {e}") traceback.print_exc() # --- Ejecución --- if __name__ == "__main__": - # Imports necesarios solo para la ejecución como script principal - import argparse - import os - import sys - import traceback # Asegurarse que traceback está importado - - # Configurar ArgumentParser para recibir la ruta del XML original obligatoria parser = argparse.ArgumentParser( - description="Generate final SCL file from processed JSON (_simplified_processed.json). Expects original XML filepath as argument." + description="Generate final SCL or Markdown file from processed JSON (_simplified_processed.json)." # Actualizado ) parser.add_argument( - "source_xml_filepath", # Argumento posicional obligatorio + "source_xml_filepath", help="Path to the original source XML file (passed from x0_main.py, used to derive input/output names).", ) - args = parser.parse_args() # Parsea los argumentos de sys.argv + args = parser.parse_args() + source_xml_file = args.source_xml_filepath - source_xml_file = args.source_xml_filepath # Obtiene la ruta del XML original - - # Verificar si el archivo XML original existe (como referencia) if not os.path.exists(source_xml_file): print( f"Advertencia (x3): Archivo XML original no encontrado: '{source_xml_file}', pero se intentará encontrar el JSON procesado." 
) - # Derivar nombres de archivos de entrada (JSON procesado) y salida (SCL) xml_filename_base = os.path.splitext(os.path.basename(source_xml_file))[0] - # Asumir que los archivos están en el mismo directorio que el XML original - base_dir = os.path.dirname(source_xml_file) # Directorio del XML original + base_dir = os.path.dirname(source_xml_file) input_json_file = os.path.join( base_dir, f"{xml_filename_base}_simplified_processed.json" ) - # Cambiar extensión de salida a .scl - output_scl_file = os.path.join( - base_dir, f"{xml_filename_base}_generated.scl" # Cambiado nombre de salida - ) + + # MODIFICADO: El directorio de salida ahora es el mismo que el de entrada + output_dir = base_dir # Escribir .scl/.md en el mismo directorio print( - f"(x3) Generando SCL: '{os.path.relpath(input_json_file)}' -> '{os.path.relpath(output_scl_file)}'" - ) + f"(x3) Generando SCL/MD desde: '{os.path.relpath(input_json_file)}' en directorio: '{os.path.relpath(output_dir)}'" + ) # Log actualizado - # Verificar si el archivo JSON procesado de entrada EXISTE if not os.path.exists(input_json_file): print( f"Error Fatal (x3): Archivo JSON procesado no encontrado: '{input_json_file}'" @@ -792,16 +816,15 @@ if __name__ == "__main__": print( f"Asegúrate de que 'x2_process.py' se ejecutó correctamente para '{os.path.relpath(source_xml_file)}'." ) - sys.exit(1) # Salir si el archivo necesario no está + sys.exit(1) else: - # Llamar a la función principal de generación SCL del script try: - generate_scl(input_json_file, output_scl_file) - sys.exit(0) # Salir con éxito explícitamente + # Pasar el directorio de salida a la función principal + generate_scl_or_markdown(input_json_file, output_dir) + sys.exit(0) except Exception as e: print( - f"Error Crítico (x3) durante la generación de SCL desde '{input_json_file}': {e}" + f"Error Crítico (x3) durante la generación de SCL/MD desde '{input_json_file}': {e}" ) - # traceback ya debería estar importado traceback.print_exc() - sys.exit(1) # Salir con error si la función principal falla + sys.exit(1)
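
For orientation, the per-file flow implemented by the three scripts in this patch can be driven manually as below; a minimal sketch with an invented path, since x0_main.py's actual orchestration is not part of this excerpt. Each stage takes the original exported XML path and derives its own input and output names from it.

import subprocess, sys

xml_file = "ExportedBlocks/MyFunctionBlock.xml"  # hypothetical TIA Portal export
for script in ("x1_to_json.py", "x2_process.py", "x3_generate_scl.py"):
    # x1 writes <base>_simplified.json, x2 writes <base>_simplified_processed.json,
    # x3 writes a .scl (FC/FB/OB/DB) or .md (UDT/tag table) file named after the block,
    # all next to the source XML.
    subprocess.run([sys.executable, script, xml_file], check=True)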