"""
LadderToSCL - Conversor de Siemens LAD/FUP XML a SCL

Este script genera documentacion MD de Cross Reference para Obsidian
"""
|
|
# ToUpload/x4_cross_reference.py
# -*- coding: utf-8 -*-
import argparse
import glob
import json
import os
import re
import shutil  # to copy source files
import sys
import traceback
import urllib.parse
import xml.etree.ElementTree as ET  # to parse the XRef XML files
from collections import defaultdict

from generators.generator_utils import format_variable_name

# Make the repository root importable before pulling in backend helpers.
script_root = os.path.dirname(
    os.path.dirname(os.path.dirname(os.path.dirname(__file__)))
)
sys.path.append(script_root)
from backend.script_utils import load_configuration
|
|
|
|
# --- Constantes ---
|
|
INDENT_STEP = " "
|
|
|
|
# --- Funciones de Análisis (find_calls_in_scl, find_db_tag_usage, find_plc_tag_usage sin cambios) ---
|
|
# <-- MODIFICADO: Añadir instance_db_to_fb_map como parámetro -->
|
|
def find_calls_in_scl(scl_code, block_data, instance_db_to_fb_map):
|
|
calls = defaultdict(int)
|
|
known_blocks = set(block_data.keys())
|
|
# La lógica de known_instances puede ser menos relevante ahora, pero la dejamos por si acaso
|
|
known_instances = set()
|
|
for name, data in block_data.items():
|
|
block_info = data.get("data", {})
|
|
if block_info.get("block_type") == "FB":
|
|
static_vars = block_info.get("interface", {}).get("Static", [])
|
|
for var in static_vars:
|
|
var_type = var.get("datatype", "")
|
|
base_type = var_type.replace('"', "").split("[")[0].strip()
|
|
if (
|
|
base_type in known_blocks
|
|
and block_data[base_type]["data"].get("block_type") == "FB"
|
|
):
|
|
known_instances.add(f'"{name}"."{var.get("name")}"')
|
|
known_instances.add(f'"{var.get("name")}"') # Mejorable
|
|
general_call_pattern = re.compile(
|
|
r'\b(?<![:=<>])("?([a-zA-Z_#][a-zA-Z0-9_."]*)"?)\s*\('
|
|
)
|
|
system_funcs = {
|
|
"IF",
|
|
"WHILE",
|
|
"FOR",
|
|
"CASE",
|
|
"REPEAT",
|
|
"RETURN",
|
|
"EXIT",
|
|
"TRUE",
|
|
"FALSE",
|
|
"AND",
|
|
"OR",
|
|
"XOR",
|
|
"NOT",
|
|
"MOD",
|
|
"ABS",
|
|
"SQRT",
|
|
"LN",
|
|
"EXP",
|
|
"SIN",
|
|
"COS",
|
|
"TAN",
|
|
"ASIN",
|
|
"ACOS",
|
|
"ATAN",
|
|
"CONCAT",
|
|
"LEN",
|
|
"LEFT",
|
|
"RIGHT",
|
|
"MID",
|
|
"DELETE",
|
|
"INSERT",
|
|
"FIND",
|
|
"REPLACE",
|
|
"INT_TO_STRING",
|
|
"STRING_TO_INT",
|
|
"TON",
|
|
"TOF",
|
|
"TP",
|
|
"CTU",
|
|
"CTD",
|
|
"CTUD",
|
|
"BLKMOV",
|
|
"ARRAY",
|
|
"STRUCT",
|
|
"VAR",
|
|
"FUNCTION",
|
|
"FUNCTION_BLOCK",
|
|
"DATA_BLOCK",
|
|
"BOOL",
|
|
"INT",
|
|
"DINT",
|
|
"REAL",
|
|
"STRING",
|
|
"TIME",
|
|
"DATE",
|
|
"WORD",
|
|
"BYTE",
|
|
}
|
|
for match in general_call_pattern.finditer(scl_code):
|
|
potential_name_quoted = match.group(1)
|
|
potential_name_clean = match.group(2)
|
|
if potential_name_clean.upper() in system_funcs:
|
|
continue # Ignorar palabras clave del lenguaje y funciones estándar
|
|
|
|
# <-- NUEVO: Comprobar si es una llamada a un DB de instancia conocido -->
|
|
fb_type_name = instance_db_to_fb_map.get(potential_name_clean)
|
|
if fb_type_name:
|
|
# ¡Encontrado! Es una llamada vía DB de instancia. Contabilizar para el FB base.
|
|
calls[fb_type_name] += 1
|
|
else:
|
|
# <-- Lógica Original (modificada para else) -->
|
|
# No es un DB de instancia conocido, ¿es una llamada a FC/FB directamente o una instancia local (#)?
|
|
is_local_instance_call = potential_name_clean.startswith("#")
|
|
# La comprobación 'potential_name_quoted in known_instances' es menos fiable, priorizamos el mapa.
|
|
|
|
if is_local_instance_call:
|
|
# Podríamos intentar resolver el tipo de la instancia local si tuviéramos esa info aquí,
|
|
# pero por ahora, simplemente la ignoramos para no contarla incorrectamente.
|
|
pass
|
|
elif potential_name_clean in known_blocks:
|
|
# Es un nombre de bloque conocido, ¿es FC o FB?
|
|
callee_type = block_data[potential_name_clean]["data"].get("block_type")
|
|
if callee_type in ["FC", "FB"]:
|
|
calls[potential_name_clean] += 1 # Llamada directa a FC o FB
|
|
return calls
|
|
|
|
|
|
def find_db_tag_usage(scl_code):
|
|
usage = defaultdict(lambda: defaultdict(int))
|
|
db_tag_pattern = re.compile(
|
|
r'("([a-zA-Z0-9_ ]+)"|(DB\d+))\."?([a-zA-Z0-9_ ]+)"?(\s*\[.*?\]|\.\w+)*'
|
|
)
|
|
write_pattern = re.compile(r"^\s*(.*?)\s*:=")
|
|
lines = scl_code.splitlines()
|
|
for line in lines:
|
|
line_strip = line.strip()
|
|
is_write = False
|
|
match_write = write_pattern.match(line_strip)
|
|
target_part = ""
|
|
if match_write:
|
|
is_write = True
|
|
target_part = match_write.group(1).strip()
|
|
for match in db_tag_pattern.finditer(line):
|
|
db_part = match.group(1)
|
|
tag_part = match.group(3)
|
|
full_match = match.group(0)
|
|
db_name = match.group(2) if match.group(2) else db_part
|
|
tag_name = match.group(4) if match.group(4) else tag_part
|
|
db_tag_key = f"{db_name}.{tag_name}"
|
|
access_type = (
|
|
"write" if (is_write and target_part.startswith(full_match)) else "read"
|
|
)
|
|
usage[db_tag_key][access_type] += 1
|
|
return usage
|
|
|
|
|
|
def find_plc_tag_usage(scl_code, plc_tag_names_set):
|
|
usage = defaultdict(lambda: defaultdict(int))
|
|
identifier_pattern = re.compile(
|
|
r"""(?<![."#\d])("([a-zA-Z_][a-zA-Z0-9_ .]*)"|([a-zA-Z_][a-zA-Z0-9_.]*))(?![(\[])""",
|
|
re.VERBOSE,
|
|
)
|
|
write_pattern = re.compile(r"^\s*(.*?)\s*:=")
|
|
lines = scl_code.splitlines()
|
|
for line in lines:
|
|
line_strip = line.strip()
|
|
is_write = False
|
|
match_write = write_pattern.match(line_strip)
|
|
target_part = ""
|
|
if match_write:
|
|
is_write = True
|
|
target_part = match_write.group(1).strip()
|
|
for match in identifier_pattern.finditer(line):
|
|
full_match = match.group(1)
|
|
tag_name_candidate = match.group(2) if match.group(2) else match.group(3)
|
|
if (
|
|
tag_name_candidate in plc_tag_names_set
|
|
or full_match in plc_tag_names_set
|
|
):
|
|
tag_key = full_match
|
|
access_type = (
|
|
"write"
|
|
if (is_write and target_part.startswith(full_match))
|
|
else "read"
|
|
)
|
|
usage[tag_key][access_type] += 1
|
|
return usage
|
|
|
|
|
|
# <-- NUEVA FUNCION -->
|
|
def copy_and_prepare_source_files(project_root_dir, xref_output_dir, scl_output_dirname, xref_source_subdir):
|
|
"""
|
|
Copia archivos .scl y .md desde scl_output a xref_output/source,
|
|
convirtiendo .scl a .md con formato de bloque de código.
|
|
Usa los nombres de directorios pasados como argumentos.
|
|
"""
|
|
scl_source_dir = os.path.join(project_root_dir, scl_output_dirname)
|
|
md_target_dir = os.path.join(xref_output_dir, xref_source_subdir)
|
|
|
|
if not os.path.isdir(scl_source_dir):
|
|
print(
|
|
f"Advertencia: Directorio '{scl_source_dir}' no encontrado. No se copiarán archivos fuente.",
|
|
file=sys.stderr,
|
|
)
|
|
return
|
|
|
|
try:
|
|
os.makedirs(md_target_dir, exist_ok=True)
|
|
print(
|
|
f"Copiando y preparando archivos fuente para Obsidian en: {md_target_dir}"
|
|
)
|
|
except OSError as e:
|
|
print(
|
|
f"Error creando directorio de destino '{md_target_dir}': {e}",
|
|
file=sys.stderr,
|
|
)
|
|
return
|
|
|
|
copied_count = 0
|
|
converted_count = 0
|
|
errors_count = 0
|
|
|
|
# Procesar archivos .scl
|
|
scl_files = glob.glob(os.path.join(scl_source_dir, "*.scl"))
|
|
for scl_path in scl_files:
|
|
base_name = os.path.basename(scl_path)
|
|
md_name = os.path.splitext(base_name)[0] + ".md"
|
|
md_path = os.path.join(md_target_dir, md_name)
|
|
try:
|
|
with open(scl_path, "r", encoding="utf-8") as f_scl:
|
|
scl_content = f_scl.read()
|
|
# <-- MODIFICADO: Limpiar contenido SCL antes de envolverlo -->
|
|
# Quitar posibles bloques de código Markdown anidados o incorrectos dentro del SCL
|
|
scl_content_cleaned = scl_content.replace("```stl", "").replace("```", "")
|
|
# Crear contenido Markdown
|
|
md_content = f"```pascal\n{scl_content_cleaned}\n```\n"
|
|
# <-- FIN MODIFICADO -->
|
|
with open(md_path, "w", encoding="utf-8") as f_md:
|
|
f_md.write(md_content)
|
|
converted_count += 1
|
|
except Exception as e:
|
|
print(f" Error procesando SCL '{base_name}': {e}", file=sys.stderr)
|
|
errors_count += 1
|
|
|
|
# Procesar archivos .md (UDT, TagTable)
|
|
md_files = glob.glob(os.path.join(scl_source_dir, "*.md"))
|
|
for md_src_path in md_files:
|
|
base_name = os.path.basename(md_src_path)
|
|
md_dest_path = os.path.join(md_target_dir, base_name)
|
|
try:
|
|
# Simplemente copiar el archivo .md existente
|
|
shutil.copy2(md_src_path, md_dest_path) # copy2 preserva metadatos
|
|
copied_count += 1
|
|
except Exception as e:
|
|
print(f" Error copiando MD '{base_name}': {e}", file=sys.stderr)
|
|
errors_count += 1
|
|
|
|
print(
|
|
f"Archivos fuente preparados: {converted_count} SCL convertidos, {copied_count} MD copiados."
|
|
)
|
|
if errors_count > 0:
|
|
print(
|
|
f"ADVERTENCIA: Hubo {errors_count} errores durante la preparación de archivos fuente.",
|
|
file=sys.stderr,
|
|
)
|
|
|
|
|
|
# --- Funciones Árbol de Llamadas Modificadas (para apuntar a xref_output/source/*.md) ---
|
|
|
|
|
|
# <-- MODIFICADO: get_scl_link -->
|
|
def get_scl_link(
|
|
block_name, block_entry, xref_source_subdir
|
|
): # Ya no necesita project_root_dir
|
|
"""
|
|
Genera un enlace Markdown relativo al archivo .md correspondiente DENTRO de xref_output/source.
|
|
"""
|
|
if not block_entry:
|
|
return f"`{block_name}`"
|
|
|
|
# El nombre del archivo destino siempre será .md
|
|
md_filename = format_variable_name(block_name) + ".md" # Asegurar que format_variable_name esté disponible
|
|
|
|
# La ruta siempre estará dentro del subdirectorio fuente de xref
|
|
link_target_path = f"{xref_source_subdir}/{md_filename}"
|
|
|
|
# Codificar para URL/Markdown
|
|
try:
|
|
# La ruta relativa desde xref_output_dir a xref_output_dir/source/file.md es solo source/file.md
|
|
encoded_path = urllib.parse.quote(
|
|
link_target_path
|
|
) # No necesita replace(os.sep, '/')
|
|
return f"[`{block_name}`]({encoded_path})"
|
|
except Exception as e:
|
|
print(f"Error generando enlace para {block_name}: {e}")
|
|
return f"`{block_name}` (error al generar enlace)"
|
|
|
|
|
|
# <-- MODIFICADO: build_call_tree_recursive (ya no necesita project_root_dir) -->
|
|
def build_call_tree_recursive( # Añadido max_call_depth, xref_source_subdir
|
|
current_node,
|
|
call_graph,
|
|
block_data,
|
|
output_lines,
|
|
visited_in_path,
|
|
base_xref_dir,
|
|
current_depth=0,
|
|
max_call_depth=5,
|
|
xref_source_subdir="source"
|
|
):
|
|
"""
|
|
Función recursiva para construir el árbol de llamadas indentado CON ENLACES
|
|
a los archivos .md en xref_output/source.
|
|
"""
|
|
indent = INDENT_STEP * current_depth
|
|
block_entry = block_data.get(current_node)
|
|
# Llamar a get_scl_link modificado
|
|
node_link = get_scl_link(current_node, block_entry, xref_source_subdir)
|
|
output_lines.append(f"{indent}- {node_link}")
|
|
|
|
if current_depth >= max_call_depth:
|
|
output_lines.append(
|
|
f"{indent}{INDENT_STEP}[... Profundidad máxima alcanzada ...]"
|
|
)
|
|
return
|
|
if current_node in visited_in_path:
|
|
output_lines.append(f"{indent}{INDENT_STEP}[... Recursión detectada ...]")
|
|
return
|
|
|
|
visited_in_path.add(current_node)
|
|
if current_node in call_graph:
|
|
callees = sorted(call_graph[current_node])
|
|
for callee in callees:
|
|
# Llamada recursiva
|
|
build_call_tree_recursive(
|
|
callee,
|
|
call_graph,
|
|
block_data,
|
|
output_lines,
|
|
visited_in_path.copy(),
|
|
base_xref_dir, # base_xref_dir no se usa en la recursión, podría quitarse
|
|
current_depth + 1,
|
|
max_call_depth=max_call_depth, # Pasar parámetro
|
|
xref_source_subdir=xref_source_subdir # Pasar parámetro
|
|
)
|
|
|
|
|
|
# <-- MODIFICADO: generate_call_tree_output (ya no necesita project_root_dir) -->
|
|
def generate_call_tree_output(call_graph, block_data, base_xref_dir, max_call_depth, xref_source_subdir): # Añadido max_call_depth, xref_source_subdir
|
|
"""
|
|
Genera las líneas de texto para el archivo de árbol de llamadas CON ENLACES
|
|
a los archivos .md en xref_output/source.
|
|
"""
|
|
output_lines = ["# Árbol de Referencias Cruzadas de Llamadas\n"]
|
|
output_lines.append(f"(Profundidad máxima: {max_call_depth})\n") # <-- Usar el parámetro
|
|
|
|
# ------------------------------------------------------------
|
|
# Aviso cuando NO se han detectado llamadas entre bloques
|
|
# ------------------------------------------------------------
|
|
has_any_call = any(len(callees) > 0 for callees in call_graph.values())
|
|
if not has_any_call:
|
|
output_lines.append(
|
|
"\n> ⚠️ Nota: No se detectaron referencias cruzadas de llamadas. "
|
|
"Es posible que no existan los archivos '*_XRef.xml' o que aún no "
|
|
"se haya ejecutado el análisis de fallback sobre los bloques SCL.\n"
|
|
)
|
|
|
|
root_nodes = sorted( # Encontrar OBs
|
|
[
|
|
name
|
|
for name, data in block_data.items()
|
|
if data.get("data", {}).get("block_type") == "OB"
|
|
]
|
|
)
|
|
|
|
if not root_nodes:
|
|
output_lines.append("\nNo se encontraron OBs como puntos de entrada.")
|
|
else:
|
|
output_lines.append("\n## Puntos de Entrada (OBs)\n")
|
|
for ob_name in root_nodes:
|
|
ob_entry = block_data.get(ob_name)
|
|
ob_link = get_scl_link(
|
|
ob_name, ob_entry, xref_source_subdir
|
|
) # Llamar a get_scl_link modificado
|
|
output_lines.append(f"\n### Iniciando desde: {ob_link}\n")
|
|
build_call_tree_recursive(
|
|
ob_name,
|
|
call_graph,
|
|
block_data,
|
|
output_lines,
|
|
set(),
|
|
base_xref_dir, # No se usa en recursión
|
|
current_depth=0,
|
|
max_call_depth=max_call_depth, # Pasar parámetro
|
|
xref_source_subdir=xref_source_subdir # Pasar parámetro
|
|
)
|
|
|
|
all_callers = set(call_graph.keys())
|
|
all_callees = set(c for v in call_graph.values() for c in v)
|
|
all_in_graph = all_callers.union(all_callees)
|
|
code_blocks = {
|
|
n
|
|
for n, d in block_data.items()
|
|
if d.get("data", {}).get("block_type") in ["FC", "FB"]
|
|
}
|
|
unreached = sorted(list(code_blocks - all_in_graph - set(root_nodes)))
|
|
if unreached:
|
|
output_lines.append(
|
|
"\n## Bloques (FC/FB) No Referenciados Directamente desde OBs\n"
|
|
)
|
|
for block_name in unreached:
|
|
block_entry = block_data.get(block_name)
|
|
block_link = get_scl_link(
|
|
block_name, block_entry, xref_source_subdir
|
|
) # Llamar a get_scl_link modificado
|
|
output_lines.append(f"- {block_link}")
|
|
return output_lines
|
|
|
|
|
|
# --- Funciones para Salida Resumida (generate_db_usage_summary_output, generate_plc_tag_summary_output SIN CAMBIOS) ---
|
|
# (Se omiten por brevedad)
|
|
def generate_db_usage_summary_output(db_users, max_users_list): # Añadido max_users_list
|
|
"""Genera las líneas para el archivo Markdown de resumen de uso de DBs."""
|
|
output_lines = ["# Resumen de Uso de DB Globales por Bloque\n\n"]
|
|
if not db_users:
|
|
output_lines.append(
|
|
"Ningún DB global parece ser utilizado por bloques de código.\n"
|
|
)
|
|
else:
|
|
for db_name in sorted(db_users.keys()):
|
|
users_set = db_users[db_name]
|
|
users_list = sorted(list(users_set))
|
|
output_lines.append(f"## DB: `{db_name}`\n")
|
|
if not users_list:
|
|
output_lines.append("- No utilizado directamente.\n")
|
|
else:
|
|
output_lines.append("Utilizado por:\n")
|
|
display_users = users_list[:max_users_list] # Usar parámetro
|
|
remaining_count = len(users_list) - len(display_users)
|
|
for user_block in display_users:
|
|
output_lines.append(f"- `{user_block}`")
|
|
if remaining_count > 0:
|
|
output_lines.append(f"- ... (y {remaining_count} más)")
|
|
output_lines.append("")
|
|
return output_lines
|
|
|
|
|
|
def generate_plc_tag_summary_output(plc_tag_users, max_users_list): # Añadido max_users_list
|
|
"""Genera las líneas para el archivo Markdown de resumen de uso de PLC Tags."""
|
|
output_lines = ["# Resumen de Uso de PLC Tags Globales por Bloque\n\n"]
|
|
if not plc_tag_users:
|
|
output_lines.append(
|
|
"Ningún PLC Tag global parece ser utilizado por bloques de código.\n"
|
|
)
|
|
else:
|
|
for tag_name in sorted(plc_tag_users.keys()):
|
|
users_set = plc_tag_users[tag_name]
|
|
users_list = sorted(list(users_set))
|
|
output_lines.append(f"## PLC Tag: `{tag_name}`\n")
|
|
if not users_list:
|
|
output_lines.append("- No utilizado.\n")
|
|
else:
|
|
output_lines.append("Utilizado por:\n")
|
|
display_users = users_list[:max_users_list] # Usar parámetro
|
|
remaining_count = len(users_list) - len(display_users)
|
|
for user_block in display_users:
|
|
output_lines.append(f"- `{user_block}`")
|
|
if remaining_count > 0:
|
|
output_lines.append(f"- ... (y {remaining_count} más)")
|
|
output_lines.append("")
|
|
return output_lines
|
|
|
|
# --- NUEVA FUNCION: Parseador de XML XRef ---
|
|
def parse_xref_xml_for_calls(xml_file_path):
|
|
"""
|
|
Parsea un archivo _XRef.xml de TIA Portal y extrae las relaciones de llamada (Caller -> Callee).
|
|
Se basa en la estructura descrita en xref_info.md.
|
|
Devuelve un diccionario: {caller_name: [callee_name1, callee_name2, ...]}
|
|
"""
|
|
calls = defaultdict(list)
|
|
try:
|
|
tree = ET.parse(xml_file_path)
|
|
root = tree.getroot()
|
|
|
|
# Determinar el namespace (puede variar, esto es un intento común)
|
|
# Si el namespace es diferente, habrá que ajustarlo aquí.
|
|
ns_match = re.match(r'\{([^}]+)\}', root.tag)
|
|
ns = {'ns': ns_match.group(1)} if ns_match else {}
|
|
ns_prefix = f"{{{ns['ns']}}}" if ns else ""
|
|
|
|
# Encuentra el SourceObject (el llamador en este archivo)
|
|
source_object = root.find(f'.//{ns_prefix}SourceObject')
|
|
if source_object is None:
|
|
print(f"Advertencia: No se encontró SourceObject en {xml_file_path}", file=sys.stderr)
|
|
return {} # Devuelve diccionario vacío si no hay SourceObject
|
|
|
|
caller_name_elem = source_object.find(f'{ns_prefix}Name')
|
|
caller_name = caller_name_elem.text if caller_name_elem is not None and caller_name_elem.text else f"UnknownCaller_{os.path.basename(xml_file_path)}"
|
|
|
|
# Itera sobre los objetos referenciados (potenciales llamados)
|
|
references = source_object.find(f'{ns_prefix}References')
|
|
if references is not None:
|
|
for ref_object in references.findall(f'{ns_prefix}ReferenceObject'):
|
|
ref_name_elem = ref_object.find(f'{ns_prefix}Name')
|
|
ref_name = ref_name_elem.text if ref_name_elem is not None and ref_name_elem.text else None
|
|
ref_type_name_elem = ref_object.find(f'{ns_prefix}TypeName')
|
|
ref_type_name = ref_type_name_elem.text if ref_type_name_elem is not None and ref_type_name_elem.text else ""
|
|
|
|
if not ref_name: continue # Saltar si el objeto referenciado no tiene nombre
|
|
|
|
# Itera sobre las localizaciones de la referencia
|
|
locations = ref_object.find(f'{ns_prefix}Locations')
|
|
if locations is not None:
|
|
for location in locations.findall(f'{ns_prefix}Location'):
|
|
# <-- NUEVO: Comprobar primero el ReferenceType -->
|
|
ref_type_elem = location.find(f'{ns_prefix}ReferenceType')
|
|
ref_type_text = ref_type_elem.text if ref_type_elem is not None else ""
|
|
|
|
# Solo procesar si el SourceObject 'Uses' el ReferenceObject en esta Location
|
|
if ref_type_text == 'Uses':
|
|
access_elem = location.find(f'{ns_prefix}Access')
|
|
access_type = access_elem.text if access_elem is not None and access_elem.text else ""
|
|
|
|
callee_name = None
|
|
if access_type == 'Call':
|
|
# Llamada directa a FC
|
|
callee_name = ref_name
|
|
elif access_type == 'InstanceDB':
|
|
# Llamada a FB via DB de Instancia
|
|
# Extraer nombre/número del FB desde TypeName (ej: "Instance DB of BlockName [FB123]")
|
|
match = re.search(r'Instance DB of\s+(.*?)\s+\[([A-Za-z]+[0-9]+)\]', ref_type_name)
|
|
if match:
|
|
# Preferir nombre simbólico si existe, si no, el número (FBxxx)
|
|
callee_name = match.group(1) if match.group(1) else match.group(2)
|
|
elif 'Instance DB of' in ref_type_name: # Fallback si regex falla
|
|
callee_name = ref_type_name.split('Instance DB of ')[-1].strip()
|
|
|
|
if callee_name and callee_name not in calls[caller_name]:
|
|
calls[caller_name].append(callee_name)
|
|
|
|
except ET.ParseError as e:
|
|
print(f"Error parseando XML {xml_file_path}: {e}", file=sys.stderr)
|
|
except FileNotFoundError:
|
|
print(f"Error: Archivo XML no encontrado {xml_file_path}", file=sys.stderr)
|
|
except Exception as e:
|
|
print(f"Error inesperado procesando XML {xml_file_path}: {e}", file=sys.stderr)
|
|
traceback.print_exc(file=sys.stderr)
|
|
|
|
return dict(calls) # Convertir de defaultdict a dict
|
|
|
|
|
|
# --- Función Principal (MODIFICADA para llamar a copy_and_prepare_source_files) ---
|
|
def generate_cross_references(
|
|
project_root_dir,
|
|
output_dir,
|
|
scl_output_dirname,
|
|
xref_source_subdir,
|
|
call_xref_filename,
|
|
db_usage_xref_filename,
|
|
plc_tag_xref_filename,
|
|
max_call_depth,
|
|
max_users_list
|
|
):
|
|
"""
|
|
Genera archivos de referencias cruzadas y prepara archivos fuente (.md)
|
|
para visualización en Obsidian.
|
|
Utiliza los parámetros de configuración pasados como argumentos.
|
|
"""
|
|
print(f"--- Iniciando Generación de Referencias Cruzadas y Fuentes MD (x4) ---")
|
|
print(f"Buscando archivos JSON procesados en: {project_root_dir}")
|
|
print(f"Directorio de salida XRef: {output_dir}")
|
|
print(f"Directorio fuente SCL/MD (para análisis DB/Tag y copia): {scl_output_dirname}")
|
|
print(f"Subdirectorio fuentes MD para XRef: {xref_source_subdir}")
|
|
output_dir_abs = os.path.abspath(output_dir)
|
|
|
|
# <-- NUEVO: Crear directorio y preparar archivos fuente ANTES de generar XRefs -->
|
|
# Pasar los nombres de directorios leídos de la config
|
|
copy_and_prepare_source_files(project_root_dir, output_dir_abs, scl_output_dirname, xref_source_subdir)
|
|
|
|
# <-- NUEVO: Definir directorio donde buscar los XML de XRef -->
|
|
# <-- MODIFICADO: Buscar dentro del directorio del PLC actual (project_root_dir) -->
|
|
# xref_xml_dir = os.path.join(os.path.dirname(project_root_dir), "cross_ref", "PLC", "ProgramBlocks_CR") # Ruta anterior incorrecta
|
|
xref_xml_dir = os.path.join(project_root_dir, "ProgramBlocks_CR") # Ruta correcta: <working_dir>/<PLC_Name>/ProgramBlocks_CR/
|
|
print(f"Buscando archivos XML XRef en: {xref_xml_dir}")
|
|
|
|
|
|
# <-- FIN NUEVO -->
|
|
json_files = glob.glob(
|
|
os.path.join(project_root_dir, "**", "*_processed.json"), recursive=True
|
|
)
|
|
if not json_files:
|
|
print("Error: No se encontraron archivos '*_processed.json'.", file=sys.stderr)
|
|
return False
|
|
print(f"Archivos JSON encontrados: {len(json_files)}")
|
|
|
|
# 1. Cargar datos de JSON (sigue siendo útil para metadatos, enlaces, y análisis DB/Tag)
|
|
block_data = {}
|
|
all_db_names = set()
|
|
plc_tag_names = set()
|
|
for f_path in json_files:
|
|
try:
|
|
with open(f_path, "r", encoding="utf-8") as f:
|
|
data = json.load(f)
|
|
block_name = data.get("block_name")
|
|
block_type = data.get("block_type")
|
|
if block_name:
|
|
block_data[block_name] = {"data": data, "json_path": f_path}
|
|
if block_type == "GlobalDB":
|
|
all_db_names.add(block_name)
|
|
elif block_type == "PlcTagTable":
|
|
[
|
|
plc_tag_names.add(tag["name"])
|
|
for tag in data.get("tags", [])
|
|
if tag.get("name")
|
|
]
|
|
else:
|
|
print(
|
|
f"Advertencia: JSON sin 'block_name': {f_path}", file=sys.stderr
|
|
)
|
|
except Exception as e:
|
|
print(f"Error procesando {f_path}: {e}", file=sys.stderr)
|
|
traceback.print_exc(file=sys.stderr)
|
|
if not block_data:
|
|
print("Error: No se pudieron cargar datos.", file=sys.stderr)
|
|
return False
|
|
print(
|
|
f"Datos cargados para {len(block_data)} bloques."
|
|
)
|
|
|
|
# <-- NUEVO: Crear mapa de DB de Instancia a FB -->
|
|
instance_db_to_fb_map = {}
|
|
for block_name, block_entry in block_data.items():
|
|
b_data = block_entry.get("data", {})
|
|
if b_data.get("block_type") == "InstanceDB":
|
|
instance_of_name = b_data.get("InstanceOfName") # Clave añadida en x1
|
|
if instance_of_name and instance_of_name in block_data: # Verificar que el FB existe
|
|
instance_db_to_fb_map[block_name] = instance_of_name
|
|
elif instance_of_name:
|
|
print(f"Advertencia: InstanceDB '{block_name}' instancia a '{instance_of_name}', pero ese FB no se encontró en los datos cargados.", file=sys.stderr)
|
|
print(f"Mapa InstanciaDB -> FB creado con {len(instance_db_to_fb_map)} entradas.")
|
|
print(
|
|
f"Datos cargados para {len(block_data)} bloques ({len(plc_tag_names)} PLC Tags globales)."
|
|
)
|
|
|
|
# 2. Construir Grafo de Llamadas desde XML XRef
|
|
print("Construyendo grafo de llamadas desde archivos XML XRef...")
|
|
call_graph = defaultdict(list) # Usamos lista, no necesitamos contar llamadas múltiples aquí
|
|
# Buscar recursivamente en todas las subcarpetas (algunos proyectos guardan los XML en estructuras anidadas)
|
|
xref_xml_files = glob.glob(os.path.join(xref_xml_dir, "**", "*_XRef.xml"), recursive=True)
|
|
if not xref_xml_files:
|
|
print(f"ADVERTENCIA: No se encontraron archivos '*_XRef.xml' en {xref_xml_dir}. El árbol de llamadas estará vacío.", file=sys.stderr)
|
|
else:
|
|
print(f"Archivos XML XRef encontrados: {len(xref_xml_files)}")
|
|
for xml_file in xref_xml_files:
|
|
file_calls = parse_xref_xml_for_calls(xml_file)
|
|
for caller, callees in file_calls.items():
|
|
if caller not in call_graph:
|
|
call_graph[caller] = []
|
|
for callee in callees:
|
|
if callee not in call_graph[caller]: # Evitar duplicados si un bloque llama varias veces al mismo
|
|
call_graph[caller].append(callee)
|
|
|
|
# 3. Analizar uso de DBs y PLC Tags desde SCL (esta parte no cambia)
|
|
db_users = defaultdict(set)
|
|
plc_tag_users = defaultdict(set)
|
|
for block_name, block_entry in block_data.items():
|
|
data = block_entry["data"]
|
|
block_type = data.get("block_type")
|
|
if block_type not in ["OB", "FC", "FB"]:
|
|
continue
|
|
caller_name = block_name
|
|
|
|
# Leer el archivo SCL para análisis de DB/Tags
|
|
scl_filename = format_variable_name(caller_name) + ".scl"
|
|
# Construir la ruta al archivo SCL dentro del directorio scl_output
|
|
scl_filepath = os.path.join(project_root_dir, scl_output_dirname, scl_filename)
|
|
full_scl_content = ""
|
|
if os.path.exists(scl_filepath):
|
|
try:
|
|
with open(scl_filepath, "r", encoding="utf-8") as f_scl:
|
|
full_scl_content = f_scl.read()
|
|
except Exception as read_err:
|
|
print(f" Advertencia: No se pudo leer el archivo SCL '{scl_filepath}' para análisis: {read_err}", file=sys.stderr)
|
|
else:
|
|
print(f" Advertencia: No se encontró el archivo SCL '{scl_filepath}' para análisis. El bloque podría no tener código ejecutable o hubo un error previo.", file=sys.stderr)
|
|
|
|
if full_scl_content:
|
|
# Ya no usamos find_calls_in_scl para el grafo principal
|
|
# Analizar uso de DBs
|
|
db_usage_found = find_db_tag_usage(full_scl_content)
|
|
for db_tag, access_counts in db_usage_found.items():
|
|
db_name_part = db_tag.split(".")[0].strip('"') # Limpiar comillas
|
|
if db_name_part in all_db_names or (
|
|
db_name_part.startswith("DB") and db_name_part[2:].isdigit()
|
|
):
|
|
db_users[db_name_part].add(caller_name)
|
|
plc_usage_found = find_plc_tag_usage(full_scl_content, plc_tag_names)
|
|
# Analizar uso de PLC Tags
|
|
for plc_tag, access_counts in plc_usage_found.items():
|
|
plc_tag_users[plc_tag].add(caller_name)
|
|
|
|
# 4. Generar Archivos de Salida XRef
|
|
os.makedirs(output_dir_abs, exist_ok=True)
|
|
call_xref_path = os.path.join(output_dir_abs, call_xref_filename) # Usar parámetro
|
|
db_usage_xref_path = os.path.join(output_dir_abs, db_usage_xref_filename) # Usar parámetro
|
|
plc_tag_xref_path = os.path.join(output_dir_abs, plc_tag_xref_filename) # Usar parámetro
|
|
|
|
print(f"Generando ÁRBOL XRef de llamadas en: {call_xref_path}")
|
|
try:
|
|
# <-- MODIFICADO: Llamar a la nueva función sin project_root_dir -->
|
|
call_tree_lines = generate_call_tree_output( # Pasar parámetros (el grafo ya está construido desde XML)
|
|
call_graph, block_data, output_dir_abs, max_call_depth, xref_source_subdir # <-- Pasar max_call_depth
|
|
)
|
|
with open(call_xref_path, "w", encoding="utf-8") as f:
|
|
[f.write(line + "\n") for line in call_tree_lines]
|
|
except Exception as e:
|
|
print(
|
|
f"Error al generar/escribir el ÁRBOL XRef de llamadas: {e}", file=sys.stderr
|
|
)
|
|
traceback.print_exc(file=sys.stderr)
|
|
|
|
# Generar Resumen de Uso de DB (sin cambios aquí)
|
|
print(f"Generando RESUMEN XRef de uso de DBs en: {db_usage_xref_path}")
|
|
try:
|
|
db_summary_lines = generate_db_usage_summary_output(db_users, max_users_list) # Pasar parámetro
|
|
with open(db_usage_xref_path, "w", encoding="utf-8") as f:
|
|
[f.write(line + "\n") for line in db_summary_lines]
|
|
except Exception as e:
|
|
print(
|
|
f"Error al generar/escribir el RESUMEN XRef de uso de DB: {e}",
|
|
file=sys.stderr,
|
|
)
|
|
traceback.print_exc(file=sys.stderr)
|
|
|
|
# Generar Resumen de Uso de PLC Tags (sin cambios aquí)
|
|
print(f"Generando RESUMEN XRef de uso de PLC Tags en: {plc_tag_xref_path}")
|
|
try:
|
|
plc_tag_lines = generate_plc_tag_summary_output(plc_tag_users, max_users_list) # Pasar parámetro
|
|
with open(plc_tag_xref_path, "w", encoding="utf-8") as f:
|
|
[f.write(line + "\n") for line in plc_tag_lines]
|
|
except Exception as e:
|
|
print(
|
|
f"Error al generar/escribir el RESUMEN XRef de uso de PLC Tags: {e}",
|
|
file=sys.stderr,
|
|
)
|
|
traceback.print_exc(file=sys.stderr)
|
|
|
|
print("--- Generación de Referencias Cruzadas y Fuentes MD (x4) Completada ---")
|
|
return True
|
|
|
|
|
|
# --- Punto de Entrada (sin cambios) ---
|
|
if __name__ == "__main__":
|
|
print("(x4 - Standalone) Ejecutando generación de referencias cruzadas...")
|
|
|
|
# Cargar configuración para obtener rutas
|
|
configs = load_configuration()
|
|
working_directory = configs.get("working_directory")
|
|
|
|
# Acceder a la configuración específica del grupo
|
|
group_config = configs.get("level2", {})
|
|
|
|
# Leer parámetros con valores por defecto (usando los defaults del esquema como guía)
|
|
# Parámetros necesarios para x4
|
|
cfg_scl_output_dirname = group_config.get("scl_output_dir", "scl_output")
|
|
cfg_xref_output_dirname = group_config.get("xref_output_dir", "xref_output")
|
|
cfg_xref_source_subdir = group_config.get("xref_source_subdir", "source")
|
|
cfg_call_xref_filename = group_config.get("call_xref_filename", "xref_calls_tree.md")
|
|
cfg_db_usage_xref_filename = group_config.get("db_usage_xref_filename", "xref_db_usage_summary.md")
|
|
cfg_plc_tag_xref_filename = group_config.get("plc_tag_xref_filename", "xref_plc_tags_summary.md")
|
|
# <-- MODIFICADO: Convertir a int y manejar posible error -->
|
|
try:
|
|
cfg_max_call_depth = int(group_config.get("max_call_depth", 5))
|
|
except (ValueError, TypeError):
|
|
print("Advertencia: Valor inválido para 'max_call_depth' en la configuración. Usando valor por defecto 5.", file=sys.stderr)
|
|
cfg_max_call_depth = 5
|
|
try:
|
|
cfg_max_users_list = int(group_config.get("max_users_list", 20))
|
|
except (ValueError, TypeError):
|
|
print("Advertencia: Valor inválido para 'max_users_list' en la configuración. Usando valor por defecto 20.", file=sys.stderr)
|
|
cfg_max_users_list = 20
|
|
|
|
# Calcular rutas
|
|
if not working_directory:
|
|
print("Error: 'working_directory' no encontrado en la configuración.", file=sys.stderr)
|
|
# No usamos sys.exit(1)
|
|
else:
|
|
# Calcular rutas basadas en la configuración
|
|
plc_subdir_name = "PLC" # Asumir nombre estándar
|
|
project_root_dir = os.path.join(working_directory, plc_subdir_name)
|
|
# El directorio de salida XRef ahora estará probablemente al mismo nivel que 'PLC'
|
|
# o dentro de él, según la configuración. Usemos la configuración directamente.
|
|
# xref_output_dir = os.path.join(working_directory, cfg_xref_output_dirname) # <-- Opción 1: Al nivel de working_dir
|
|
xref_output_dir = os.path.join(project_root_dir, cfg_xref_output_dirname) # <-- Opción 2: Dentro de PLC (como estaba antes) - Mantenemos esta por consistencia con el código original
|
|
|
|
if not os.path.isdir(project_root_dir):
|
|
print(f"Error: Directorio del proyecto '{project_root_dir}' no encontrado.", file=sys.stderr)
|
|
else:
|
|
# Llamar a la función principal
|
|
success = generate_cross_references(
|
|
project_root_dir,
|
|
xref_output_dir,
|
|
cfg_scl_output_dirname,
|
|
cfg_xref_source_subdir,
|
|
cfg_call_xref_filename,
|
|
cfg_db_usage_xref_filename,
|
|
cfg_plc_tag_xref_filename,
|
|
cfg_max_call_depth,
|
|
cfg_max_users_list
|
|
)
|
|
|
|
if success:
|
|
print("\n(x4 - Standalone) Proceso completado exitosamente.")
|
|
else:
|
|
print("\n(x4 - Standalone) Proceso finalizado con errores.", file=sys.stderr)
|