#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Script to generate IO adaptation documentation
between TwinCAT and TIA Portal - SIDEL project

Author: Auto-generated
Project: E5.007560 - Modifica O&U - SAE235
"""

import re
import os
import sys
import pandas as pd
import json
from pathlib import Path
from collections import defaultdict

# Configure the path to the project root directory
script_root = os.path.dirname(
    os.path.dirname(os.path.dirname(os.path.dirname(__file__)))
)
sys.path.append(script_root)

# Import the configuration loader
from backend.script_utils import load_configuration


def load_tiaportal_adaptations(working_directory, file_path="IO Adapted.md"):
    """Load the TIA Portal adaptations from the markdown file"""
    full_file_path = os.path.join(working_directory, file_path)
    print(f"Loading TIA Portal adaptations from: {full_file_path}")

    adaptations = {}

    if not os.path.exists(full_file_path):
        print(f"⚠️ File {full_file_path} not found")
        return adaptations

    with open(full_file_path, "r", encoding="utf-8") as f:
        content = f.read()

    # Improved patterns for the different IO types
    patterns = [
        # Digital: E0.0, A0.0
        r"\|\s*([EA]\d+\.\d+)\s*\|\s*([^|]+?)\s*\|",
        # Analog: PEW100, PAW100
        r"\|\s*(P[EA]W\d+)\s*\|\s*([^|]+?)\s*\|",
        # Profibus: EW 1640, AW 1640
        r"\|\s*([EA]W\s+\d+)\s*\|\s*([^|]+?)\s*\|",
    ]

    for pattern in patterns:
        matches = re.findall(pattern, content, re.MULTILINE)
        for io_addr, master_tag in matches:
            io_addr = io_addr.strip()
            master_tag = master_tag.strip()
            if io_addr and master_tag and not master_tag.startswith("-"):
                adaptations[io_addr] = master_tag
                print(f"  📍 {io_addr} → {master_tag}")

    print(f"✅ Loaded {len(adaptations)} TIA Portal adaptations")
    return adaptations
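

# A minimal, illustrative sanity check for the table-row patterns above. It is
# not called by main(); the sample rows are assumptions about the layout of
# "IO Adapted.md" (a markdown table with the IO address in the first column
# and the master tag in the second), not rows taken from the real file.
def _demo_tia_row_parsing():
    sample = (
        "| E0.0 | DI_AuxVoltage_On |\n"
        "| PEW100 | AI_TankPressure |\n"
        "| EW 1640 | AI_FillerHead |\n"
    )
    digital = re.findall(r"\|\s*([EA]\d+\.\d+)\s*\|\s*([^|]+?)\s*\|", sample)
    analog = re.findall(r"\|\s*(P[EA]W\d+)\s*\|\s*([^|]+?)\s*\|", sample)
    profibus = re.findall(r"\|\s*([EA]W\s+\d+)\s*\|\s*([^|]+?)\s*\|", sample)
    assert digital == [("E0.0", "DI_AuxVoltage_On")]
    assert analog == [("PEW100", "AI_TankPressure")]
    assert profibus == [("EW 1640", "AI_FillerHead")]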


def scan_twincat_definitions(working_directory, directory="TwinCat"):
    """Scan TwinCAT files for AT % variable definitions"""
    full_directory = os.path.join(working_directory, directory)
    print(f"\n🔍 Scanning TwinCAT definitions in: {full_directory}")

    definitions = {}

    if not os.path.exists(full_directory):
        print(f"⚠️ Directory {full_directory} not found")
        return definitions

    # Patterns for AT % definitions
    definition_patterns = [
        # Only active definitions are matched; commented-out ones are ignored.
        # Valid example:   DO_CIP_DrainCompleted AT %QX2.1 : BOOL ;
        # Ignored example: DO_FillerNextRecipe_1 (* AT %QX2.1 *) : BOOL;
        # Note: "\s*;" tolerates the space before the semicolon seen in the
        # valid example above.
        r"(\w+)\s+AT\s+%([IQ][XWB]\d+(?:\.\d+)?)\s*:\s*(\w+)\s*;"
    ]

    for file_path in Path(full_directory).glob("*.scl"):
        print(f"  📄 Processing: {file_path.name}")

        with open(file_path, "r", encoding="utf-8", errors="ignore") as f:
            content = f.read()

        for pattern in definition_patterns:
            matches = re.findall(pattern, content, re.MULTILINE | re.IGNORECASE)
            for var_name, io_addr, data_type in matches:
                var_name = var_name.strip()
                io_addr = io_addr.strip()
                data_type = data_type.strip()

                # A later file overwrites an earlier definition with the same name
                definitions[var_name] = {
                    "address": io_addr,
                    "type": data_type,
                    "file": file_path.name,
                    # Approximate: based on the first occurrence of the name
                    "definition_line": content[: content.find(var_name)].count("\n")
                    + 1,
                }
                print(f"    🔗 {var_name} AT %{io_addr} : {data_type}")

    print(f"✅ Found {len(definitions)} TwinCAT definitions")
    return definitions
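

# Illustrative check (not called by main()) that the definition pattern above
# matches an active AT % declaration but skips a commented-out one. The two
# sample lines are the examples quoted in the comments above, not real file
# content.
def _demo_at_definition_pattern():
    pattern = r"(\w+)\s+AT\s+%([IQ][XWB]\d+(?:\.\d+)?)\s*:\s*(\w+)\s*;"
    active = "DO_CIP_DrainCompleted AT %QX2.1 : BOOL ;"
    commented = "DO_FillerNextRecipe_1 (* AT %QX2.1 *) : BOOL;"
    assert re.findall(pattern, active, re.IGNORECASE) == [
        ("DO_CIP_DrainCompleted", "QX2.1", "BOOL")
    ]
    assert re.findall(pattern, commented, re.IGNORECASE) == []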


def scan_twincat_usage(working_directory, directory="TwinCat"):
    """Scan TwinCAT files for variable usage"""
    full_directory = os.path.join(working_directory, directory)
    print(f"\n🔍 Scanning TwinCAT variable usage in: {full_directory}")

    usage_data = defaultdict(list)

    if not os.path.exists(full_directory):
        print(f"⚠️ Directory {full_directory} not found")
        return usage_data

    for file_path in Path(full_directory).glob("*.scl"):
        print(f"  📄 Analyzing usage in: {file_path.name}")

        with open(file_path, "r", encoding="utf-8", errors="ignore") as f:
            lines = f.readlines()

        for line_num, line in enumerate(lines, 1):
            # Look for variables starting with DI_, DO_, AI_, AO_
            var_matches = re.findall(r"\b([DA][IO]_\w+)\b", line)
            for var_name in var_matches:
                usage_data[var_name].append(
                    {
                        "file": file_path.name,
                        "line": line_num,
                        "context": line.strip()[:100]
                        + ("..." if len(line.strip()) > 100 else ""),
                    }
                )

    print(f"✅ Found usage of {len(usage_data)} distinct variables")
    return usage_data
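

# Illustrative only (not called by main()): the usage scan above relies on the
# DI_/DO_/AI_/AO_ naming convention. The sample line is an invented ST-style
# statement, not code from the project.
def _demo_usage_pattern():
    line = "IF DI_Pump_Running AND NOT DO_Valve_Open THEN"
    assert re.findall(r"\b([DA][IO]_\w+)\b", line) == [
        "DI_Pump_Running",
        "DO_Valve_Open",
    ]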


def convert_tia_to_twincat(tia_addr):
    """Convert TIA Portal addresses to TwinCAT format"""
    conversions = []

    # Digital
    if re.match(r"^E\d+\.\d+$", tia_addr):  # E0.0 → IX0.0
        conversions.append(tia_addr.replace("E", "IX"))
    elif re.match(r"^A\d+\.\d+$", tia_addr):  # A0.0 → QX0.0
        conversions.append(tia_addr.replace("A", "QX"))

    # Analog
    elif re.match(r"^PEW\d+$", tia_addr):  # PEW100 → IW100
        conversions.append(tia_addr.replace("PEW", "IW"))
    elif re.match(r"^PAW\d+$", tia_addr):  # PAW100 → QW100
        conversions.append(tia_addr.replace("PAW", "QW"))

    # Profibus
    elif re.match(r"^EW\s+\d+$", tia_addr):  # EW 1234 → IB1234
        addr_num = re.search(r"\d+", tia_addr).group()
        conversions.append(f"IB{addr_num}")
    elif re.match(r"^AW\s+\d+$", tia_addr):  # AW 1234 → QB1234
        addr_num = re.search(r"\d+", tia_addr).group()
        conversions.append(f"QB{addr_num}")

    return conversions
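

# Illustrative sanity checks for the address conversion (not called by
# main()); the sample addresses follow the formats documented in the comments
# above.
def _demo_convert_tia_to_twincat():
    assert convert_tia_to_twincat("E0.0") == ["IX0.0"]
    assert convert_tia_to_twincat("A0.0") == ["QX0.0"]
    assert convert_tia_to_twincat("PEW100") == ["IW100"]
    assert convert_tia_to_twincat("PAW100") == ["QW100"]
    assert convert_tia_to_twincat("EW 1640") == ["IB1640"]
    assert convert_tia_to_twincat("AW 1640") == ["QB1640"]
    # Unrecognized formats produce no candidates
    assert convert_tia_to_twincat("MW100") == []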


def find_variable_by_address(definitions, target_address):
    """Find a variable by exact address"""
    for var_name, info in definitions.items():
        if info["address"] == target_address:
            return var_name, info
    return None, None


def find_variable_by_name_similarity(definitions, usage_data, master_tag):
    """Find variables by name similarity"""
    candidates = []

    # Strip the IO prefix from the master tag for comparison
    clean_master = re.sub(r"^[DA][IO]_", "", master_tag).lower()

    # Search the definitions
    for var_name, info in definitions.items():
        clean_var = re.sub(r"^[DA][IO]_", "", var_name).lower()
        if clean_master in clean_var or clean_var in clean_master:
            candidates.append((var_name, info, "definition"))

    # Search the usage data
    for var_name in usage_data.keys():
        clean_var = re.sub(r"^[DA][IO]_", "", var_name).lower()
        if clean_master in clean_var or clean_var in clean_master:
            # Try to find the definition of this variable
            var_info = definitions.get(var_name)
            if not var_info:
                var_info = {
                    "address": "Unknown",
                    "type": "Unknown",
                    "file": "Not found",
                }
            candidates.append((var_name, var_info, "usage"))

    return candidates
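

# Illustrative only (not called by main()): the similarity match strips the
# DI_/DO_/AI_/AO_ prefix and tests substring containment in both directions.
# The definition entry below is invented for the example.
def _demo_name_similarity():
    definitions = {
        "DO_CIP_DrainCompleted": {
            "address": "QX2.1",
            "type": "BOOL",
            "file": "GVL_IO.scl",
        }
    }
    candidates = find_variable_by_name_similarity(definitions, {}, "DO_CIP_Drain")
    # "cip_drain" is contained in "cip_draincompleted", so a candidate is found
    assert candidates[0][0] == "DO_CIP_DrainCompleted"
    assert candidates[0][2] == "definition"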


def analyze_adaptations(tia_adaptations, twincat_definitions, twincat_usage):
    """Analyze the correlations between TIA Portal and TwinCAT"""
    print("\n📊 Analyzing correlations...")

    results = []
    matches_found = 0

    for tia_addr, master_tag in tia_adaptations.items():
        result = {
            "tia_address": tia_addr,
            "master_tag": master_tag,
            "twincat_variable": None,
            "twincat_address": None,
            "twincat_type": None,
            "match_type": None,
            "definition_file": None,
            "usage_files": [],
            "usage_count": 0,
            "confidence": "Low",
        }

        # 1. Try a direct address conversion
        twincat_addresses = convert_tia_to_twincat(tia_addr)
        var_found = False

        for twincat_addr in twincat_addresses:
            var_name, var_info = find_variable_by_address(
                twincat_definitions, twincat_addr
            )
            if var_name:
                result.update(
                    {
                        "twincat_variable": var_name,
                        "twincat_address": var_info["address"],
                        "twincat_type": var_info["type"],
                        "match_type": "Address Match",
                        "definition_file": var_info["file"],
                        "confidence": "High",
                    }
                )
                var_found = True
                matches_found += 1
                break

        # 2. If no address match was found, search by name
        if not var_found:
            candidates = find_variable_by_name_similarity(
                twincat_definitions, twincat_usage, master_tag
            )
            if candidates:
                # Take the best candidate
                best_candidate = candidates[0]
                var_name, var_info, source = best_candidate

                result.update(
                    {
                        "twincat_variable": var_name,
                        "twincat_address": var_info.get("address", "Unknown"),
                        "twincat_type": var_info.get("type", "Unknown"),
                        "match_type": f"Name Similarity ({source})",
                        "definition_file": var_info.get("file", "Unknown"),
                        "confidence": "Medium",
                    }
                )
                matches_found += 1

        # 3. Collect usage information
        if result["twincat_variable"]:
            var_name = result["twincat_variable"]
            if var_name in twincat_usage:
                usage_info = twincat_usage[var_name]
                result["usage_files"] = list({u["file"] for u in usage_info})
                result["usage_count"] = len(usage_info)

        results.append(result)

        # Progress log
        status = "✅" if result["twincat_variable"] else "❌"
        print(f"  {status} {tia_addr} → {master_tag}")
        if result["twincat_variable"]:
            print(
                f"      🔗 {result['twincat_variable']} AT %{result['twincat_address']}"
            )
        if result["usage_count"] > 0:
            print(
                f"      📝 Used in {result['usage_count']} places: {', '.join(result['usage_files'])}"
            )

    # Guard against an empty adaptation list to avoid division by zero
    total = len(tia_adaptations) or 1
    print(
        f"\n🎯 Summary: {matches_found}/{len(tia_adaptations)} variables correlated ({matches_found/total*100:.1f}%)"
    )

    return results
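

# Illustrative end-to-end sketch (not called by main()): one invented TIA
# adaptation plus a matching invented TwinCAT definition should produce a
# high-confidence "Address Match" via convert_tia_to_twincat (E0.0 → IX0.0).
def _demo_analyze_adaptations():
    tia = {"E0.0": "DI_AuxVoltage_On"}
    defs = {
        "DI_AuxVoltage_On": {
            "address": "IX0.0",
            "type": "BOOL",
            "file": "GVL_IO.scl",
            "definition_line": 1,
        }
    }
    results = analyze_adaptations(tia, defs, {})
    assert results[0]["match_type"] == "Address Match"
    assert results[0]["confidence"] == "High"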


def create_results_directory(working_directory):
    """Create the results directory if it does not exist"""
    results_dir = Path(working_directory) / "resultados"
    results_dir.mkdir(exist_ok=True)
    print(f"📁 Results directory: {results_dir.absolute()}")
    return results_dir


def generate_json_output(
    results, working_directory, output_file="io_adaptation_data.json"
):
    """Generate a JSON file with structured data for later analysis"""
    full_output_file = os.path.join(working_directory, "resultados", output_file)
    print(f"\n📄 Generating JSON file: {full_output_file}")

    json_data = {
        "metadata": {
            "generated_at": pd.Timestamp.now().isoformat(),
            "project": "E5.007560 - Modifica O&U - SAE235",
            "total_adaptations": len(results),
            "matched_variables": len([r for r in results if r["twincat_variable"]]),
            "high_confidence": len([r for r in results if r["confidence"] == "High"]),
            "medium_confidence": len(
                [r for r in results if r["confidence"] == "Medium"]
            ),
        },
        "adaptations": [],
    }

    for result in results:
        adaptation = {
            "tia_portal": {
                "address": result["tia_address"],
                "tag": result["master_tag"],
            },
            "twincat": {
                "variable": result["twincat_variable"],
                "address": result["twincat_address"],
                "data_type": result["twincat_type"],
                "definition_file": result["definition_file"],
            },
            "correlation": {
                "match_type": result["match_type"],
                "confidence": result["confidence"],
                "found": result["twincat_variable"] is not None,
            },
            "usage": {
                "usage_count": result["usage_count"],
                "usage_files": result["usage_files"],
            },
        }
        json_data["adaptations"].append(adaptation)

    with open(full_output_file, "w", encoding="utf-8") as f:
        json.dump(json_data, f, indent=2, ensure_ascii=False)

    print(f"✅ JSON file generated: {full_output_file}")


def generate_detailed_report(
    results, working_directory, output_file="IO_Detailed_Analysis_Report.md"
):
    """Generate a detailed report with a markdown table"""
    full_output_file = os.path.join(working_directory, "resultados", output_file)
    print(f"\n📄 Generating detailed report: {full_output_file}")

    with open(full_output_file, "w", encoding="utf-8") as f:
        f.write("# Detailed IO Adaptation Analysis Report\n\n")
        f.write(
            f"**Generated on:** {pd.Timestamp.now().strftime('%Y-%m-%d %H:%M:%S')}\n\n"
        )

        # Statistics (guard against an empty result set to avoid division by zero)
        total = len(results) or 1
        matched = len([r for r in results if r["twincat_variable"]])
        high_conf = len([r for r in results if r["confidence"] == "High"])
        medium_conf = len([r for r in results if r["confidence"] == "Medium"])

        f.write("## 📊 General Statistics\n\n")
        f.write(f"- **Total adaptations processed:** {len(results)}\n")
        f.write(f"- **Variables found:** {matched} ({matched/total*100:.1f}%)\n")
        f.write(f"- **High-confidence matches:** {high_conf}\n")
        f.write(f"- **Medium-confidence matches:** {medium_conf}\n\n")

        # Table of successfully correlated variables
        f.write("## ✅ Successfully Correlated Variables\n\n")
        matched_results = [r for r in results if r["twincat_variable"]]

        if matched_results:
            # Table header
            f.write(
                "| TIA Address | TIA Tag | TwinCAT Variable | TwinCAT Address | Type | Method | Confidence | Def. File | Usage | Usage Files |\n"
            )
            f.write(
                "|-------------|---------|------------------|-----------------|------|--------|------------|-----------|-------|-------------|\n"
            )

            # Data rows
            for result in matched_results:
                usage_files_str = ", ".join(
                    result["usage_files"][:3]
                )  # Limit to 3 files
                if len(result["usage_files"]) > 3:
                    usage_files_str += "..."

                f.write(
                    f"| {result['tia_address']} | "
                    f"`{result['master_tag']}` | "
                    f"`{result['twincat_variable']}` | "
                    f"`%{result['twincat_address']}` | "
                    f"`{result['twincat_type']}` | "
                    f"{result['match_type']} | "
                    f"{result['confidence']} | "
                    f"{result['definition_file']} | "
                    f"{result['usage_count']} | "
                    f"{usage_files_str} |\n"
                )

            f.write("\n")

        # Table of unmatched variables
        f.write("## ❌ Variables Not Found\n\n")
        unmatched_results = [r for r in results if not r["twincat_variable"]]

        if unmatched_results:
            f.write("| TIA Address | TIA Tag |\n")
            f.write("|-------------|---------|\n")

            for result in unmatched_results:
                f.write(f"| {result['tia_address']} | `{result['master_tag']}` |\n")

            f.write(f"\n**Total not found:** {len(unmatched_results)}\n\n")

        # Recommendations
        f.write("## 💡 Recommendations\n\n")
        f.write("1. **High-confidence variables** can be migrated directly\n")
        f.write("2. **Medium-confidence variables** require manual verification\n")
        f.write(
            "3. **Variables not found** require manual mapping or may be obsolete\n"
        )
        f.write("4. Heavily used variables should be prioritized for migration\n\n")

        # Summary by confidence
        f.write("## 📈 Confidence Distribution\n\n")
        f.write("| Confidence Level | Count | Percentage |\n")
        f.write("|------------------|-------|------------|\n")
        f.write(f"| High | {high_conf} | {high_conf/total*100:.1f}% |\n")
        f.write(f"| Medium | {medium_conf} | {medium_conf/total*100:.1f}% |\n")
        f.write(
            f"| Not found | {len(results)-matched} | {(len(results)-matched)/total*100:.1f}% |\n"
        )

    print(f"✅ Detailed report generated: {full_output_file}")


def main():
    print("🚀 Starting detailed TwinCAT ↔ TIA Portal IO adaptation analysis")
    print("=" * 80)

    # Load the configuration
    configs = load_configuration()

    # Check that it loaded correctly
    if not configs:
        print(
            "Warning: could not load the configuration, using default values"
        )
        working_directory = "./"
    else:
        working_directory = configs.get("working_directory", "./")

    # Check the working directory
    if not os.path.exists(working_directory):
        print(f"Error: the working directory does not exist: {working_directory}")
        return

    print(f"📁 Working directory: {working_directory}")

    # Create the results directory
    results_dir = create_results_directory(working_directory)

    # Load the data
    tia_adaptations = load_tiaportal_adaptations(working_directory)
    twincat_definitions = scan_twincat_definitions(working_directory)
    twincat_usage = scan_twincat_usage(working_directory)

    # Analyze the correlations
    results = analyze_adaptations(tia_adaptations, twincat_definitions, twincat_usage)

    # Generate the reports in the results directory
    generate_detailed_report(results, working_directory)
    generate_json_output(results, working_directory)

    # Export a CSV for further analysis
    df = pd.DataFrame(results)
    csv_file = results_dir / "io_detailed_analysis.csv"
    df.to_csv(csv_file, index=False, encoding="utf-8")
    print(f"✅ Data exported to CSV: {csv_file}")

    print("\n🎉 Analysis completed successfully!")
    print(f"📁 Files generated in: {results_dir.absolute()}")
    print(f"  📄 {results_dir / 'IO_Detailed_Analysis_Report.md'}")
    print(f"  📄 {results_dir / 'io_adaptation_data.json'}")
    print(f"  📄 {results_dir / 'io_detailed_analysis.csv'}")

    return results


if __name__ == "__main__":
    results = main()