#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Script para generar snippets de código de uso de variables IO
entre TwinCAT y TIA Portal - Proyecto SIDEL

Autor: Generado automáticamente
Proyecto: E5.007560 - Modifica O&U - SAE235
"""

import json
|
|
import os
|
|
import sys
|
|
import re
|
|
from pathlib import Path
|
|
from typing import Dict, List, Tuple, Optional
|
|
import pandas as pd
|
|
|
|
# Configurar el path al directorio raíz del proyecto
|
|
script_root = os.path.dirname(
|
|
os.path.dirname(os.path.dirname(os.path.dirname(__file__)))
|
|
)
|
|
sys.path.append(script_root)
|
|
|
|
# Importar la función de configuración
|
|
from backend.script_utils import load_configuration
|
|
|
|
|
|
def load_adaptation_data(working_directory, json_file='io_adaptation_data.json'):
    """Load the IO adaptation data from the JSON results file.

    Args:
        working_directory: Project working directory containing 'resultados/'.
        json_file: Name of the JSON file produced by the correlation step.

    Returns:
        The parsed JSON document as a dict, or None when the file is missing.
    """
    json_path = os.path.join(working_directory, 'resultados', json_file)
    print(f"📖 Cargando datos de adaptación desde: {json_path}")

    if not os.path.exists(json_path):
        print(f"⚠️ Archivo {json_path} no encontrado")
        return None

    with open(json_path, 'r', encoding='utf-8') as handle:
        payload = json.load(handle)

    print(f"✅ Cargados datos de {payload['metadata']['total_adaptations']} adaptaciones")
    return payload
def find_variable_usage_in_file(file_path, variable_name, max_occurrences=3):
    """Find usages of a variable in a file and return each with context.

    Searches for ``variable_name`` as a whole word (``\\b`` boundaries) and
    collects up to ``max_occurrences`` matches, each with the previous,
    current and next line of context.

    Args:
        file_path: Path to the text file to scan (str or Path).
        variable_name: Variable name to look for, matched as a whole word.
        max_occurrences: Maximum number of matches to collect.

    Returns:
        List of dicts with keys 'line_number' (1-indexed), 'before',
        'current' and 'after'. Empty list when the file is missing or
        cannot be read.
    """
    if not os.path.exists(file_path):
        return []

    usages = []
    # Compile once instead of rebuilding/looking up the pattern per line.
    pattern = re.compile(rf'\b{re.escape(variable_name)}\b')

    try:
        with open(file_path, 'r', encoding='utf-8', errors='ignore') as f:
            lines = f.readlines()

        # Collect (index, stripped text) for lines mentioning the variable.
        found_lines = []
        for line_num, line in enumerate(lines):
            if pattern.search(line):
                found_lines.append((line_num, line.strip()))
                if len(found_lines) >= max_occurrences:
                    break

        # Attach a three-line context window to each occurrence.
        for line_num, line_content in found_lines:
            usages.append({
                'line_number': line_num + 1,  # convert to 1-indexed
                'before': lines[line_num - 1].strip() if line_num > 0 else "",
                'current': line_content,
                'after': lines[line_num + 1].strip() if line_num < len(lines) - 1 else ""
            })

    except Exception as e:
        print(f"⚠️ Error leyendo archivo {file_path}: {e}")

    return usages
def find_tia_portal_usage(adaptation, working_directory):
    """Collect usage snippets for a TIA Portal variable from markdown exports.

    Scans every ``*.md`` file under the 'TiaPortal' directory, searching by
    address (up to 2 hits per file) and, when distinct, by tag name (1 hit
    per file). Stops once three snippets have been gathered.

    Returns:
        List of at most 3 usage dicts, each annotated with 'file' and
        'search_term' keys.
    """
    address = adaptation['tia_portal']['address']
    tag = adaptation['tia_portal']['tag']

    collected = []
    md_dir = Path(working_directory) / 'TiaPortal'
    if not md_dir.exists():
        return collected

    for md_file in md_dir.glob('*.md'):
        # (term, per-file cap): tag is only searched when it differs
        # from the address, matching the original lookup order.
        search_plan = [(address, 2)]
        if tag != address:
            search_plan.append((tag, 1))

        for term, cap in search_plan:
            for usage in find_variable_usage_in_file(md_file, term, cap):
                usage['file'] = f"TiaPortal/{md_file.name}"
                usage['search_term'] = term
                collected.append(usage)

        # Cap the total number of TIA snippets across all files.
        if len(collected) >= 3:
            break

    return collected[:3]
def find_twincat_usage(adaptation, working_directory):
    """Collect usage snippets for a TwinCAT variable from its .scl sources.

    Only correlated adaptations are searched; the candidate file list comes
    from the adaptation's own usage metadata. At most 2 hits are taken per
    file and at most 3 snippets are returned overall.

    Returns:
        List of at most 3 usage dicts, each annotated with 'file' and
        'search_term' keys. Empty when the adaptation is not correlated.
    """
    if not adaptation['correlation']['found']:
        return []

    variable = adaptation['twincat']['variable']
    candidate_files = adaptation['usage']['usage_files']

    collected = []
    source_dir = Path(working_directory) / 'TwinCat'
    if not source_dir.exists():
        return collected

    for name in candidate_files:
        source_path = source_dir / name
        if not source_path.exists():
            continue

        for usage in find_variable_usage_in_file(source_path, variable, 2):
            usage['file'] = f"TwinCat/{name}"
            usage['search_term'] = variable
            collected.append(usage)

        # Cap the total number of TwinCAT snippets across all files.
        if len(collected) >= 3:
            break

    return collected[:3]
def _write_usage_snippets(f, usages, empty_message):
    """Write up to three usage snippets (or a fallback note) to the report.

    Shared by the TIA Portal and TwinCAT sections, which previously carried
    two byte-identical copies of this rendering loop and only differ in
    their fallback text.
    """
    if not usages:
        f.write(f"{empty_message}\n\n")
        return

    for j, usage in enumerate(usages):
        f.write(f"**Uso {j+1}:** [{usage['file']}]({usage['file']}) - Línea {usage['line_number']}\n\n")
        f.write("```scl\n")
        if usage['before']:
            f.write(f"{usage['before']}\n")
        f.write(f">>> {usage['current']} // ← {usage['search_term']}\n")
        if usage['after']:
            f.write(f"{usage['after']}\n")
        f.write("```\n\n")


def generate_code_snippets_report(data, working_directory, output_file='IO_Code_Snippets_Report.md'):
    """Generate the markdown report with per-variable code snippets.

    For every correlated adaptation, writes a section with the TIA Portal
    and TwinCAT identifiers plus up to three usage snippets per platform.

    Args:
        data: Parsed adaptation document (see load_adaptation_data).
        working_directory: Project working directory containing 'resultados/'.
        output_file: Report file name inside 'resultados/'.
    """
    full_output_file = os.path.join(working_directory, 'resultados', output_file)
    print(f"\n📄 Generando reporte de snippets: {full_output_file}")

    # Only correlated variables can be cross-referenced on both platforms.
    matched_adaptations = [a for a in data['adaptations'] if a['correlation']['found']]

    with open(full_output_file, 'w', encoding='utf-8') as f:
        f.write("# Reporte de Snippets de Código - Adaptación IO\n\n")
        f.write(f"**Fecha de generación:** {pd.Timestamp.now().strftime('%Y-%m-%d %H:%M:%S')}\n")
        f.write(f"**Proyecto:** {data['metadata']['project']}\n\n")

        f.write("## 📋 Resumen\n\n")
        f.write(f"- **Variables analizadas:** {len(matched_adaptations)}\n")
        f.write("- **Snippets generados:** Se muestran hasta 3 usos por plataforma\n")
        f.write("- **Formato:** Contexto de 3 líneas (anterior, actual, siguiente)\n\n")

        f.write("---\n\n")

        # One section per correlated adaptation.
        for i, adaptation in enumerate(matched_adaptations, 1):
            tia_address = adaptation['tia_portal']['address']
            tia_tag = adaptation['tia_portal']['tag']
            twincat_var = adaptation['twincat']['variable']
            twincat_addr = adaptation['twincat']['address']

            print(f"  📝 Procesando {i}/{len(matched_adaptations)}: {tia_address} → {twincat_var}")

            f.write(f"## {i}. {tia_address} → {twincat_var}\n\n")
            f.write(f"**TIA Portal:** `{tia_tag}` (`{tia_address}`)\n")
            f.write(f"**TwinCAT:** `{twincat_var}` (`%{twincat_addr}`)\n")
            f.write(f"**Tipo:** `{adaptation['twincat']['data_type']}`\n\n")

            f.write("### 🔵 Uso en TIA Portal\n\n")
            _write_usage_snippets(
                f,
                find_tia_portal_usage(adaptation, working_directory),
                "*No se encontraron usos específicos en archivos TIA Portal.*",
            )

            f.write("### 🟢 Uso en TwinCAT\n\n")
            _write_usage_snippets(
                f,
                find_twincat_usage(adaptation, working_directory),
                "*Variable definida pero no se encontraron usos específicos.*",
            )

            f.write("---\n\n")

    print(f"✅ Reporte de snippets generado: {full_output_file}")
def generate_summary_statistics(data, working_directory, output_file='IO_Usage_Statistics.md'):
    """Generate usage statistics for the correlated IO variables.

    Writes a markdown report with overall counts, the 10 most-used
    variables and the 10 most-referenced files.

    Args:
        data: Parsed adaptation document (see load_adaptation_data).
        working_directory: Project working directory containing 'resultados/'.
        output_file: Report file name inside 'resultados/'.
    """
    full_output_file = os.path.join(working_directory, 'resultados', output_file)
    print(f"\n📊 Generando estadísticas de uso: {full_output_file}")

    matched_adaptations = [a for a in data['adaptations'] if a['correlation']['found']]

    # Aggregate usage counts across all correlated variables.
    total_usage = sum(a['usage']['usage_count'] for a in matched_adaptations)
    variables_with_usage = len([a for a in matched_adaptations if a['usage']['usage_count'] > 0])
    # BUGFIX: guard the average against division by zero when no variable
    # was correlated (previously raised ZeroDivisionError).
    avg_usage = total_usage / len(matched_adaptations) if matched_adaptations else 0.0

    # Top 10 most-used variables.
    most_used = sorted(matched_adaptations, key=lambda x: x['usage']['usage_count'], reverse=True)[:10]

    # Top 10 most-referenced files (how many variables each file uses).
    file_usage = {}
    for adaptation in matched_adaptations:
        for file_name in adaptation['usage']['usage_files']:
            file_usage[file_name] = file_usage.get(file_name, 0) + 1

    top_files = sorted(file_usage.items(), key=lambda x: x[1], reverse=True)[:10]

    with open(full_output_file, 'w', encoding='utf-8') as f:
        f.write("# Estadísticas de Uso de Variables IO\n\n")
        f.write(f"**Fecha de generación:** {pd.Timestamp.now().strftime('%Y-%m-%d %H:%M:%S')}\n\n")

        f.write("## 📊 Resumen General\n\n")
        f.write(f"- **Variables correlacionadas:** {len(matched_adaptations)}\n")
        f.write(f"- **Variables con uso documentado:** {variables_with_usage}\n")
        f.write(f"- **Total de usos encontrados:** {total_usage}\n")
        f.write(f"- **Promedio de usos por variable:** {avg_usage:.1f}\n\n")

        f.write("## 🔥 Top 10 Variables Más Usadas\n\n")
        f.write("| Ranking | TIA Address | TwinCAT Variable | Usos | Archivos |\n")
        f.write("|---------|-------------|------------------|------|----------|\n")

        for i, adaptation in enumerate(most_used, 1):
            # Show at most three file names per row; elide the rest.
            files_str = ', '.join(adaptation['usage']['usage_files'][:3])
            if len(adaptation['usage']['usage_files']) > 3:
                files_str += '...'

            f.write(f"| {i} | {adaptation['tia_portal']['address']} | "
                    f"`{adaptation['twincat']['variable']}` | "
                    f"{adaptation['usage']['usage_count']} | {files_str} |\n")

        f.write("\n## 📁 Top 10 Archivos Más Referenciados\n\n")
        f.write("| Ranking | Archivo | Variables Usadas |\n")
        f.write("|---------|---------|------------------|\n")

        for i, (file_name, count) in enumerate(top_files, 1):
            f.write(f"| {i} | `{file_name}` | {count} |\n")

    print(f"✅ Estadísticas de uso generadas: {full_output_file}")
def main():
    """Entry point: load configuration, then emit both IO usage reports."""
    print("🚀 Iniciando generación de snippets de código para adaptación IO")
    print("=" * 70)

    # Resolve the working directory from the project configuration,
    # falling back to the current directory when no config is available.
    configs = load_configuration()
    if configs:
        working_directory = configs.get("working_directory", "./")
    else:
        print("Advertencia: No se pudo cargar la configuración, usando valores por defecto")
        working_directory = "./"

    if not os.path.exists(working_directory):
        print(f"Error: El directorio de trabajo no existe: {working_directory}")
        return

    print(f"📁 Directorio de trabajo: {working_directory}")

    # Make sure the results directory exists before any report is written.
    results_dir = Path(working_directory) / 'resultados'
    results_dir.mkdir(exist_ok=True)

    data = load_adaptation_data(working_directory)
    if not data:
        print("❌ No se pudieron cargar los datos de adaptación")
        return

    # Produce both outputs: snippets report and usage statistics.
    generate_code_snippets_report(data, working_directory)
    generate_summary_statistics(data, working_directory)

    print(f"\n🎉 Generación completada exitosamente!")
    print(f"📁 Archivos generados en: {results_dir.absolute()}")
    print(f"   📄 {results_dir / 'IO_Code_Snippets_Report.md'}")
    print(f"   📄 {results_dir / 'IO_Usage_Statistics.md'}")


if __name__ == "__main__":
    main()