Refactor: Remove legacy code and tests, update system state configuration

- Deleted legacy route handling in main_cleanup_notes.py.
- Updated system_state.json to reorder active datasets and add PlotJuggler path.
- Removed outdated DB1001 specific value verification test (test_db1001_specific_values.py).
- Removed PlotJuggler path detection test (test_plotjuggler_search.py).
- Removed system_state.json path handling test (test_system_state_paths.py).
- Added new tests for PlotJuggler and system state path handling in .tests directory.
- Introduced a comprehensive user guide for the frontend dashboard tabs in help.md.
Miguel 2025-08-22 12:42:42 +02:00
parent 192c83ebce
commit 88a6b805be
8 changed files with 4175 additions and 4671 deletions


@@ -43,43 +43,44 @@ from utils.json_manager import JSONManager
import struct
import ctypes
class DB1001ValueVerifier:
"""
Verificador específico para valores DB1001.
Valida que ambos métodos lean exactamente los valores hexadecimales esperados.
"""
def __init__(self):
self.json_manager = JSONManager()
self.plc = None
# Tabla de valores de referencia del PLC
self.reference_values = {
0: 0x0006, # DB1001.DBW 0
2: 0x3EE6, # DB1001.DBW 2
4: 0x6666, # DB1001.DBW 4
6: 0x3F4C, # DB1001.DBW 6
8: 0xCCCD, # DB1001.DBW 8
10: 0x5121, # DB1001.DBW 10
12: 0x0006, # DB1001.DBW 12
14: 0x6800, # DB1001.DBW 14
16: 0x0001, # DB1001.DBW 16
18: 0x0100, # DB1001.DBW 18
20: 0x0004, # DB1001.DBW 20
24: 0x2095, # DB1001.DBW 24
26: 0x0000, # DB1001.DBW 26
28: 0x0002, # DB1001.DBW 28
30: 0x8121, # DB1001.DBW 30
32: 0x0001, # DB1001.DBW 32
36: 0x0001, # DB1001.DBW 36
}
self.test_results = {
"test_info": {
"start_time": datetime.now().isoformat(),
"plc_ip": None,
"total_words": len(self.reference_values),
"db_number": 1001
"db_number": 1001,
},
"legacy_results": {},
"optimized_results": {},
@@ -90,170 +91,186 @@ class DB1001ValueVerifier:
"optimized_incorrect": [],
"legacy_accuracy": 0.0,
"optimized_accuracy": 0.0,
"methods_match": True
}
"methods_match": True,
},
}
def connect_plc(self) -> bool:
"""Conectar al PLC."""
try:
print("🔌 Conectando al PLC...")
# Cargar configuración
config_data = self.json_manager.read_json("plc")
plc_config = config_data.get("plc_config", {})
ip = plc_config.get("ip")
rack = plc_config.get("rack", 0)
slot = plc_config.get("slot", 2)
self.test_results["test_info"]["plc_ip"] = ip
# Conectar
self.plc = snap7.client.Client()
self.plc.connect(ip, rack, slot)
print(f"✅ Conectado a PLC: {ip}:{rack}.{slot}")
return True
except Exception as e:
print(f"❌ Error conectando PLC: {e}")
return False
def read_db1001_legacy(self) -> Dict[int, int]:
"""Leer DB1001 usando método legacy (lecturas individuales)."""
print("📖 Leyendo DB1001 con método LEGACY...")
results = {}
for offset, expected_value in self.reference_values.items():
try:
# Leer WORD (2 bytes) desde DB1001
data = self.plc.db_read(1001, offset, 2)
# Convertir a WORD (unsigned 16-bit big-endian)
word_value = struct.unpack(">H", data)[0]
results[offset] = word_value
print(f" DBW {offset:2d}: 0x{word_value:04X} (esperado: 0x{expected_value:04X})")
print(
f" DBW {offset:2d}: 0x{word_value:04X} (esperado: 0x{expected_value:04X})"
)
# Pequeña pausa entre lecturas
time.sleep(0.001)
except Exception as e:
print(f" ❌ Error leyendo DBW {offset}: {e}")
results[offset] = None
successful = len([v for v in results.values() if v is not None])
print(f"{successful}/{len(self.reference_values)} WORDs leídas exitosamente")
print(
f"{successful}/{len(self.reference_values)} WORDs leídas exitosamente"
)
return results
def read_db1001_optimized(self) -> Dict[int, int]:
"""Leer DB1001 usando método optimizado (read_multi_vars)."""
print("🚀 Leyendo DB1001 con método OPTIMIZADO...")
results = {}
CHUNK_SIZE = 19 # Límite seguro para S7
try:
# Preparar S7DataItems para todas las WORDs
all_items = []
all_offsets = []
for offset in self.reference_values.keys():
try:
item = S7DataItem()
item.Area = 132 # DB area
item.WordLen = 4 # WORD type
item.DBNumber = 1001 # DB1001
item.Start = offset # Byte offset
item.Amount = 1 # 1 WORD
# Allocar buffer para WORD (2 bytes)
buffer = (ctypes.c_ubyte * 2)()
item.pData = ctypes.cast(buffer, ctypes.POINTER(ctypes.c_ubyte))
all_items.append(item)
all_offsets.append(offset)
except Exception as e:
print(f" ❌ Error preparando DBW {offset}: {e}")
results[offset] = None
if not all_items:
return results
print(f" 📊 Procesando {len(all_items)} WORDs en chunks de {CHUNK_SIZE}")
# Procesar en chunks
for chunk_start in range(0, len(all_items), CHUNK_SIZE):
chunk_end = min(chunk_start + CHUNK_SIZE, len(all_items))
chunk_items = all_items[chunk_start:chunk_end]
chunk_offsets = all_offsets[chunk_start:chunk_end]
print(f" 🔄 Procesando chunk {chunk_start//CHUNK_SIZE + 1}: DBW {chunk_offsets[0]}-{chunk_offsets[-1]}")
print(
f" 🔄 Procesando chunk {chunk_start//CHUNK_SIZE + 1}: DBW {chunk_offsets[0]}-{chunk_offsets[-1]}"
)
# Convertir chunk a ctypes array
items_array = (S7DataItem * len(chunk_items))(*chunk_items)
# Llamar read_multi_vars para este chunk
result = self.plc.read_multi_vars(items_array)
if isinstance(result, tuple) and len(result) == 2:
ret_code, returned_items = result
if ret_code == 0:
for i, item in enumerate(returned_items):
offset = chunk_offsets[i]
expected_value = self.reference_values[offset]
if item.Result == 0:
try:
# Extraer WORD usando snap7.util
word_value = snap7.util.get_word(item.pData, 0)
results[offset] = word_value
print(f" DBW {offset:2d}: 0x{word_value:04X} (esperado: 0x{expected_value:04X})")
print(
f" DBW {offset:2d}: 0x{word_value:04X} (esperado: 0x{expected_value:04X})"
)
except Exception as e:
print(f" ❌ Error extrayendo DBW {offset}: {e}")
results[offset] = None
else:
print(f" ❌ Error leyendo DBW {offset}: código {item.Result}")
print(
f" ❌ Error leyendo DBW {offset}: código {item.Result}"
)
results[offset] = None
else:
print(f" ❌ Chunk falló: código {ret_code}")
for offset in chunk_offsets:
results[offset] = None
else:
print(f" ❌ Formato de resultado inesperado para chunk: {type(result)}")
print(
f" ❌ Formato de resultado inesperado para chunk: {type(result)}"
)
for offset in chunk_offsets:
results[offset] = None
# Pequeña pausa entre chunks
time.sleep(0.01)
except Exception as e:
print(f" ❌ Error en método optimizado: {e}")
import traceback
traceback.print_exc()
for offset in self.reference_values.keys():
if offset not in results:
results[offset] = None
successful = len([v for v in results.values() if v is not None])
print(f"{successful}/{len(self.reference_values)} WORDs leídas exitosamente")
print(
f"{successful}/{len(self.reference_values)} WORDs leídas exitosamente"
)
return results
def verify_results(
self, legacy_results: Dict[int, int], optimized_results: Dict[int, int]
) -> Dict[str, Any]:
"""Verificar resultados contra valores de referencia y entre métodos."""
print("🔍 Verificando resultados contra valores de referencia...")
verification = {
"legacy_correct": [],
"legacy_incorrect": [],
@@ -262,111 +279,133 @@ class DB1001ValueVerifier:
"methods_mismatch": [],
"legacy_accuracy": 0.0,
"optimized_accuracy": 0.0,
"methods_match": True
"methods_match": True,
}
for offset, expected_value in self.reference_values.items():
legacy_val = legacy_results.get(offset)
optimized_val = optimized_results.get(offset)
# Verificar método legacy
if legacy_val is not None:
if legacy_val == expected_value:
verification["legacy_correct"].append(offset)
else:
verification["legacy_incorrect"].append({
"offset": offset,
"expected": f"0x{expected_value:04X}",
"actual": f"0x{legacy_val:04X}"
})
verification["legacy_incorrect"].append(
{
"offset": offset,
"expected": f"0x{expected_value:04X}",
"actual": f"0x{legacy_val:04X}",
}
)
# Verificar método optimizado
if optimized_val is not None:
if optimized_val == expected_value:
verification["optimized_correct"].append(offset)
else:
verification["optimized_incorrect"].append({
"offset": offset,
"expected": f"0x{expected_value:04X}",
"actual": f"0x{optimized_val:04X}"
})
verification["optimized_incorrect"].append(
{
"offset": offset,
"expected": f"0x{expected_value:04X}",
"actual": f"0x{optimized_val:04X}",
}
)
# Verificar que ambos métodos coincidan
if legacy_val is not None and optimized_val is not None:
if legacy_val != optimized_val:
verification["methods_mismatch"].append({
"offset": offset,
"legacy": f"0x{legacy_val:04X}",
"optimized": f"0x{optimized_val:04X}"
})
verification["methods_mismatch"].append(
{
"offset": offset,
"legacy": f"0x{legacy_val:04X}",
"optimized": f"0x{optimized_val:04X}",
}
)
verification["methods_match"] = False
# Calcular precisión
total_values = len(self.reference_values)
verification["legacy_accuracy"] = len(verification["legacy_correct"]) / total_values * 100
verification["optimized_accuracy"] = len(verification["optimized_correct"]) / total_values * 100
verification["legacy_accuracy"] = (
len(verification["legacy_correct"]) / total_values * 100
)
verification["optimized_accuracy"] = (
len(verification["optimized_correct"]) / total_values * 100
)
# Mostrar resultados
print(f"📊 Resultados de Verificación:")
print(f" 📖 Legacy - Correctos: {len(verification['legacy_correct'])}/{total_values} ({verification['legacy_accuracy']:.1f}%)")
print(f" 🚀 Optimizado - Correctos: {len(verification['optimized_correct'])}/{total_values} ({verification['optimized_accuracy']:.1f}%)")
print(f" 🔄 Métodos coinciden: {'✅ SÍ' if verification['methods_match'] else '❌ NO'}")
print(
f" 📖 Legacy - Correctos: {len(verification['legacy_correct'])}/{total_values} ({verification['legacy_accuracy']:.1f}%)"
)
print(
f" 🚀 Optimizado - Correctos: {len(verification['optimized_correct'])}/{total_values} ({verification['optimized_accuracy']:.1f}%)"
)
print(
f" 🔄 Métodos coinciden: {'✅ SÍ' if verification['methods_match'] else '❌ NO'}"
)
# Mostrar valores incorrectos si los hay
if verification["legacy_incorrect"]:
print(f" ❌ Legacy - Valores incorrectos:")
for item in verification["legacy_incorrect"]:
print(f" DBW {item['offset']}: {item['actual']} (esperado: {item['expected']})")
print(
f" DBW {item['offset']}: {item['actual']} (esperado: {item['expected']})"
)
if verification["optimized_incorrect"]:
print(f" ❌ Optimizado - Valores incorrectos:")
for item in verification["optimized_incorrect"]:
print(f" DBW {item['offset']}: {item['actual']} (esperado: {item['expected']})")
print(
f" DBW {item['offset']}: {item['actual']} (esperado: {item['expected']})"
)
if verification["methods_mismatch"]:
print(f" ⚠️ Diferencias entre métodos:")
for item in verification["methods_mismatch"]:
print(f" DBW {item['offset']}: Legacy={item['legacy']}, Optimizado={item['optimized']}")
print(
f" DBW {item['offset']}: Legacy={item['legacy']}, Optimizado={item['optimized']}"
)
return verification
def save_detailed_report(self) -> str:
"""Guardar reporte detallado."""
timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
filename = f"db1001_verification_report_{timestamp}.json"
# Completar información del test
self.test_results["test_info"]["end_time"] = datetime.now().isoformat()
# Agregar tabla de referencia al reporte
self.test_results["reference_table"] = {
f"DBW_{offset}": f"0x{value:04X}"
for offset, value in self.reference_values.items()
}
with open(filename, "w", encoding="utf-8") as f:
json.dump(self.test_results, f, indent=2, ensure_ascii=False)
print(f"📄 Reporte detallado guardado en: {filename}")
return filename
def run_verification(self) -> bool:
"""Ejecutar verificación completa de DB1001."""
print("🔍 === VERIFICACIÓN ESPECÍFICA DB1001 ===")
print("Validación de valores hexadecimales específicos")
print("=" * 50)
# Mostrar tabla de valores de referencia
print("📋 Tabla de valores de referencia:")
for offset, value in self.reference_values.items():
print(f" DB1001.DBW {offset:2d} = W#16#{value:04X}")
print()
try:
# 1. Conectar PLC
if not self.connect_plc():
return False
# 2. Leer con método legacy
print("🔄 FASE 1: Lectura Legacy")
print("-" * 25)
@@ -375,7 +414,7 @@ class DB1001ValueVerifier:
f"DBW_{k}": f"0x{v:04X}" if v is not None else None
for k, v in legacy_results.items()
}
# 3. Leer con método optimizado
print(f"\n🔄 FASE 2: Lectura Optimizada")
print("-" * 30)
@@ -384,46 +423,47 @@ class DB1001ValueVerifier:
f"DBW_{k}": f"0x{v:04X}" if v is not None else None
for k, v in optimized_results.items()
}
# 4. Verificar resultados
print(f"\n🔍 FASE 3: Verificación")
print("-" * 20)
verification = self.verify_results(legacy_results, optimized_results)
self.test_results["verification"] = verification
# 5. Guardar reporte
report_file = self.save_detailed_report()
# 6. Mostrar resumen final
self.print_final_summary(verification)
# 7. Cleanup
self.plc.disconnect()
print("✅ PLC desconectado")
# Evaluar éxito
success = (
verification["legacy_accuracy"] >= 95.0 and
verification["optimized_accuracy"] >= 95.0 and
verification["methods_match"]
verification["legacy_accuracy"] >= 95.0
and verification["optimized_accuracy"] >= 95.0
and verification["methods_match"]
)
return success
except Exception as e:
print(f"❌ Error durante verificación: {e}")
import traceback
traceback.print_exc()
return False
def print_final_summary(self, verification: Dict[str, Any]):
"""Imprimir resumen final."""
print(f"\n🎯 === RESUMEN FINAL DB1001 ===")
legacy_acc = verification["legacy_accuracy"]
optimized_acc = verification["optimized_accuracy"]
methods_match = verification["methods_match"]
# Estado general
if legacy_acc >= 99.0 and optimized_acc >= 99.0 and methods_match:
status_icon = "🟢"
@@ -434,18 +474,18 @@ class DB1001ValueVerifier:
else:
status_icon = "🔴"
status_text = "REQUIERE ATENCIÓN"
print(f"{status_icon} Estado: {status_text}")
print(f"📖 Precisión Legacy: {legacy_acc:.1f}%")
print(f"🚀 Precisión Optimizado: {optimized_acc:.1f}%")
print(f"🔄 Métodos Coinciden: {'✅ SÍ' if methods_match else '❌ NO'}")
print(f"📊 Total WORDs Verificadas: {len(self.reference_values)}")
if legacy_acc == 100.0 and optimized_acc == 100.0 and methods_match:
print(f"\n🎉 ¡VERIFICACIÓN PERFECTA!")
print(f"✅ Todos los valores DB1001 son exactamente correctos")
print(f"✅ Ambos métodos producen resultados idénticos")
print(f"\n" + "=" * 50)
@@ -453,17 +493,17 @@ def main():
"""Función principal."""
print("🔍 DB1001 SPECIFIC VALUE VERIFICATION TEST")
print("🚀 Iniciando verificación de valores específicos...")
verifier = DB1001ValueVerifier()
success = verifier.run_verification()
if success:
print("🎉 ¡Verificación DB1001 exitosa!")
print("✅ Valores exactos confirmados en ambos métodos")
else:
print("⚠️ Verificación completada con observaciones")
print("📝 Revisar reporte para detalles específicos")
return success

File diff suppressed because it is too large.

help.md (new file, 60 lines)

@@ -0,0 +1,60 @@
# Frontend User Guide — Dashboard Tabs
This short guide explains the purpose of each tab in the frontend `Dashboard.jsx` and what a typical user needs to know to use them.
What this guide covers
- Each Dashboard tab and its main actions
- Short explanations aimed at end users (operators / engineers)
- A few quick tips and common issues
Quick start
1. Start the backend (Flask): run the Python backend as your project requires (commonly `python main.py`).
2. Start the frontend: from the `frontend` folder run `npm run dev` (Vite development server).
3. Open the UI at the configured address (usually http://localhost:5173/app, or the root path served by the backend).
Tabs overview
1) Config (🔧)
- Purpose: configure PLC connection and UDP/streaming settings.
- What you do here: edit connection parameters (IP, rack/slot, DB mappings), UDP target and sampling interval, and save the PLC configuration.
- Main actions: Save configuration, Import/Export the PLC config (JSON), Reset the form.
- Tip: If the form shows "Loading...", wait for the backend to respond or check the backend logs.
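
For reference, the exported PLC configuration is JSON; its connection keys are `ip`, `rack`, and `slot` inside a `plc_config` object (rack 0 and slot 2 are the backend defaults). The sketch below uses placeholder values and omits the UDP/streaming settings, so treat an exported file as the authoritative template.

```python
import json

# Sketch of the connection portion of the exported PLC config.
# The IP below is a placeholder; rack 0 / slot 2 are the defaults.
plc_config_example = {
    "plc_config": {
        "ip": "192.168.0.10",  # placeholder address
        "rack": 0,
        "slot": 2,
    }
}

print(json.dumps(plc_config_example, indent=2))
```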
2) Datasets (📊)
- Purpose: define logical datasets and the PLC variables that belong to each dataset.
- What you do here:
- "Dataset Definitions" card: add, edit or remove datasets (name, id, metadata).
- "Dataset Variables" area: choose a dataset (combo selector) and configure its variables and streaming settings.
- Main actions: add dataset, save all datasets, import/export full dataset files or single dataset variables.
- Tip: Create datasets first, then select a dataset to add variables for streaming/recording.
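
Purely as an illustration, a dataset definition with a couple of variables might look like the sketch below; the field names are hypothetical, so export an existing dataset and use that file as the real template.

```python
# Hypothetical dataset definition (field names are illustrative assumptions).
# Export an existing dataset from the UI to see the actual schema.
dataset_example = {
    "id": "Fast",                  # dataset id, e.g. one of the active datasets
    "name": "Fast sampling group",
    "variables": [
        # Hypothetical variable entries mapping PLC addresses to names.
        {"name": "speed_setpoint", "db": 1001, "offset": 0, "type": "word"},
        {"name": "pressure_raw", "db": 1001, "offset": 2, "type": "word"},
    ],
    "sampling_interval": 0.1,      # seconds, illustrative value
}
```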
3) Plots (📈)
- Purpose: define real-time plots and map variables to visual traces.
- What you do here: configure plots (plot id, name, variables to show), manage sessions, and export plot definitions.
- Main actions: Add/modify plots, start a plotting session in the frontend or launch PlotJuggler for advanced visualization (if available).
- Tip: Use export/import to share plot templates with colleagues.
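
Likewise, an illustrative plot definition might look like this sketch; the field names are hypothetical, and exported plot files remain the authoritative format.

```python
# Hypothetical plot definition (illustrative only).
plot_example = {
    "id": "plot_fast_overview",
    "name": "Fast dataset overview",
    "variables": ["speed_setpoint", "pressure_raw"],  # traces to draw
    "time_window_s": 60,  # visible window, illustrative value
}
```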
4) Historical (📉)
- Purpose: explore recorded (CSV) historical data and play it back or export slices.
- What you do here: open historical sessions, choose time ranges or files, and replay or export subsets for analysis.
- Main actions: load CSV sessions, playback controls, export data subset.
- Tip: For large datasets, prefer server-side filtering or export to file and analyze offline.
5) CSV Files (📁)
- Purpose: browse stored CSV recordings created by the streamer.
- What you do here: view file list, download selected CSVs, and see recording metadata (timestamp, size).
- Main actions: Download, inspect file names and timestamps.
- Tip: Check disk space and retention policy in the backend if recordings stop appearing.
6) Logs (📋)
- Purpose: view recent application logs and events for troubleshooting.
- What you do here: read recent log lines, refresh the view and export logs to a file.
- Main actions: Refresh logs, Export logs.
- Tip: If the UI shows backend disconnected, open backend logs (`main.py` / `logs/`) to diagnose.
Common issues & troubleshooting
- Backend disconnected: StatusBar will show a connection error. Verify `main.py` is running and there are no firewall issues (a quick reachability check is sketched after this list).
- Import errors: Use the corresponding exported file as a template (array vs. object shape matters). Single-item import expects a single-element array structure.
- Empty lists: "Add First Item" buttons appear in empty array forms — use them to create the initial configuration.
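
As a quick reachability check, the backend exposes `/api/health`; the sketch below assumes the Flask server listens at `http://localhost:5000`, so adjust the base URL to your setup.

```python
import urllib.request

# The base URL is an assumption; change it to wherever the Flask backend listens.
BASE_URL = "http://localhost:5000"

try:
    with urllib.request.urlopen(f"{BASE_URL}/api/health", timeout=5) as resp:
        print("Backend reachable, HTTP", resp.status)
        print(resp.read().decode("utf-8"))
except Exception as exc:
    print("Backend not reachable:", exc)
```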
Possible future additions: small JSON examples for each tab (sample dataset, variables, plot definition), a Spanish translation of this guide, and screenshots.

main_cleanup_notes.py (deleted)

@@ -1,34 +0,0 @@
# ==============================
# LEGACY ROUTES TO REMOVE/COMMENT
# ==============================
# Legacy templates route (replaced by React SPA)
# @app.route("/legacy")
# def serve_legacy_index():
# """Serve legacy HTML template for backward compatibility."""
# try:
# return render_template("index.html")
# except Exception as e:
# return f"Error loading legacy template: {str(e)}", 500
# These routes can be removed after full migration to React:
# All routes serving /templates/index.html
# Static file serving for legacy JS/CSS
# Any jQuery-based endpoints
# Essential APIs to keep:
# - /api/status (SSE)
# - /api/health
# - /api/events
# - /api/config/* (schemas and CRUD)
# - /api/plc/connect, /api/plc/disconnect
# - /api/udp/streaming/*
# - /api/plots/* (for chart functionality)
# - /api/datasets/* (if still needed)
# - /api/variables/* (if still needed)
# React SPA routes to keep:
# - / (React app)
# - /app (React app)
# - /app/<path:path> (React app routing)
# - /assets/* (Vite build assets)
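
For context, here is a minimal sketch of the React SPA routes listed above; the build directory name is an assumption rather than the project's actual layout.

```python
from flask import Flask, send_from_directory

app = Flask(__name__)

# "frontend/dist" is an assumed Vite build output folder; adjust as needed.
REACT_BUILD_DIR = "frontend/dist"


@app.route("/")
@app.route("/app")
@app.route("/app/<path:path>")
def serve_react_app(path=None):
    # Always serve index.html; React Router resolves the client-side route.
    return send_from_directory(REACT_BUILD_DIR, "index.html")


@app.route("/assets/<path:filename>")
def serve_assets(filename):
    # Vite build assets (hashed JS/CSS bundles).
    return send_from_directory(REACT_BUILD_DIR + "/assets", filename)
```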

system_state.json

@@ -3,11 +3,12 @@
     "should_connect": true,
     "should_stream": false,
     "active_datasets": [
-      "Test",
       "DAR",
-      "Fast"
+      "Fast",
+      "Test"
     ]
   },
   "auto_recovery_enabled": true,
-  "last_update": "2025-08-22T11:59:00.787257"
+  "last_update": "2025-08-22T12:14:57.462145",
+  "plotjuggler_path": "C:\\Program Files\\PlotJuggler\\plotjuggler.exe"
 }
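
For reference, a minimal sketch of reading the new `plotjuggler_path` field; the location of `system_state.json` is an assumption, so adjust the path to the project layout.

```python
import json
from pathlib import Path

# Assumed file location; adjust to where the project stores its state.
state_file = Path("system_state.json")

state = json.loads(state_file.read_text(encoding="utf-8"))

# Top-level keys visible in the hunk above.
print("PlotJuggler executable:", state.get("plotjuggler_path"))
print("Last update:", state.get("last_update"))
print("Auto recovery enabled:", state.get("auto_recovery_enabled"))
```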

test_db1001_specific_values.py (deleted)

@@ -1,511 +0,0 @@
#!/usr/bin/env python3
"""
🔍 DB1001 SPECIFIC VALUE VERIFICATION TEST
==========================================
Test específico para verificar que los sistemas legacy y optimizado lean
exactamente los valores hexadecimales esperados en DB1001.
Valores de referencia del PLC:
DB1001.DBW 0 = W#16#0006
DB1001.DBW 2 = W#16#3EE6
DB1001.DBW 4 = W#16#6666
DB1001.DBW 6 = W#16#3F4C
DB1001.DBW 8 = W#16#CCCD
DB1001.DBW 10 = W#16#5121
DB1001.DBW 12 = W#16#0006
DB1001.DBW 14 = W#16#6800
DB1001.DBW 16 = W#16#0001
DB1001.DBW 18 = W#16#0100
DB1001.DBW 20 = W#16#0004
DB1001.DBW 24 = W#16#2095
DB1001.DBW 26 = W#16#0000
DB1001.DBW 28 = W#16#0002
DB1001.DBW 30 = W#16#8121
DB1001.DBW 32 = W#16#0001
DB1001.DBW 36 = W#16#0001
"""
import sys
from pathlib import Path
import time
import json
from datetime import datetime
from typing import Dict, Any, List, Tuple
# Add project root to path
project_root = Path(__file__).parent
sys.path.insert(0, str(project_root))
import snap7
from snap7.types import S7DataItem
from utils.json_manager import JSONManager
import struct
import ctypes
class DB1001ValueVerifier:
"""
Verificador específico para valores DB1001.
Valida que ambos métodos lean exactamente los valores hexadecimales esperados.
"""
def __init__(self):
self.json_manager = JSONManager()
self.plc = None
# Tabla de valores de referencia del PLC
self.reference_values = {
0: 0x0006, # DB1001.DBW 0
2: 0x3EE6, # DB1001.DBW 2
4: 0x6666, # DB1001.DBW 4
6: 0x3F4C, # DB1001.DBW 6
8: 0xCCCD, # DB1001.DBW 8
10: 0x5121, # DB1001.DBW 10
12: 0x0006, # DB1001.DBW 12
14: 0x6800, # DB1001.DBW 14
16: 0x0001, # DB1001.DBW 16
18: 0x0100, # DB1001.DBW 18
20: 0x0004, # DB1001.DBW 20
24: 0x2095, # DB1001.DBW 24
26: 0x0000, # DB1001.DBW 26
28: 0x0002, # DB1001.DBW 28
30: 0x8121, # DB1001.DBW 30
32: 0x0001, # DB1001.DBW 32
36: 0x0001, # DB1001.DBW 36
}
self.test_results = {
"test_info": {
"start_time": datetime.now().isoformat(),
"plc_ip": None,
"total_words": len(self.reference_values),
"db_number": 1001,
},
"legacy_results": {},
"optimized_results": {},
"verification": {
"legacy_correct": [],
"legacy_incorrect": [],
"optimized_correct": [],
"optimized_incorrect": [],
"legacy_accuracy": 0.0,
"optimized_accuracy": 0.0,
"methods_match": True,
},
}
def connect_plc(self) -> bool:
"""Conectar al PLC."""
try:
print("🔌 Conectando al PLC...")
# Cargar configuración
config_data = self.json_manager.read_json("plc")
plc_config = config_data.get("plc_config", {})
ip = plc_config.get("ip")
rack = plc_config.get("rack", 0)
slot = plc_config.get("slot", 2)
self.test_results["test_info"]["plc_ip"] = ip
# Conectar
self.plc = snap7.client.Client()
self.plc.connect(ip, rack, slot)
print(f"✅ Conectado a PLC: {ip}:{rack}.{slot}")
return True
except Exception as e:
print(f"❌ Error conectando PLC: {e}")
return False
def read_db1001_legacy(self) -> Dict[int, int]:
"""Leer DB1001 usando método legacy (lecturas individuales)."""
print("📖 Leyendo DB1001 con método LEGACY...")
results = {}
for offset, expected_value in self.reference_values.items():
try:
# Leer WORD (2 bytes) desde DB1001
data = self.plc.db_read(1001, offset, 2)
# Convertir a WORD (unsigned 16-bit big-endian)
word_value = struct.unpack(">H", data)[0]
results[offset] = word_value
print(
f" DBW {offset:2d}: 0x{word_value:04X} (esperado: 0x{expected_value:04X})"
)
# Pequeña pausa entre lecturas
time.sleep(0.001)
except Exception as e:
print(f" ❌ Error leyendo DBW {offset}: {e}")
results[offset] = None
successful = len([v for v in results.values() if v is not None])
print(
f"{successful}/{len(self.reference_values)} WORDs leídas exitosamente"
)
return results
def read_db1001_optimized(self) -> Dict[int, int]:
"""Leer DB1001 usando método optimizado (read_multi_vars)."""
print("🚀 Leyendo DB1001 con método OPTIMIZADO...")
results = {}
CHUNK_SIZE = 19 # Límite seguro para S7
try:
# Preparar S7DataItems para todas las WORDs
all_items = []
all_offsets = []
for offset in self.reference_values.keys():
try:
item = S7DataItem()
item.Area = 132 # DB area
item.WordLen = 4 # WORD type
item.DBNumber = 1001 # DB1001
item.Start = offset # Byte offset
item.Amount = 1 # 1 WORD
# Allocar buffer para WORD (2 bytes)
buffer = (ctypes.c_ubyte * 2)()
item.pData = ctypes.cast(buffer, ctypes.POINTER(ctypes.c_ubyte))
all_items.append(item)
all_offsets.append(offset)
except Exception as e:
print(f" ❌ Error preparando DBW {offset}: {e}")
results[offset] = None
if not all_items:
return results
print(f" 📊 Procesando {len(all_items)} WORDs en chunks de {CHUNK_SIZE}")
# Procesar en chunks
for chunk_start in range(0, len(all_items), CHUNK_SIZE):
chunk_end = min(chunk_start + CHUNK_SIZE, len(all_items))
chunk_items = all_items[chunk_start:chunk_end]
chunk_offsets = all_offsets[chunk_start:chunk_end]
print(
f" 🔄 Procesando chunk {chunk_start//CHUNK_SIZE + 1}: DBW {chunk_offsets[0]}-{chunk_offsets[-1]}"
)
# Convertir chunk a ctypes array
items_array = (S7DataItem * len(chunk_items))(*chunk_items)
# Llamar read_multi_vars para este chunk
result = self.plc.read_multi_vars(items_array)
if isinstance(result, tuple) and len(result) == 2:
ret_code, returned_items = result
if ret_code == 0:
for i, item in enumerate(returned_items):
offset = chunk_offsets[i]
expected_value = self.reference_values[offset]
if item.Result == 0:
try:
# Extraer WORD usando snap7.util
word_value = snap7.util.get_word(item.pData, 0)
results[offset] = word_value
print(
f" DBW {offset:2d}: 0x{word_value:04X} (esperado: 0x{expected_value:04X})"
)
except Exception as e:
print(f" ❌ Error extrayendo DBW {offset}: {e}")
results[offset] = None
else:
print(
f" ❌ Error leyendo DBW {offset}: código {item.Result}"
)
results[offset] = None
else:
print(f" ❌ Chunk falló: código {ret_code}")
for offset in chunk_offsets:
results[offset] = None
else:
print(
f" ❌ Formato de resultado inesperado para chunk: {type(result)}"
)
for offset in chunk_offsets:
results[offset] = None
# Pequeña pausa entre chunks
time.sleep(0.01)
except Exception as e:
print(f" ❌ Error en método optimizado: {e}")
import traceback
traceback.print_exc()
for offset in self.reference_values.keys():
if offset not in results:
results[offset] = None
successful = len([v for v in results.values() if v is not None])
print(
f"{successful}/{len(self.reference_values)} WORDs leídas exitosamente"
)
return results
def verify_results(
self, legacy_results: Dict[int, int], optimized_results: Dict[int, int]
) -> Dict[str, Any]:
"""Verificar resultados contra valores de referencia y entre métodos."""
print("🔍 Verificando resultados contra valores de referencia...")
verification = {
"legacy_correct": [],
"legacy_incorrect": [],
"optimized_correct": [],
"optimized_incorrect": [],
"methods_mismatch": [],
"legacy_accuracy": 0.0,
"optimized_accuracy": 0.0,
"methods_match": True,
}
for offset, expected_value in self.reference_values.items():
legacy_val = legacy_results.get(offset)
optimized_val = optimized_results.get(offset)
# Verificar método legacy
if legacy_val is not None:
if legacy_val == expected_value:
verification["legacy_correct"].append(offset)
else:
verification["legacy_incorrect"].append(
{
"offset": offset,
"expected": f"0x{expected_value:04X}",
"actual": f"0x{legacy_val:04X}",
}
)
# Verificar método optimizado
if optimized_val is not None:
if optimized_val == expected_value:
verification["optimized_correct"].append(offset)
else:
verification["optimized_incorrect"].append(
{
"offset": offset,
"expected": f"0x{expected_value:04X}",
"actual": f"0x{optimized_val:04X}",
}
)
# Verificar que ambos métodos coincidan
if legacy_val is not None and optimized_val is not None:
if legacy_val != optimized_val:
verification["methods_mismatch"].append(
{
"offset": offset,
"legacy": f"0x{legacy_val:04X}",
"optimized": f"0x{optimized_val:04X}",
}
)
verification["methods_match"] = False
# Calcular precisión
total_values = len(self.reference_values)
verification["legacy_accuracy"] = (
len(verification["legacy_correct"]) / total_values * 100
)
verification["optimized_accuracy"] = (
len(verification["optimized_correct"]) / total_values * 100
)
# Mostrar resultados
print(f"📊 Resultados de Verificación:")
print(
f" 📖 Legacy - Correctos: {len(verification['legacy_correct'])}/{total_values} ({verification['legacy_accuracy']:.1f}%)"
)
print(
f" 🚀 Optimizado - Correctos: {len(verification['optimized_correct'])}/{total_values} ({verification['optimized_accuracy']:.1f}%)"
)
print(
f" 🔄 Métodos coinciden: {'✅ SÍ' if verification['methods_match'] else '❌ NO'}"
)
# Mostrar valores incorrectos si los hay
if verification["legacy_incorrect"]:
print(f" ❌ Legacy - Valores incorrectos:")
for item in verification["legacy_incorrect"]:
print(
f" DBW {item['offset']}: {item['actual']} (esperado: {item['expected']})"
)
if verification["optimized_incorrect"]:
print(f" ❌ Optimizado - Valores incorrectos:")
for item in verification["optimized_incorrect"]:
print(
f" DBW {item['offset']}: {item['actual']} (esperado: {item['expected']})"
)
if verification["methods_mismatch"]:
print(f" ⚠️ Diferencias entre métodos:")
for item in verification["methods_mismatch"]:
print(
f" DBW {item['offset']}: Legacy={item['legacy']}, Optimizado={item['optimized']}"
)
return verification
def save_detailed_report(self) -> str:
"""Guardar reporte detallado."""
timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
filename = f"db1001_verification_report_{timestamp}.json"
# Completar información del test
self.test_results["test_info"]["end_time"] = datetime.now().isoformat()
# Agregar tabla de referencia al reporte
self.test_results["reference_table"] = {
f"DBW_{offset}": f"0x{value:04X}"
for offset, value in self.reference_values.items()
}
with open(filename, "w", encoding="utf-8") as f:
json.dump(self.test_results, f, indent=2, ensure_ascii=False)
print(f"📄 Reporte detallado guardado en: {filename}")
return filename
def run_verification(self) -> bool:
"""Ejecutar verificación completa de DB1001."""
print("🔍 === VERIFICACIÓN ESPECÍFICA DB1001 ===")
print("Validación de valores hexadecimales específicos")
print("=" * 50)
# Mostrar tabla de valores de referencia
print("📋 Tabla de valores de referencia:")
for offset, value in self.reference_values.items():
print(f" DB1001.DBW {offset:2d} = W#16#{value:04X}")
print()
try:
# 1. Conectar PLC
if not self.connect_plc():
return False
# 2. Leer con método legacy
print("🔄 FASE 1: Lectura Legacy")
print("-" * 25)
legacy_results = self.read_db1001_legacy()
self.test_results["legacy_results"] = {
f"DBW_{k}": f"0x{v:04X}" if v is not None else None
for k, v in legacy_results.items()
}
# 3. Leer con método optimizado
print(f"\n🔄 FASE 2: Lectura Optimizada")
print("-" * 30)
optimized_results = self.read_db1001_optimized()
self.test_results["optimized_results"] = {
f"DBW_{k}": f"0x{v:04X}" if v is not None else None
for k, v in optimized_results.items()
}
# 4. Verificar resultados
print(f"\n🔍 FASE 3: Verificación")
print("-" * 20)
verification = self.verify_results(legacy_results, optimized_results)
self.test_results["verification"] = verification
# 5. Guardar reporte
report_file = self.save_detailed_report()
# 6. Mostrar resumen final
self.print_final_summary(verification)
# 7. Cleanup
self.plc.disconnect()
print("✅ PLC desconectado")
# Evaluar éxito
success = (
verification["legacy_accuracy"] >= 95.0
and verification["optimized_accuracy"] >= 95.0
and verification["methods_match"]
)
return success
except Exception as e:
print(f"❌ Error durante verificación: {e}")
import traceback
traceback.print_exc()
return False
def print_final_summary(self, verification: Dict[str, Any]):
"""Imprimir resumen final."""
print(f"\n🎯 === RESUMEN FINAL DB1001 ===")
legacy_acc = verification["legacy_accuracy"]
optimized_acc = verification["optimized_accuracy"]
methods_match = verification["methods_match"]
# Estado general
if legacy_acc >= 99.0 and optimized_acc >= 99.0 and methods_match:
status_icon = "🟢"
status_text = "PERFECTO"
elif legacy_acc >= 95.0 and optimized_acc >= 95.0 and methods_match:
status_icon = "🟡"
status_text = "BUENO"
else:
status_icon = "🔴"
status_text = "REQUIERE ATENCIÓN"
print(f"{status_icon} Estado: {status_text}")
print(f"📖 Precisión Legacy: {legacy_acc:.1f}%")
print(f"🚀 Precisión Optimizado: {optimized_acc:.1f}%")
print(f"🔄 Métodos Coinciden: {'✅ SÍ' if methods_match else '❌ NO'}")
print(f"📊 Total WORDs Verificadas: {len(self.reference_values)}")
if legacy_acc == 100.0 and optimized_acc == 100.0 and methods_match:
print(f"\n🎉 ¡VERIFICACIÓN PERFECTA!")
print(f"✅ Todos los valores DB1001 son exactamente correctos")
print(f"✅ Ambos métodos producen resultados idénticos")
print(f"\n" + "=" * 50)
def main():
"""Función principal."""
print("🔍 DB1001 SPECIFIC VALUE VERIFICATION TEST")
print("🚀 Iniciando verificación de valores específicos...")
verifier = DB1001ValueVerifier()
success = verifier.run_verification()
if success:
print("🎉 ¡Verificación DB1001 exitosa!")
print("✅ Valores exactos confirmados en ambos métodos")
else:
print("⚠️ Verificación completada con observaciones")
print("📝 Revisar reporte para detalles específicos")
return success
if __name__ == "__main__":
main()