Compare commits
12 Commits: 88a6b805be ... e77a5c3932

Commits (SHA1):
e77a5c3932
551ec8b4a5
aba83f843a
250748446f
366cb638e4
cc4888fa18
9dae98bfdc
9bbf299826
aa75a46d84
6302acfc0f
082f8b1790
ee6918445e
|
@@ -38,7 +38,7 @@ project_root = Path(__file__).parent
sys.path.insert(0, str(project_root))

import snap7
-from snap7.types import S7DataItem
+from snap7.type import S7DataItem
from utils.json_manager import JSONManager
import struct
import ctypes
@@ -19,18 +19,19 @@ project_root = Path(__file__).parent
sys.path.insert(0, str(project_root))

import snap7
-from snap7.types import S7DataItem
+from snap7.type import S7DataItem
from utils.json_manager import JSONManager
from utils.optimized_batch_reader import OptimizedBatchReader
import struct
import ctypes


class SimpleDataIntegrityVerifier:
    """
    Simplified data-integrity verification system.
    Directly compares the optimized vs. individual read methods.
    """

    def __init__(self):
        self.json_manager = JSONManager()
        self.plc = None
@@ -38,46 +39,46 @@ class SimpleDataIntegrityVerifier:
            "test_info": {
                "start_time": datetime.now().isoformat(),
                "plc_ip": None,
-                "total_variables": 0
+                "total_variables": 0,
            },
-            "results": {}
+            "results": {},
        }

    def connect_plc(self) -> bool:
        """Connect to the PLC."""
        try:
            print("🔌 Connecting to PLC...")

            # Load configuration
            config_data = self.json_manager.read_json("plc")
            plc_config = config_data.get("plc_config", {})

            ip = plc_config.get("ip")
            rack = plc_config.get("rack", 0)
            slot = plc_config.get("slot", 2)

            self.test_results["test_info"]["plc_ip"] = ip

            # Connect
            self.plc = snap7.client.Client()
            self.plc.connect(ip, rack, slot)

            print(f"✅ Connected to PLC: {ip}:{rack}.{slot}")
            return True

        except Exception as e:
            print(f"❌ Error connecting to PLC: {e}")
            return False

    def generate_test_variables(self) -> Dict[str, Dict[str, Any]]:
        """Generate a comprehensive set of test variables."""
        variables = {}

        print("🔧 Generating test variables...")

        # 1. DB1011 variables - different types
        print(" 📊 DB1011 variables...")

        # REALs
        for i in range(10):
            offset = i * 4
@@ -86,9 +87,9 @@ class SimpleDataIntegrityVerifier:
                "area": "db",
                "db": 1011,
                "offset": offset,
-                "type": "real"
+                "type": "real",
            }

        # INTs
        for i in range(5):
            offset = 100 + (i * 2)
@@ -97,9 +98,9 @@ class SimpleDataIntegrityVerifier:
                "area": "db",
                "db": 1011,
                "offset": offset,
-                "type": "int"
+                "type": "int",
            }

        # BOOLs
        for byte_offset in range(50, 53):
            for bit in range(0, 4):
@@ -109,45 +110,33 @@ class SimpleDataIntegrityVerifier:
                    "db": 1011,
                    "offset": byte_offset,
                    "type": "bool",
-                    "bit": bit
+                    "bit": bit,
                }

        # 2. Memory variables
        print(" 🧠 Memory variables...")

        # Memory REALs
        for i in range(5):
            offset = 100 + (i * 4)
            var_name = f"M{offset}_real"
-            variables[var_name] = {
-                "area": "m",
-                "offset": offset,
-                "type": "real"
-            }
+            variables[var_name] = {"area": "m", "offset": offset, "type": "real"}

        # Memory INTs
        for i in range(3):
            offset = 200 + (i * 2)
            var_name = f"M{offset}_int"
-            variables[var_name] = {
-                "area": "m",
-                "offset": offset,
-                "type": "int"
-            }
+            variables[var_name] = {"area": "m", "offset": offset, "type": "int"}

        # 3. Input variables
        print(" 📥 Input variables...")

        # Input Words
        for i in range(3):
            offset = 300 + (i * 2)
            var_name = f"PEW{offset}"
-            variables[var_name] = {
-                "area": "e",
-                "offset": offset,
-                "type": "int"
-            }
+            variables[var_name] = {"area": "e", "offset": offset, "type": "int"}

        # Input Bits
        for byte_offset in range(0, 2):
            for bit in range(0, 4):
@@ -156,26 +145,28 @@ class SimpleDataIntegrityVerifier:
                    "area": "e",
                    "offset": byte_offset,
                    "type": "bool",
-                    "bit": bit
+                    "bit": bit,
                }

        self.test_results["test_info"]["total_variables"] = len(variables)
        print(f"✅ Generated {len(variables)} test variables")

        return variables

-    def read_with_individual_method(self, variables: Dict[str, Dict[str, Any]]) -> Dict[str, Any]:
+    def read_with_individual_method(
+        self, variables: Dict[str, Dict[str, Any]]
+    ) -> Dict[str, Any]:
        """Read variables using the individual (legacy) method."""
        print("📖 Reading with INDIVIDUAL method...")

        results = {}

        for var_name, config in variables.items():
            try:
                area = config.get("area", "db").lower()
                offset = config.get("offset", 0)
                var_type = config.get("type", "real").lower()

                if area == "db":
                    db = config.get("db", 0)
                    if var_type == "real":
@@ -218,37 +209,39 @@ class SimpleDataIntegrityVerifier:
                        value = None
                else:
                    value = None

                results[var_name] = value

                # Small pause between individual reads
                time.sleep(0.001)

            except Exception as e:
                print(f" ❌ Error reading {var_name}: {e}")
                results[var_name] = None

        successful = len([v for v in results.values() if v is not None])
        print(f" ✅ {successful}/{len(variables)} variables read successfully")

        return results

-    def read_with_optimized_method(self, variables: Dict[str, Dict[str, Any]]) -> Dict[str, Any]:
+    def read_with_optimized_method(
+        self, variables: Dict[str, Dict[str, Any]]
+    ) -> Dict[str, Any]:
        """Read variables using the optimized method (read_multi_vars) with chunking."""
        print("🚀 Reading with OPTIMIZED method...")

        results = {}
        CHUNK_SIZE = 19  # Safe limit for S7 (we found the limit is 20)

        try:
            # Prepare S7DataItems
            all_items = []
            all_var_map = []

            for var_name, config in variables.items():
                try:
                    item = S7DataItem()

                    # Configure area
                    area = config.get("area", "db").lower()
                    if area == "db":
@@ -259,7 +252,7 @@ class SimpleDataIntegrityVerifier:
                        item.Area = 129
                    else:
                        continue

                    # Configure type
                    var_type = config.get("type", "real").lower()
                    if var_type == "real":
@@ -273,56 +266,60 @@ class SimpleDataIntegrityVerifier:
                        buffer_size = 1
                    else:
                        continue

                    item.DBNumber = config.get("db", 0)
                    item.Start = config.get("offset", 0)
                    item.Amount = 1

                    # For BOOLs, adjust the offset
                    if var_type == "bool" and "bit" in config:
                        bit = config["bit"]
                        item.Start = (item.Start * 8) + bit

                    # Allocate buffer
                    buffer = (ctypes.c_ubyte * buffer_size)()
                    item.pData = ctypes.cast(buffer, ctypes.POINTER(ctypes.c_ubyte))

                    all_items.append(item)
                    all_var_map.append({"name": var_name, "config": config})

                except Exception as e:
                    print(f" ❌ Error preparing {var_name}: {e}")
                    results[var_name] = None

            if not all_items:
                return results

-            print(f" 📊 Processing {len(all_items)} variables in chunks of {CHUNK_SIZE}")
+            print(
+                f" 📊 Processing {len(all_items)} variables in chunks of {CHUNK_SIZE}"
+            )

            # Process in chunks
            for chunk_start in range(0, len(all_items), CHUNK_SIZE):
                chunk_end = min(chunk_start + CHUNK_SIZE, len(all_items))

                chunk_items = all_items[chunk_start:chunk_end]
                chunk_var_map = all_var_map[chunk_start:chunk_end]

-                print(f" 🔄 Processing chunk {chunk_start//CHUNK_SIZE + 1}: variables {chunk_start+1}-{chunk_end}")
+                print(
+                    f" 🔄 Processing chunk {chunk_start//CHUNK_SIZE + 1}: variables {chunk_start+1}-{chunk_end}"
+                )

                # Convert chunk to a ctypes array
                items_array = (S7DataItem * len(chunk_items))(*chunk_items)

                # Call read_multi_vars for this chunk
                result = self.plc.read_multi_vars(items_array)

                if isinstance(result, tuple) and len(result) == 2:
                    ret_code, returned_items = result

                    if ret_code == 0:
                        for i, item in enumerate(returned_items):
                            var_name = chunk_var_map[i]["name"]
                            config = chunk_var_map[i]["config"]
                            var_type = config.get("type", "real").lower()

                            if item.Result == 0:
                                try:
                                    if var_type == "real":
@@ -333,58 +330,67 @@ class SimpleDataIntegrityVerifier:
                                        value = snap7.util.get_bool(item.pData, 0, 0)
                                    else:
                                        value = None

                                    results[var_name] = value
                                except Exception as e:
                                    print(f" ❌ Error extracting {var_name}: {e}")
                                    results[var_name] = None
                            else:
-                                print(f" ❌ Error reading {var_name}: code {item.Result}")
+                                print(
+                                    f" ❌ Error reading {var_name}: code {item.Result}"
+                                )
                                results[var_name] = None
                    else:
                        print(f" ❌ Chunk failed: code {ret_code}")
                        for var_info in chunk_var_map:
                            results[var_info["name"]] = None
                else:
-                    print(f" ❌ Unexpected result format for chunk: {type(result)}")
+                    print(
+                        f" ❌ Unexpected result format for chunk: {type(result)}"
+                    )
                    for var_info in chunk_var_map:
                        results[var_info["name"]] = None

                # Small pause between chunks
                time.sleep(0.01)

        except Exception as e:
            print(f" ❌ Error in optimized method: {e}")
            import traceback

            traceback.print_exc()
            for var_name in variables.keys():
                if var_name not in results:
                    results[var_name] = None

        successful = len([v for v in results.values() if v is not None])
        print(f" ✅ {successful}/{len(variables)} variables read successfully")

        return results

-    def compare_results(self, individual_results: Dict[str, Any],
-                        optimized_results: Dict[str, Any], pass_name: str) -> Dict[str, Any]:
+    def compare_results(
+        self,
+        individual_results: Dict[str, Any],
+        optimized_results: Dict[str, Any],
+        pass_name: str,
+    ) -> Dict[str, Any]:
        """Compare results between methods."""
        print(f"🔍 Comparing results - {pass_name}...")

        comparison = {
            "identical": [],
            "different": [],
            "individual_errors": [],
            "optimized_errors": [],
-            "both_errors": []
+            "both_errors": [],
        }

        total_vars = len(individual_results)

        for var_name in individual_results.keys():
            individual_val = individual_results.get(var_name)
            optimized_val = optimized_results.get(var_name)

            if individual_val is None and optimized_val is None:
                comparison["both_errors"].append(var_name)
            elif individual_val is None:
@@ -396,126 +402,136 @@ class SimpleDataIntegrityVerifier:
            if self._values_equal(individual_val, optimized_val):
                comparison["identical"].append(var_name)
            else:
-                comparison["different"].append({
-                    "variable": var_name,
-                    "individual": individual_val,
-                    "optimized": optimized_val
-                })
+                comparison["different"].append(
+                    {
+                        "variable": var_name,
+                        "individual": individual_val,
+                        "optimized": optimized_val,
+                    }
+                )

        # Statistics
        identical_count = len(comparison["identical"])
        different_count = len(comparison["different"])

        print(f" 📊 Results {pass_name}:")
-        print(f" ✅ Identical: {identical_count}/{total_vars} ({identical_count/total_vars*100:.1f}%)")
+        print(
+            f" ✅ Identical: {identical_count}/{total_vars} ({identical_count/total_vars*100:.1f}%)"
+        )
        print(f" ❌ Different: {different_count}/{total_vars}")
        print(f" ⚠️ Individual-method errors: {len(comparison['individual_errors'])}")
        print(f" ⚠️ Optimized-method errors: {len(comparison['optimized_errors'])}")
        print(f" ⚠️ Errors in both: {len(comparison['both_errors'])}")

        return comparison

    def _values_equal(self, val1: Any, val2: Any, tolerance: float = 1e-6) -> bool:
        """Compare values with a tolerance for floats."""
        if type(val1) != type(val2):
            return False

        if isinstance(val1, float):
            return abs(val1 - val2) <= tolerance
        else:
            return val1 == val2

    def run_verification(self) -> bool:
        """Run the integrity verification."""
        print("🔍 === DATA INTEGRITY VERIFICATION ===")
        print("Double-pass system for consistency validation")
        print("=" * 60)

        try:
            # 1. Connect to the PLC
            if not self.connect_plc():
                return False

            # 2. Generate variables
            variables = self.generate_test_variables()

            # 3. PASS 1
            print(f"\n🔄 PASS 1")
            print("-" * 20)

            time.sleep(0.5)
            individual_1 = self.read_with_individual_method(variables)

            time.sleep(0.2)
            optimized_1 = self.read_with_optimized_method(variables)

            comparison_1 = self.compare_results(individual_1, optimized_1, "Pass 1")

            # 4. PASS 2
            print(f"\n🔄 PASS 2")
            print("-" * 20)

            time.sleep(0.5)
            individual_2 = self.read_with_individual_method(variables)

            time.sleep(0.2)
            optimized_2 = self.read_with_optimized_method(variables)

            comparison_2 = self.compare_results(individual_2, optimized_2, "Pass 2")

            # 5. Final analysis
            print(f"\n🔬 FINAL ANALYSIS")
            print("-" * 20)

            identical_1 = set(comparison_1["identical"])
            identical_2 = set(comparison_2["identical"])
            consistently_identical = identical_1.intersection(identical_2)

            total_vars = len(variables)
            success_rate = len(consistently_identical) / total_vars * 100

            print(f"📊 Final results:")
-            print(f" ✅ Consistently identical variables: {len(consistently_identical)}/{total_vars} ({success_rate:.1f}%)")
-            print(f" 🔄 Variables that changed between passes: {len(identical_1.symmetric_difference(identical_2))}")
+            print(
+                f" ✅ Consistently identical variables: {len(consistently_identical)}/{total_vars} ({success_rate:.1f}%)"
+            )
+            print(
+                f" 🔄 Variables that changed between passes: {len(identical_1.symmetric_difference(identical_2))}"
+            )

            # Show differing variables, if any
            if comparison_1["different"] or comparison_2["different"]:
                print(f"\n❌ Variables with detected differences:")
                for diff in comparison_1["different"]:
-                    print(f" {diff['variable']}: Individual={diff['individual']}, Optimized={diff['optimized']}")
+                    print(
+                        f" {diff['variable']}: Individual={diff['individual']}, Optimized={diff['optimized']}"
+                    )

            # 6. Save the report
            timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
            filename = f"data_integrity_simple_{timestamp}.json"

            report = {
                "test_info": self.test_results["test_info"],
                "pass_1": {
                    "individual": individual_1,
                    "optimized": optimized_1,
-                    "comparison": comparison_1
+                    "comparison": comparison_1,
                },
                "pass_2": {
                    "individual": individual_2,
                    "optimized": optimized_2,
-                    "comparison": comparison_2
+                    "comparison": comparison_2,
                },
                "final_analysis": {
                    "success_rate": success_rate,
                    "consistently_identical": list(consistently_identical),
-                    "total_variables": total_vars
-                }
+                    "total_variables": total_vars,
+                },
            }

-            with open(filename, 'w', encoding='utf-8') as f:
+            with open(filename, "w", encoding="utf-8") as f:
                json.dump(report, f, indent=2, ensure_ascii=False)

            print(f"📄 Report saved: {filename}")

            # 7. Cleanup
            self.plc.disconnect()
            print("✅ PLC disconnected")

            # Final verdict
            if success_rate >= 95.0:
                print(f"\n🎉 VERIFICATION SUCCESSFUL! ({success_rate:.1f}%)")
@@ -523,10 +539,11 @@ class SimpleDataIntegrityVerifier:
            else:
                print(f"\n⚠️ VERIFICATION WITH OBSERVATIONS ({success_rate:.1f}%)")
                return False

        except Exception as e:
            print(f"❌ Error during verification: {e}")
            import traceback

            traceback.print_exc()
            return False
@@ -535,15 +552,15 @@ def main():
    """Main entry point."""
    print("🔍 SIMPLE DATA INTEGRITY VERIFICATION")
    print("🚀 Starting verification...")

    verifier = SimpleDataIntegrityVerifier()
    success = verifier.run_verification()

    if success:
        print("✅ Optimized system maintains data integrity!")
    else:
        print("⚠️ Check the report for details")

    return success
@@ -0,0 +1,82 @@
# Gap Fix Solution - Historical to Real-time Data Continuity

## Problem Description

There was a gap of 1 to n points between historical data and real-time streaming data in the Plot Definitions dashboard. The gap was typically 2+ seconds and grew larger with more data points in the chart.
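As a concrete illustration (the numbers here are hypothetical, since the actual delay varies per session): if the last historical point is stamped at 10:00:00.000 but the first streaming sample the chart accepts arrives at 10:00:02.500, the stretch in between renders as empty space - at a 0.5 s sampling interval, roughly four or five missing points.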
## Root Cause Analysis

1. **Historical Data Loading**: When loading historical data, the backend uses `end_time = datetime.now()` to calculate the time window for CSV data retrieval.

2. **LastPushedX Setting**: The frontend sets `lastPushedX` to the timestamp of the last historical data point loaded.

3. **Streaming Delay**: There is a delay between when the historical window is calculated and when real-time streaming begins:
   - Backend processing time
   - HTTP transfer time
   - Frontend processing time
   - Streaming initialization delay

4. **Data Filtering**: New streaming data points are only added if `timestamp > lastPushedX`, so data points falling in the delay interval are discarded (a minimal sketch of this guard follows below).
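A minimal sketch of the guard described in point 4, assuming the behavior outlined above; the names (`acceptStreamingPoint`, `lastHistoricalX`) are illustrative, not the actual `ChartjsPlot.jsx` identifiers:

```javascript
// Hypothetical reconstruction of the ingestion guard, for illustration only.
const COMPENSATION_DELAY_MS = 3000;

function acceptStreamingPoint(point, lastHistoricalX) {
  // Original behaviour: anything stamped at or before the recorded
  // watermark is dropped, which is how delay-interval points get lost.
  const uncompensated = point.x > lastHistoricalX;
  // Fixed behaviour: compare against a watermark moved 3 s earlier,
  // so points from the delay interval are still ingested.
  const compensated = point.x > lastHistoricalX - COMPENSATION_DELAY_MS;
  return { uncompensated, compensated };
}

// Example: a point stamped 1 s before the watermark is dropped by the
// original check but accepted once the compensation is applied.
console.log(acceptStreamingPoint({ x: 9000, y: 1 }, 10000));
// -> { uncompensated: false, compensated: true }
```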
## Solution Implemented

### Frontend Fix (ChartjsPlot.jsx)

**File**: `frontend/src/components/ChartjsPlot.jsx`

**Location**: Lines 656-675 (approximate)

**Change**: Modified the historical data loading logic to compensate for frontend delay:

```javascript
// Before:
sessionDataRef.current.lastPushedXByDataset.set(index, lastPoint.x);

// After:
const compensatedTimestamp = lastPoint.x - 3000; // 3 seconds compensation
sessionDataRef.current.lastPushedXByDataset.set(index, compensatedTimestamp);
```

**Compensation Logic**:
- Subtracts 3000 ms (3 seconds) from the last historical point timestamp
- This allows streaming data in the delay interval to be captured
- Accounts for typical delays in the processing pipeline
### Enhanced Logging

Added detailed logging to help diagnose gap issues:

1. **Historical Data Continuity**: Logs the last historical point and the compensated timestamp
2. **Streaming Filtering**: Logs when points are filtered out due to timestamp constraints
3. **Streaming Ingestion**: Logs successful data ingestion with timestamp ranges

## Benefits

1. **Seamless Continuity**: Eliminates the visual gap between historical and real-time data
2. **Robust Handling**: Works regardless of varying delay times (up to 3 seconds)
3. **Debug Visibility**: Enhanced logging helps identify and troubleshoot future issues
4. **Backward Compatible**: Doesn't affect existing functionality

## Testing

To verify the fix:

1. Start a plot session with historical data loading
2. Begin real-time streaming
3. Check the browser console for continuity logs
4. Verify there is no visual gap in the chart between historical and streaming data

## Configuration

The compensation delay (currently 3000 ms) can be adjusted if needed:

```javascript
const COMPENSATION_DELAY_MS = 3000; // Adjust as needed
const compensatedTimestamp = lastPoint.x - COMPENSATION_DELAY_MS;
```

## Notes

- The 3-second compensation is conservative, to handle most delay scenarios
- The solution maintains data accuracy while improving visual continuity
- Future optimization could make the delay dynamic, based on actual measurements (a possible direction is sketched below)
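One possible shape for that dynamic compensation, sketched under the assumption that the frontend can timestamp both the end of the historical load and the first streaming sample; none of these names exist in the current code base:

```javascript
// Hypothetical sketch: measure the real historical-to-streaming delay
// instead of assuming a fixed 3000 ms.
let historicalLoadedAt = null;

function onHistoricalLoaded(lastPoint) {
  historicalLoadedAt = Date.now();
  return lastPoint.x; // provisional watermark, refined below
}

function compensateOnFirstStreamingPoint(watermark) {
  // Observed delay between finishing the historical load and the first
  // streaming sample, clamped to a sane 0-5 s range before applying it.
  const measured = Date.now() - historicalLoadedAt;
  const compensation = Math.min(Math.max(measured, 0), 5000);
  return watermark - compensation;
}
```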
application_events.json (37091): file diff suppressed because it is too large.
@@ -0,0 +1,9 @@
{
  "timestamp": "2025-08-22T15:14:03.883875",
  "status": "stopped",
  "restart_count": 0,
  "last_restart": 0,
  "backend_pid": 33676,
  "manager_pid": 25004,
  "details": {}
}
@@ -0,0 +1,407 @@
"""
Backend Manager - PLC S7-315 Streamer Watchdog Service

This script monitors the backend health and automatically restarts it when needed.
It runs as a separate process and ensures the backend is always available.

Key features:
- Health monitoring every 30 seconds
- Automatic restart of failed backends
- Support for both development (main.py) and production (exe) environments
- Robust process management and cleanup
- Logging and status reporting
"""

import os
import sys
import time
import json
import psutil
import requests
import subprocess
import threading
import logging
from datetime import datetime
from typing import Optional, Dict, Any


class BackendManager:
    """Manages backend lifecycle and health monitoring"""

    def __init__(
        self,
        check_interval: int = 30,
        health_timeout: float = 5.0,
        restart_delay: int = 10,
        max_restart_attempts: int = 3,
        restart_cooldown: int = 300,
    ):
        """
        Initialize the backend manager

        Args:
            check_interval: Health check interval in seconds (default: 30)
            health_timeout: HTTP request timeout in seconds (default: 5.0)
            restart_delay: Delay before restart attempt in seconds (default: 10)
            max_restart_attempts: Maximum consecutive restart attempts (default: 3)
            restart_cooldown: Cooldown period after max attempts in seconds (default: 300)
        """
        self.check_interval = check_interval
        self.health_timeout = health_timeout
        self.restart_delay = restart_delay
        self.max_restart_attempts = max_restart_attempts
        self.restart_cooldown = restart_cooldown

        # Configuration
        self.backend_port = 5050
        self.health_endpoint = "/api/health"
        self.base_url = f"http://localhost:{self.backend_port}"
        self.lock_file = "plc_streamer.lock"
        self.status_file = "backend_manager.status"

        # State tracking
        self.restart_count = 0
        self.last_restart_time = 0
        self.backend_process = None
        self.running = True

        # Setup logging
        self.setup_logging()

        # Detect environment
        self.is_packaged = getattr(sys, "frozen", False)

        self.log(f"[MAIN] Backend Manager initialized")
        self.log(f"[CONFIG] Check interval: {check_interval}s")
        self.log(
            f"[CONFIG] Environment: {'Packaged' if self.is_packaged else 'Development'}"
        )
        self.log(f"[CONFIG] Process separation: Independent cmd windows")

    def setup_logging(self):
        """Setup logging configuration"""
        log_format = "%(asctime)s [%(levelname)s] %(message)s"

        # Configure file handler with UTF-8 encoding
        file_handler = logging.FileHandler("backend_manager.log", encoding="utf-8")
        file_handler.setFormatter(logging.Formatter(log_format))

        # Configure console handler with UTF-8 encoding
        console_handler = logging.StreamHandler(sys.stdout)
        console_handler.setFormatter(logging.Formatter(log_format))

        logging.basicConfig(
            level=logging.INFO,
            format=log_format,
            handlers=[file_handler, console_handler],
        )
        self.logger = logging.getLogger(__name__)

    def log(self, message: str, level: str = "INFO"):
        """Log message with appropriate level"""
        if level == "ERROR":
            self.logger.error(message)
        elif level == "WARN":
            self.logger.warning(message)
        else:
            self.logger.info(message)

    def get_backend_command(self) -> list:
        """Get the appropriate backend command for current environment (legacy - kept for compatibility)"""
        if self.is_packaged:
            # In packaged environment, look for the exe
            exe_path = os.path.join(
                os.path.dirname(sys.executable), "S7_Streamer_Logger.exe"
            )
            if os.path.exists(exe_path):
                return [exe_path]
            else:
                # Fallback to exe in current directory
                exe_path = "S7_Streamer_Logger.exe"
                return [exe_path]
        else:
            # In development environment, use conda environment
            # Try to detect if we're in snap7v12 environment
            conda_env_python = r"C:\Users\migue\miniconda3\envs\snap7v12\python.exe"
            if os.path.exists(conda_env_python):
                main_script = os.path.join(os.path.dirname(__file__), "main.py")
                return [conda_env_python, main_script]
            else:
                # Fallback to current python
                python_exe = sys.executable
                main_script = os.path.join(os.path.dirname(__file__), "main.py")
                return [python_exe, main_script]

    def is_backend_alive(self) -> bool:
        """Check if backend is responding to health checks"""
        try:
            response = requests.get(
                f"{self.base_url}{self.health_endpoint}", timeout=self.health_timeout
            )
            return 200 <= response.status_code < 300
        except (
            requests.RequestException,
            requests.ConnectionError,
            requests.Timeout,
            requests.ConnectTimeout,
        ):
            return False
        except Exception as e:
            self.log(f"[ERROR] Unexpected error during health check: {e}", "ERROR")
            return False

    def get_backend_pid(self) -> Optional[int]:
        """Get backend PID from lock file"""
        try:
            if os.path.exists(self.lock_file):
                with open(self.lock_file, "r") as f:
                    return int(f.read().strip())
        except (ValueError, FileNotFoundError, IOError):
            pass
        return None

    def is_backend_process_running(self, pid: int) -> bool:
        """Check if backend process is actually running"""
        try:
            if not psutil.pid_exists(pid):
                return False

            proc = psutil.Process(pid)
            cmdline = " ".join(proc.cmdline()).lower()

            # Check for backend signatures
            signatures = ["main.py", "s7_streamer_logger", "plc_streamer"]
            return any(sig in cmdline for sig in signatures)
        except (psutil.NoSuchProcess, psutil.AccessDenied, psutil.ZombieProcess):
            return False

    def cleanup_zombie_process(self, pid: int) -> bool:
        """Terminate zombie backend process"""
        try:
            if not psutil.pid_exists(pid):
                return True

            proc = psutil.Process(pid)
            self.log(f"[STOP] Terminating zombie process {pid} ({proc.name()})")

            # Try graceful termination
            proc.terminate()
            try:
                proc.wait(timeout=10)
                self.log(f"[OK] Process {pid} terminated gracefully")
                return True
            except psutil.TimeoutExpired:
                # Force kill
                self.log(f"[FORCE] Force killing process {pid}")
                proc.kill()
                proc.wait(timeout=5)
                self.log(f"[KILL] Process {pid} force killed")
                return True

        except (psutil.NoSuchProcess, psutil.AccessDenied):
            return True
        except Exception as e:
            self.log(f"[ERROR] Error terminating process {pid}: {e}", "ERROR")
            return False

    def cleanup_lock_file(self):
        """Remove stale lock file"""
        try:
            if os.path.exists(self.lock_file):
                os.remove(self.lock_file)
                self.log(f"[OK] Removed lock file: {self.lock_file}")
        except Exception as e:
            self.log(f"[ERROR] Error removing lock file: {e}", "ERROR")

    def get_cmd_command(self) -> str:
        """Get Windows cmd command to launch backend in separate console window"""
        if self.is_packaged:
            # In packaged environment, launch exe in new cmd window
            exe_path = os.path.join(
                os.path.dirname(sys.executable), "S7_Streamer_Logger.exe"
            )
            if os.path.exists(exe_path):
                return f'start "S7_Streamer_Logger" "{exe_path}"'
            else:
                # Fallback to exe in current directory
                return 'start "S7_Streamer_Logger" "S7_Streamer_Logger.exe"'
        else:
            # In development environment, launch python script in new cmd window
            conda_env_python = r"C:\Users\migue\miniconda3\envs\snap7v12\python.exe"
            if os.path.exists(conda_env_python):
                main_script = os.path.join(os.path.dirname(__file__), "main.py")
                return f'start "PLC_Backend" "{conda_env_python}" "{main_script}"'
            else:
                # Fallback to current python
                python_exe = sys.executable
                main_script = os.path.join(os.path.dirname(__file__), "main.py")
                return f'start "PLC_Backend" "{python_exe}" "{main_script}"'

    def start_backend(self) -> bool:
        """Start the backend process in a separate Windows cmd console"""
        try:
            cmd_command = self.get_cmd_command()
            self.log(f"[START] Starting backend in separate cmd window: {cmd_command}")

            # Launch backend in completely separate cmd window using shell command
            self.backend_process = subprocess.Popen(
                cmd_command,
                cwd=os.path.dirname(__file__) if not self.is_packaged else None,
                shell=True,  # Use shell to properly handle the start command
            )

            self.log(
                f"[START] Backend launch command executed with PID: {self.backend_process.pid}"
            )

            # Wait a moment for the actual backend to start in its new window
            self.log(
                f"[WAIT] Waiting 10 seconds for backend to initialize in separate window..."
            )
            time.sleep(10)

            # The subprocess.Popen PID is just the cmd launcher, not the actual backend
            # We'll verify health via HTTP instead of process tracking
            self.log(f"[OK] Backend launch completed, will verify via health check")
            return True

        except Exception as e:
            self.log(f"[ERROR] Error starting backend: {e}", "ERROR")
            return False

    def handle_backend_failure(self) -> bool:
        """Handle backend failure and attempt restart"""
        current_time = time.time()

        # Check if we're in cooldown period
        if (current_time - self.last_restart_time) < self.restart_cooldown:
            time_left = self.restart_cooldown - (current_time - self.last_restart_time)
            self.log(f"[WAIT] In cooldown period, {int(time_left)}s remaining")
            return False

        # Check restart attempt limit
        if self.restart_count >= self.max_restart_attempts:
            self.log(
                f"[FAIL] Maximum restart attempts ({self.max_restart_attempts}) reached"
            )
            self.restart_count = 0
            self.last_restart_time = current_time
            return False

        # Cleanup existing processes
        backend_pid = self.get_backend_pid()
        if backend_pid and self.is_backend_process_running(backend_pid):
            self.log(f"[STOP] Cleaning up zombie backend process: {backend_pid}")
            self.cleanup_zombie_process(backend_pid)

        self.cleanup_lock_file()

        # Wait before restart
        self.log(
            f"[WAIT] Waiting {self.restart_delay}s before restart attempt {self.restart_count + 1}"
        )
        time.sleep(self.restart_delay)

        # Attempt restart
        self.restart_count += 1
        if self.start_backend():
            self.log(
                f"[OK] Backend restarted successfully (attempt {self.restart_count})"
            )
            self.restart_count = 0  # Reset counter on success
            return True
        else:
            self.log(
                f"[FAIL] Backend restart failed (attempt {self.restart_count})", "ERROR"
            )
            return False

    def update_status(self, status: str, details: Dict[str, Any] = None):
        """Update status file with current state"""
        try:
            status_data = {
                "timestamp": datetime.now().isoformat(),
                "status": status,
                "restart_count": self.restart_count,
                "last_restart": self.last_restart_time,
                "backend_pid": self.get_backend_pid(),
                "manager_pid": os.getpid(),
                "details": details or {},
            }

            with open(self.status_file, "w") as f:
                json.dump(status_data, f, indent=2)

        except Exception as e:
            self.log(f"[ERROR] Error updating status file: {e}", "ERROR")

    def run(self):
        """Main monitoring loop"""
        self.log(f"[START] Backend Manager started (PID: {os.getpid()})")
        self.update_status("starting")

        while self.running:
            try:
                # Check backend health
                if self.is_backend_alive():
                    self.log(f"[OK] Backend is healthy")
                    self.update_status("healthy")
                    self.restart_count = (
                        0  # Reset restart counter on successful health check
                    )
                else:
                    self.log(f"[WARN] Backend health check failed", "WARN")
                    self.update_status("unhealthy")

                    # Attempt to handle the failure
                    if self.handle_backend_failure():
                        self.update_status("restarted")
                    else:
                        self.update_status("failed")

                # Wait for next check
                time.sleep(self.check_interval)

            except KeyboardInterrupt:
                self.log(f"[SHUTDOWN] Received interrupt signal")
                self.running = False
                break
            except Exception as e:
                self.log(f"[ERROR] Unexpected error in main loop: {e}", "ERROR")
                self.update_status("error", {"error": str(e)})
                time.sleep(self.check_interval)

        self.shutdown()

    def shutdown(self):
        """Cleanup and shutdown"""
        self.log(f"[SHUTDOWN] Backend Manager shutting down")
        self.update_status("shutting_down")

        # Don't terminate any backend processes - they run independently in their own cmd windows
        # The manager only monitors health, doesn't control the backend lifecycle directly
        self.log(
            f"[OK] Backend Manager stopped - backend continues running independently"
        )
        self.update_status("stopped")


def main():
    """Main entry point"""
    print("Backend Manager - PLC S7-315 Streamer Watchdog")
    print("=" * 50)

    try:
        manager = BackendManager()
        manager.run()
    except KeyboardInterrupt:
        print("\n[SHUTDOWN] Backend Manager interrupted by user")
    except Exception as e:
        print(f"[ERROR] Critical error: {e}")
        return 1

    return 0


if __name__ == "__main__":
    sys.exit(main())
@@ -0,0 +1,99 @@
# PowerShell Build Script for S7 Streamer & Logger
# Compatible with PowerShell Core and Windows PowerShell

Write-Host "Starting build process..." -ForegroundColor Green

# Step 1: Build Frontend
Write-Host "Building frontend..." -ForegroundColor Yellow
Set-Location frontend
try {
    npm run build
    Write-Host "Frontend build completed" -ForegroundColor Green
} catch {
    Write-Host "Frontend build failed: $_" -ForegroundColor Red
    exit 1
}

# Step 2: Return to root and clean old builds
Set-Location ..
Write-Host "Cleaning old build directories..." -ForegroundColor Yellow

if (Test-Path "build") {
    Remove-Item -Recurse -Force "build"
    Write-Host " Removed build directory" -ForegroundColor Gray
}
if (Test-Path "dist") {
    Remove-Item -Recurse -Force "dist"
    Write-Host " Removed dist directory" -ForegroundColor Gray
}

# Step 3: Activate conda environment and build with PyInstaller
Write-Host "Activating conda environment and building executable..." -ForegroundColor Yellow
try {
    # Activate conda environment
    conda activate snap7v12

    # Run PyInstaller
    pyinstaller main.spec --clean

    Write-Host "Build completed successfully!" -ForegroundColor Green
    Write-Host "Results available in: dist/main/" -ForegroundColor Cyan
} catch {
    Write-Host "PyInstaller build failed: $_" -ForegroundColor Red
    exit 1
}

# Step 4: Compress the dist/main directory
Write-Host "Compressing distribution..." -ForegroundColor Yellow
$timestamp = Get-Date -Format "yyyyMMdd_HHmmss"
$zipName = "S7_Streamer_Logger_$timestamp.zip"
$destinationPath = "C:\Users\migue\OneDrive\Miguel\CSA - Trabajo\Software\SIDEL\09 - DIET AS REGULAR"

try {
    # Check if destination directory exists
    if (-not (Test-Path $destinationPath)) {
        Write-Host "Creating destination directory..." -ForegroundColor Yellow
        New-Item -ItemType Directory -Path $destinationPath -Force | Out-Null
    }

    # Try to use 7zip first (if available)
    $sevenZipPaths = @(
        "C:\Program Files\7-Zip\7z.exe",
        "C:\Program Files (x86)\7-Zip\7z.exe"
    )

    $sevenZipFound = $false
    foreach ($path in $sevenZipPaths) {
        if (Test-Path $path) {
            Write-Host "Using 7zip for compression..." -ForegroundColor Cyan
            $zipFullPath = Join-Path $destinationPath $zipName
            & "$path" a -tzip "$zipFullPath" ".\dist\main\*" -r
            $sevenZipFound = $true
            break
        }
    }

    # If 7zip not found, use PowerShell's built-in compression
    if (-not $sevenZipFound) {
        Write-Host "Using PowerShell built-in compression..." -ForegroundColor Cyan
        $tempZip = ".\$zipName"
        Compress-Archive -Path ".\dist\main\*" -DestinationPath $tempZip -Force

        # Move to destination
        $zipFullPath = Join-Path $destinationPath $zipName
        Move-Item $tempZip $zipFullPath -Force
    }

    Write-Host "Archive created successfully!" -ForegroundColor Green
    Write-Host "Location: $zipFullPath" -ForegroundColor Cyan

    # Show file size
    $fileSize = (Get-Item $zipFullPath).Length / 1MB
    Write-Host "Archive size: $([math]::Round($fileSize, 2)) MB" -ForegroundColor Gray

} catch {
    Write-Host "Compression failed: $_" -ForegroundColor Red
    exit 1
}

Write-Host "Build process completed!" -ForegroundColor Green
@@ -5,26 +5,9 @@
      "enabled": true,
      "id": "DAR",
      "name": "DAR",
-      "prefix": "gateway_phoenix",
+      "prefix": "dar",
      "sampling_interval": 0.5,
      "use_optimized_reading": true
    },
-    {
-      "created": "2025-08-09T02:06:26.840011",
-      "enabled": true,
-      "id": "Fast",
-      "name": "Fast",
-      "prefix": "fast",
-      "sampling_interval": 0.5,
-      "use_optimized_reading": true
-    },
-    {
-      "enabled": true,
-      "id": "Test",
-      "name": "test",
-      "prefix": "test",
-      "sampling_interval": 1,
-      "use_optimized_reading": true
-    }
  ]
}
@@ -4,31 +4,39 @@
    "dataset_id": "DAR",
    "variables": [
      {
-        "area": "db",
+        "configType": "manual",
+        "area": "DB",
        "db": 1011,
-        "name": "UR29_Brix",
+        "name": "HMI_Instrument.QTM307.PVFiltered",
        "offset": 1322,
        "streaming": true,
        "type": "real"
      },
      {
-        "area": "db",
+        "configType": "manual",
+        "area": "DB",
        "db": 1011,
-        "name": "UR29_ma",
+        "name": "HMI_Instrument.QTM306.PVFiltered",
        "offset": 1296,
        "streaming": true,
        "type": "real"
      },
      {
-        "area": "db",
+        "configType": "manual",
+        "area": "DB",
        "db": 1011,
-        "name": "UR29_max",
-        "offset": 1296,
+        "name": "HMI_Instrument.CTS306.PVFiltered",
+        "offset": 1348,
        "streaming": true,
        "type": "real"
      },
+      {
+        "configType": "manual",
+        "area": "PEW",
+        "name": "CTS306_PEW",
+        "offset": 256,
+        "streaming": true,
+        "type": "word"
+      }
    ]
  },
@@ -36,15 +44,12 @@
    "dataset_id": "Fast",
    "variables": [
      {
-        "area": "db",
+        "configType": "symbol",
        "name": "AUX Blink_2.0S",
        "streaming": true,
-        "symbol": "AUX Blink_2.0S",
-        "type": "real"
+        "symbol": "AUX Blink_2.0S"
      },
      {
-        "area": "m",
+        "configType": "manual",
+        "area": "M",
        "bit": 1,
        "name": "M50.1",
        "offset": 50,
@@ -52,13 +57,12 @@
        "type": "bool"
      },
      {
-        "area": "m",
-        "type": "bool",
-        "streaming": false,
-        "offset": 50,
+        "configType": "manual",
+        "area": "M",
        "bit": 2,
-        "name": "M50.2"
+        "name": "M50.2",
+        "offset": 50,
+        "streaming": false,
+        "type": "bool"
      }
    ]
  }
@@ -1,29 +1,26 @@
{
  "plots": [
    {
      "id": "plot_1",
      "line_tension": 0,
      "name": "UR29",
      "point_hover_radius": 4,
      "point_radius": 2.5,
      "stacked": true,
      "stepped": true,
      "time_window": 60,
      "trigger_enabled": false,
      "trigger_on_true": true,
      "trigger_variable": null,
      "y_max": null,
      "y_min": null
    },
    {
-      "id": "Clock",
-      "line_tension": 0,
-      "name": "Clock",
+      "id": "DAR",
+      "line_tension": 0.4,
+      "name": "DAR_Brix",
      "point_hover_radius": 4,
      "point_radius": 1,
      "stacked": false,
      "stepped": false,
      "time_window": 60,
      "trigger_enabled": false,
      "trigger_on_true": true
    },
    {
      "id": "CTS306",
      "line_tension": 0.4,
      "name": "CTS306 Conductivimeter",
      "point_hover_radius": 4,
      "point_radius": 1,
      "stacked": true,
      "stepped": true,
-      "time_window": 10,
+      "time_window": 60,
      "trigger_enabled": false,
      "trigger_on_true": true
    }
@@ -32,22 +32,60 @@
      "plot_id": "Clock",
      "variables": [
        {
-          "variable_name": "AUX Blink_2.0S",
          "color": "#db3376",
+          "enabled": true,
          "line_width": 2,
-          "y_axis": "left",
-          "enabled": true
+          "variable_name": "AUX Blink_2.0S",
+          "y_axis": "left"
        },
        {
+          "color": "#3498db",
+          "enabled": true,
+          "line_width": 2,
+          "variable_name": "M50.1",
+          "y_axis": "left"
+        },
+        {
+          "color": "#3edb33",
+          "enabled": true,
+          "line_width": 2,
+          "variable_name": "M50.2",
+          "y_axis": "left"
+        }
+      ]
+    },
+    {
+      "plot_id": "DAR",
+      "variables": [
+        {
+          "color": "#3498db",
+          "enabled": true,
+          "line_width": 2,
+          "variable_name": "HMI_Instrument.QTM306.PVFiltered",
+          "y_axis": "left"
+        },
+        {
+          "color": "#e30d4d",
+          "enabled": true,
+          "line_width": 2,
+          "variable_name": "HMI_Instrument.QTM307.PVFiltered",
+          "y_axis": "left"
+        }
+      ]
+    },
+    {
+      "plot_id": "CTS306",
+      "variables": [
        {
          "variable_name": "CTS306_PEW",
          "color": "#3498db",
          "line_width": 2,
          "y_axis": "left",
          "enabled": true
        },
        {
-          "variable_name": "M50.2",
-          "color": "#3edb33",
+          "variable_name": "HMI_Instrument.CTS306.PVFiltered",
+          "color": "#1bf38e",
          "line_width": 2,
          "y_axis": "left",
          "enabled": true
@@ -24,113 +24,161 @@
      "description": "Array of PLC variables for this dataset",
      "items": {
        "type": "object",
-        "oneOf": [
-          {
-            "title": "Manual Configuration",
-            "description": "Manually configure PLC variable parameters",
-            "properties": {
-              "name": { "type": "string", "title": "Variable Name", "description": "Human-readable name for the variable" },
-              "area": { "type": "string", "title": "Memory Area", "enum": ["db", "mw", "m", "pew", "pe", "paw", "pa", "e", "a", "mb"] },
-              "db": { "type": ["integer", "null"], "title": "DB Number", "minimum": 1, "maximum": 9999 },
-              "offset": { "type": "integer", "title": "Offset", "minimum": 0, "maximum": 8191 },
-              "bit": { "type": ["integer", "null"], "title": "Bit Position", "minimum": 0, "maximum": 7 },
-              "type": { "type": "string", "title": "Data Type", "enum": ["real", "int", "bool", "dint", "word", "byte", "uint", "udint", "sint", "usint"] },
-              "streaming": { "type": "boolean", "title": "Stream to PlotJuggler", "description": "Include this variable in UDP streaming", "default": false }
-            },
-            "required": ["name", "area", "offset", "type"]
-          },
-          {
-            "title": "Symbol-based Configuration",
-            "description": "Use a symbol from the loaded ASC file",
-            "properties": {
-              "name": { "type": "string", "title": "Variable Name", "description": "Human-readable name for the variable (auto-filled from symbol)" },
-              "symbol": { "type": "string", "title": "PLC Symbol", "description": "Select a symbol from the loaded ASC file", "options": { "widget": "symbol-selector" } },
-              "streaming": { "type": "boolean", "title": "Stream to PlotJuggler", "description": "Include this variable in UDP streaming", "default": false }
-            },
-            "required": ["symbol"]
-          }
-        ]
+        "properties": {
+          "configType": { "type": "string", "title": "Configuration Type", "enum": ["manual", "symbol"], "default": "manual" }
+        },
+        "allOf": [
+          {
+            "if": { "properties": { "configType": { "const": "manual" } } },
+            "then": {
+              "properties": {
+                "name": { "type": "string", "title": "Variable Name", "description": "Human-readable name for the variable" },
+                "area": { "type": "string", "title": "Memory Area", "enum": ["DB", "MW", "M", "PEW", "PE", "PAW", "PA", "E", "A", "MB"] },
+                "db": { "type": ["integer", "null"], "title": "DB Number", "minimum": 1, "maximum": 9999 },
+                "offset": { "type": "integer", "title": "Offset", "minimum": 0, "maximum": 8191 },
+                "bit": { "type": ["integer", "null"], "title": "Bit Position", "minimum": 0, "maximum": 7 },
+                "type": { "type": "string", "title": "Data Type", "enum": ["real", "int", "bool", "dint", "word", "byte", "uint", "udint", "sint", "usint", "dword"] },
+                "streaming": { "type": "boolean", "title": "Stream to PlotJuggler", "description": "Include this variable in UDP streaming", "default": false }
+              },
+              "required": ["configType", "name", "area", "offset", "type"],
+              "allOf": [
+                { "if": { "properties": { "area": { "const": "DB" } } }, "then": { "required": ["db"] } },
+                { "if": { "properties": { "type": { "const": "bool" } } }, "then": { "required": ["bit"] } }
+              ]
+            }
+          },
+          {
+            "if": { "properties": { "configType": { "const": "symbol" } } },
+            "then": {
+              "properties": {
+                "name": { "type": "string", "title": "Variable Name", "description": "Human-readable name for the variable (auto-filled from symbol)" },
+                "symbol": { "type": "string", "title": "PLC Symbol", "description": "Select a symbol from the loaded ASC file" },
+                "streaming": { "type": "boolean", "title": "Stream to PlotJuggler", "description": "Include this variable in UDP streaming", "default": false }
+              },
+              "required": ["configType", "symbol"]
+            }
+          }
+        ]
      }
    },
@@ -25,215 +25,206 @@
      "removable": true
    },
    "items": {
-      "oneOf": [
-        {
-          "ui:title": "Manual Configuration",
-          "ui:description": "Configure PLC variable parameters manually",
-          "ui:order": ["name", "area", "db", "offset", "bit", "type", "streaming"],
-          "ui:layout": [
-            [
-              { "name": "name", "width": 4 },
-              { "name": "area", "width": 2 },
-              { "name": "db", "width": 2 },
-              { "name": "offset", "width": 2 },
-              { "name": "type", "width": 2 }
-            ],
-            [
-              { "name": "bit", "width": 3 },
-              { "name": "streaming", "width": 9 }
-            ]
-          ],
-          "name": { "ui:widget": "text", "ui:placeholder": "Variable name", "ui:help": "📝 Human-readable name for this variable" },
-          "area": {
-            "ui:widget": "select",
-            "ui:help": "PLC memory area (DB=DataBlock, MW=MemoryWord, etc.)",
-            "ui:options": {
-              "enumOptions": [
-                { "value": "db", "label": "🗃️ DB (Data Block)" },
-                { "value": "mw", "label": "📊 MW (Memory Word)" },
-                { "value": "m", "label": "💾 M (Memory)" },
-                { "value": "pew", "label": "📥 PEW (Process Input Word)" },
-                { "value": "pe", "label": "📥 PE (Process Input)" },
-                { "value": "paw", "label": "📤 PAW (Process Output Word)" },
-                { "value": "pa", "label": "📤 PA (Process Output)" },
-                { "value": "e", "label": "🔌 E (Input)" },
-                { "value": "a", "label": "🔌 A (Output)" },
-                { "value": "mb", "label": "💾 MB (Memory Byte)" }
-              ]
-            }
-          },
-          "db": { "ui:widget": "updown", "ui:help": "Data Block number (required for DB area)", "ui:placeholder": "1011" },
-          "offset": { "ui:widget": "updown", "ui:help": "Byte offset within the memory area" },
-          "bit": { "ui:widget": "updown", "ui:help": "Bit position (0-7) for bit-addressable areas" },
-          "type": {
-            "ui:widget": "select",
-            "ui:help": "PLC data type",
-            "ui:options": {
-              "enumOptions": [
-                { "value": "real", "label": "🔢 REAL (32-bit float)" },
-                { "value": "int", "label": "🔢 INT (16-bit signed)" },
-                { "value": "bool", "label": "✅ BOOL (1-bit boolean)" },
-                { "value": "dint", "label": "🔢 DINT (32-bit signed)" },
-                { "value": "word", "label": "🔢 WORD (16-bit unsigned)" },
-                { "value": "byte", "label": "🔢 BYTE (8-bit unsigned)" },
-                { "value": "uint", "label": "🔢 UINT (16-bit unsigned)" },
-                { "value": "udint", "label": "🔢 UDINT (32-bit unsigned)" },
-                { "value": "sint", "label": "🔢 SINT (8-bit signed)" },
-                { "value": "usint", "label": "🔢 USINT (8-bit unsigned)" }
-              ]
-            }
-          },
-          "streaming": { "ui:widget": "switch", "ui:help": "📡 Enable real-time streaming to PlotJuggler for visualization" }
-        },
-        {
-          "ui:title": "Symbol-based Configuration",
-          "ui:description": "Use a symbol from the loaded ASC file",
-          "ui:order": ["name", "symbol", "streaming"],
-          "ui:layout": [
-            [
-              { "name": "name", "width": 6 },
-              { "name": "symbol", "width": 6 }
-            ],
-            [
-              { "name": "streaming", "width": 12 }
-            ]
-          ],
-          "name": { "ui:widget": "text", "ui:placeholder": "Variable name (auto-filled from symbol)", "ui:help": "📝 Human-readable name for this variable", "ui:readonly": true },
-          "symbol": { "ui:widget": "dataset-variable-symbol", "ui:placeholder": "Select a PLC symbol...", "ui:help": "🔍 Search and select a symbol from the loaded ASC file" },
-          "streaming": { "ui:widget": "switch", "ui:help": "📡 Enable real-time streaming to PlotJuggler for visualization" }
-        }
-      ]
+      "ui:order": ["configType", "name", "symbol", "area", "db", "offset", "bit", "type", "streaming"],
+      "ui:layout": [
+        [
+          { "name": "configType", "width": 3 }
+        ],
+        [
+          { "name": "name", "width": 6 },
+          { "name": "symbol", "width": 6 }
+        ],
+        [
+          { "name": "area", "width": 2 },
+          { "name": "db", "width": 2 },
+          { "name": "offset", "width": 2 },
+          { "name": "bit", "width": 2 },
+          { "name": "type", "width": 2 },
+          { "name": "streaming", "width": 2 }
+        ]
+      ],
+      "configType": {
+        "ui:widget": "select",
+        "ui:help": "Choose between manual configuration or symbol-based setup",
+        "ui:options": {
+          "enumOptions": [
+            { "value": "manual", "label": "🔧 Manual Configuration" },
+            { "value": "symbol", "label": "🔍 Symbol-based Configuration" }
+          ]
+        }
+      },
+      "name": { "ui:widget": "text", "ui:placeholder": "Variable name", "ui:help": "📝 Human-readable name for this variable" },
+      "symbol": { "ui:widget": "dataset-variable-symbol", "ui:placeholder": "Select a PLC symbol...", "ui:help": "🔍 Search and select a symbol from the loaded ASC file" },
+      "area": {
+        "ui:widget": "select",
+        "ui:help": "PLC memory area (DB=DataBlock, MW=MemoryWord, etc.)",
+        "ui:options": {
+          "enumOptions": [
+            { "value": "DB", "label": "🗃️ DB (Data Block)" },
+            { "value": "MW", "label": "📊 MW (Memory Word)" },
+            { "value": "M", "label": "💾 M (Memory)" },
+            { "value": "PEW", "label": "📥 PEW (Process Input Word)" },
+            { "value": "PE", "label": "📥 PE (Process Input)" },
+            { "value": "PAW", "label": "📤 PAW (Process Output Word)" },
+            { "value": "PA", "label": "📤 PA (Process Output)" },
+            { "value": "E", "label": "🔌 E (Input)" },
+            { "value": "A", "label": "🔌 A (Output)" },
+            { "value": "MB", "label": "💾 MB (Memory Byte)" }
+          ]
+        }
+      },
+      "db": {
+        "ui:widget": "updown",
+        "ui:help": "⚠️ Data Block number (only required for DB area - will be ignored for other areas like PE, PA, MW, etc.)",
+        "ui:placeholder": "1011",
+        "ui:description": "🗃️ This field is only used when Area = 'DB (Data Block)'"
+      },
+      "offset": { "ui:widget": "updown", "ui:help": "Byte offset within the memory area" },
+      "bit": {
+        "ui:widget": "updown",
+        "ui:help": "⚠️ Bit position (0-7) - only required for BOOL data type, will be ignored for other types",
+        "ui:description": "✅ This field is only used when Type = 'BOOL (1-bit boolean)'"
+      },
+      "type": {
+        "ui:widget": "select",
+        "ui:help": "PLC data type",
+        "ui:options": {
+          "enumOptions": [
+            { "value": "real", "label": "🔢 REAL (32-bit float)" },
+            { "value": "int", "label": "🔢 INT (16-bit signed)" },
+            { "value": "bool", "label": "✅ BOOL (1-bit boolean)" },
+            { "value": "dint", "label": "🔢 DINT (32-bit signed)" },
+            { "value": "word", "label": "🔢 WORD (16-bit unsigned)" },
+            { "value": "byte", "label": "🔢 BYTE (8-bit unsigned)" },
+            { "value": "uint", "label": "🔢 UINT (16-bit unsigned)" },
+            { "value": "udint", "label": "🔢 UDINT (32-bit unsigned)" },
+            { "value": "sint", "label": "🔢 SINT (8-bit signed)" },
+            { "value": "usint", "label": "🔢 USINT (8-bit unsigned)" },
+            { "value": "dword", "label": "🔢 DWORD (32-bit unsigned)" }
+          ]
+        }
+      },
+      "streaming": { "ui:widget": "switch", "ui:help": "📡 Enable real-time streaming to PlotJuggler for visualization" }
+    }
  }
}
@ -1,5 +1,6 @@
import snap7
import snap7.util
import snap7.type
import struct
import time
import threading

@ -8,6 +9,7 @@ from typing import Dict, Any, Optional
# 🚀 OPTIMIZATION: Check if optimized batch reader is available
try:
    import utils.optimized_batch_reader

    OPTIMIZED_BATCH_READER_AVAILABLE = True
except ImportError as e:
    OPTIMIZED_BATCH_READER_AVAILABLE = False
@ -63,10 +65,11 @@ class PLCClient:
        try:
            # Import here to avoid circular imports
            from utils.optimized_batch_reader import OptimizedBatchReader

            self.batch_reader = OptimizedBatchReader(
                plc_client=self,
                logger=logger,
                inter_read_delay=self.inter_read_delay_seconds,
            )
            if logger:
                logger.info("🚀 OptimizedBatchReader initialized successfully")
@ -324,7 +327,7 @@ class PLCClient:
            result = self._read_memory_variable(offset, var_type)
        elif area_type in [
            "pew",
            "pe",
            "i",  # Process Input area
            "ped",  # Process Input Double word (REAL)
            "peb",  # Process Input Byte
@ -413,19 +416,21 @@ class PLCClient:
        return None

    def read_variables_batch(
        self,
        variables_config: Dict[str, Dict[str, Any]],
        use_optimized_reading: bool = None,
    ) -> Dict[str, Any]:
        """🚀 OPTIMIZED: Read multiple variables using advanced batch operations

        This method can use either the optimized read_multi_vars method or fall back
        to the legacy grouping method based on the use_optimized_reading parameter
        or the global USE_OPTIMIZED_BATCH_READING setting.

        When optimization is enabled and available:
        - Uses snap7.read_multi_vars with automatic chunking
        - Handles scattered variables across different memory areas
        - Significantly reduces network overhead and improves performance

        When optimization is disabled or unavailable:
        - Falls back to the original grouping and batch reading method
        - Maintains compatibility with older snap7 versions
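For orientation, a hedged sketch of how read_variables_batch might be called. The variables_config shape is an assumption inferred from the docstring and the project's variable definitions (area/db/offset/type, plus "bit" for booleans); plc_client stands for a connected PLCClient instance:

    variables_config = {
        "Temperature_1": {"area": "db", "db": 1011, "offset": 0, "type": "real"},
        "Counter_1": {"area": "db", "db": 1011, "offset": 100, "type": "int"},
        "Alarm_Bit": {"area": "db", "db": 1011, "offset": 50, "type": "bool", "bit": 0},
    }

    # True forces the optimized read_multi_vars path; None defers to the
    # global USE_OPTIMIZED_BATCH_READING setting.
    values = plc_client.read_variables_batch(variables_config, use_optimized_reading=True)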
@ -444,21 +449,27 @@ class PLCClient:
        # Determine which reading method to use
        # Priority: dataset-specific setting > global setting
        should_use_optimized = (
            use_optimized_reading
            if use_optimized_reading is not None
            else USE_OPTIMIZED_BATCH_READING
        )

        # 🚀 Check if we should use the optimized batch reader
        if (
            should_use_optimized
            and self.batch_reader is not None
            and OPTIMIZED_BATCH_READER_AVAILABLE
        ):
            # Use the optimized read_multi_vars method
            if self.logger:
                source = (
                    "dataset config"
                    if use_optimized_reading is not None
                    else "global config"
                )
                self.logger.debug(
                    f"🚀 Using optimized batch reading for {len(variables_config)} variables (from {source})"
                )
            return self.batch_reader.read_variables_batch(variables_config)
        else:
            # Fall back to the legacy grouping method
@ -474,7 +485,7 @@ class PLCClient:
        self, variables_config: Dict[str, Dict[str, Any]]
    ) -> Dict[str, Any]:
        """Legacy batch reading method (original implementation)

        This method groups variables by DB/Area and performs batch reads when possible,
        reducing the number of snap7 calls compared to individual reads.
        """
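As a rough illustration of the grouping idea described in that docstring (a minimal sketch with hypothetical names, not the repository's actual implementation):

    from collections import defaultdict

    def group_variables_by_area(variables_config):
        """Group variable configs by (area, db) so each group can be served by
        one contiguous snap7 read instead of one call per variable."""
        groups = defaultdict(list)
        for name, cfg in variables_config.items():
            key = (cfg.get("area", "db"), cfg.get("db"))  # db is None for non-DB areas
            groups[key].append((name, cfg))
        return groups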
@ -734,48 +745,48 @@ class PLCClient:
    def _read_input_variable(self, offset: int, var_type: str) -> Any:
        """Read from Process Inputs using correct area code (0x81)"""
        try:
            # Use snap7.type.Areas.PE (0x81) for Process Inputs
            # read_area(area, dbnumber, start, size) - only 4 parameters!
            if var_type == "real":
                # For REAL (32-bit float), read 4 bytes
                raw_data = self.plc.read_area(snap7.type.Areas.PE, 0, offset, 4)
                return struct.unpack(">f", raw_data)[0]
            elif var_type == "int":
                # For INT (16-bit signed), read 2 bytes
                raw_data = self.plc.read_area(snap7.type.Areas.PE, 0, offset, 2)
                return struct.unpack(">h", raw_data)[0]
            elif var_type == "word":
                # For WORD (16-bit unsigned), read 2 bytes
                raw_data = self.plc.read_area(snap7.type.Areas.PE, 0, offset, 2)
                return struct.unpack(">H", raw_data)[0]
            elif var_type == "dint":
                # For DINT (32-bit signed), read 4 bytes
                raw_data = self.plc.read_area(snap7.type.Areas.PE, 0, offset, 4)
                return struct.unpack(">l", raw_data)[0]
            elif var_type == "dword":
                # For DWORD (32-bit unsigned), read 4 bytes
                raw_data = self.plc.read_area(snap7.type.Areas.PE, 0, offset, 4)
                return struct.unpack(">L", raw_data)[0]
            elif var_type == "byte":
                # For BYTE (8-bit), read 1 byte
                raw_data = self.plc.read_area(snap7.type.Areas.PE, 0, offset, 1)
                return struct.unpack(">B", raw_data)[0]
            elif var_type == "bool":
                # For BOOL, we need to read the byte and extract the specific bit
                # Default to bit 0 if not specified
                raw_data = self.plc.read_area(snap7.type.Areas.PE, 0, offset, 1)
                return bool(raw_data[0] & 0x01)
            elif var_type == "uint":
                raw_data = self.plc.read_area(snap7.type.Areas.PE, 0, offset, 2)
                return struct.unpack(">H", raw_data)[0]
            elif var_type == "udint":
                raw_data = self.plc.read_area(snap7.type.Areas.PE, 0, offset, 4)
                return struct.unpack(">L", raw_data)[0]
            elif var_type == "sint":
                raw_data = self.plc.read_area(snap7.type.Areas.PE, 0, offset, 1)
                return struct.unpack(">b", raw_data)[0]
            elif var_type == "usint":
                raw_data = self.plc.read_area(snap7.type.Areas.PE, 0, offset, 1)
                return struct.unpack(">B", raw_data)[0]
        except Exception as e:
            if self.logger:
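Every branch above follows the same pattern: read N big-endian bytes from the PE area, then unpack. A condensed sketch of that dispatch, assuming the python-snap7 read_area(area, dbnumber, start, size) API used in the diff; read_pe and S7_PE_FORMATS are hypothetical names for illustration:

    import struct
    import snap7
    import snap7.type

    # (byte count, struct format) per PLC type; '>' because S7 CPUs store data big-endian.
    # BOOL is excluded: it needs a byte read plus bit extraction (see _read_input_bit below).
    S7_PE_FORMATS = {
        "real": (4, ">f"), "int": (2, ">h"), "word": (2, ">H"),
        "dint": (4, ">l"), "dword": (4, ">L"), "byte": (1, ">B"),
        "uint": (2, ">H"), "udint": (4, ">L"), "sint": (1, ">b"), "usint": (1, ">B"),
    }

    def read_pe(plc, offset, var_type):
        """Read one typed value from the Process Input area (PE, 0x81)."""
        size, fmt = S7_PE_FORMATS[var_type]
        raw = plc.read_area(snap7.type.Areas.PE, 0, offset, size)
        return struct.unpack(fmt, raw)[0]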
@ -791,48 +802,48 @@ class PLCClient:
    def _read_output_variable(self, offset: int, var_type: str) -> Any:
        """Read from Process Outputs using correct area code (0x82)"""
        try:
            # Use snap7.type.Areas.PA (0x82) for Process Outputs
            # read_area(area, dbnumber, start, size) - only 4 parameters!
            if var_type == "real":
                # For REAL (32-bit float), read 4 bytes
                raw_data = self.plc.read_area(snap7.type.Areas.PA, 0, offset, 4)
                return struct.unpack(">f", raw_data)[0]
            elif var_type == "int":
                # For INT (16-bit signed), read 2 bytes
                raw_data = self.plc.read_area(snap7.type.Areas.PA, 0, offset, 2)
                return struct.unpack(">h", raw_data)[0]
            elif var_type == "word":
                # For WORD (16-bit unsigned), read 2 bytes
                raw_data = self.plc.read_area(snap7.type.Areas.PA, 0, offset, 2)
                return struct.unpack(">H", raw_data)[0]
            elif var_type == "dint":
                # For DINT (32-bit signed), read 4 bytes
                raw_data = self.plc.read_area(snap7.type.Areas.PA, 0, offset, 4)
                return struct.unpack(">l", raw_data)[0]
            elif var_type == "dword":
                # For DWORD (32-bit unsigned), read 4 bytes
                raw_data = self.plc.read_area(snap7.type.Areas.PA, 0, offset, 4)
                return struct.unpack(">L", raw_data)[0]
            elif var_type == "byte":
                # For BYTE (8-bit), read 1 byte
                raw_data = self.plc.read_area(snap7.type.Areas.PA, 0, offset, 1)
                return struct.unpack(">B", raw_data)[0]
            elif var_type == "bool":
                # For BOOL, we need to read the byte and extract the specific bit
                # Default to bit 0 if not specified
                raw_data = self.plc.read_area(snap7.type.Areas.PA, 0, offset, 1)
                return bool(raw_data[0] & 0x01)
            elif var_type == "uint":
                raw_data = self.plc.read_area(snap7.type.Areas.PA, 0, offset, 2)
                return struct.unpack(">H", raw_data)[0]
            elif var_type == "udint":
                raw_data = self.plc.read_area(snap7.type.Areas.PA, 0, offset, 4)
                return struct.unpack(">L", raw_data)[0]
            elif var_type == "sint":
                raw_data = self.plc.read_area(snap7.type.Areas.PA, 0, offset, 1)
                return struct.unpack(">b", raw_data)[0]
            elif var_type == "usint":
                raw_data = self.plc.read_area(snap7.type.Areas.PA, 0, offset, 1)
                return struct.unpack(">B", raw_data)[0]
        except Exception as e:
            if self.logger:
@ -848,9 +859,9 @@ class PLCClient:
    def _read_input_bit(self, offset: int, bit: int) -> bool:
        """Read from Process Input Bits using correct area code (0x81)"""
        try:
            # Use snap7.type.Areas.PE (0x81) for Process Inputs
            # read_area(area, dbnumber, start, size) - only 4 parameters!
            raw_data = self.plc.read_area(snap7.type.Areas.PE, 0, offset, 1)
            return snap7.util.get_bool(raw_data, 0, bit)
        except Exception as e:
            if self.logger:
@ -860,9 +871,9 @@ class PLCClient:
    def _read_output_bit(self, offset: int, bit: int) -> bool:
        """Read from Process Output Bits using correct area code (0x82)"""
        try:
            # Use snap7.type.Areas.PA (0x82) for Process Outputs
            # read_area(area, dbnumber, start, size) - only 4 parameters!
            raw_data = self.plc.read_area(snap7.type.Areas.PA, 0, offset, 1)
            return snap7.util.get_bool(raw_data, 0, bit)
        except Exception as e:
            if self.logger:
@ -1106,7 +1117,7 @@ class PLCClient:
            "batch_reader_initialized": self.batch_reader is not None,
            "inter_read_delay": self.inter_read_delay_seconds,
        }

        # Add detailed stats from the batch reader if available
        if self.batch_reader is not None:
            try:
@ -1115,5 +1126,5 @@ class PLCClient:
        except Exception as e:
            if self.logger:
                self.logger.warning(f"Error getting batch reader stats: {e}")

        return base_stats
@ -598,7 +598,12 @@ class PLCDataStreamer:
                os.makedirs(records_path)

            # Get disk usage for the drive where records are stored
            # Use multiple fallback methods for Windows compatibility
            usage = self._get_disk_usage_safe(records_path)
            if usage is None:
                if hasattr(self, "logger"):
                    self.logger.warning("Could not get disk usage information")
                return None

            # Calculate average CSV file size (estimate based on active datasets)
            avg_file_size_per_hour = self._estimate_csv_size_per_hour()
@ -633,6 +638,41 @@ class PLCDataStreamer:
                self.logger.error(f"Error calculating disk space: {e}")
            return None

    def _get_disk_usage_safe(self, path):
        """Get disk usage using shutil.disk_usage() - robust for Windows Python 3.12+"""
        from collections import namedtuple
        import os
        import shutil

        DiskUsage = namedtuple("DiskUsage", ["total", "used", "free", "percent"])

        try:
            # For Windows, always use the drive root (e.g., "D:\") for maximum compatibility
            # Extract drive letter from the given path
            drive = os.path.splitdrive(os.path.abspath(path))[0]

            # Ensure we have a valid drive (e.g., "D:")
            if not drive or len(drive) < 2:
                raise ValueError(f"Invalid drive extracted from path: {path}")

            # Create proper drive root path for Windows (e.g., "D:\")
            drive_root = drive + "\\"

            # Use shutil.disk_usage() instead of psutil - avoids Python 3.12 Unicode API issues
            usage = shutil.disk_usage(drive_root)

            return DiskUsage(
                total=usage.total,
                used=usage.used,
                free=usage.free,
                percent=round((usage.used / usage.total) * 100, 1),
            )

        except Exception as e:
            if hasattr(self, "logger"):
                self.logger.error(f"Failed to get disk usage for drive root: {e}")
            return None
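For reference, the same shutil-based pattern in isolation (a standalone sketch; shutil.disk_usage is standard library and returns total/used/free in bytes; the records directory below is hypothetical):

    import os
    import shutil

    records_path = r"D:\records"  # hypothetical records directory
    drive_root = os.path.splitdrive(os.path.abspath(records_path))[0] + "\\"
    usage = shutil.disk_usage(drive_root)
    print(f"{usage.free / usage.total:.1%} free on {drive_root}")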

    def _estimate_csv_size_per_hour(self) -> float:
        """Estimate CSV file size per hour based on active datasets and variables"""
        try:
@ -76,9 +76,11 @@ class RotatingFileHandler(logging.Handler):
                oldest_file = log_files.pop(0)
                try:
                    os.remove(oldest_file)
                    print(
                        f"[CLEANUP] Removed old log file: {os.path.basename(oldest_file)}"
                    )
                except OSError as e:
                    print(f"[WARNING] Could not remove {oldest_file}: {e}")

    def emit(self, record):
        """Emit a log record"""
@ -0,0 +1,82 @@
{
    "events": [
        {
            "timestamp": "2025-08-22T16:33:02.020381",
            "level": "info",
            "event_type": "application_started",
            "message": "Application initialization completed successfully",
            "details": {}
        },
        {
            "timestamp": "2025-08-22T16:36:41.330445",
            "level": "info",
            "event_type": "udp_streaming_stopped",
            "message": "UDP streaming to PlotJuggler stopped (CSV recording continues)",
            "details": {}
        },
        {
            "timestamp": "2025-08-22T16:36:41.331526",
            "level": "info",
            "event_type": "csv_recording_stopped",
            "message": "🔥 CRITICAL: CSV recording stopped (dataset threads continue for UDP streaming)",
            "details": {
                "recording_protection": false,
                "performance_monitoring": false
            }
        },
        {
            "timestamp": "2025-08-22T16:36:41.331526",
            "level": "info",
            "event_type": "udp_streaming_stopped",
            "message": "UDP streaming to PlotJuggler stopped (CSV recording continues)",
            "details": {}
        },
        {
            "timestamp": "2025-08-22T16:36:41.332612",
            "level": "info",
            "event_type": "plc_disconnection",
            "message": "Disconnected from PLC 192.168.1.100 (application shutdown (will auto-reconnect on restart))",
            "details": {}
        },
        {
            "timestamp": "2025-08-22T16:36:59.890526",
            "level": "info",
            "event_type": "application_started",
            "message": "Application initialization completed successfully",
            "details": {}
        },
        {
            "timestamp": "2025-08-22T16:38:28.043351",
            "level": "info",
            "event_type": "udp_streaming_stopped",
            "message": "UDP streaming to PlotJuggler stopped (CSV recording continues)",
            "details": {}
        },
        {
            "timestamp": "2025-08-22T16:38:28.044362",
            "level": "info",
            "event_type": "csv_recording_stopped",
            "message": "🔥 CRITICAL: CSV recording stopped (dataset threads continue for UDP streaming)",
            "details": {
                "recording_protection": false,
                "performance_monitoring": false
            }
        },
        {
            "timestamp": "2025-08-22T16:38:28.045373",
            "level": "info",
            "event_type": "udp_streaming_stopped",
            "message": "UDP streaming to PlotJuggler stopped (CSV recording continues)",
            "details": {}
        },
        {
            "timestamp": "2025-08-22T16:38:28.045373",
            "level": "info",
            "event_type": "plc_disconnection",
            "message": "Disconnected from PLC 192.168.1.100 (application shutdown (will auto-reconnect on restart))",
            "details": {}
        }
    ],
    "last_updated": "2025-08-22T16:38:28.045373",
    "total_entries": 10
}
@ -663,7 +663,16 @@ const ChartjsPlot = ({ session, height = '400px' }) => {
        // Update lastPushedX tracking for streaming continuity
        const lastPoint = historicalPoints[historicalPoints.length - 1];
        if (lastPoint && typeof lastPoint.x === 'number') {
          // Compensate for frontend processing delay to prevent gaps
          // Subtract 3 seconds (3000ms) to account for:
          // - Backend processing time
          // - HTTP transfer time
          // - Frontend processing time
          // - Streaming initialization delay
          const compensatedTimestamp = lastPoint.x - 3000;
          sessionDataRef.current.lastPushedXByDataset.set(index, compensatedTimestamp);

          console.log(`📊 Historical data continuity: variable ${variableInfo.name}, last point: ${new Date(lastPoint.x).toISOString()}, compensated lastPushedX: ${new Date(compensatedTimestamp).toISOString()}`);
        }
      }
    });
@ -1114,7 +1123,17 @@ const ChartjsPlot = ({ session, height = '400px' }) => {
        if (xNum > lastPushedX) newPoints.push({ x: xNum, y: yNum });
      }

      if (newPoints.length === 0) {
        // Log when no new points are found for debugging gap issues
        if (backendDataset.data.length > 0) {
          const firstBackendPoint = backendDataset.data[0];
          const firstXNum = getXValueMs(firstBackendPoint);
          if (firstXNum !== null && firstXNum <= lastPushedX) {
            console.log(`📊 Streaming continuity: Dataset ${datasetIndex} - ${backendDataset.data.length} points filtered out (oldest: ${new Date(firstXNum).toISOString()}, lastPushedX: ${new Date(lastPushedX).toISOString()})`);
          }
        }
        return;
      }

      // Sort by x and ensure monotonicity
      newPoints.sort((a, b) => a.x - b.x);
@ -1138,6 +1157,13 @@ const ChartjsPlot = ({ session, height = '400px' }) => {
        pointsAdded++;
      }
      sessionData.lastPushedXByDataset.set(datasetIndex, lastX);

      // Log successful streaming data ingestion for gap debugging
      if (newPoints.length > 0) {
        const firstNewPoint = newPoints[0];
        const lastNewPoint = newPoints[newPoints.length - 1];
        console.log(`📊 Streaming ingested: Dataset ${datasetIndex} - ${newPoints.length} points from ${new Date(firstNewPoint.x).toISOString()} to ${new Date(lastNewPoint.x).toISOString()}`);
      }
    });

    // Update chart
@ -415,13 +415,15 @@ export default function PlotHistoricalSession({
    const newStart = new Date(start)
    const newEnd = new Date(end)

    // Calculate new central time AND update range based on actual visible range
    const newCentralTime = new Date((newStart.getTime() + newEnd.getTime()) / 2)
    const actualVisibleRangeSeconds = Math.floor((newEnd.getTime() - newStart.getTime()) / 1000)

    console.log('📊 Pan: New central time:', newCentralTime, 'Actual visible range:', actualVisibleRangeSeconds, 'seconds')

    // IMPORTANT: Update both central time AND range to match what's actually visible
    // This ensures that when data is reloaded, it uses the correct visible range
    debouncedTimeChange(newCentralTime, actualVisibleRangeSeconds)
  }

  // Handle time change from TimePointSelector
@ -442,7 +444,7 @@ export default function PlotHistoricalSession({
  const smallTextColor = useColorModeValue('gray.400', 'gray.500')

  // Additional color mode values for conditional elements
  const whiteAlphaBg = useColorModeValue('whiteAlpha.400', 'blackAlpha.400')
  const inputBg = useColorModeValue('white', 'gray.700')
  const inputHoverBg = useColorModeValue('gray.50', 'gray.600')
  const editFieldColor = useColorModeValue('gray.900', 'gray.100')
@ -72,7 +72,7 @@ export default function PlotRealtimeSession({
  const [isRefreshing, setIsRefreshing] = useState(false)
  const { isOpen: isFullscreen, onOpen: openFullscreen, onClose: closeFullscreen } = useDisclosure()
  const [localConfig, setLocalConfig] = useState({
    time_window: plotDefinition.time_window || 10,
    y_min: plotDefinition.y_min,
    y_max: plotDefinition.y_max,
    trigger_enabled: plotDefinition.trigger_enabled || false,
@ -118,7 +118,7 @@ export default function PlotRealtimeSession({
  useEffect(() => {
    if (!applyingChangesRef.current) {
      setLocalConfig({
        time_window: plotDefinition.time_window || 10,
        y_min: plotDefinition.y_min,
        y_max: plotDefinition.y_max,
        trigger_enabled: plotDefinition.trigger_enabled || false,
@ -216,7 +216,7 @@ export default function PlotRealtimeSession({
      browser_tab_id: browserTabId, // Include unique tab identifier
      name: plotDefinition.name,
      variables: variableNames,
      time_window: plotDefinition.time_window || 10,
      trigger_enabled: plotDefinition.trigger_enabled || false,
      trigger_variable: plotDefinition.trigger_variable,
      trigger_on_true: plotDefinition.trigger_on_true || true,
@ -253,7 +253,7 @@ export default function PlotRealtimeSession({
      browser_tab_id: browserTabId, // Include unique tab identifier
      name: plotDefinition.name,
      variables: plotVariables.map(v => v.variable_name), // Simplified format
      time_window: localConfig.time_window || 10,
      trigger_enabled: localConfig.trigger_enabled,
      trigger_variable: localConfig.trigger_variable,
      trigger_on_true: localConfig.trigger_on_true,
@ -401,7 +401,7 @@ export default function PlotRealtimeSession({

  const resetConfigChanges = () => {
    setLocalConfig({
      time_window: plotDefinition.time_window || 10,
      y_min: plotDefinition.y_min,
      y_max: plotDefinition.y_max,
      trigger_enabled: plotDefinition.trigger_enabled || false,
@ -567,16 +567,16 @@ export default function PlotRealtimeSession({
              <FormControl>
                <FormLabel fontSize="sm">Time Window (seconds)</FormLabel>
                <NumberInput
                  value={localConfig.time_window || ''}
                  onChange={(valueString) => setLocalConfig(prev => ({
                    ...prev,
                    time_window: valueString === '' ? '' : (parseInt(valueString) || 10)
                  }))}
                  min={10}
                  max={3600}
                  size="sm"
                >
                  <NumberInputField placeholder="10" />
                  <NumberInputStepper>
                    <NumberIncrementStepper />
                    <NumberDecrementStepper />
@ -0,0 +1,95 @@
import React from 'react'
import { SimpleGrid, Box, Heading, Text, Stack } from '@chakra-ui/react'

// ConditionalObjectFieldTemplate with PLC-specific field visibility logic
// Hides/shows fields based on PLC memory area and data type rules
export default function ConditionalObjectFieldTemplate(props) {
  const { TitleField, DescriptionField, title, description, properties = [], uiSchema, formData } = props
  const layout = uiSchema && uiSchema['ui:layout']

  // Logic to determine if a field should be visible
  const shouldShowField = (fieldName) => {
    if (!formData) return true

    const area = formData.area
    const type = formData.type

    // DB Number field logic
    if (fieldName === 'db') {
      // Only show DB field when area is 'db'
      return area === 'db'
    }

    // Bit Position field logic
    if (fieldName === 'bit') {
      // Only show bit field for boolean types
      return type === 'bool'
    }

    // Show all other fields by default
    return true
  }

  // Filter properties based on visibility rules
  const visibleProperties = properties.filter(prop => shouldShowField(prop.name))

  if (!layout) {
    return (
      <Stack spacing={3}>
        {title && (
          TitleField ? (
            <TitleField id={`${props.idSchema.$id}__title`} title={title} />
          ) : (
            <Heading as="h5" size="sm">{title}</Heading>
          )
        )}
        {description && (
          DescriptionField ? (
            <DescriptionField id={`${props.idSchema.$id}__desc`} description={description} />
          ) : (
            <Text color="gray.500">{description}</Text>
          )
        )}
        <Stack spacing={2}>
          {visibleProperties.map((prop) => (
            <Box key={prop.name}>{prop.content}</Box>
          ))}
        </Stack>
      </Stack>
    )
  }

  // Map property name to its renderer
  const propMap = new Map(visibleProperties.map((p) => [p.name, p]))

  return (
    <Stack spacing={3}>
      {title && (
        TitleField ? (
          <TitleField id={`${props.idSchema.$id}__title`} title={title} />
        ) : (
          <Heading as="h5" size="sm">{title}</Heading>
        )
      )}
      {description && (
        DescriptionField ? (
          <DescriptionField id={`${props.idSchema.$id}__desc`} description={description} />
        ) : (
          <Text color="gray.500">{description}</Text>
        )
      )}
      {layout.map((row, rowIdx) => (
        <SimpleGrid key={rowIdx} columns={12} spacing={3}>
          {row.map((cell, cellIdx) => {
            const prop = propMap.get(cell.name)
            if (!prop) return null
            const col = Math.min(Math.max(cell.width || 12, 1), 12)
            return (
              <Box key={`${rowIdx}-${cellIdx}`} gridColumn={`span ${col}`}>{prop.content}</Box>
            )
          })}
        </SimpleGrid>
      ))}
    </Stack>
  )
}
@ -86,7 +86,8 @@ export function PlcDataTypeWidget(props) {
    'uint': '16-bit',
    'udint': '32-bit',
    'sint': '8-bit',
    'usint': '8-bit',
    'dword': '32-bit'
  }

  const typeColors = {

@ -99,7 +100,8 @@ export function PlcDataTypeWidget(props) {
    'uint': 'green',
    'udint': 'green',
    'sint': 'green',
    'usint': 'green',
    'dword': 'orange'
  }

  return (
@ -112,7 +112,15 @@ export function useCoordinatedPolling(source, fetchFunction, interval = 5000, de
    let intervalId = null
    let isActive = true
    let consecutiveErrors = 0
    const maxConsecutiveErrors = 3
    let currentInterval = interval

    const scheduleNextPoll = (delay) => {
      if (!isActive) return
      if (intervalId) {
        clearTimeout(intervalId)
      }
      intervalId = setTimeout(poll, delay)
    }

    const poll = async () => {
      if (!isActive) return
@ -120,35 +128,43 @@ export function useCoordinatedPolling(source, fetchFunction, interval = 5000, de
        const data = await fetchFunction()
        if (isActive) {
          consecutiveErrors = 0
          currentInterval = interval // Reset to normal interval
          setConnectionError(null)
          onData(data)
          // Schedule next poll at normal interval
          scheduleNextPoll(interval)
        }
      } catch (error) {
        console.error(`Polling error for ${source}:`, error)
        consecutiveErrors++

        if (consecutiveErrors >= maxConsecutiveErrors) {
          setConnectionError(error)
          // Use exponential backoff instead of stopping completely
          // Start with 10 seconds, max out at 30 seconds
          currentInterval = Math.min(10000 + (consecutiveErrors - 3) * 5000, 30000)
          console.log(`${source}: Using backoff interval ${currentInterval}ms after ${consecutiveErrors} errors`)
        } else {
          // For first few errors, keep normal interval
          currentInterval = interval
        }

        // Always continue polling, even with errors
        if (isActive) {
          scheduleNextPoll(currentInterval)
        }
      }
    }

    // Initial poll
    poll()

    return {
      close: () => {
        isActive = false
        if (intervalId) {
          clearTimeout(intervalId)
          intervalId = null
        }
      }
    }
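The schedule above is a linear ramp with a cap rather than a true exponential backoff. A quick sketch of the same arithmetic (written in Python purely for illustration; the function name is hypothetical):

    def backoff_interval_ms(consecutive_errors, normal_interval_ms=5000):
        """Mirror the hook's schedule: normal interval below the error threshold,
        then 10 s growing by 5 s per extra error, capped at 30 s."""
        if consecutive_errors < 3:
            return normal_interval_ms
        return min(10000 + (consecutive_errors - 3) * 5000, 30000)

    assert backoff_interval_ms(2) == 5000
    assert backoff_interval_ms(3) == 10000
    assert backoff_interval_ms(5) == 20000
    assert backoff_interval_ms(10) == 30000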
@ -1374,129 +1374,56 @@ function DatasetManager() {
          />
        </Flex>

        {/* Schema for selected dataset variables - derived from external schema */}
        {(() => {
          const selectedDatasetVars = getSelectedDatasetVariables()

          // Create simplified schema from external schema for single dataset variables
          let singleDatasetSchema = null
          let singleDatasetUiSchema = null

          if (variablesSchemaData?.schema) {
            // Extract the variables array schema from the external schema
            // Path: schema.properties.variables.items.properties.variables
            const datasetItemSchema = variablesSchemaData.schema.properties?.variables?.items
            const variablesArraySchema = datasetItemSchema?.properties?.variables

            if (variablesArraySchema) {
              singleDatasetSchema = {
                type: "object",
                properties: {
                  variables: {
                    ...variablesArraySchema,
                    title: `Variables for Dataset: ${availableDatasets.find(d => d.id === selectedDatasetId)?.name || selectedDatasetId}`,
                    description: `PLC variables to record in dataset ${selectedDatasetId}`
                  }
                }
              }
            }
          }

          if (variablesSchemaData?.uiSchema) {
            // Extract the variables UI schema from the external UI schema
            // Path: uiSchema.variables.items.variables
            const datasetItemUiSchema = variablesSchemaData.uiSchema.variables?.items
            const variablesUiSchema = datasetItemUiSchema?.variables

            if (variablesUiSchema) {
              singleDatasetUiSchema = {
                variables: variablesUiSchema
              }
            }
          }

          // Fallback if external schemas are not available
          if (!singleDatasetSchema || !singleDatasetUiSchema) {
            return (
              <Alert status="warning">
                <AlertIcon />
                <Text>External schemas not loaded. Please refresh the page.</Text>
              </Alert>
            )
          }

          // Function to expand symbol data using backend API
          const expandSymbolToManualConfig = async (symbolName, currentVariable = {}) => {
@ -1504,8 +1431,7 @@ function DatasetManager() {
              // Create a temporary variable array with just this symbol
              const tempVariables = [{
                symbol: symbolName,
                streaming: currentVariable.streaming || false
              }]

              // Call backend API to process the symbol
@ -1527,7 +1453,7 @@ function DatasetManager() {
              // Build the configuration object, only including relevant fields
              const config = {
                name: processedVar.name || symbolName,
                area: processedVar.area || "DB",
                offset: processedVar.offset !== undefined && processedVar.offset !== null ? processedVar.offset : 0,
                type: processedVar.type || "real",
                streaming: currentVariable.streaming || false
@ -1536,7 +1462,7 @@ function DatasetManager() {
              // Only include db field if it's actually present and area requires it
              if (processedVar.db !== undefined && processedVar.db !== null) {
                config.db = processedVar.db
              } else if (config.area === "DB") {
                // Default to 1 only for DB area if no DB number was provided
                config.db = 1
              }
@ -1554,7 +1480,7 @@ function DatasetManager() {
              // If backend processing failed, return basic defaults
              const fallbackConfig = {
                name: currentVariable.name || symbolName,
                area: "DB", // Default to DB area
                offset: 0,
                type: "real",
                bit: 0,
@ -1562,7 +1488,7 @@ function DatasetManager() {
              }

              // Only add db field for DB area
              if (fallbackConfig.area === "DB") {
                fallbackConfig.db = 1
              }

@ -1573,7 +1499,7 @@ function DatasetManager() {
              // Return basic defaults on error
              const errorConfig = {
                name: currentVariable.name || symbolName,
                area: "DB", // Default to DB area
                offset: 0,
                type: "real",
                bit: 0,
@ -1581,7 +1507,7 @@ function DatasetManager() {
              }

              // Only add db field for DB area
              if (errorConfig.area === "DB") {
                errorConfig.db = 1
              }

@ -1589,68 +1515,10 @@ function DatasetManager() {
            }
          }

          // Standard form change handler for external schema compatibility
          const handleFormChange = ({ formData }) => {
            // Direct update without special processing for external schema compatibility
            updateSelectedDatasetVariables(formData)
          }

          return (
@ -1669,12 +1537,7 @@ function DatasetManager() {
            })
          }}
          onChange={({ formData }) => {
            handleFormChange({ formData })
          }}
        >
          <VStack spacing={3} mt={4} align="stretch">
@ -0,0 +1,9 @@
{
    "last_state": {
        "should_connect": false,
        "should_stream": false,
        "active_datasets": []
    },
    "auto_recovery_enabled": true,
    "last_update": "2025-08-22T16:38:28.045373"
}
@ -0,0 +1,267 @@
# -*- mode: python ; coding: utf-8 -*-
import os
import sys

block_cipher = None

# Analysis for main application (backend)
a_main = Analysis(
    ['main.py'],
    pathex=[],
    binaries=[
        # Include snap7.dll - now confirmed to be in project root
        ('snap7.dll', '.'),
    ],
    datas=[
        # Include the entire frontend build
        ('frontend/dist', 'frontend/dist'),

        # Include configuration directories and schemas
        ('config', 'config'),

        # Include core modules
        ('core', 'core'),

        # Include utils
        ('utils', 'utils'),

        # Include translation files
        ('translation.json', '.'),
        ('i18n.js', '.'),
    ],
    hiddenimports=[
        # Flask and web dependencies
        'jinja2.ext',
        'flask',
        'flask_cors',
        'flask_socketio',
        'socketio',
        'werkzeug',

        # JSON Schema validation
        'jsonschema',
        'jsonschema.validators',
        'jsonschema._format',
        'jsonschema._types',

        # PLC and system dependencies
        'snap7',
        'psutil._pswindows',
        'psutil._psutil_windows',

        # Data processing
        'pandas',
        'numpy',

        # Threading and networking
        'threading',
        'socket',
        'json',
        'csv',
        'datetime',
        'pathlib',

        # Core modules (explicit imports)
        'core.config_manager',
        'core.plc_client',
        'core.plc_data_streamer',
        'core.event_logger',
        'core.instance_manager',
        'core.schema_manager',
        'core.streamer',
        'core.plot_manager',
        'core.historical_cache',
        'core.performance_monitor',
        'core.priority_manager',
        'core.rotating_logger',

        # Utils modules
        'utils.csv_validator',
        'utils.json_manager',
        'utils.symbol_loader',
        'utils.symbol_processor',
        'utils.instance_manager',
    ],
    hookspath=[],
    hooksconfig={},
    runtime_hooks=[],
    excludes=[
        # Exclude unnecessary packages to reduce size
        'matplotlib',
        'scipy',
        'IPython',
        'notebook',
        'jupyter',
        'tests',
        'unittest',
        'pydoc',
        'doctest',
    ],
    win_no_prefer_redirects=False,
    win_private_assemblies=False,
    cipher=block_cipher,
    noarchive=False,
)

# Analysis for backend manager (watchdog)
a_manager = Analysis(
    ['backmanager.py'],
    pathex=[],
    binaries=[],
    datas=[
        # Include utils for instance management
        ('utils', 'utils'),
    ],
    hiddenimports=[
        # System and monitoring dependencies
        'psutil',
        'psutil._pswindows',
        'psutil._psutil_windows',
        'requests',
        'subprocess',
        'logging',
        'json',

        # Utils modules needed by manager
        'utils.instance_manager',
    ],
    hookspath=[],
    hooksconfig={},
    runtime_hooks=[],
    excludes=[
        # Exclude heavy packages not needed by manager
        'matplotlib',
        'scipy',
        'IPython',
        'notebook',
        'jupyter',
        'flask',
        'snap7',
        'pandas',
        'numpy',
    ],
    win_no_prefer_redirects=False,
    win_private_assemblies=False,
    cipher=block_cipher,
    noarchive=False,
)

# Build PYZ files
pyz_main = PYZ(a_main.pure, a_main.zipped_data, cipher=block_cipher)
pyz_manager = PYZ(a_manager.pure, a_manager.zipped_data, cipher=block_cipher)

# Build main backend executable
exe_main = EXE(
    pyz_main,
    a_main.scripts,
    [],
    exclude_binaries=True,
    name='S7_Streamer_Logger',
    debug=False,
    bootloader_ignore_signals=False,
    strip=False,
    upx=True,
    console=True,  # True to show the server logs in a console window.
    disable_windowed_traceback=False,
    argv_emulation=False,
    target_arch=None,
    codesign_identity=None,
    entitlements_file=None,
)

# Build backend manager executable
exe_manager = EXE(
    pyz_manager,
    a_manager.scripts,
    [],
    exclude_binaries=True,
    name='Backend_Manager',
    debug=False,
    bootloader_ignore_signals=False,
    strip=False,
    upx=True,
    console=True,
    disable_windowed_traceback=False,
    argv_emulation=False,
    target_arch=None,
    codesign_identity=None,
    entitlements_file=None,
)

# Collect all files together - Only include executables and shared dependencies
coll = COLLECT(
    exe_main,
    exe_manager,
    a_main.binaries,
    a_main.zipfiles,
    a_main.datas,
    # Don't duplicate manager dependencies since they're minimal
    strip=False,
    upx=True,
    upx_exclude=[],
    name='main'
)

# Post-build: Copy config directory to the same level as the executable
import shutil
import os

def copy_config_external():
    """Copy config directory to external location for runtime access"""
    try:
        # Get absolute paths
        current_dir = os.path.abspath('.')
        source_config = os.path.join(current_dir, 'config')
        dist_main_dir = os.path.join(current_dir, 'dist', 'main')
        dest_config = os.path.join(dist_main_dir, 'config')

        print(f"Current directory: {current_dir}")
        print(f"Source config: {source_config}")
        print(f"Destination config: {dest_config}")

        # Ensure dist/main directory exists
        os.makedirs(dist_main_dir, exist_ok=True)

        # Remove existing config if present
        if os.path.exists(dest_config):
            shutil.rmtree(dest_config)
            print(f"Removed existing config at: {dest_config}")

        # Copy config directory to dist/main/config
        if os.path.exists(source_config):
            shutil.copytree(source_config, dest_config)
            print(f"✓ Config directory copied to: {dest_config}")
            return True
        else:
            print(f"✗ Source config directory not found: {source_config}")
            return False

    except Exception as e:
        print(f"✗ Error copying config directory: {e}")
        return False

# Execute the copy operation
copy_config_external()

def config_path(relative_path):
    """Get path to config file, checking external location first when running as executable"""
    if getattr(sys, 'frozen', False):
        # Running as executable - config should be at same level as executable
        executable_dir = os.path.dirname(sys.executable)
        external_config = os.path.join(executable_dir, 'config', relative_path)

        if os.path.exists(external_config):
            return external_config

        # Fallback to internal config within _internal
        internal_config = os.path.join(executable_dir, '_internal', 'config', relative_path)
        if os.path.exists(internal_config):
            return internal_config

        raise FileNotFoundError(f"Configuration file not found: {relative_path}")
    else:
        # Running as script - use standard path
        base_dir = os.path.dirname(os.path.abspath(__file__))
        project_root = os.path.dirname(base_dir)
        return os.path.join(project_root, 'config', relative_path)
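A hedged usage sketch for config_path; the file name below is hypothetical and only illustrates the frozen-vs-script resolution order:

    # Resolves to <exe dir>/config/plc_config.json when frozen,
    # or <project root>/config/plc_config.json when run as a script.
    plc_config_file = config_path("plc_config.json")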
main.py
@ -10,6 +10,7 @@ import json
import time
import signal
import sys
import requests  # For HTTP health checks
from datetime import datetime, timedelta, timezone
import os
import logging
@ -44,62 +45,74 @@ except ImportError:
    TKINTER_AVAILABLE = False
    print("Warning: tkinter not available. File browse functionality will be limited.")

# System Tray Icon imports
try:
    import pystray
    from PIL import Image
    import threading

    TRAY_AVAILABLE = True
except ImportError:
    TRAY_AVAILABLE = False
    print(
        "Warning: pystray/PIL not available. System tray functionality will be disabled."
    )

# Import core modules
from core import PLCDataStreamer
from core.historical_cache import HistoricalDataCache
from utils.json_manager import JSONManager, SchemaManager
from utils.symbol_loader import SymbolLoader
from utils.symbol_processor import SymbolProcessor
from utils.instance_manager import InstanceManager


def check_backend_instance_robust(
    port: int = 5050, lock_file: str = "plc_streamer.lock"
):
    """
    🔒 ROBUST INSTANCE CHECK - HTTP + PID based verification

    This function provides a more reliable way to detect existing backend instances:
    1. Double HTTP health check with 5-second interval
    2. PID verification and zombie process cleanup
    3. Automatic lock file management

    Args:
        port: Backend server port (default: 5050)
        lock_file: Lock file path (default: "plc_streamer.lock")

    Returns:
        Tuple[bool, str]: (can_proceed, message)
        - can_proceed: True if this instance can start safely
        - message: Detailed status message
    """
    print("🔍 Starting robust backend instance verification...")

    try:
        # Initialize instance manager
        instance_manager = InstanceManager(port=port, lock_file=lock_file)

        # Perform comprehensive instance check
        can_proceed, message = instance_manager.check_and_handle_existing_instance()

        if can_proceed:
            print(f"✅ {message}")
            print("🔒 Initializing new backend instance...")

            # Create lock file for this instance
            if not instance_manager.initialize_instance():
                return False, "❌ Failed to create instance lock file"

            return True, "✅ Backend instance ready to start"
        else:
            print(f"🚫 {message}")
            return False, message

    except Exception as e:
        error_msg = f"❌ Error during instance verification: {e}"
        print(error_msg)
        return False, error_msg


app = Flask(__name__)
@ -3197,7 +3210,7 @@ def stream_status():

def graceful_shutdown():
    """Perform graceful shutdown"""
    """Perform graceful shutdown with robust instance cleanup"""
    print("\n⏹️ Performing graceful shutdown...")
    try:
        if streamer is not None:

@ -3223,6 +3236,27 @@ def graceful_shutdown():
    else:
        print("⚠️ Streamer not initialized, skipping shutdown steps")

    # 🔒 ROBUST CLEANUP: Use instance manager for reliable lock file cleanup
    print("🧹 Cleaning up instance lock file...")
    try:
        instance_manager = InstanceManager(port=5050, lock_file="plc_streamer.lock")
        if instance_manager.cleanup_instance():
            print("✅ Instance lock file cleaned up successfully")
        else:
            print("⚠️ Warning: Instance lock file cleanup had issues")
    except Exception as cleanup_error:
        print(f"⚠️ Error during instance cleanup: {cleanup_error}")
        # Fallback to direct file removal
        try:
            import os

            lock_file = "plc_streamer.lock"
            if os.path.exists(lock_file):
                os.remove(lock_file)
                print(f"🧹 Emergency cleanup: Removed lock file directly")
        except:
            pass  # Silent fail for emergency cleanup

    print("📝 Closing rotating logger system...")
    # 📝 Close rotating logger system
    backend_logger.close()
@ -3250,8 +3284,62 @@ def signal_handler(sig, frame):
    sys.exit(0)


# Global variables for Flask and tray management
flask_thread = None
tray_icon = None


def open_app_browser(icon, item):
    """Open application in web browser"""
    import webbrowser

    webbrowser.open("http://localhost:5050")


def shutdown_from_tray(icon, item):
    """Shutdown Flask server from tray menu"""
    print("🔄 Shutdown requested from system tray...")
    graceful_shutdown()
    if tray_icon:
        tray_icon.stop()


def exit_application(icon, item):
    """Exit entire application from tray menu"""
    print("🚪 Exit requested from system tray...")
    graceful_shutdown()
    if tray_icon:
        tray_icon.stop()
    sys.exit(0)


def run_flask_app():
    """Run Flask application in a separate thread"""
    global streamer
    try:
        print("🚀 Starting Flask server for PLC S7-315 Streamer")
        print("📊 Web interface available at: http://localhost:5050")
        print("🔧 Configure your PLC and variables through the web interface")

        # Initialize streamer (this will handle instance locking and auto-recovery)
        streamer = PLCDataStreamer()

        # Start Flask application
        app.run(
            debug=False,
            host="0.0.0.0",
            port=5050,
            use_reloader=False,
            threaded=True,
        )
    except Exception as e:
        print(f"💥 Flask error: {e}")


def main():
    """Main application entry point with error handling and recovery"""
    """Main application entry point with system tray support"""
    global flask_thread, tray_icon, streamer

    # Setup signal handlers for graceful shutdown
    signal.signal(signal.SIGINT, signal_handler)
    signal.signal(signal.SIGTERM, signal_handler)

@ -3259,52 +3347,161 @@ def main():
    max_retries = 3
    retry_count = 0

    while retry_count < max_retries:
    # Check if tray is available and try to setup system tray
    if TRAY_AVAILABLE:
        try:
            print("🚀 Starting Flask server for PLC S7-315 Streamer")
            print("📊 Web interface available at: http://localhost:5050")
            print("🔧 Configure your PLC and variables through the web interface")
            # Start Flask in a separate thread
            flask_thread = threading.Thread(target=run_flask_app, daemon=True)
            flask_thread.start()

            # Initialize streamer (this will handle instance locking and auto-recovery)
            global streamer
            # Give Flask time to start
            time.sleep(2)

            # Start Flask application
            app.run(
                debug=False,
                host="0.0.0.0",
                port=5050,
                use_reloader=False,
                threaded=True,
            )

            # If we reach here, the server stopped normally
            break

        except RuntimeError as e:
            if "Another instance" in str(e):
                print(f"❌ {e}")
                print("💡 Tip: Stop the other instance or wait for it to finish")
                sys.exit(1)
            else:
                print(f"⚠️ Runtime error: {e}")
                retry_count += 1

        except KeyboardInterrupt:
            print("\n⏸️ Received interrupt signal...")
            graceful_shutdown()
            break
            # Setup and run the system tray icon
            icon_path = project_path("frontend", "src", "assets", "logo", "record.png")
            try:
                image = Image.open(icon_path)
                menu = pystray.Menu(
                    pystray.MenuItem(
                        "🌐 Abrir PLC Streamer", open_app_browser, default=True
                    ),
                    pystray.MenuItem("🛑 Cerrar servidor", shutdown_from_tray),
                    pystray.MenuItem("🚪 Salir", exit_application),
                )
                tray_icon = pystray.Icon(
                    "PLC S7-315 Streamer", image, "PLC S7-315 Streamer & Logger", menu
                )
                print("🎯 Starting system tray icon...")
                tray_icon.run()  # This blocks the main thread until icon.stop() is called
            except FileNotFoundError:
                print(
                    f"⚠️ Error: Icon not found at '{icon_path}'. System tray will not start."
                )
                print(
                    "🔧 The Flask application will continue running in background. Press Ctrl+C to stop."
                )
                # Keep the main thread alive so the Flask thread doesn't exit immediately
                try:
                    while flask_thread.is_alive():
                        flask_thread.join(timeout=1.0)
                except KeyboardInterrupt:
                    print("\n⏸️ Ctrl+C detected. Stopping Flask...")
                    graceful_shutdown()
                    print("👋 Exiting.")
            except Exception as e:
                print(f"⚠️ Error starting system tray: {e}")
                # Keep Flask running without tray
                try:
                    while flask_thread.is_alive():
                        flask_thread.join(timeout=1.0)
                except KeyboardInterrupt:
                    print("\n⏸️ Ctrl+C detected. Stopping Flask...")
                    graceful_shutdown()
                    print("👋 Exiting.")

        except Exception as e:
            print(f"💥 Unexpected error: {e}")
            retry_count += 1
            print(f"💥 Error with threaded execution: {e}")
            # Fallback to original single-threaded mode
            retry_count = 0
            while retry_count < max_retries:
                try:
                    print(
                        "🚀 Starting Flask server for PLC S7-315 Streamer (fallback mode)"
                    )
                    print("📊 Web interface available at: http://localhost:5050")
                    print(
                        "🔧 Configure your PLC and variables through the web interface"
                    )

        if retry_count < max_retries:
            print(f"🔄 Attempting restart ({retry_count}/{max_retries})...")
            time.sleep(2)  # Wait before retry
        else:
            print("❌ Maximum retries reached. Exiting...")
                    # Initialize streamer
                    streamer = PLCDataStreamer()

                    # Start Flask application
                    app.run(
                        debug=False,
                        host="0.0.0.0",
                        port=5050,
                        use_reloader=False,
                        threaded=True,
                    )
                    break

                except RuntimeError as e:
                    if "Another instance" in str(e):
                        print(f"❌ {e}")
                        print(
                            "💡 Tip: Stop the other instance or wait for it to finish"
                        )
                        sys.exit(1)
                    else:
                        print(f"⚠️ Runtime error: {e}")
                        retry_count += 1

                except KeyboardInterrupt:
                    print("\n⏸️ Received interrupt signal...")
                    graceful_shutdown()
                    break

                except Exception as e:
                    print(f"💥 Unexpected error: {e}")
                    retry_count += 1

                if retry_count < max_retries:
                    print(f"🔄 Attempting restart ({retry_count}/{max_retries})...")
                    time.sleep(2)
                else:
                    print("❌ Maximum retries reached. Exiting...")
                    graceful_shutdown()
                    sys.exit(1)
    else:
        # Original mode without system tray (when pystray is not available)
        print("⚠️ System tray not available. Running in console mode.")
        while retry_count < max_retries:
            try:
                print("🚀 Starting Flask server for PLC S7-315 Streamer")
                print("📊 Web interface available at: http://localhost:5050")
                print("🔧 Configure your PLC and variables through the web interface")

                # Initialize streamer
                streamer = PLCDataStreamer()

                # Start Flask application
                app.run(
                    debug=False,
                    host="0.0.0.0",
                    port=5050,
                    use_reloader=False,
                    threaded=True,
                )
                break

            except RuntimeError as e:
                if "Another instance" in str(e):
                    print(f"❌ {e}")
                    print("💡 Tip: Stop the other instance or wait for it to finish")
                    sys.exit(1)
                else:
                    print(f"⚠️ Runtime error: {e}")
                    retry_count += 1

            except KeyboardInterrupt:
                print("\n⏸️ Received interrupt signal...")
                graceful_shutdown()
                sys.exit(1)
                break

            except Exception as e:
                print(f"💥 Unexpected error: {e}")
                retry_count += 1

            if retry_count < max_retries:
                print(f"🔄 Attempting restart ({retry_count}/{max_retries})...")
                time.sleep(2)
            else:
                print("❌ Maximum retries reached. Exiting...")
                graceful_shutdown()
                sys.exit(1)

    print("🏁 Application finished.")


# ==============================================================================
@ -3928,16 +4125,21 @@ if __name__ == "__main__":
    print(f"🚀 Starting PLC S7-315 Streamer & Logger...")
    print(f"🐍 Process PID: {os.getpid()}")

    # 🔍 OPTIONAL: Early check for existing instance (faster feedback)
    # Comment out the next 4 lines if you prefer the full error handling in PLCDataStreamer
    if not check_for_running_instance_early():
        print("❌ Startup aborted due to existing instance")
        # input("Press Enter to exit...")
    # 🔒 ROBUST INSTANCE CHECK - HTTP + PID based verification
    print("=" * 60)
    can_proceed, check_message = check_backend_instance_robust(port=5050)
    print("=" * 60)

    if not can_proceed:
        print(f"❌ Startup aborted: {check_message}")
        print(
            "💡 Tip: If you believe this is an error, check Task Manager for python.exe processes"
        )
        # input("\nPress Enter to exit...")
        sys.exit(1)

    try:
        # Initialize streamer instance with instance check
        print("✅ No conflicting instances found (early check)")
        # Initialize streamer instance
        print("🔧 Initializing PLCDataStreamer...")
        streamer = PLCDataStreamer()


@ -3946,6 +4148,7 @@ if __name__ == "__main__":
        historical_cache = HistoricalDataCache(backend_logger)

        print("✅ Backend initialization complete")
        print(f"🌐 Starting Flask server on port 5050...")
        main()

    except RuntimeError as e:
76 main.spec
@ -4,6 +4,7 @@ import sys

block_cipher = None

# Analysis for main application
a = Analysis(
    ['main.py'],
    pathex=[],

@ -102,7 +103,47 @@ a = Analysis(
    noarchive=False,
)

# Analysis for backend manager
a_manager = Analysis(
    ['backmanager.py'],
    pathex=[],
    binaries=[],
    datas=[],
    hiddenimports=[
        # Backend manager dependencies
        'psutil',
        'psutil._pswindows',
        'psutil._psutil_windows',
        'requests',
        'json',
        'datetime',
        'threading',
        'subprocess',
        'logging',
    ],
    hookspath=[],
    hooksconfig={},
    runtime_hooks=[],
    excludes=[
        # Exclude unnecessary packages to reduce size
        'matplotlib',
        'scipy',
        'IPython',
        'notebook',
        'jupyter',
        'tests',
        'unittest',
        'pydoc',
        'doctest',
    ],
    win_no_prefer_redirects=False,
    win_private_assemblies=False,
    cipher=block_cipher,
    noarchive=False,
)

pyz = PYZ(a.pure, a.zipped_data, cipher=block_cipher)
pyz_manager = PYZ(a_manager.pure, a_manager.zipped_data, cipher=block_cipher)

exe = EXE(
    pyz,

@ -121,7 +162,40 @@ exe = EXE(
    codesign_identity=None,
    entitlements_file=None,
)
coll = COLLECT(exe, a.binaries, a.zipfiles, a.datas, strip=False, upx=True, upx_exclude=[], name='main')

# Executable for backend manager
exe_manager = EXE(
    pyz_manager,
    a_manager.scripts,
    [],
    exclude_binaries=True,
    name='Backend_Manager',
    debug=False,
    bootloader_ignore_signals=False,
    strip=False,
    upx=True,
    console=True,  # True to keep the manager's logs visible in a console.
    disable_windowed_traceback=False,
    argv_emulation=False,
    target_arch=None,
    codesign_identity=None,
    entitlements_file=None,
)

coll = COLLECT(
    exe,
    a.binaries,
    a.zipfiles,
    a.datas,
    exe_manager,
    a_manager.binaries,
    a_manager.zipfiles,
    a_manager.datas,
    strip=False,
    upx=True,
    upx_exclude=[],
    name='main'
)

# Post-build: Copy config directory to the same level as the executable
import shutil
@ -3,7 +3,7 @@ Flask==2.3.3
Flask-Cors==4.0.0

# PLC Communication
python-snap7==1.3
python-snap7==2.0.2

# System Monitoring & Process Management
psutil==5.9.5
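The python-snap7 bump from 1.3 to 2.0.2 is what drives the snap7.types → snap7.type import renames across this changeset. A hedged sketch of a dual-version import shim, mirroring the try/except fallback this diff applies to the batch reader (only the module path differs between the two major versions):

try:
    from snap7.type import S7DataItem  # python-snap7 >= 2.0 module layout
except ImportError:
    from snap7.types import S7DataItem  # python-snap7 1.x fallback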
@ -15,6 +15,10 @@ numpy==2.2.6
# JSON Schema Validation
jsonschema==4.22.0

# System Tray Icon Support
pystray==0.19.4
Pillow==10.0.1

# Note: The following dependencies are automatically installed with Flask:
# - Werkzeug==3.1.3 (WSGI toolkit)
# - Jinja2==3.1.6 (templating engine)
@ -1,14 +1,12 @@
{
    "last_state": {
        "should_connect": true,
        "should_stream": false,
        "should_stream": true,
        "active_datasets": [
            "DAR",
            "Fast",
            "Test"
            "DAR"
        ]
    },
    "auto_recovery_enabled": true,
    "last_update": "2025-08-22T12:14:57.462145",
    "last_update": "2025-08-25T18:40:24.478882",
    "plotjuggler_path": "C:\\Program Files\\PlotJuggler\\plotjuggler.exe"
}
@ -0,0 +1,72 @@
#!/usr/bin/env python3
"""
Test script to validate the disk space calculation fix
"""

import requests
import json
import time


def test_disk_space_api():
    """Test the /api/status endpoint to see if disk_space_info works"""

    url = "http://localhost:5050/api/status"

    try:
        print("🔍 Testing disk space calculation...")

        # Make the API request
        response = requests.get(url, timeout=10)

        if response.status_code == 200:
            data = response.json()

            # Check if disk_space_info exists and is valid
            disk_info = data.get("disk_space_info")

            if disk_info is not None:
                print("✅ Disk space information retrieved successfully!")
                print(f"📁 Free space: {disk_info.get('free_space', 'N/A')}")
                print(f"📊 Total space: {disk_info.get('total_space', 'N/A')}")
                print(f"💾 Used space: {disk_info.get('used_space', 'N/A')}")
                print(f"📈 Percent used: {disk_info.get('percent_used', 'N/A')}%")
                print(
                    f"⏱️ Recording time left: {disk_info.get('recording_time_left', 'N/A')}"
                )
                print(
                    f"📝 Avg file size per hour: {disk_info.get('avg_file_size_per_hour', 'N/A')}"
                )

                return True
            else:
                print("❌ disk_space_info is null - error occurred during calculation")
                return False

        else:
            print(f"❌ HTTP Error: {response.status_code}")
            print(response.text)
            return False

    except requests.exceptions.RequestException as e:
        print(f"❌ Request failed: {e}")
        return False
    except Exception as e:
        print(f"❌ Unexpected error: {e}")
        return False


if __name__ == "__main__":
    print("🚀 Testing disk space calculation fix...")
    print("=" * 50)

    # Wait a bit for server to be ready
    time.sleep(2)

    success = test_disk_space_api()

    print("=" * 50)
    if success:
        print("✅ Test PASSED - Disk space calculation is working!")
    else:
        print("❌ Test FAILED - Disk space calculation has issues")
@ -0,0 +1,51 @@
#!/usr/bin/env python3
"""
Test script to verify disk_space functionality after psutil fix
"""
import requests
import json


def test_disk_status():
    """Test the /api/status endpoint to check if disk_space_info works correctly"""
    try:
        print("🧪 Testing /api/status endpoint...")
        response = requests.get("http://localhost:5050/api/status", timeout=10)

        if response.status_code == 200:
            data = response.json()
            print("✅ Status endpoint responded successfully")

            # Check if disk_space_info is present and valid
            if "disk_space_info" in data:
                disk_info = data["disk_space_info"]
                print(f"✅ Disk space info retrieved successfully:")
                print(f"   📁 Free space: {disk_info.get('free_space', 'Unknown')}")
                print(f"   📁 Total space: {disk_info.get('total_space', 'Unknown')}")
                print(f"   📁 Used space: {disk_info.get('used_space', 'Unknown')}")
                print(
                    f"   📊 Percent used: {disk_info.get('percent_used', 'Unknown')}%"
                )
                print(
                    f"   ⏱️ Recording time left: {disk_info.get('recording_time_left', 'Unknown')}"
                )

                if disk_info.get("error"):
                    print(f"❌ Error in disk_space_info: {disk_info['error']}")
                else:
                    print("✅ No errors in disk_space_info")
            else:
                print("❌ disk_space_info not found in response")

        else:
            print(f"❌ Status endpoint failed: {response.status_code}")
            print(f"Response: {response.text}")

    except requests.exceptions.ConnectionError:
        print("❌ Cannot connect to backend server at http://localhost:5050")
    except Exception as e:
        print(f"❌ Error testing disk status: {e}")


if __name__ == "__main__":
    test_disk_status()
@ -0,0 +1,289 @@
"""
🔒 Instance Manager - Robust backend instance control system

This module provides a reliable way to manage backend instances using:
1. HTTP health check on the backend port
2. PID-based verification and cleanup
3. Graceful termination of zombie processes

Key features:
- Double health check with 5-second intervals for reliability
- Automatic cleanup of stale lock files
- Force termination of unresponsive processes
- Thread-safe operations
"""

import os
import sys
import time
import json
import psutil
import requests
from typing import Optional, Tuple


class InstanceManager:
    """Manages backend instance lifecycle and prevents duplicate executions"""

    def __init__(
        self,
        port: int = 5050,
        lock_file: str = "plc_streamer.lock",
        health_endpoint: str = "/api/health",
        check_timeout: float = 3.0,
        check_interval: float = 5.0,
    ):
        """
        Initialize the instance manager

        Args:
            port: Backend server port to check
            lock_file: Path to the PID lock file
            health_endpoint: HTTP endpoint for health checks
            check_timeout: Timeout for each HTTP request (seconds)
            check_interval: Time between double-checks (seconds)
        """
        self.port = port
        self.lock_file = lock_file
        self.health_endpoint = health_endpoint
        self.check_timeout = check_timeout
        self.check_interval = check_interval
        self.base_url = f"http://localhost:{port}"

    def is_backend_alive_http(self) -> bool:
        """
        Check if backend is alive via HTTP health check

        Returns:
            True if backend responds to health check, False otherwise
        """
        try:
            response = requests.get(
                f"{self.base_url}{self.health_endpoint}", timeout=self.check_timeout
            )
            # Accept any successful HTTP response (200-299)
            return 200 <= response.status_code < 300

        except (
            requests.RequestException,
            requests.ConnectionError,
            requests.Timeout,
            requests.ConnectTimeout,
        ):
            return False
        except Exception as e:
            print(f"⚠️ Unexpected error during health check: {e}")
            return False

    def get_lock_file_pid(self) -> Optional[int]:
        """
        Read PID from lock file

        Returns:
            PID if lock file exists and is valid, None otherwise
        """
        if not os.path.exists(self.lock_file):
            return None

        try:
            with open(self.lock_file, "r") as f:
                content = f.read().strip()
                return int(content) if content else None
        except (ValueError, FileNotFoundError, IOError):
            return None

    def is_process_our_backend(self, pid: int) -> bool:
        """
        Verify if the process with given PID is our backend application

        Args:
            pid: Process ID to check

        Returns:
            True if it's our backend process, False otherwise
        """
        try:
            if not psutil.pid_exists(pid):
                return False

            proc = psutil.Process(pid)
            cmdline = " ".join(proc.cmdline()).lower()

            # Check for our application signatures
            backend_signatures = [
                "main.py",
                "s7_snap7_streamer_n_log",
                "plc_streamer",
                "plcdatastreamer",
            ]

            return any(sig in cmdline for sig in backend_signatures)

        except (psutil.NoSuchProcess, psutil.AccessDenied, psutil.ZombieProcess):
            return False
        except Exception as e:
            print(f"⚠️ Error checking process {pid}: {e}")
            return False

    def terminate_process_safely(self, pid: int) -> bool:
        """
        Safely terminate a process

        Args:
            pid: Process ID to terminate

        Returns:
            True if process was terminated successfully, False otherwise
        """
        try:
            if not psutil.pid_exists(pid):
                return True  # Already gone

            proc = psutil.Process(pid)
            print(f"🛑 Attempting to terminate process {pid} ({proc.name()})...")

            # Try graceful termination first
            proc.terminate()

            # Wait up to 10 seconds for graceful shutdown
            try:
                proc.wait(timeout=10)
                print(f"✅ Process {pid} terminated gracefully")
                return True
            except psutil.TimeoutExpired:
                # Force kill if graceful didn't work
                print(f"⚡ Force killing process {pid}...")
                proc.kill()
                proc.wait(timeout=5)
                print(f"💥 Process {pid} force killed")
                return True

        except (psutil.NoSuchProcess, psutil.AccessDenied):
            return True  # Process already gone or no permission
        except Exception as e:
            print(f"❌ Error terminating process {pid}: {e}")
            return False

    def cleanup_lock_file(self) -> bool:
        """
        Remove the lock file

        Returns:
            True if lock file was removed or didn't exist, False on error
        """
        try:
            if os.path.exists(self.lock_file):
                os.remove(self.lock_file)
                print(f"🧹 Removed lock file: {self.lock_file}")
            return True
        except Exception as e:
            print(f"❌ Error removing lock file: {e}")
            return False

    def create_lock_file(self) -> bool:
        """
        Create lock file with current process PID

        Returns:
            True if lock file was created successfully, False otherwise
        """
        try:
            with open(self.lock_file, "w") as f:
                f.write(str(os.getpid()))
            print(f"🔒 Created lock file: {self.lock_file} (PID: {os.getpid()})")
            return True
        except Exception as e:
            print(f"❌ Error creating lock file: {e}")
            return False

    def check_and_handle_existing_instance(self) -> Tuple[bool, str]:
        """
        Main method: Check for existing instances and handle them

        Returns:
            Tuple of (can_proceed, message)
            - can_proceed: True if this instance can start, False if should exit
            - message: Description of what happened
        """
        print("🔍 Checking for existing backend instances...")

        # Step 1: First HTTP health check
        print("📡 Performing first health check...")
        if self.is_backend_alive_http():
            return False, f"❌ Another backend is already running on port {self.port}"

        print(f"⏳ Waiting {self.check_interval} seconds for double-check...")
        time.sleep(self.check_interval)

        # Step 2: Second HTTP health check (double verification)
        print("📡 Performing second health check...")
        if self.is_backend_alive_http():
            return False, f"❌ Another backend is confirmed running on port {self.port}"

        print("✅ No active backend detected via HTTP")

        # Step 3: Check lock file and handle zombie processes
        lock_pid = self.get_lock_file_pid()
        if lock_pid is None:
            print("📝 No lock file found")
            return True, "✅ No existing instances detected"

        print(f"📋 Found lock file with PID: {lock_pid}")

        # Step 4: Verify if the process is actually our backend
        if not self.is_process_our_backend(lock_pid):
            print(f"🧹 PID {lock_pid} is not our backend process")
            self.cleanup_lock_file()
            return True, "✅ Cleaned up stale lock file"

        # Step 5: We have a zombie backend process - terminate it
        print(f"🧟 Found zombie backend process (PID: {lock_pid})")
        if self.terminate_process_safely(lock_pid):
            self.cleanup_lock_file()
            print("🎯 Successfully cleaned up zombie backend")
            return True, "✅ Cleaned up zombie backend process"
        else:
            return False, f"❌ Failed to cleanup zombie process (PID: {lock_pid})"

    def initialize_instance(self) -> bool:
        """
        Initialize this instance (create lock file)

        Returns:
            True if initialization successful, False otherwise
        """
        return self.create_lock_file()

    def cleanup_instance(self) -> bool:
        """
        Cleanup this instance (remove lock file)

        Returns:
            True if cleanup successful, False otherwise
        """
        return self.cleanup_lock_file()


def check_backend_instance(
    port: int = 5050, lock_file: str = "plc_streamer.lock"
) -> Tuple[bool, str]:
    """
    Convenience function to check and handle backend instances

    Args:
        port: Backend server port
        lock_file: Lock file path

    Returns:
        Tuple of (can_proceed, message)
    """
    manager = InstanceManager(port=port, lock_file=lock_file)
    return manager.check_and_handle_existing_instance()


if __name__ == "__main__":
    # Test the instance manager
    manager = InstanceManager()
    can_proceed, message = manager.check_and_handle_existing_instance()
    print(f"\nResult: {message}")
    print(f"Can proceed: {can_proceed}")
@ -32,7 +32,7 @@ except ImportError:

import snap7
import snap7.util
import snap7.types
import snap7.type
import time
import threading
import ctypes

@ -40,7 +40,7 @@ from typing import Dict, Any, Optional, List

# Try to import S7DataItem with fallback for different snap7 versions
try:
    from snap7.types import S7DataItem
    from snap7.type import S7DataItem

    SNAP7_TYPES_AVAILABLE = True
except ImportError:
@ -213,15 +213,19 @@ class OptimizedBatchReader:

        # Convert to ctypes array for read_multi_vars (CRITICAL for snap7 v2)
        items_array = (S7DataItem * len(items_to_read))(*items_to_read)

        # Perform the multi-variable read for the current chunk
        result = self.plc_client.plc.read_multi_vars(items_array)

        # Handle result format (result code, array of items)
        if isinstance(result, tuple) and len(result) == 2:
            ret_code, read_results = result
            if ret_code != 0:
                error_msg = snap7.util.get_error_text(ret_code) if hasattr(snap7.util, 'get_error_text') else f"Error code: {ret_code}"
                error_msg = (
                    snap7.util.get_error_text(ret_code)
                    if hasattr(snap7.util, "get_error_text")
                    else f"Error code: {ret_code}"
                )
                self._log_error(f"read_multi_vars failed: {error_msg}")
                for var_name, _ in chunk:
                    chunk_results[var_name] = None
@ -244,7 +248,11 @@ class OptimizedBatchReader:
                chunk_results[var_name] = None
            else:
                # Handle read error
                error_msg = snap7.util.get_error_text(item_result.Result) if hasattr(snap7.util, 'get_error_text') else f"Error: {item_result.Result}"
                error_msg = (
                    snap7.util.get_error_text(item_result.Result)
                    if hasattr(snap7.util, "get_error_text")
                    else f"Error: {item_result.Result}"
                )
                self._log_error(f"Failed to read '{var_name}': {error_msg}")
                chunk_results[var_name] = None


@ -292,6 +300,14 @@ class OptimizedBatchReader:
            "mw": 131,
            "md": 131,
            "mb": 131,
            # PEW/PAW area mappings
            "pew": 129,  # Process Input Words
            "paw": 130,  # Process Output Words
            # Additional PE/PA area mappings for consistency with plc_client.py
            "ped": 129,  # Process Input Double word (REAL)
            "peb": 129,  # Process Input Byte
            "pad": 130,  # Process Output Double word (REAL)
            "pab": 130,  # Process Output Byte
        }
        return area_map.get(area_str.lower(), 132)  # Default to DB
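The lookup above resolves symbolic area prefixes to snap7 numeric area codes, with unknown strings defaulting to the DB area. A standalone sketch of the same semantics (the names here are illustrative, not the method's actual name):

AREA_MAP = {"db": 132, "mw": 131, "pew": 129, "paw": 130, "pad": 130}

def area_code(area: str) -> int:
    # Unknown area strings fall back to the DB area code, as in the diff above.
    return AREA_MAP.get(area.lower(), 132)

assert area_code("PEW") == 129  # process input words
assert area_code("foo") == 132  # default: treat as DB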