Compare commits
3 Commits: 0f162377cd ... ffabf6b2b0

| Author | SHA1 | Date |
|---|---|---|
| | ffabf6b2b0 | |
| | 3975a96395 | |
| | 89451abd15 | |

Files changed: app.py (49 changes)

@@ -5,6 +5,7 @@ import os
 import json # Added import
 from datetime import datetime
 import time # Added for shutdown delay
 import sys # Added for platform detection
+import subprocess # Add this to the imports at the top

 # --- Imports for System Tray Icon ---
@@ -109,7 +110,10 @@ def execute_script():
 @app.route("/")
 def index():
     script_groups = config_manager.get_script_groups()
-    return render_template("index.html", script_groups=script_groups)
+    # Para ordenar una lista de diccionarios, necesitamos especificar una clave.
+    # Asumimos que cada diccionario tiene una clave 'name' por la cual ordenar.
+    sorted_script_groups = sorted(script_groups, key=lambda group: group['name'])
+    return render_template("index.html", script_groups=sorted_script_groups)


 @app.route("/api/config/<level>", methods=["GET", "POST"])
@@ -391,6 +395,49 @@ def open_miniconda_console():
     )


+@app.route("/api/open-explorer", methods=["POST"])
+def open_explorer_route():
+    data = request.json
+    frontend_path = data.get("path")
+    group = data.get("group")
+
+    if not group:
+        return jsonify({"status": "error", "message": "Grupo no proporcionado."}), 400
+    if not frontend_path:
+        return jsonify({"status": "error", "message": "Ruta no proporcionada."}), 400
+
+    # Obtener el directorio de trabajo configurado y normalizado para el grupo
+    configured_work_dir_raw = config_manager.get_work_dir(group)
+    if not configured_work_dir_raw:
+        return jsonify({"status": "error", "message": f"No hay directorio de trabajo configurado para el grupo '{group}'."}), 404
+
+    configured_work_dir_abs = os.path.abspath(configured_work_dir_raw)
+    frontend_path_abs = os.path.abspath(frontend_path)
+
+    # Validar que la ruta del frontend coincide con la ruta configurada
+    if configured_work_dir_abs != frontend_path_abs:
+        print(f"Intento de acceso no válido: Grupo '{group}', Frontend Path '{frontend_path_abs}', Configured Path '{configured_work_dir_abs}'")
+        return jsonify({"status": "error", "message": "La ruta proporcionada no coincide con el directorio de trabajo seguro para este grupo."}), 403
+
+    # Validar que la ruta (ahora sabemos que es la configurada) realmente existe
+    if not os.path.isdir(configured_work_dir_abs):
+        return jsonify({"status": "error", "message": f"El directorio de trabajo configurado '{configured_work_dir_abs}' no es un directorio válido o no existe."}), 400
+
+    try:
+        if sys.platform == "win32":
+            os.startfile(configured_work_dir_abs)
+        elif sys.platform == "darwin":  # macOS
+            subprocess.Popen(["open", configured_work_dir_abs])
+        else:  # linux variants
+            subprocess.Popen(["xdg-open", configured_work_dir_abs])
+
+        return jsonify({"status": "success", "message": f"Abriendo '{configured_work_dir_abs}' en el explorador."})
+    except Exception as e:
+        error_msg = f"Error al abrir el explorador en '{configured_work_dir_abs}': {str(e)}"
+        print(error_msg)
+        return jsonify({"status": "error", "message": error_msg}), 500
+
+
 # --- System Tray Icon Functions ---

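For reference, a call to the new endpoint could look like the sketch below. This is not part of the changeset: the host/port and the use of the `requests` package are assumptions, while the JSON keys `path` and `group` and the response codes come from the route above.

```python
# Hypothetical client call to the new endpoint (not part of this diff).
# Assumes the Flask app is reachable on localhost:5000 and "requests" is installed.
import requests

resp = requests.post(
    "http://localhost:5000/api/open-explorer",
    json={
        "path": "C:\\Trabajo\\SIDEL\\12 - SAE052 - Syrup Update & GSD Update\\Reporte\\Emails",
        "group": "EmailCrono",
    },
)
print(resp.status_code, resp.json())  # 200 on success; 400/403/404/500 with an error message otherwise
```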
@@ -0,0 +1,6 @@
+{
+    "name": "Code : 01 : CSharpCodeMerger",
+    "description": "Sin descripción",
+    "version": "1.0",
+    "author": "Unknown"
+}
@@ -1,5 +1,5 @@
 {
-    "name": "Desempaquetado de Emails EML",
+    "name": "Emails : Desempaquetado de Emails EML",
     "description": "This script processes email files (.eml) into a chronological narrative in Markdown format, optimized for processing with Large Language Models (LLMs). It extracts essential information from emails while removing unnecessary metadata, creating a clean, temporal narrative that can be easily analyzed. ",
     "version": "1.0",
     "author": "Miguel"
@@ -1,33 +1,39 @@
 --- Log de Ejecución: x1.py ---
 Grupo: EmailCrono
-Directorio de Trabajo: C:\Trabajo\SIDEL\10 - E5.007095 - Modifica O&U - SAE463\Reporte\Email
-Inicio: 2025-05-09 16:58:28
-Fin: 2025-05-09 16:58:29
-Duración: 0:00:00.434600
+Directorio de Trabajo: C:\Trabajo\SIDEL\12 - SAE052 - Syrup Update & GSD Update\Reporte\Emails
+Inicio: 2025-05-18 16:00:44
+Fin: 2025-05-18 16:00:44
+Duración: 0:00:00.445734
 Estado: SUCCESS (Código de Salida: 0)
 
 --- SALIDA ESTÁNDAR (STDOUT) ---
-Working directory: C:\Trabajo\SIDEL\10 - E5.007095 - Modifica O&U - SAE463\Reporte\Email
-Input directory: C:\Trabajo\SIDEL\10 - E5.007095 - Modifica O&U - SAE463\Reporte\Email
-Output directory: C:/Users/migue/OneDrive/Miguel/Obsidean/Trabajo/VM/04-SIDEL/10 - E5.007095 - Modifica O&U - SAE463
-Cronologia file: C:/Users/migue/OneDrive/Miguel/Obsidean/Trabajo/VM/04-SIDEL/10 - E5.007095 - Modifica O&U - SAE463\cronologia.md
-Attachments directory: C:\Trabajo\SIDEL\10 - E5.007095 - Modifica O&U - SAE463\Reporte\Email\adjuntos
+Working directory: C:\Trabajo\SIDEL\12 - SAE052 - Syrup Update & GSD Update\Reporte\Emails
+Input directory: C:\Trabajo\SIDEL\12 - SAE052 - Syrup Update & GSD Update\Reporte\Emails
+Output directory: C:/Users/migue/OneDrive/Miguel/Obsidean/Trabajo/VM/04-SIDEL/12 - SAE052 - Syrup Update & GSD Update
+Cronologia file: C:/Users/migue/OneDrive/Miguel/Obsidean/Trabajo/VM/04-SIDEL/12 - SAE052 - Syrup Update & GSD Update\cronologia.md
+Attachments directory: C:\Trabajo\SIDEL\12 - SAE052 - Syrup Update & GSD Update\Reporte\Emails\adjuntos
 Beautify rules file: D:\Proyectos\Scripts\ParamManagerScripts\backend\script_groups\EmailCrono\config\beautify_rules.json
-Found 1 .eml files
+Found 2 .eml files
 Loaded 0 existing messages
 
-Processing C:\Trabajo\SIDEL\10 - E5.007095 - Modifica O&U - SAE463\Reporte\Email\R_ {EXT} E5.006894 - Modifica O&U - SAE463 New Analyzer.eml
+Processing C:\Trabajo\SIDEL\12 - SAE052 - Syrup Update & GSD Update\Reporte\Emails\I_ Backup SAE052.eml
 Aplicando reglas de prioridad 1
 Aplicando reglas de prioridad 2
 Aplicando reglas de prioridad 3
 Aplicando reglas de prioridad 4
 
+Processing C:\Trabajo\SIDEL\12 - SAE052 - Syrup Update & GSD Update\Reporte\Emails\Parametri Modificati SAE052.eml
+Aplicando reglas de prioridad 1
+Aplicando reglas de prioridad 2
+Aplicando reglas de prioridad 3
+Aplicando reglas de prioridad 4
+
 Estadísticas de procesamiento:
-- Total mensajes encontrados: 1
-- Mensajes únicos añadidos: 1
+- Total mensajes encontrados: 2
+- Mensajes únicos añadidos: 2
 - Mensajes duplicados ignorados: 0
 
-Writing 1 messages to C:/Users/migue/OneDrive/Miguel/Obsidean/Trabajo/VM/04-SIDEL/10 - E5.007095 - Modifica O&U - SAE463\cronologia.md
+Writing 2 messages to C:/Users/migue/OneDrive/Miguel/Obsidean/Trabajo/VM/04-SIDEL/12 - SAE052 - Syrup Update & GSD Update\cronologia.md
 
 --- ERRORES (STDERR) ---
 Ninguno

@@ -8,7 +8,7 @@
         "cronologia_file": "cronologia.md"
     },
     "level3": {
-        "output_directory": "C:/Users/migue/OneDrive/Miguel/Obsidean/Trabajo/VM/04-SIDEL/10 - E5.007095 - Modifica O&U - SAE463"
+        "output_directory": "C:/Users/migue/OneDrive/Miguel/Obsidean/Trabajo/VM/04-SIDEL/12 - SAE052 - Syrup Update & GSD Update"
     },
-    "working_directory": "C:\\Trabajo\\SIDEL\\10 - E5.007095 - Modifica O&U - SAE463\\Reporte\\Email"
+    "working_directory": "C:\\Trabajo\\SIDEL\\12 - SAE052 - Syrup Update & GSD Update\\Reporte\\Emails"
 }
@@ -1,6 +1,7 @@
 {
-    "path": "C:\\Trabajo\\SIDEL\\10 - E5.007095 - Modifica O&U - SAE463\\Reporte\\Email",
+    "path": "C:\\Trabajo\\SIDEL\\12 - SAE052 - Syrup Update & GSD Update\\Reporte\\Emails",
     "history": [
+        "C:\\Trabajo\\SIDEL\\12 - SAE052 - Syrup Update & GSD Update\\Reporte\\Emails",
         "C:\\Trabajo\\SIDEL\\10 - E5.007095 - Modifica O&U - SAE463\\Reporte\\Email",
         "C:\\Trabajo\\SIDEL\\08 - Masselli TEST\\Reporte\\EMAILs",
         "C:\\Trabajo\\SIDEL\\EMAILs\\I_ E5.007727 _ Evo On - SFSRFH300172 + SFSRFH300109 - ANDIA LACTEOS",
@@ -9,7 +10,6 @@
     "C:\\Trabajo\\VM\\30 - 9.3941- Kosme - Portogallo (Modifica + Linea)\\Reporte\\Emails",
     "C:\\Users\\migue\\OneDrive\\Miguel\\Obsidean\\Trabajo\\VM\\30 - 9.3941- Kosme - Portogallo (Modifica + Linea)\\Emails",
     "C:\\Trabajo\\VM\\40 - 93040 - HENKEL - NEXT2 Problem\\Reporte\\Emails\\Trial",
-    "C:\\Trabajo\\VM\\40 - 93040 - HENKEL - NEXT2 Problem\\Reporte\\Emails",
-    "C:\\Trabajo\\VM\\40 - 93040 - HENKEL - NEXT2 Problem\\Reporte\\Emails\\Error de tablas"
+    "C:\\Trabajo\\VM\\40 - 93040 - HENKEL - NEXT2 Problem\\Reporte\\Emails"
   ]
 }
@@ -1,5 +1,5 @@
 {
-    "name": "Scripts para adaptar IO de Hardware S7 a IO Master en Tia Portal",
+    "name": "Siemens-Tia : 01 : Scripts para adaptar IO de Hardware S7 a IO Master en Tia Portal",
     "description": "Este conjunto de scripts está diseñado para ayudar a los usuarios a adaptar el IO de Hardware S7 a un IO Master en Tia Portal. Incluye scripts para la creación de un nuevo proyecto, la importación de un proyecto existente y la adaptación del IO de Hardware S7 a un IO Master.",
     "version": "1.0",
     "author": "Miguel"
@@ -1,5 +1,5 @@
 {
-    "name": "Importación de Archivos HTML",
+    "name": "Obsidean : 01 : Importación de Archivos HTML",
     "description": "Este script procesa archivos HTML en un directorio y los convierte en un único archivo Markdown, extrayendo las imágenes a una carpeta de adjuntos y manteniendo los enlaces. También genera un índice al principio del archivo.",
     "version": "1.0",
     "author": "Miguel"
@@ -1,6 +1,6 @@
 {
-    "name": "Exportador de objetos de Tia Portal y procesador de CAx",
-    "description": "Este conjunto de scripts exporta desde Tia Portal los objetos en fomarto XML y los objetos CAx. Luego se puede generar documentacion desde estos CAx de la periferia IO del PLC exportado.",
+    "name": "Siemens-Tia : 02 : Exportador de objetos de Tia Portal en formato XML",
+    "description": "Este conjunto de scripts exporta desde Tia Portal los objetos en fomarto XML.",
     "version": "1.0",
     "author": "Miguel"
 }
File diff suppressed because it is too large
@@ -5,5 +5,5 @@
     },
     "level2": {},
     "level3": {},
-    "working_directory": "C:\\Trabajo\\SIDEL\\06 - E5.007363 - Modifica O&U - SAE196 (cip integrato)\\Reporte\\IOExport"
+    "working_directory": "C:\\Trabajo\\SIDEL\\09 - SAE452 - Diet as Regular - San Giovanni in Bosco\\Reporte\\SourceDoc\\SourceXML"
 }
@@ -1,12 +1,12 @@
 {
     "x1.py": {
-        "display_name": "1: Exportar Lógica desde TIA",
+        "display_name": "1: Exportar Lógica desde TIA Portal",
         "short_description": "Exporta la lógica del PLC desde TIA Portal en archivos XML y SCL.",
         "long_description": "Este script utiliza TIA Portal Openness para exportar la lógica de un PLC en formato XML y SCL. Permite seleccionar un proyecto de TIA Portal y genera los archivos de exportación en el directorio configurado.\n***\n**Lógica Principal:**\n\n1. **Configuración:** Carga parámetros desde `ParamManagerScripts` (directorio de trabajo, versión de TIA Portal).\n2. **Selección de Proyecto:** Abre un cuadro de diálogo para seleccionar el archivo del proyecto de TIA Portal.\n3. **Conexión a TIA Portal:** Utiliza la API de TIA Openness para conectarse al portal y abrir el proyecto seleccionado.\n4. **Exportación:** Exporta la lógica del PLC en archivos XML y SCL al directorio configurado.\n5. **Cierre:** Cierra la conexión con TIA Portal al finalizar.",
         "hidden": false
     },
     "x4.py": {
-        "display_name": "4: Exportar Referencias Cruzadas",
+        "display_name": "2: Exportar Referencias Cruzadas desde Tia Portal",
         "short_description": "Script para exportar las referencias cruzadas",
         "long_description": "",
         "hidden": false
@@ -1,6 +1,7 @@
 {
-    "path": "C:\\Trabajo\\SIDEL\\06 - E5.007363 - Modifica O&U - SAE196 (cip integrato)\\Reporte\\IOExport",
+    "path": "C:\\Trabajo\\SIDEL\\09 - SAE452 - Diet as Regular - San Giovanni in Bosco\\Reporte\\SourceDoc\\SourceXML",
     "history": [
+        "C:\\Trabajo\\SIDEL\\09 - SAE452 - Diet as Regular - San Giovanni in Bosco\\Reporte\\SourceDoc\\SourceXML",
         "C:\\Trabajo\\SIDEL\\06 - E5.007363 - Modifica O&U - SAE196 (cip integrato)\\Reporte\\IOExport"
     ]
 }
@ -0,0 +1,335 @@
|
|||
# S7 Parser Utilities - Technical Documentation
|
||||
|
||||
## Table of Contents
|
||||
1. [Introduction](#introduction)
|
||||
2. [Utility Functions](#utility-functions)
|
||||
- [find_working_directory](#find_working_directory)
|
||||
- [custom_json_serializer](#custom_json_serializer)
|
||||
- [flatten_db_structure](#flatten_db_structure)
|
||||
- [format_address_for_display](#format_address_for_display)
|
||||
- [access_by_hierarchy_path](#access_by_hierarchy_path)
|
||||
- [format_data_type_for_source](#format_data_type_for_source)
|
||||
3. [Usage Examples](#usage-examples)
|
||||
|
||||
## Introduction
|
||||
|
||||
This document provides technical documentation for the utility functions used in the S7 parser system. These functions facilitate working with Siemens S7 data structures, addressing, and serialization/deserialization processes.
|
||||
|
||||
## Utility Functions
|
||||
|
||||
### find_working_directory
|
||||
|
||||
#### Purpose
|
||||
Retrieves the configured working directory from the script configuration.
|
||||
|
||||
#### Signature
|
||||
```python
|
||||
def find_working_directory() -> str:
|
||||
```
|
||||
|
||||
#### Returns
|
||||
- `str`: The absolute path to the configured working directory.
|
||||
|
||||
#### Behavior
|
||||
1. Loads the configuration using `load_configuration()`
|
||||
2. Retrieves the "working_directory" setting
|
||||
3. Exits with error if no working directory is specified
|
||||
4. Returns the working directory path
|
||||
|
||||
#### Usage Example
|
||||
```python
|
||||
working_dir = find_working_directory()
|
||||
print(f"Using working directory: {working_dir}")
|
||||
```
|
||||
|
||||
#### Notes
|
||||
- This function depends on `load_configuration()` from `backend.script_utils`
|
||||
- The working directory should be defined in the script configuration JSON file
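
For orientation, a minimal sketch consistent with the behavior and notes above might look as follows; the exact return shape of `load_configuration()` is an assumption.

```python
# Hedged sketch only; assumes load_configuration() returns a dict whose
# "working_directory" entry holds the configured path.
import sys
from backend.script_utils import load_configuration

def find_working_directory() -> str:
    configuration = load_configuration()
    working_directory = configuration.get("working_directory", "")
    if not working_directory:
        print("Error: 'working_directory' is not defined in the configuration.")
        sys.exit(1)  # exits with error if no working directory is specified
    return working_directory
```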
|
||||
|
||||
---
|
||||
|
||||
### custom_json_serializer
|
||||
|
||||
#### Purpose
|
||||
Custom JSON serializer that handles S7 data structures that are not natively JSON-serializable.
|
||||
|
||||
#### Signature
|
||||
```python
|
||||
def custom_json_serializer(obj: Any) -> Any:
|
||||
```
|
||||
|
||||
#### Parameters
|
||||
- `obj` (Any): Object to be serialized to JSON
|
||||
|
||||
#### Returns
|
||||
- Serializable version of the object compatible with JSON format
|
||||
|
||||
#### Behavior
|
||||
1. Ignores `OffsetContext` objects (returns `None`)
|
||||
2. Converts `ArrayDimension` objects to dictionaries
|
||||
3. Handles `VariableInfo` and other objects with `__dict__` attribute
|
||||
4. Properly manages special properties like `current_element_values`
|
||||
5. Raises `TypeError` for objects that cannot be serialized
|
||||
|
||||
#### Usage Example
|
||||
```python
|
||||
# Serialize a parsed S7 object to JSON
|
||||
json_output = json.dumps(parsed_result, default=custom_json_serializer, indent=2)
|
||||
with open("output.json", "w", encoding='utf-8') as f:
|
||||
f.write(json_output)
|
||||
```
|
||||
|
||||
#### Notes
|
||||
- Removes empty lists and None values to keep JSON output clean
|
||||
- Special handling for `is_udt_expanded_member` flag
|
||||
- Preserves format of array element values with their offsets
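
The general shape of such a `default=` hook is sketched below; this is not the project's implementation, and the placeholder classes only stand in for the parser types named above.

```python
# Hedged sketch, not the actual implementation. OffsetContext and ArrayDimension
# stand in for the parser classes named in the behavior list; their fields here
# are illustrative placeholders.
from dataclasses import dataclass

@dataclass
class OffsetContext:
    byte_offset: float = 0.0

@dataclass
class ArrayDimension:
    lower_bound: int = 0
    upper_bound: int = 0

def custom_json_serializer(obj):
    if isinstance(obj, OffsetContext):
        return None  # OffsetContext objects are ignored in the JSON output
    if isinstance(obj, ArrayDimension):
        return {"lower_bound": obj.lower_bound, "upper_bound": obj.upper_bound}
    if hasattr(obj, "__dict__"):
        # VariableInfo-like objects: drop None values and empty lists to keep the output clean
        return {k: v for k, v in vars(obj).items() if v is not None and v != []}
    raise TypeError(f"Object of type {type(obj).__name__} is not JSON serializable")
```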
|
||||
|
||||
---
|
||||
|
||||
### flatten_db_structure
|
||||
|
||||
#### Purpose
|
||||
Completely flattens a hierarchical DB/UDT structure, expanding all nested variables, UDTs, and array elements for easier processing.
|
||||
|
||||
#### Signature
|
||||
```python
|
||||
def flatten_db_structure(db_info: Dict[str, Any]) -> List[Dict[str, Any]]:
|
||||
```
|
||||
|
||||
#### Parameters
|
||||
- `db_info` (Dict): The DB or UDT structure to flatten
|
||||
|
||||
#### Returns
|
||||
- `List[Dict]`: A list of flattened variables with full paths and hierarchy path references
|
||||
|
||||
#### Behavior
|
||||
1. Recursively processes the hierarchical structure of a DB/UDT
|
||||
2. Expands nested structures, UDT instances, and array elements
|
||||
3. Maintains offset information and creates full path strings
|
||||
4. Adds hierarchical path pointers for direct access to original structure
|
||||
5. Assigns element type designations based on variable characteristics
|
||||
6. Returns a list sorted by byte.bit offset for consistent ordering
|
||||
|
||||
#### Key Properties Added to Each Variable
|
||||
- `full_path`: Complete path to the variable (e.g., `"MyStruct.MyArray[1]"`)
|
||||
- `is_array_element`: Boolean indicating if this is an array element
|
||||
- `element_type`: Type classification ("SIMPLE_VAR", "ARRAY", "ARRAY_ELEMENT", "STRUCT", "UDT_INSTANCE")
|
||||
- `address_display`: Formatted display version of the address
|
||||
- `_hierarchy_path`: Internal path references for direct access to the original structure
|
||||
- `_array_index`: For array elements, stores the index for direct access
|
||||
|
||||
#### Usage Example
|
||||
```python
|
||||
# Get a flattened view of a DB
|
||||
flat_variables = flatten_db_structure(my_db)
|
||||
|
||||
# Find variables with specific element types
|
||||
simple_vars = [var for var in flat_variables if var.get("element_type") == "SIMPLE_VAR"]
|
||||
|
||||
# Create an offset-to-variable map for quick lookup
|
||||
variables_by_offset = {var["byte_offset"]: var for var in flat_variables}
|
||||
```
|
||||
|
||||
#### Notes
|
||||
- Prevents duplicate processing of expanded UDT members
|
||||
- Handles array elements including their specific offsets
|
||||
- Preserves all original attributes of each variable
|
||||
- Critical for functions that need to process or match variables by offset
|
||||
|
||||
---
|
||||
|
||||
### format_address_for_display
|
||||
|
||||
#### Purpose
|
||||
Formats byte offsets into readable S7 address notation, correctly handling bit addresses for BOOL types.
|
||||
|
||||
#### Signature
|
||||
```python
|
||||
def format_address_for_display(byte_offset: float, bit_size: int = 0) -> str:
|
||||
```
|
||||
|
||||
#### Parameters
|
||||
- `byte_offset` (float): The byte offset with the decimal part representing the bit offset
|
||||
- `bit_size` (int, optional): Size in bits (>0 for BOOL types). Defaults to 0.
|
||||
|
||||
#### Returns
|
||||
- `str`: A formatted address string in S7 notation
|
||||
|
||||
#### Behavior
|
||||
1. For bit variables (BOOLs), formats as "X.Y" where X is the byte and Y is the bit
|
||||
2. For byte-aligned variables, returns the byte number as an integer
|
||||
3. Preserves decimal notation only when necessary
|
||||
|
||||
#### Usage Example
|
||||
```python
|
||||
# Format a BOOL address
|
||||
bool_address = format_address_for_display(12.5, 1) # Returns "12.5"
|
||||
|
||||
# Format a regular variable address
|
||||
int_address = format_address_for_display(24.0, 0) # Returns "24"
|
||||
```
|
||||
|
||||
#### Notes
|
||||
- Particularly important for BOOL variables and array elements
|
||||
- Converts integer byte offsets (e.g., 10.0) to simple integers ("10")
|
||||
- The bit part of BOOL addresses is rounded to account for potential floating-point imprecision
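
A sketch consistent with the documented examples, assuming the fractional digit of `byte_offset` directly encodes the bit index:

```python
# Hedged sketch matching the examples above (12.5 -> "12.5", 24.0 -> "24");
# the encoding of the bit index in the fractional part is an assumption.
def format_address_for_display(byte_offset: float, bit_size: int = 0) -> str:
    if bit_size > 0:
        # BOOL-type variables keep the byte.bit form, rounding the bit part
        byte_part = int(byte_offset)
        bit_part = round((byte_offset - byte_part) * 10)
        return f"{byte_part}.{bit_part}"
    if byte_offset == int(byte_offset):
        return str(int(byte_offset))  # byte-aligned variables: plain integer
    return str(byte_offset)  # keep decimal notation only when necessary
```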
|
||||
|
||||
---
|
||||
|
||||
### access_by_hierarchy_path
|
||||
|
||||
#### Purpose
|
||||
Directly accesses a variable in the hierarchical structure using the hierarchy path references created by `flatten_db_structure`.
|
||||
|
||||
#### Signature
|
||||
```python
|
||||
def access_by_hierarchy_path(root_obj: Dict[str, Any], hierarchy_path: List[Dict[str, Any]]) -> Optional[Dict[str, Any]]:
|
||||
```
|
||||
|
||||
#### Parameters
|
||||
- `root_obj` (Dict): The root object (usually a DB) containing the hierarchy
|
||||
- `hierarchy_path` (List[Dict]): List of hierarchical steps to navigate
|
||||
|
||||
#### Returns
|
||||
- The variable object if found, `None` if not accessible
|
||||
|
||||
#### Behavior
|
||||
1. Starts at the root object provided
|
||||
2. Follows each step in the hierarchy path
|
||||
3. Each step contains a type ("members" or "children") and an index
|
||||
4. Returns the object found at the end of the path
|
||||
5. Returns None if any step in the path is invalid
|
||||
|
||||
#### Usage Example
|
||||
```python
|
||||
# Get flattened variables with hierarchy paths
|
||||
flat_vars = flatten_db_structure(my_db)
|
||||
|
||||
# Find a specific variable
|
||||
target_var = next(var for var in flat_vars if var["full_path"] == "MyStruct.MyField")
|
||||
|
||||
# Access the original variable directly in the hierarchy
|
||||
original_var = access_by_hierarchy_path(my_db, target_var["_hierarchy_path"])
|
||||
|
||||
# Now you can modify the original variable
|
||||
if original_var:
|
||||
original_var["current_value"] = "New Value"
|
||||
```
|
||||
|
||||
#### Notes
|
||||
- Critical for updating values in the original structure
|
||||
- Much more efficient than manually traversing the hierarchy
|
||||
- Works because hierarchy paths are preserved across deep copies
|
||||
- Provides error checking for invalid paths
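
A sketch of the traversal described above; the step-dictionary keys (`"type"`, `"index"`) are assumptions based on the behavior list, not the verified field names:

```python
# Hedged sketch of the hierarchy-path traversal described above.
from typing import Any, Dict, List, Optional

def access_by_hierarchy_path(root_obj: Dict[str, Any],
                             hierarchy_path: List[Dict[str, Any]]) -> Optional[Dict[str, Any]]:
    current = root_obj
    for step in hierarchy_path:
        container = current.get(step["type"])  # "members" or "children"
        index = step["index"]
        if not isinstance(container, list) or index >= len(container):
            return None  # invalid step: the path cannot be followed
        current = container[index]
    return current
```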
|
||||
|
||||
---
|
||||
|
||||
### format_data_type_for_source
|
||||
|
||||
#### Purpose
|
||||
Formats a variable's data type information as it would appear in S7 source code.
|
||||
|
||||
#### Signature
|
||||
```python
|
||||
def format_data_type_for_source(var_info: Dict[str, Any]) -> str:
|
||||
```
|
||||
|
||||
#### Parameters
|
||||
- `var_info` (Dict): Variable information dictionary
|
||||
|
||||
#### Returns
|
||||
- `str`: Formatted data type string as it would appear in S7 source code
|
||||
|
||||
#### Behavior
|
||||
1. Uses the UDT source name if available, otherwise uses the data type
|
||||
2. Adds ARRAY declarations with dimensions for array variables
|
||||
3. Adds size specifications for STRING types
|
||||
4. Returns the complete type declaration as it would appear in source code
|
||||
|
||||
#### Usage Example
|
||||
```python
|
||||
# Format a simple INT variable
|
||||
simple_var = {"data_type": "INT"}
|
||||
type_str = format_data_type_for_source(simple_var) # Returns "INT"
|
||||
|
||||
# Format an array of REALs
|
||||
array_var = {
|
||||
"data_type": "REAL",
|
||||
"array_dimensions": [{"lower_bound": 1, "upper_bound": 10}]
|
||||
}
|
||||
type_str = format_data_type_for_source(array_var) # Returns "ARRAY [1..10] OF REAL"
|
||||
|
||||
# Format a STRING variable
|
||||
string_var = {"data_type": "STRING", "string_length": 32}
|
||||
type_str = format_data_type_for_source(string_var) # Returns "STRING[32]"
|
||||
```
|
||||
|
||||
#### Notes
|
||||
- Essential for generating S7 source code declarations
|
||||
- Handles UDT references, arrays, and string length specifications
|
||||
- Properly formats complex types like arrays of UDTs or strings
|
||||
|
||||
## Usage Examples
|
||||
|
||||
### Complete Example: Processing and Updating a DB
|
||||
|
||||
```python
|
||||
# Load JSON data
|
||||
with open("my_db.json", "r", encoding="utf-8") as f:
|
||||
db_data = json.load(f)
|
||||
|
||||
# 1. Flatten the structure for easier processing
|
||||
flat_vars = flatten_db_structure(db_data["dbs"][0])
|
||||
|
||||
# 2. Display all variables with their addresses
|
||||
for var in flat_vars:
|
||||
address = var.get("address_display", format_address_for_display(var["byte_offset"], var.get("bit_size", 0)))
|
||||
data_type = format_data_type_for_source(var)
|
||||
print(f"{address}: {var['full_path']} ({data_type})")
|
||||
|
||||
# 3. Update a specific variable in the original structure
|
||||
target_var = next(var for var in flat_vars if var["full_path"] == "Settings.Timeout")
|
||||
original_var = access_by_hierarchy_path(db_data["dbs"][0], target_var["_hierarchy_path"])
|
||||
if original_var:
|
||||
original_var["current_value"] = "5000" # Update the value
|
||||
print(f"Updated {target_var['full_path']} to 5000")
|
||||
|
||||
# 4. Save the modified structure back to JSON
|
||||
with open("updated_db.json", "w", encoding="utf-8") as f:
|
||||
json.dump(db_data, f, default=custom_json_serializer, indent=2)
|
||||
```
|
||||
|
||||
### Example: Creating a Filtered View of Variables
|
||||
|
||||
```python
|
||||
# Load DB data
|
||||
working_dir = find_working_directory()
|
||||
input_file = os.path.join(working_dir, "my_db.json")
|
||||
with open(input_file, "r", encoding="utf-8") as f:
|
||||
db_data = json.load(f)
|
||||
|
||||
# Get flattened structure
|
||||
flat_vars = flatten_db_structure(db_data["dbs"][0])
|
||||
|
||||
# Filter by element type
|
||||
simple_vars = [var for var in flat_vars if var.get("element_type") == "SIMPLE_VAR"]
|
||||
array_elements = [var for var in flat_vars if var.get("element_type") == "ARRAY_ELEMENT"]
|
||||
structs = [var for var in flat_vars if var.get("element_type") == "STRUCT"]
|
||||
|
||||
# Create address-based lookup for quick access
|
||||
vars_by_address = {}
|
||||
for var in flat_vars:
|
||||
if var.get("element_type") in ["SIMPLE_VAR", "ARRAY_ELEMENT"]:
|
||||
vars_by_address[var["byte_offset"]] = var
|
||||
|
||||
# Look up a variable by address
|
||||
target_address = 24.0
|
||||
if target_address in vars_by_address:
|
||||
target_var = vars_by_address[target_address]
|
||||
print(f"Variable at address {target_address}: {target_var['full_path']}")
|
||||
|
||||
# Access and modify the original variable in the hierarchy
|
||||
original_var = access_by_hierarchy_path(db_data["dbs"][0], target_var["_hierarchy_path"])
|
||||
if original_var:
|
||||
print(f"Current value: {original_var.get('current_value')}")
|
||||
```
|
|
@ -1,808 +0,0 @@
|
|||
import re
|
||||
import os
|
||||
import pandas as pd
|
||||
import tkinter as tk
|
||||
from tkinter import filedialog
|
||||
import subprocess
|
||||
from openpyxl import Workbook, load_workbook
|
||||
from openpyxl.utils import get_column_letter
|
||||
from openpyxl.styles import Alignment, Font, Border, Side, PatternFill
|
||||
from copy import deepcopy
|
||||
|
||||
# Constantes para los tamaños de tipos de datos
|
||||
TYPE_SIZES = {
|
||||
"Byte": 1,
|
||||
"Char": 1,
|
||||
"Int": 2,
|
||||
"DInt": 4,
|
||||
"Word": 2,
|
||||
"DWord": 4,
|
||||
"Real": 4,
|
||||
"Date": 2,
|
||||
"Time": 4,
|
||||
"Time_Of_Day": 4,
|
||||
"S5Time": 2,
|
||||
"Bool": 0.125, # 1 bit, normalmente agrupado en 8 bits = 1 Byte
|
||||
"String": 256, # String[256] serían 258 bytes (256 caracteres + 2 para longitud)
|
||||
"WString": 512,
|
||||
"LReal": 8, # Punto flotante de doble precisión
|
||||
"UDInt": 4, # Entero sin signo de 32 bits
|
||||
"USInt": 1, # Entero sin signo de 8 bits (Byte)
|
||||
"UInt": 2, # Entero sin signo de 16 bits (Word)
|
||||
"ULInt": 8, # Entero sin signo de 64 bits (Doble DWord)
|
||||
"LWord": 8, # Entero sin signo de 64 bits (Doble DWord)
|
||||
"LInt": 8, # Entero con signo de 64 bits
|
||||
"Date_And_Time": 8, # Fecha y hora combinadas, 8 bytes
|
||||
"DTL": 12, # Date and time long (fecha, hora y precisión a microsegundos, 12 bytes)
|
||||
}
|
||||
|
||||
#=================================
|
||||
# FUNCIONES DE PARSEO DE ARCHIVOS
|
||||
#=================================
|
||||
|
||||
def clean_line(line):
|
||||
"""Limpia la línea de BOM y espacios o comillas extra."""
|
||||
# Elimina UTF-8 BOM si existe y elimina espacios iniciales/finales
|
||||
line = line.replace("\ufeff", "").strip()
|
||||
# Estandariza las definiciones TYPE y DATA_BLOCK
|
||||
line = re.sub(r'\s*TYPE\s+"?', 'TYPE "', line)
|
||||
line = re.sub(r'\s*DATA_BLOCK\s+"?', 'DATA_BLOCK "', line)
|
||||
line = remove_text_inside_brackets(line)
|
||||
return line
|
||||
|
||||
def remove_text_inside_brackets(text):
|
||||
"""Elimina texto dentro de corchetes."""
|
||||
pattern = r"\{.*?\}"
|
||||
cleaned_text = re.sub(pattern, '', text)
|
||||
return cleaned_text
|
||||
|
||||
def extract_name(line):
|
||||
"""Extrae el nombre de una línea de definición TYPE o DATA_BLOCK."""
|
||||
# Intenta encontrar un nombre entrecomillado primero
|
||||
match = re.search(r'(TYPE|DATA_BLOCK)\s+"([^"]+)"', line)
|
||||
if match:
|
||||
return match.group(2).strip() # El nombre está entre comillas
|
||||
|
||||
# Si no hay nombre entrecomillado, busca un nombre sin comillas
|
||||
match = re.search(r"(TYPE|DATA_BLOCK)\s+(\S+)", line)
|
||||
if match:
|
||||
return match.group(2).strip() # El nombre está sin comillas
|
||||
|
||||
def parse_udts(lines):
|
||||
"""Parsea User Defined Types de las líneas de código."""
|
||||
udt_json = {}
|
||||
udt_name = None
|
||||
nested_structs = []
|
||||
current_struct = None
|
||||
is_within_struct = False
|
||||
|
||||
for line in lines:
|
||||
line = clean_line(line)
|
||||
if "TYPE" in line and "END_TYPE" not in line:
|
||||
udt_name = extract_name(line)
|
||||
udt_json[udt_name] = {}
|
||||
current_struct = udt_json[udt_name]
|
||||
print(f"Creado UDT: {udt_name}")
|
||||
elif "END_TYPE" in line:
|
||||
print(f"Completado UDT: {udt_name}")
|
||||
udt_name = None
|
||||
nested_structs = []
|
||||
current_struct = None
|
||||
is_within_struct = False
|
||||
elif "STRUCT" in line and "END_STRUCT" not in line and udt_name is not None:
|
||||
struct_name = (
|
||||
"Struct" if "STRUCT" == line.strip() else line.split(":")[0].strip()
|
||||
)
|
||||
new_struct = {}
|
||||
current_struct[struct_name] = new_struct
|
||||
nested_structs.append(current_struct)
|
||||
current_struct = new_struct
|
||||
is_within_struct = True
|
||||
print(f"Creado STRUCT: {struct_name}")
|
||||
elif "END_STRUCT" in line and udt_name is not None:
|
||||
current_struct = nested_structs.pop() if nested_structs else None
|
||||
is_within_struct = bool(nested_structs)
|
||||
print(f"Cerrado STRUCT en UDT '{udt_name}'")
|
||||
elif udt_name and ":" in line and is_within_struct:
|
||||
parts = line.split(":")
|
||||
field_name = parts[0].strip()
|
||||
field_details = parts[1].strip().split("//")
|
||||
field_type = (
|
||||
field_details[0].replace(";", "").strip()
|
||||
) # Eliminando ';' del tipo de campo
|
||||
field_comment = parts[1].split("//")[1].strip() if "//" in parts[1] else ""
|
||||
if "Struct" in field_type:
|
||||
new_struct = {}
|
||||
current_struct[field_name] = new_struct
|
||||
nested_structs.append(current_struct)
|
||||
current_struct = new_struct
|
||||
print(f"Abierto STRUCT en línea en el campo '{field_name}'")
|
||||
else:
|
||||
current_struct[field_name] = {
|
||||
"type": field_type,
|
||||
"comment": field_comment,
|
||||
}
|
||||
print(
|
||||
f"Añadido campo '{field_name}' a STRUCT: Tipo={field_type}, Comentario={field_comment}"
|
||||
)
|
||||
|
||||
return udt_json
|
||||
|
||||
def parse_dbs(lines, udts):
|
||||
"""Parsea Data Blocks de las líneas de código."""
|
||||
db_json = {}
|
||||
db_name = None
|
||||
nested_structs = []
|
||||
current_struct = None
|
||||
is_within_struct = False
|
||||
|
||||
for line in lines:
|
||||
line = clean_line(line)
|
||||
if "DATA_BLOCK" in line and "END_DATA_BLOCK" not in line:
|
||||
db_name = extract_name(line)
|
||||
db_json[db_name] = {}
|
||||
current_struct = db_json[db_name]
|
||||
print(f"Creado DATA_BLOCK: {db_name}")
|
||||
elif "END_DATA_BLOCK" in line:
|
||||
print(f"Completado DATA_BLOCK: {db_name}")
|
||||
db_name = None
|
||||
nested_structs = []
|
||||
current_struct = None
|
||||
is_within_struct = False
|
||||
elif "STRUCT" in line and "END_STRUCT" not in line and db_name is not None:
|
||||
struct_name = (
|
||||
"Struct" if "STRUCT" == line.strip() else line.split(":")[0].strip()
|
||||
)
|
||||
new_struct = {}
|
||||
current_struct[struct_name] = new_struct
|
||||
nested_structs.append(current_struct)
|
||||
current_struct = new_struct
|
||||
is_within_struct = True
|
||||
print(f"Creado STRUCT en DB '{db_name}': {struct_name}")
|
||||
elif "END_STRUCT" in line and db_name is not None:
|
||||
current_struct = nested_structs.pop() if nested_structs else None
|
||||
is_within_struct = bool(nested_structs)
|
||||
print(f"Cerrado STRUCT en DB '{db_name}'")
|
||||
elif db_name and ":" in line and is_within_struct:
|
||||
parts = line.split(":")
|
||||
field_name = parts[0].strip()
|
||||
field_details = parts[1].strip().split("//")
|
||||
field_type = (
|
||||
field_details[0].replace(";", "").strip()
|
||||
) # Eliminando ';' del tipo de campo
|
||||
field_comment = parts[1].split("//")[1].strip() if "//" in parts[1] else ""
|
||||
if "Struct" in field_type:
|
||||
new_struct = {}
|
||||
current_struct[field_name] = new_struct
|
||||
nested_structs.append(current_struct)
|
||||
current_struct = new_struct
|
||||
print(f"Abierto STRUCT en línea en el campo '{field_name}' en DB '{db_name}'")
|
||||
else:
|
||||
current_struct[field_name] = {
|
||||
"type": field_type,
|
||||
"comment": field_comment,
|
||||
}
|
||||
print(
|
||||
f"Añadido campo '{field_name}' a STRUCT en DB '{db_name}': Tipo={field_type}, Comentario={field_comment}"
|
||||
)
|
||||
|
||||
return db_json
|
||||
|
||||
#=================================
|
||||
# FUNCIONES DE EXPANSIÓN DE UDT
|
||||
#=================================
|
||||
|
||||
def expand_udt_references(db_struct, udts):
|
||||
"""
|
||||
Expande recursivamente las referencias UDT en la estructura DB utilizando las definiciones UDT.
|
||||
"""
|
||||
if isinstance(db_struct, dict):
|
||||
for key, value in list(db_struct.items()):
|
||||
if isinstance(value, dict):
|
||||
# Recursión en diccionarios
|
||||
expand_udt_references(value, udts)
|
||||
elif isinstance(value, str) and key == "type": # Solo expande campos 'type'
|
||||
type_name = value.strip(
|
||||
'"'
|
||||
) # Elimina comillas que pueden envolver nombres UDT con espacios
|
||||
if type_name in udts:
|
||||
# Reemplaza la referencia UDT con su definición copiada en profundidad
|
||||
db_struct["is_udt_definition"] = True
|
||||
db_struct["fields"] = deepcopy(udts[type_name])
|
||||
|
||||
print(f"Expandido UDT '{type_name}' en el campo '{key}'")
|
||||
elif isinstance(db_struct, list):
|
||||
for item in db_struct:
|
||||
expand_udt_references(item, udts)
|
||||
|
||||
def handle_array_types(db_struct):
|
||||
"""
|
||||
Maneja tipos de arrays para expandirlos en múltiples campos como sub-elementos.
|
||||
Esta función procesa completamente los arrays expandiéndolos en elementos individuales.
|
||||
"""
|
||||
if isinstance(db_struct, dict):
|
||||
# Lista para almacenar nuevas entradas de array a agregar
|
||||
new_entries = {}
|
||||
# Lista de claves a eliminar después de procesar
|
||||
keys_to_remove = []
|
||||
|
||||
for key, value in list(db_struct.items()):
|
||||
if isinstance(value, dict):
|
||||
# Procesa recursivamente diccionarios anidados
|
||||
handle_array_types(value)
|
||||
|
||||
# Verificar si es un tipo array
|
||||
if "type" in value and isinstance(value["type"], str):
|
||||
array_match = re.match(r"ARRAY\s*\[(\d+)\s*\.\.\s*(\d+)\]\s*OF\s*(\w+)", value["type"], re.IGNORECASE)
|
||||
if array_match:
|
||||
lower_bound = int(array_match.group(1))
|
||||
upper_bound = int(array_match.group(2))
|
||||
base_type = array_match.group(3).strip()
|
||||
comment = value.get("comment", "")
|
||||
|
||||
print(f"Expandiendo array '{key}': {lower_bound}..{upper_bound} of {base_type}")
|
||||
|
||||
# Marcar para eliminar la definición original después
|
||||
keys_to_remove.append(key)
|
||||
|
||||
# Crear entrada para la definición del array
|
||||
new_entries[key] = {
|
||||
"type": f"Array[{lower_bound}..{upper_bound}] of {base_type}",
|
||||
"comment": comment,
|
||||
"is_array_definition": True
|
||||
}
|
||||
|
||||
# Crear elementos individuales del array
|
||||
for i in range(lower_bound, upper_bound + 1):
|
||||
array_key = f"{key}[{i}]"
|
||||
new_entries[array_key] = {
|
||||
"type": base_type,
|
||||
"comment": comment,
|
||||
"is_array_element": True
|
||||
}
|
||||
|
||||
# Eliminar los originales y agregar los nuevos
|
||||
for key in keys_to_remove:
|
||||
if key in db_struct:
|
||||
del db_struct[key]
|
||||
|
||||
# Agregar las nuevas entradas
|
||||
db_struct.update(new_entries)
|
||||
|
||||
def expand_dbs(udts, dbs):
|
||||
"""
|
||||
Expande todas las referencias UDT en todos los DBs y luego maneja tipos de arrays.
|
||||
"""
|
||||
for db_name, db_content in dbs.items():
|
||||
print(f"Expandiendo DB: {db_name}")
|
||||
# Primero expandir las referencias UDT
|
||||
expand_udt_references(db_content, udts)
|
||||
|
||||
# Luego, manejar y expandir los tipos de arrays
|
||||
print(f"Procesando arrays en DB: {db_name}")
|
||||
handle_array_types(db_content)
|
||||
|
||||
print(f"Completada expansión para DB: {db_name}")
|
||||
|
||||
# Registrar el resultado de la expansión para depuración
|
||||
print("\nEstructura DB después de la expansión:")
|
||||
for db_name, db_content in dbs.items():
|
||||
print(f"DB: {db_name} - Número de campos: {count_fields(db_content)}")
|
||||
|
||||
def count_fields(struct):
|
||||
"""Función auxiliar para contar campos en una estructura."""
|
||||
count = 0
|
||||
if isinstance(struct, dict):
|
||||
for key, value in struct.items():
|
||||
if isinstance(value, dict):
|
||||
if "type" in value:
|
||||
count += 1
|
||||
else:
|
||||
count += count_fields(value)
|
||||
return count
|
||||
|
||||
#=================================
|
||||
# FUNCIONES DE CÁLCULO DE OFFSET
|
||||
#=================================
|
||||
|
||||
def calculate_plc_address(type_name, byte_offset):
|
||||
"""
|
||||
Calcula la notación de dirección PLC basada en el tipo y offset.
|
||||
"""
|
||||
byte_size = TYPE_SIZES.get(type_name, 0)
|
||||
bit_offset = int((byte_offset - int(byte_offset)) * 8)
|
||||
byte_offset = int(byte_offset)
|
||||
if type_name == "Bool":
|
||||
return f"DBX{byte_offset}.{bit_offset}" # Dirección para bits individuales
|
||||
elif type_name == "Byte":
|
||||
return f"DBB{byte_offset}" # Dirección para bytes individuales
|
||||
elif byte_size == 2:
|
||||
return f"DBW{byte_offset}" # Dirección para words de dos bytes
|
||||
elif byte_size == 4:
|
||||
return f"DBD{byte_offset}" # Dirección para double words de cuatro bytes
|
||||
else:
|
||||
return f"DBX{byte_offset}.0" # Por defecto a dirección de bit para tipos de más de 4 bytes
|
||||
|
||||
def calculate_plc_size(size):
|
||||
"""Calcula la representación del tamaño PLC."""
|
||||
byte_size = size
|
||||
bit_offset = int((size - int(size)) * 8)
|
||||
size = int(size)
|
||||
if bit_offset > 0:
|
||||
return f"{size}.{bit_offset}"
|
||||
else:
|
||||
return f"{size}"
|
||||
|
||||
class OffsetState:
|
||||
"""Clase para mantener el estado durante el cálculo de offset."""
|
||||
def __init__(self):
|
||||
self.last_key_was_bool = False
|
||||
self.last_bit_offset = 0 # Para rastrear offsets de bit dentro de un byte
|
||||
self.current_offset = 0
|
||||
|
||||
def calculate_offsets(value, state, field_name="unknown"):
|
||||
"""Calcula offsets de memoria para elementos DB."""
|
||||
type_name = value["type"].strip() # Eliminar espacios en blanco
|
||||
is_array_element = value.get("is_array_element", False)
|
||||
is_array_definition = value.get("array_definition", False)
|
||||
is_udt_definition = value.get("is_udt_definition", False)
|
||||
|
||||
# No calculamos offsets para definiciones de array, solo para sus elementos
|
||||
if is_array_definition:
|
||||
print(f"→ Definición de array '{field_name}': no calculando offset")
|
||||
return state
|
||||
|
||||
print(f"Calculando offset para '{field_name}' (Tipo: {type_name}, Offset actual: {state.current_offset})")
|
||||
|
||||
if state.last_key_was_bool:
|
||||
is_array_element = True
|
||||
size = 0
|
||||
|
||||
# Alineación a boundaries de datos
|
||||
if not is_array_element:
|
||||
if state.current_offset % 2 != 0:
|
||||
old_offset = state.current_offset
|
||||
state.current_offset += 1 # Alinea al siguiente offset par si no es elemento de array
|
||||
print(f" → Alineación: Offset ajustado de {old_offset} a {state.current_offset}")
|
||||
|
||||
# Ajustando tamaños Bool basados en agrupación
|
||||
if type_name.upper() == "BOOL":
|
||||
state.last_key_was_bool = True
|
||||
size += 1 / 8
|
||||
print(f" → Tipo Bool detectado: usando {size} bytes")
|
||||
|
||||
else:
|
||||
if state.last_key_was_bool: # Después de bools
|
||||
state.last_key_was_bool = False # No es Bool
|
||||
if (
|
||||
state.last_bit_offset > 0
|
||||
or int(state.current_offset) != state.current_offset
|
||||
):
|
||||
state.last_bit_offset = 0
|
||||
old_offset = state.current_offset
|
||||
state.current_offset = int(state.current_offset) + 1
|
||||
print(f" → Post-Bool: Ajustando offset de {old_offset} a {state.current_offset}")
|
||||
|
||||
if state.current_offset % 2 != 0:
|
||||
old_offset = state.current_offset
|
||||
state.current_offset += 1 # Alinea al siguiente offset par
|
||||
print(f" → Post-Bool: Alineación a par: {old_offset} → {state.current_offset}")
|
||||
|
||||
# Manejo especial para tipos String
|
||||
if type_name.upper().startswith("STRING"):
|
||||
match = re.match(r"String\[(\d+)\]", type_name, re.IGNORECASE)
|
||||
state.last_bit_offset = 0
|
||||
if match:
|
||||
length = int(match.group(1))
|
||||
size = length + 2 # Cuenta para terminación nula y prefijo de longitud de cadena
|
||||
print(f" → String[{length}] detectado: usando {size} bytes")
|
||||
else:
|
||||
size = TYPE_SIZES.get("String", 0) # Tamaño estándar para strings
|
||||
print(f" → String genérico detectado: usando {size} bytes")
|
||||
|
||||
else: # Otros Tipos de Datos
|
||||
# Buscar el tipo ignorando mayúsculas/minúsculas
|
||||
type_upper = type_name.upper()
|
||||
type_size = None
|
||||
for key, value_size in TYPE_SIZES.items():
|
||||
if key.upper() == type_upper:
|
||||
type_size = value_size
|
||||
break
|
||||
|
||||
if type_size is not None:
|
||||
size = type_size
|
||||
print(f" → Tipo {type_name} encontrado: usando {size} bytes")
|
||||
else:
|
||||
print(f" → ADVERTENCIA: Tipo {type_name} no reconocido directamente")
|
||||
# Para arrays, manejo especial
|
||||
if "ARRAY" in type_upper:
|
||||
print(f" → Array detectado pero no se procesa directamente aquí")
|
||||
size = 0 # Los arrays se procesarán elemento por elemento
|
||||
else:
|
||||
size = 2 # Asumimos INT por defecto
|
||||
print(f" → Asumiendo tamaño de 2 bytes como valor predeterminado para {type_name}")
|
||||
|
||||
if size == 0 and not is_array_definition and not is_udt_definition:
|
||||
print(f"⚠️ ADVERTENCIA: Tipo '{type_name}' tiene tamaño cero. Asumiendo 2 bytes.")
|
||||
size = 2 # Tamaño mínimo
|
||||
|
||||
# Calcular dirección PLC
|
||||
plc_address = calculate_plc_address(type_name, state.current_offset)
|
||||
value["offset"] = state.current_offset
|
||||
value["plc_address"] = plc_address # Almacena la dirección PLC calculada
|
||||
value["size"] = calculate_plc_size(size)
|
||||
|
||||
# Actualizar offset y mostrar resultado
|
||||
old_offset = state.current_offset
|
||||
state.current_offset += size
|
||||
print(f" → Resultado: Dirección={plc_address}, Tamaño={size}, Nuevo offset={state.current_offset}")
|
||||
|
||||
return state
|
||||
|
||||
def collect_data_for_table(db_struct, offset_state, level=0, parent_prefix="", collected_data=None, relative_offset=0):
|
||||
"""
|
||||
Recoge datos recursivamente de la estructura DB para mostrar en formato tabular.
|
||||
Añade soporte para offsets relativos dentro de estructuras.
|
||||
"""
|
||||
if collected_data is None:
|
||||
collected_data = []
|
||||
|
||||
is_array_element = False
|
||||
increase_level = 0
|
||||
current_struct_base = offset_state.current_offset
|
||||
|
||||
if isinstance(db_struct, dict):
|
||||
for key, value in db_struct.items():
|
||||
# Omite claves 'fields' y 'Struct' en la ruta de nombre
|
||||
if key == "fields" or key == "Struct":
|
||||
next_prefix = parent_prefix # Continúa con el prefijo actual
|
||||
collect_data_for_table(value, offset_state, level, next_prefix, collected_data, relative_offset)
|
||||
continue
|
||||
|
||||
# Determinar el prefijo de nombre para este elemento
|
||||
if isinstance(value, dict):
|
||||
is_array_element = value.get("is_array_element", False)
|
||||
is_array_definition = value.get("array_definition", False)
|
||||
|
||||
# Construir el nombre del campo
|
||||
if not is_array_element:
|
||||
next_prefix = f"{parent_prefix}.{key}" if parent_prefix else key
|
||||
else:
|
||||
next_prefix = f"{parent_prefix}{key}" if parent_prefix else key
|
||||
|
||||
# Si es una definición de array, añadirla a la tabla sin calcular offset
|
||||
if isinstance(value, dict) and value.get("array_definition", False):
|
||||
field_data = {
|
||||
"Nombre": next_prefix,
|
||||
"Tipo": value.get("type", "N/A"),
|
||||
"Offset": relative_offset,
|
||||
"Dirección PLC": "N/A",
|
||||
"Comentario": value.get("comment", ""),
|
||||
}
|
||||
collected_data.append(field_data)
|
||||
print(f"✓ Añadida definición de array: {next_prefix} - (sin dirección)")
|
||||
|
||||
# No incrementar offset para definiciones de array, continuar con siguiente elemento
|
||||
continue
|
||||
|
||||
# Procesar campo normal con tipo
|
||||
if isinstance(value, dict) and "type" in value:
|
||||
# Calcular offset si no es una definición de array
|
||||
if not value.get("array_definition", False):
|
||||
# Pasar el nombre del campo para mejorar los logs
|
||||
offset_state = calculate_offsets(value, offset_state, field_name=next_prefix)
|
||||
|
||||
# Calcular offset relativo si es necesario
|
||||
element_relative_offset = value.get("offset", 0) - current_struct_base
|
||||
if is_array_element:
|
||||
element_relative_offset = value.get("offset", 0) - offset_state.current_offset + relative_offset
|
||||
|
||||
field_data = {
|
||||
"Nombre": next_prefix,
|
||||
"Tipo": value.get("type", "N/A"),
|
||||
"Offset": element_relative_offset if is_array_element else value.get("offset", 0),
|
||||
"Dirección PLC": value.get("plc_address", "N/A"),
|
||||
"Comentario": value.get("comment", ""),
|
||||
}
|
||||
collected_data.append(field_data)
|
||||
increase_level = 1
|
||||
print(f"✓ Añadido a tabla: {next_prefix} - {value.get('plc_address', 'N/A')}")
|
||||
|
||||
# Maneja recursivamente diccionarios y listas anidados
|
||||
if isinstance(value, dict) and not "type" in value:
|
||||
new_relative = offset_state.current_offset if not is_array_element else relative_offset
|
||||
collect_data_for_table(
|
||||
value,
|
||||
offset_state,
|
||||
level + increase_level,
|
||||
next_prefix,
|
||||
collected_data,
|
||||
new_relative
|
||||
)
|
||||
elif isinstance(db_struct, list):
|
||||
for index, item in enumerate(db_struct):
|
||||
item_prefix = f"{parent_prefix}[{index}]" if parent_prefix else f"[{index}]"
|
||||
collect_data_for_table(
|
||||
item, offset_state, level + increase_level, item_prefix, collected_data, relative_offset
|
||||
)
|
||||
|
||||
return collected_data
|
||||
|
||||
def initiate_conversion_to_table(db_struct):
|
||||
"""Inicia el proceso de conversión con un estado de offset nuevo."""
|
||||
offset_state = OffsetState()
|
||||
return collect_data_for_table(db_struct, offset_state)
|
||||
|
||||
def convert_to_table(dbs):
|
||||
"""
|
||||
Convierte los datos DB recogidos en un DataFrame de pandas.
|
||||
"""
|
||||
all_data = []
|
||||
for db_name, db_content in dbs.items():
|
||||
print(f"Procesando DB: {db_name}")
|
||||
db_data = initiate_conversion_to_table(db_content)
|
||||
all_data.extend(db_data)
|
||||
|
||||
df = pd.DataFrame(all_data)
|
||||
# Reordenar las columnas al formato deseado
|
||||
if not df.empty and all(col in df.columns for col in ["Nombre", "Tipo", "Offset", "Dirección PLC", "Comentario"]):
|
||||
df = df[["Nombre", "Tipo", "Offset", "Dirección PLC", "Comentario"]]
|
||||
return df
|
||||
|
||||
#=================================
|
||||
# FUNCIONES DE EXCEL
|
||||
#=================================
|
||||
|
||||
def format_excel_worksheet(worksheet):
|
||||
"""
|
||||
Formatea la hoja de cálculo de Excel con estilos profesionales.
|
||||
"""
|
||||
# Definir estilos
|
||||
header_font = Font(name='Arial', size=11, bold=True, color="FFFFFF")
|
||||
header_fill = PatternFill(start_color="4472C4", end_color="4472C4", fill_type="solid")
|
||||
array_fill = PatternFill(start_color="E2EFDA", end_color="E2EFDA", fill_type="solid")
|
||||
struct_fill = PatternFill(start_color="DEEBF7", end_color="DEEBF7", fill_type="solid")
|
||||
thin_border = Border(
|
||||
left=Side(style='thin'),
|
||||
right=Side(style='thin'),
|
||||
top=Side(style='thin'),
|
||||
bottom=Side(style='thin')
|
||||
)
|
||||
|
||||
# Aplicar estilos a la fila de encabezado
|
||||
for cell in worksheet[1]:
|
||||
cell.font = header_font
|
||||
cell.fill = header_fill
|
||||
cell.alignment = Alignment(horizontal='center', vertical='center')
|
||||
cell.border = thin_border
|
||||
|
||||
# Obtener el número de filas y columnas
|
||||
max_row = worksheet.max_row
|
||||
max_col = worksheet.max_column
|
||||
|
||||
# Aplicar estilos a las filas de datos
|
||||
for row in range(2, max_row + 1):
|
||||
# Verificar si es una estructura o array
|
||||
cell_name = worksheet.cell(row=row, column=1).value if worksheet.cell(row=row, column=1).value else ""
|
||||
cell_type = worksheet.cell(row=row, column=2).value if worksheet.cell(row=row, column=2).value else ""
|
||||
|
||||
# Aplicar fondos especiales para estructuras y arrays
|
||||
is_struct = "Struct" in str(cell_type)
|
||||
is_array = "Array" in str(cell_type)
|
||||
|
||||
# Aplicar bordes y alineación a todas las celdas
|
||||
for col in range(1, max_col + 1):
|
||||
cell = worksheet.cell(row=row, column=col)
|
||||
cell.border = thin_border
|
||||
|
||||
# Aplicar color de fondo según el tipo
|
||||
if is_struct:
|
||||
cell.fill = struct_fill
|
||||
elif is_array:
|
||||
cell.fill = array_fill
|
||||
|
||||
# Centrar columnas numéricas
|
||||
if col in [3, 4]:
|
||||
cell.alignment = Alignment(horizontal='center')
|
||||
|
||||
# Ajustar ancho de columnas
|
||||
column_widths = {
|
||||
1: 30, # Nombre
|
||||
2: 15, # Tipo
|
||||
3: 10, # Offset
|
||||
4: 15, # Dirección PLC
|
||||
5: 30 # Comentario
|
||||
}
|
||||
|
||||
# Importamos get_column_letter directamente de openpyxl.utils en las importaciones del script
|
||||
for col_num, width in column_widths.items():
|
||||
if col_num <= max_col:
|
||||
worksheet.column_dimensions[get_column_letter(col_num)].width = width
|
||||
|
||||
# Congelar la fila del encabezado
|
||||
worksheet.freeze_panes = "A2"
|
||||
|
||||
def save_dataframe_to_excel(df, filename, sheet_name):
|
||||
"""
|
||||
Guarda el DataFrame proporcionado en un archivo Excel y lo formatea.
|
||||
"""
|
||||
# Guardar el DataFrame en Excel
|
||||
df.to_excel(filename, index=False, sheet_name=sheet_name)
|
||||
print(f"Datos guardados en {filename}")
|
||||
|
||||
# Abrir el archivo Excel guardado para aplicar formato
|
||||
workbook = load_workbook(filename)
|
||||
worksheet = workbook[sheet_name]
|
||||
|
||||
# Aplicar formato a la hoja de cálculo
|
||||
format_excel_worksheet(worksheet)
|
||||
|
||||
# Guardar el libro de trabajo formateado
|
||||
workbook.save(filename)
|
||||
print(f"Formato aplicado a {filename}")
|
||||
|
||||
return workbook, worksheet
|
||||
|
||||
#=================================
|
||||
# FUNCIONES DE UTILIDAD DE ARCHIVOS
|
||||
#=================================
|
||||
|
||||
def select_file():
|
||||
"""
|
||||
Abre un diálogo de archivo para seleccionar un archivo .db y devuelve la ruta del archivo seleccionado.
|
||||
"""
|
||||
root = tk.Tk()
|
||||
root.withdraw() # Oculta la ventana raíz de tkinter
|
||||
|
||||
# Abre el diálogo de archivo y devuelve la ruta del archivo seleccionado
|
||||
file_path = filedialog.askopenfilename(
|
||||
title="Selecciona un archivo .db",
|
||||
filetypes=(("Archivos DB", "*.db"), ("Todos los archivos", "*.*"))
|
||||
)
|
||||
return file_path
|
||||
|
||||
def extract_file_details(file_path):
|
||||
"""
|
||||
Extrae y devuelve el nombre del archivo sin extensión, la extensión del archivo y la ruta del archivo.
|
||||
"""
|
||||
# Extrae la ruta completa del directorio
|
||||
path_only = os.path.dirname(file_path)
|
||||
|
||||
# Extrae el nombre completo del archivo con extensión
|
||||
full_file_name = os.path.basename(file_path)
|
||||
|
||||
# Separa la extensión del nombre del archivo
|
||||
file_name_without_extension, file_extension = os.path.splitext(full_file_name)
|
||||
|
||||
return (file_name_without_extension, file_extension, path_only)
|
||||
|
||||
def build_file_path(base_path, file_name, extension):
    """
    Construye una ruta de archivo completa dada una ruta base, un nombre de archivo y una extensión.
    """
    # Asegúrese de que la extensión esté en el formato correcto (es decir, comience con un punto)
    if not extension.startswith('.'):
        extension = '.' + extension

    # Separe el nombre base del archivo de su extensión si está presente
    file_name_without_extension, _ = os.path.splitext(file_name)

    # Reconstruir el nombre del archivo con la extensión correcta
    file_name_corrected = file_name_without_extension + extension

    # Construir la ruta completa del archivo
    full_path = os.path.join(base_path, file_name_corrected)

    return full_path


def open_file_explorer(path):
    """
    Abre el explorador de archivos en la ruta dada.
    """
    # Normaliza la ruta para asegurarse de que esté en el formato correcto
    normalized_path = os.path.normpath(path)

    # Comprueba si la ruta es un directorio o un archivo y formatea el comando en consecuencia
    if os.path.isdir(normalized_path):
        # Si es un directorio, usa el comando 'explorer' directamente
        command = f'explorer "{normalized_path}"'
    else:
        # Si es un archivo, usa el comando 'explorer /select,' para resaltar el archivo en su carpeta
        command = f'explorer /select,"{normalized_path}"'

    # Ejecuta el comando usando subprocess.run
    subprocess.run(command, shell=True)


#=================================
# FUNCIÓN PRINCIPAL
#=================================

def main():
    """
    Función principal para ejecutar la conversión de DB a Excel.
    """
    print("==================================================")
    print(" Convertidor de DB a Excel para Siemens S7 PLC")
    print("==================================================\n")

    # Seleccionar archivo
    print("Por favor, seleccione un archivo .db para procesar:")
    file_path = select_file()
    if not file_path:  # No se seleccionó ningún archivo
        print("❌ No se seleccionó ningún archivo. Operación cancelada.")
        return

    print(f"✓ Archivo seleccionado: {file_path}")

    try:
        # Leer el contenido del archivo
        with open(file_path, "r", encoding="utf-8-sig") as file:
            lines = file.readlines()

        # Extraer detalles del archivo
        file_name, extension, dest_path = extract_file_details(file_path)

        # Crear ruta de salida
        excel_path = build_file_path(dest_path, file_name, "xlsx")
        log_path = build_file_path(dest_path, f"{file_name}_log", "txt")

        print("\n▶ Iniciando procesamiento del archivo DB...")
        print(f" Nombre del archivo: {file_name}")
        print(f" Ruta de destino: {excel_path}")

        # Configurar logging a archivo
        import sys
        import datetime
        original_stdout = sys.stdout
        log_file = open(log_path, 'w', encoding='utf-8')
        sys.stdout = log_file

        print("=== LOG DE PROCESAMIENTO ===")
        print(f"Archivo procesado: {file_path}")
        print(f"Fecha de procesamiento: {datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')}")
        print("============================\n")

        # Parsear UDTs y DBs
        print("\n=== PARSEANDO UDTs Y DBs ===")
        udt_json = parse_udts(lines)
        db_json = parse_dbs(lines, udt_json)

        # Expandir DBs con definiciones UDT
        print("\n=== EXPANDIENDO ESTRUCTURAS ===")
        expand_dbs(udt_json, db_json)

        # Convertir a tabla
        print("\n=== CALCULANDO OFFSETS Y DIRECCIONES ===")
        df = convert_to_table(db_json)

        # Restaurar stdout para mostrar mensajes en la consola
        sys.stdout = original_stdout
        log_file.close()

        # Guardar en Excel
        print("\n▶ Generando archivo Excel...")
        workbook, worksheet = save_dataframe_to_excel(df, excel_path, file_name)

        # Mostrar resumen
        print("\n=== RESUMEN DE CONVERSIÓN ===")
        print(f"✓ Total de variables procesadas: {len(df)}")
        print(f"✓ Tamaño total del DB: {df['Offset'].max() if not df.empty else 0} bytes")
        print(f"✓ Archivo Excel generado: {excel_path}")
        print(f"✓ Archivo de log generado: {log_path}")

        print("\n✅ ¡Conversión completada con éxito!")

        # Abrir el archivo de salida en el Explorador
        print("\n▶ Abriendo el archivo Excel en el Explorador...")
        open_file_explorer(excel_path)

    except Exception as e:
        print(f"\n❌ ERROR: Se produjo un error durante la conversión:")
        print(f" {str(e)}")
        import traceback
        traceback.print_exc()
        print("\nPor favor, revise el archivo de entrada y vuelva a intentarlo.")


# Ejecutar la función principal cuando se ejecuta el script
if __name__ == "__main__":
    main()
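Editor's note: `open_file_explorer` above shells out to the Windows `explorer` command with `shell=True`, so it only works on Windows. A minimal cross-platform sketch (an editorial illustration, not part of this commit; the function name `open_in_file_manager` is hypothetical) would dispatch on `sys.platform` instead:

```python
import os
import subprocess
import sys

def open_in_file_manager(path: str) -> None:
    """Open `path` (or its containing folder) in the system file manager. Sketch only."""
    # Fall back to the containing directory when a file path is given
    target = path if os.path.isdir(path) else os.path.dirname(path) or "."
    target = os.path.normpath(target)
    if sys.platform == "win32":
        os.startfile(target)                  # Windows Explorer
    elif sys.platform == "darwin":
        subprocess.run(["open", target])      # Finder on macOS
    else:
        subprocess.run(["xdg-open", target])  # most Linux desktops
```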
@@ -1,5 +1,5 @@
 {
-    "name": "S7 DB Utilities",
+    "name": "Siemens-S7 : 01 : S7 DB Utilities",
     "description": "Utilidades para Trabajar con DBs de Siemens S7",
     "version": "1.0",
     "author": "Miguel"
@@ -1,15 +1,15 @@
 --- Log de Ejecución: x4.py ---
 Grupo: S7_DB_Utils
 Directorio de Trabajo: C:\Trabajo\SIDEL\09 - SAE452 - Diet as Regular - San Giovanni in Bosco\Reporte\DB1001
-Inicio: 2025-05-18 02:13:16
-Fin: 2025-05-18 02:13:16
-Duración: 0:00:00.162328
+Inicio: 2025-05-18 13:15:28
+Fin: 2025-05-18 13:15:28
+Duración: 0:00:00.188819
 Estado: SUCCESS (Código de Salida: 0)
 
 --- SALIDA ESTÁNDAR (STDOUT) ---
 Using working directory: C:\Trabajo\SIDEL\09 - SAE452 - Diet as Regular - San Giovanni in Bosco\Reporte\DB1001
 Los archivos de documentación generados se guardarán en: C:\Trabajo\SIDEL\09 - SAE452 - Diet as Regular - San Giovanni in Bosco\Reporte\DB1001\documentation
-Archivos JSON encontrados para procesar: 2
+Archivos JSON encontrados para procesar: 3
 
 --- Procesando archivo JSON: db1001_data.json ---
 Archivo JSON 'db1001_data.json' cargado correctamente.
@@ -21,6 +21,11 @@ Archivo JSON 'db1001_format.json' cargado correctamente.
 Archivo S7 reconstruido generado: C:\Trabajo\SIDEL\09 - SAE452 - Diet as Regular - San Giovanni in Bosco\Reporte\DB1001\documentation\db1001_format.txt
 Archivo Markdown de documentación generado: C:\Trabajo\SIDEL\09 - SAE452 - Diet as Regular - San Giovanni in Bosco\Reporte\DB1001\documentation\db1001_format.md
 
+--- Procesando archivo JSON: db1001_updated.json ---
+Archivo JSON 'db1001_updated.json' cargado correctamente.
+Archivo S7 reconstruido generado: C:\Trabajo\SIDEL\09 - SAE452 - Diet as Regular - San Giovanni in Bosco\Reporte\DB1001\documentation\db1001_updated.txt
+Archivo Markdown de documentación generado: C:\Trabajo\SIDEL\09 - SAE452 - Diet as Regular - San Giovanni in Bosco\Reporte\DB1001\documentation\db1001_updated.md
+
 --- Proceso de generación de documentación completado ---
 
 --- ERRORES (STDERR) ---
@@ -1,25 +1,30 @@
 --- Log de Ejecución: x6.py ---
 Grupo: S7_DB_Utils
 Directorio de Trabajo: C:\Trabajo\SIDEL\09 - SAE452 - Diet as Regular - San Giovanni in Bosco\Reporte\DB1001
-Inicio: 2025-05-18 02:20:21
-Fin: 2025-05-18 02:20:22
-Duración: 0:00:01.130771
+Inicio: 2025-05-18 12:06:45
+Fin: 2025-05-18 12:06:46
+Duración: 0:00:00.564906
 Estado: SUCCESS (Código de Salida: 0)
 
 --- SALIDA ESTÁNDAR (STDOUT) ---
 Using working directory: C:\Trabajo\SIDEL\09 - SAE452 - Diet as Regular - San Giovanni in Bosco\Reporte\DB1001
 Los archivos Excel de documentación se guardarán en: C:\Trabajo\SIDEL\09 - SAE452 - Diet as Regular - San Giovanni in Bosco\Reporte\DB1001\documentation
-Archivos JSON encontrados para procesar: 2
+Archivos JSON encontrados para procesar: 3
 
 --- Procesando archivo JSON para Excel: db1001_data.json ---
 Archivo JSON 'db1001_data.json' cargado correctamente.
-Generando documentación Excel para DB: 'HMI_Blender_Parameters' (desde db1001_data.json) -> C:\Trabajo\SIDEL\09 - SAE452 - Diet as Regular - San Giovanni in Bosco\Reporte\DB1001\documentation\db1001_data.json_HMI_Blender_Parameters.xlsx
-Excel documentation generated: C:\Trabajo\SIDEL\09 - SAE452 - Diet as Regular - San Giovanni in Bosco\Reporte\DB1001\documentation\db1001_data.json_HMI_Blender_Parameters.xlsx
+Generando documentación Excel para DB: 'HMI_Blender_Parameters' (desde db1001_data.json) -> C:\Trabajo\SIDEL\09 - SAE452 - Diet as Regular - San Giovanni in Bosco\Reporte\DB1001\documentation\db1001_data.json.xlsx
+Excel documentation generated: C:\Trabajo\SIDEL\09 - SAE452 - Diet as Regular - San Giovanni in Bosco\Reporte\DB1001\documentation\db1001_data.json.xlsx
 
 --- Procesando archivo JSON para Excel: db1001_format.json ---
 Archivo JSON 'db1001_format.json' cargado correctamente.
-Generando documentación Excel para DB: 'HMI_Blender_Parameters' (desde db1001_format.json) -> C:\Trabajo\SIDEL\09 - SAE452 - Diet as Regular - San Giovanni in Bosco\Reporte\DB1001\documentation\db1001_format.json_HMI_Blender_Parameters.xlsx
-Excel documentation generated: C:\Trabajo\SIDEL\09 - SAE452 - Diet as Regular - San Giovanni in Bosco\Reporte\DB1001\documentation\db1001_format.json_HMI_Blender_Parameters.xlsx
+Generando documentación Excel para DB: 'HMI_Blender_Parameters' (desde db1001_format.json) -> C:\Trabajo\SIDEL\09 - SAE452 - Diet as Regular - San Giovanni in Bosco\Reporte\DB1001\documentation\db1001_format.json.xlsx
+Excel documentation generated: C:\Trabajo\SIDEL\09 - SAE452 - Diet as Regular - San Giovanni in Bosco\Reporte\DB1001\documentation\db1001_format.json.xlsx
 
+--- Procesando archivo JSON para Excel: db1001_updated.json ---
+Archivo JSON 'db1001_updated.json' cargado correctamente.
+Generando documentación Excel para DB: 'HMI_Blender_Parameters' (desde db1001_updated.json) -> C:\Trabajo\SIDEL\09 - SAE452 - Diet as Regular - San Giovanni in Bosco\Reporte\DB1001\documentation\db1001_updated.json.xlsx
+Excel documentation generated: C:\Trabajo\SIDEL\09 - SAE452 - Diet as Regular - San Giovanni in Bosco\Reporte\DB1001\documentation\db1001_updated.json.xlsx
+
 --- Proceso de generación de documentación Excel completado ---
 
@@ -1,9 +1,9 @@
 --- Log de Ejecución: x7_value_updater.py ---
 Grupo: S7_DB_Utils
 Directorio de Trabajo: C:\Trabajo\SIDEL\09 - SAE452 - Diet as Regular - San Giovanni in Bosco\Reporte\DB1001
-Inicio: 2025-05-18 02:56:24
-Fin: 2025-05-18 02:56:25
-Duración: 0:00:00.761362
+Inicio: 2025-05-18 15:48:00
+Fin: 2025-05-18 15:48:01
+Duración: 0:00:00.811974
 Estado: SUCCESS (Código de Salida: 0)
 
 --- SALIDA ESTÁNDAR (STDOUT) ---
@@ -13,19 +13,21 @@ Los archivos de documentación se guardarán en: C:\Trabajo\SIDEL\09 - SAE452 -
 Se encontraron 1 pares de archivos para procesar.
 
 --- Procesando par de archivos ---
-Data file: db1001_data.db
-Format file: db1001_format.db
-Parseando archivo data: db1001_data.db
-Parseando archivo format: db1001_format.db
-Archivos JSON generados: db1001_data.json y db1001_format.json
+Data file: DB1001_data.AWL
+Format file: DB1001_format.AWL
+Parseando archivo data: DB1001_data.AWL
+Parseando archivo format: DB1001_format.AWL
+Archivos JSON generados: DB1001_data.json y DB1001_format.json
 Comparando estructuras para DB 'HMI_Blender_Parameters': 284 variables en _data, 284 variables en _format
 
 Los archivos son compatibles. Creando el archivo _updated...
-Archivo _updated generado: C:\Trabajo\SIDEL\09 - SAE452 - Diet as Regular - San Giovanni in Bosco\Reporte\DB1001\json\db1001_updated.json
-Archivo de comparación Excel generado: C:\Trabajo\SIDEL\09 - SAE452 - Diet as Regular - San Giovanni in Bosco\Reporte\DB1001\documentation\db1001_comparison.xlsx
-Archivo Markdown generado: C:\Trabajo\SIDEL\09 - SAE452 - Diet as Regular - San Giovanni in Bosco\Reporte\DB1001\documentation\db1001_updated.md
-Archivo S7 generado: C:\Trabajo\SIDEL\09 - SAE452 - Diet as Regular - San Giovanni in Bosco\Reporte\DB1001\documentation\db1001_updated.txt
-Archivo S7 copiado a: C:\Trabajo\SIDEL\09 - SAE452 - Diet as Regular - San Giovanni in Bosco\Reporte\DB1001\db1001_updated.db
+Procesando DB 'HMI_Blender_Parameters': 284 variables en _format, 284 variables en _data
+Estadísticas para DB 'HMI_Blender_Parameters': 280 variables actualizadas, 0 no encontradas
+Archivo _updated generado: C:\Trabajo\SIDEL\09 - SAE452 - Diet as Regular - San Giovanni in Bosco\Reporte\DB1001\json\DB1001_updated.json
+Archivo Excel de comparación generado: C:\Trabajo\SIDEL\09 - SAE452 - Diet as Regular - San Giovanni in Bosco\Reporte\DB1001\documentation\DB1001_comparison.xlsx
+Archivo Markdown generado: C:\Trabajo\SIDEL\09 - SAE452 - Diet as Regular - San Giovanni in Bosco\Reporte\DB1001\documentation\DB1001_updated.md
+Archivo S7 generado: C:\Trabajo\SIDEL\09 - SAE452 - Diet as Regular - San Giovanni in Bosco\Reporte\DB1001\documentation\DB1001_updated.txt
+Archivo S7 copiado a: C:\Trabajo\SIDEL\09 - SAE452 - Diet as Regular - San Giovanni in Bosco\Reporte\DB1001\DB1001_updated.AWL
 
 --- Proceso completado ---
 
@@ -1,22 +1,4 @@
 {
-    "x1.py": {
-        "display_name": "01: Copiar Valores Actuales y de Inizio",
-        "short_description": "Copia los valores desde una DB exportada como .DB a otra",
-        "long_description": "Copia los valores desde una DB exportada como .DB a otra usando la posición de cada variable. Primero controla que el tipo de datos sea correcto para continuar con la asignacion.",
-        "hidden": false
-    },
-    "x2.py": {
-        "display_name": "02: Generar comparativa",
-        "short_description": "Genera archivo Excel con la comparación de los 3 archivos.",
-        "long_description": "",
-        "hidden": false
-    },
-    "DB_Parser.py": {
-        "display_name": "DB_Parser",
-        "short_description": "Sin descripción corta.",
-        "long_description": "",
-        "hidden": true
-    },
     "x3.py": {
         "display_name": "03: Parse DB/AWL",
         "short_description": "Crear archivos json haciendo parsing de los archivos .db o .awl",
@@ -30,19 +12,19 @@
         "hidden": false
     },
     "x5.py": {
-        "display_name": "05: Generar Descripción MD del JSON",
+        "display_name": "05: Generar Descripción MD",
         "short_description": "Genera documentación descriptiva de archivos JSON en Markdown.",
         "long_description": "Crea un archivo Markdown que documenta la estructura interna de los archivos JSON (generados por x3.py). Detalla UDTs y DBs, incluyendo sus miembros, offsets, tipos de datos, y valores iniciales/actuales, facilitando la comprensión del contenido del JSON.",
         "hidden": false
     },
     "x6.py": {
-        "display_name": "06: Generar Excel desde JSON",
+        "display_name": "06: Generar Excel",
         "short_description": "Genera documentación de DBs en formato Excel (.xlsx) desde JSON.",
         "long_description": "Procesa archivos JSON (generados por x3.py) y exporta la información de cada Bloque de Datos (DB) a un archivo Excel (.xlsx). La hoja de cálculo incluye detalles como direcciones, nombres de variables, tipos de datos, valores iniciales, valores actuales y comentarios.",
         "hidden": false
     },
     "x7_value_updater.py": {
-        "display_name": "07: Actualizar Valores de DB (JSON)",
+        "display_name": "07: Actualizar Valores data+format->updated",
         "short_description": "Busca archivos .db o .awl con la terminacion _data y _format. Si los encuentra y son compatibles usa los datos de _data para generar un _updated con los nombres de las variables de _format",
        "long_description": "Procesa pares de archivos a JSON (_data.json y _format.json, generados por x3.py). Compara sus estructuras por offset para asegurar compatibilidad. Si son compatibles, crea un nuevo archivo _updated.json que combina la estructura del _format.json con los valores actuales del _data.json.",
         "hidden": false
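Editor's note: the hunks above only rename display names and drop the x1/x2/DB_Parser entries; the per-script schema (display_name, short_description, long_description, hidden) is unchanged. As a rough sketch of how a launcher could consume such a descriptor file (the file path and function name are assumptions, not taken from this commit):

```python
import json

def load_visible_scripts(descriptor_path: str) -> list[tuple[str, str]]:
    """Return (script file, display_name) pairs for entries not marked hidden. Sketch only."""
    with open(descriptor_path, encoding="utf-8") as fh:
        descriptors = json.load(fh)
    visible = [
        (script, meta.get("display_name", script))
        for script, meta in descriptors.items()
        if not meta.get("hidden", False)
    ]
    # Display names carry a numeric prefix ("03: ...", "05: ..."), so a plain
    # sort on the name yields the intended menu order.
    return sorted(visible, key=lambda item: item[1])
```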
@@ -1,497 +0,0 @@
|
|||
import re
|
||||
import os
|
||||
import sys
|
||||
import json
|
||||
import glob
|
||||
|
||||
script_root = os.path.dirname(
|
||||
os.path.dirname(os.path.dirname(os.path.dirname(__file__)))
|
||||
)
|
||||
sys.path.append(script_root)
|
||||
from backend.script_utils import load_configuration
|
||||
|
||||
def find_working_directory():
|
||||
configs = load_configuration()
|
||||
working_directory = configs.get("working_directory")
|
||||
if not working_directory:
|
||||
print("No working directory specified in the configuration file.")
|
||||
sys.exit(1)
|
||||
return working_directory
|
||||
|
||||
def find_data_files(working_dir, source_pattern_suffix="_data.", target_pattern_suffix="_format."):
|
||||
"""Find source and target files based on glob patterns and suffixes."""
|
||||
all_db_files = glob.glob(os.path.join(working_dir, "*.db"))
|
||||
all_awl_files = glob.glob(os.path.join(working_dir, "*.awl"))
|
||||
all_txt_files = glob.glob(os.path.join(working_dir, "*.txt")) # For .db.txt style
|
||||
|
||||
potential_files = all_db_files + all_awl_files + all_txt_files
|
||||
|
||||
source_files_found = []
|
||||
target_files_found = []
|
||||
|
||||
for f_path in potential_files:
|
||||
f_name = os.path.basename(f_path)
|
||||
# Check for source pattern (e.g., ends with _data.db or _data.db.txt)
|
||||
# We check if `source_pattern_suffix` is part of the name before the final extension
|
||||
name_part, ext_part = os.path.splitext(f_name)
|
||||
if name_part.endswith(source_pattern_suffix.rstrip('.')): # Handles cases like _data.db or _data.db (if suffix includes .)
|
||||
source_files_found.append(f_path)
|
||||
elif source_pattern_suffix.rstrip('.') in name_part and f_name.endswith(".txt") and ".db" in name_part : # for like _data.db.txt
|
||||
if name_part.split(".db")[0].endswith(source_pattern_suffix.rstrip('.')):
|
||||
source_files_found.append(f_path)
|
||||
|
||||
|
||||
# Check for target pattern
|
||||
name_part_target, ext_part_target = os.path.splitext(f_name)
|
||||
if name_part_target.endswith(target_pattern_suffix.rstrip('.')):
|
||||
target_files_found.append(f_path)
|
||||
elif target_pattern_suffix.rstrip('.') in name_part_target and f_name.endswith(".txt") and ".db" in name_part_target: # for like _format.db.txt
|
||||
if name_part_target.split(".db")[0].endswith(target_pattern_suffix.rstrip('.')):
|
||||
target_files_found.append(f_path)
|
||||
|
||||
|
||||
if not source_files_found:
|
||||
print(f"Warning: No source files found matching pattern ending with '{source_pattern_suffix}*' in '{working_dir}'.")
|
||||
# Try a broader search for any file containing '_data' if the strict one fails
|
||||
source_files_found = [f for f in potential_files if "_data" in os.path.basename(f)]
|
||||
if source_files_found:
|
||||
print(f"Found potential source files with less strict '_data' search: {source_files_found}")
|
||||
|
||||
|
||||
if not target_files_found:
|
||||
print(f"Warning: No target files found matching pattern ending with '{target_pattern_suffix}*' in '{working_dir}'.")
|
||||
# Try a broader search for any file containing '_format'
|
||||
target_files_found = [f for f in potential_files if "_format" in os.path.basename(f)]
|
||||
if target_files_found:
|
||||
print(f"Found potential target files with less strict '_format' search: {target_files_found}")
|
||||
|
||||
|
||||
# Logic to select the best match if multiple are found (e.g. prefer .db over .txt, or based on modification time)
|
||||
# For now, just take the first one found.
|
||||
source_file = source_files_found[0] if source_files_found else None
|
||||
target_file = target_files_found[0] if target_files_found else None
|
||||
|
||||
if source_file: print(f"Selected source file: {os.path.basename(source_file)}")
|
||||
if target_file: print(f"Selected target file: {os.path.basename(target_file)}")
|
||||
|
||||
return source_file, target_file
|
||||
|
||||
|
||||
def extract_sections(content):
|
||||
content = content.replace('\r\n', '\n') # Normalize line endings
|
||||
|
||||
udt_definitions_content = ""
|
||||
udt_matches = list(re.finditer(r'(?s)(TYPE\s+.*?\s+END_TYPE\s*\n?)', content, re.IGNORECASE))
|
||||
|
||||
content_after_udts = content
|
||||
if udt_matches:
|
||||
udt_definitions_content = "".join(match.group(0) for match in udt_matches)
|
||||
last_udt_end = udt_matches[-1].end()
|
||||
content_after_udts = content[last_udt_end:]
|
||||
|
||||
header_match = re.search(r'(?s)^(.*?(?:DATA_BLOCK.*?VERSION.*?\n))(.*?STRUCT)', content_after_udts, re.IGNORECASE)
|
||||
if header_match:
|
||||
header = header_match.group(1)
|
||||
# The rest_of_content should start from "STRUCT"
|
||||
rest_of_content = content_after_udts[header_match.start(2):]
|
||||
else: # Fallback if the specific DATA_BLOCK header is not found
|
||||
header_fallback_match = re.search(r'(?s)(.*?)(STRUCT)', content_after_udts, re.IGNORECASE)
|
||||
if header_fallback_match:
|
||||
header = header_fallback_match.group(1)
|
||||
# Ensure rest_of_content starts with "STRUCT"
|
||||
rest_of_content = content_after_udts[header_fallback_match.start(2) - len("STRUCT") if header_fallback_match.start(2) >= len("STRUCT") else 0:]
|
||||
if not rest_of_content.lstrip().upper().startswith("STRUCT"): # Verification
|
||||
# This might happen if STRUCT was at the very beginning.
|
||||
rest_of_content = "STRUCT" + rest_of_content # Prepend if missing
|
||||
else: # No STRUCT found at all after UDTs
|
||||
print("Critical Warning: No 'STRUCT' keyword found for DATA_BLOCK content.")
|
||||
return udt_definitions_content, content_after_udts, "", "", ""
|
||||
|
||||
decl_match = re.search(r'(?s)STRUCT\s+(.*?)BEGIN', rest_of_content, re.IGNORECASE)
|
||||
decl_section = decl_match.group(1) if decl_match else ""
|
||||
|
||||
init_match = re.search(r'(?s)BEGIN\s+(.*?)END_DATA_BLOCK', rest_of_content, re.IGNORECASE)
|
||||
init_section = init_match.group(1) if init_match else ""
|
||||
|
||||
footer_match = re.search(r'(?s)END_DATA_BLOCK(.*?)$', rest_of_content, re.IGNORECASE)
|
||||
footer = footer_match.group(1) if footer_match else ""
|
||||
|
||||
return udt_definitions_content, header, decl_section, init_section, footer
|
||||
|
||||
|
||||
def analyze_source_file(decl_section, init_section):
|
||||
source_decl_values = []
|
||||
source_init_values = []
|
||||
|
||||
decl_idx = 0
|
||||
for line_content in decl_section.split('\n'):
|
||||
line = line_content.strip()
|
||||
if not line or line.startswith('//') or \
|
||||
(line.upper().startswith('STRUCT') and ';' not in line) or \
|
||||
(line.upper().startswith('END_STRUCT') and ';' not in line) :
|
||||
continue
|
||||
|
||||
if ';' in line:
|
||||
type_match = re.search(r':\s*([^:=;]+)', line)
|
||||
var_type = type_match.group(1).strip() if type_match else ""
|
||||
value = None
|
||||
comment = ''
|
||||
|
||||
assignment_match = re.search(r':=\s*([^;]+)', line)
|
||||
if assignment_match:
|
||||
value = assignment_match.group(1).strip()
|
||||
comment_match = re.search(r';(.*)', line[assignment_match.end():])
|
||||
if comment_match: comment = comment_match.group(1).strip()
|
||||
else:
|
||||
comment_match = re.search(r';(.*)', line)
|
||||
if comment_match: comment = comment_match.group(1).strip()
|
||||
|
||||
source_decl_values.append({
|
||||
"index": decl_idx, "type": var_type, "value": value,
|
||||
"comment": comment, "original_line_for_debug": line
|
||||
})
|
||||
decl_idx += 1
|
||||
|
||||
init_idx = 0
|
||||
for line_content in init_section.split('\n'):
|
||||
line = line_content.strip()
|
||||
if not line or line.startswith('//'): continue
|
||||
|
||||
assignment_match = re.search(r':=\s*([^;]+)', line)
|
||||
if assignment_match and ';' in line:
|
||||
value = assignment_match.group(1).strip()
|
||||
comment_match = re.search(r';(.*)', line[assignment_match.end():])
|
||||
comment = comment_match.group(1).strip() if comment_match else ""
|
||||
|
||||
source_init_values.append({
|
||||
"index": init_idx, "value": value, "comment": comment,
|
||||
"original_line_for_debug": line
|
||||
})
|
||||
init_idx += 1
|
||||
|
||||
return source_decl_values, source_init_values
|
||||
|
||||
|
||||
def analyze_target_declarations(decl_section):
|
||||
target_decl_info_list = []
|
||||
current_var_idx = 0
|
||||
decl_lines_split = decl_section.split('\n')
|
||||
|
||||
for line_num, line_content in enumerate(decl_lines_split):
|
||||
original_line = line_content
|
||||
line = line_content.strip()
|
||||
is_udt_ref = False
|
||||
udt_name = None
|
||||
var_type_str = None
|
||||
|
||||
entry = {
|
||||
"line_index_in_section": line_num, "var_idx": -1,
|
||||
"is_udt_instance": False, "udt_name_if_any": None,
|
||||
"original_line": original_line, "type": None
|
||||
}
|
||||
|
||||
if not line or line.startswith('//') or \
|
||||
(line.upper().startswith('STRUCT') and ';' not in line and ':' not in line) or \
|
||||
(line.upper().startswith('END_STRUCT') and ';' not in line and ':' not in line):
|
||||
target_decl_info_list.append(entry)
|
||||
continue
|
||||
|
||||
if ';' in line:
|
||||
var_type_match = re.search(r':\s*([^:=;]+)', line)
|
||||
var_type_str = var_type_match.group(1).strip() if var_type_match else ""
|
||||
|
||||
udt_match = re.search(r':\s*"(.*?)"', line)
|
||||
if udt_match:
|
||||
is_udt_ref = True
|
||||
udt_name = udt_match.group(1)
|
||||
|
||||
entry.update({
|
||||
"var_idx": current_var_idx,
|
||||
"is_udt_instance": is_udt_ref,
|
||||
"udt_name_if_any": udt_name,
|
||||
"type": var_type_str
|
||||
})
|
||||
current_var_idx += 1
|
||||
|
||||
target_decl_info_list.append(entry)
|
||||
|
||||
return target_decl_info_list
|
||||
|
||||
def analyze_target_assignments(init_section):
|
||||
target_init_info_list = []
|
||||
current_assign_idx = 0
|
||||
init_lines_split = init_section.split('\n')
|
||||
|
||||
for line_num, line_content in enumerate(init_lines_split):
|
||||
original_line = line_content
|
||||
line = line_content.strip()
|
||||
|
||||
entry = {"line_index_in_section": line_num, "assign_idx": -1, "original_line": original_line}
|
||||
|
||||
if not line or line.startswith('//'):
|
||||
target_init_info_list.append(entry)
|
||||
continue
|
||||
|
||||
if ':=' in line and ';' in line:
|
||||
entry["assign_idx"] = current_assign_idx
|
||||
current_assign_idx += 1
|
||||
|
||||
target_init_info_list.append(entry)
|
||||
|
||||
return target_init_info_list
|
||||
|
||||
def is_compatible_type(source_value_str, target_type_str):
|
||||
if source_value_str is None: return True
|
||||
if not target_type_str: return True
|
||||
|
||||
s_val = source_value_str.upper()
|
||||
t_type = target_type_str.upper()
|
||||
|
||||
if "STRING" in t_type: return s_val.startswith("'") and s_val.endswith("'")
|
||||
if "BOOL" == t_type: return s_val in ["TRUE", "FALSE", "1", "0"]
|
||||
if "BYTE" == t_type: return s_val.startswith(("B#16#", "16#")) or (s_val.isdigit() and 0 <= int(s_val) <= 255)
|
||||
if "WORD" == t_type or "DWORD" == t_type : return s_val.startswith(("W#16#", "DW#16#", "16#"))
|
||||
if "INT" == t_type:
|
||||
try: int(s_val); return True
|
||||
except ValueError: return False
|
||||
if "DINT" == t_type:
|
||||
try:
|
||||
int(s_val[2:]) if s_val.startswith("L#") else int(s_val)
|
||||
return True
|
||||
except ValueError: return False
|
||||
if "REAL" == t_type:
|
||||
try: float(s_val.replace('E', 'e')); return True
|
||||
except ValueError: return False
|
||||
if t_type.startswith("ARRAY"): return True
|
||||
return True
|
||||
|
||||
|
||||
def transfer_values_by_position(source_file_path, target_file_path, output_file_path):
|
||||
try:
|
||||
with open(source_file_path, 'r', encoding='utf-8-sig') as f:
|
||||
source_content = f.read()
|
||||
with open(target_file_path, 'r', encoding='utf-8-sig') as f:
|
||||
target_content = f.read()
|
||||
|
||||
source_udt_defs_ignored, source_header_ignored, source_decl_sec, source_init_sec, source_footer_ignored = extract_sections(source_content)
|
||||
target_udt_defs, target_header, target_decl_sec, target_init_sec, target_footer = extract_sections(target_content)
|
||||
|
||||
source_decl_values, source_init_values = analyze_source_file(source_decl_sec, source_init_sec)
|
||||
|
||||
s_decl_ptr = 0
|
||||
decl_values_transferred_count = 0
|
||||
init_values_transferred_count = 0
|
||||
|
||||
processed_target_udt_lines = []
|
||||
if target_udt_defs:
|
||||
udt_section_lines = target_udt_defs.split('\n')
|
||||
in_udt_struct_definition = False
|
||||
|
||||
for udt_line_content in udt_section_lines:
|
||||
line_ws = udt_line_content
|
||||
stripped_line = udt_line_content.strip()
|
||||
modified_udt_line = line_ws
|
||||
|
||||
if stripped_line.upper().startswith("TYPE"): in_udt_struct_definition = False
|
||||
if stripped_line.upper().startswith("STRUCT") and not stripped_line.upper().startswith("END_STRUCT"):
|
||||
prev_lines = [l.strip().upper() for l in processed_target_udt_lines if l.strip()]
|
||||
if prev_lines and prev_lines[-1].startswith("TYPE"): in_udt_struct_definition = True
|
||||
|
||||
if stripped_line.upper().startswith("END_STRUCT"): in_udt_struct_definition = False
|
||||
|
||||
if in_udt_struct_definition and ';' in stripped_line and \
|
||||
not stripped_line.upper().startswith(("STRUCT", "END_STRUCT", "//")):
|
||||
if s_decl_ptr < len(source_decl_values):
|
||||
src_data = source_decl_values[s_decl_ptr]
|
||||
src_val_str = src_data["value"]
|
||||
src_comment = src_data["comment"]
|
||||
|
||||
type_m = re.search(r':\s*([^:=;]+)', stripped_line)
|
||||
target_member_type = type_m.group(1).strip() if type_m else ""
|
||||
|
||||
if src_val_str is not None:
|
||||
if is_compatible_type(src_val_str, target_member_type):
|
||||
parts = line_ws.split(';',1)
|
||||
decl_part = parts[0]
|
||||
comment_part = f";{parts[1]}" if len(parts) > 1 else ";"
|
||||
|
||||
if ':=' in decl_part: mod_decl = re.sub(r':=\s*[^;]+', f':= {src_val_str}', decl_part.rstrip())
|
||||
else: mod_decl = decl_part.rstrip() + f' := {src_val_str}'
|
||||
|
||||
final_comment = comment_part
|
||||
if comment_part == ";" and src_comment: final_comment = f"; {src_comment}"
|
||||
modified_udt_line = mod_decl + final_comment
|
||||
decl_values_transferred_count +=1
|
||||
else:
|
||||
parts = line_ws.split(';',1)
|
||||
decl_part = parts[0]
|
||||
comment_part = f";{parts[1]}" if len(parts) > 1 else ";"
|
||||
if ':=' in decl_part:
|
||||
mod_decl = re.sub(r'\s*:=\s*[^;]+', '', decl_part.rstrip())
|
||||
modified_udt_line = mod_decl + comment_part
|
||||
s_decl_ptr += 1
|
||||
processed_target_udt_lines.append(modified_udt_line)
|
||||
target_udt_defs_updated = '\n'.join(processed_target_udt_lines)
|
||||
else:
|
||||
target_udt_defs_updated = target_udt_defs
|
||||
|
||||
target_decl_block_info = analyze_target_declarations(target_decl_sec)
|
||||
output_decl_block_lines = target_decl_sec.split('\n')
|
||||
|
||||
for target_info in target_decl_block_info:
|
||||
line_idx_in_sec = target_info["line_index_in_section"]
|
||||
if target_info["var_idx"] == -1 or target_info["is_udt_instance"]: continue
|
||||
|
||||
if s_decl_ptr < len(source_decl_values):
|
||||
src_data = source_decl_values[s_decl_ptr]
|
||||
src_val_str = src_data["value"]
|
||||
src_comment = src_data["comment"]
|
||||
target_type = target_info["type"]
|
||||
original_target_line_ws = target_info["original_line"]
|
||||
|
||||
if src_val_str is not None:
|
||||
if is_compatible_type(src_val_str, target_type):
|
||||
parts = original_target_line_ws.split(';',1)
|
||||
decl_part = parts[0]
|
||||
comment_part = f";{parts[1]}" if len(parts) > 1 else ";"
|
||||
|
||||
if ':=' in decl_part: mod_decl = re.sub(r':=\s*[^;]+', f':= {src_val_str}', decl_part.rstrip())
|
||||
else: mod_decl = decl_part.rstrip() + f' := {src_val_str}'
|
||||
|
||||
final_comment = comment_part
|
||||
if comment_part == ";" and src_comment: final_comment = f"; {src_comment}"
|
||||
output_decl_block_lines[line_idx_in_sec] = mod_decl + final_comment
|
||||
decl_values_transferred_count +=1
|
||||
else:
|
||||
parts = original_target_line_ws.split(';',1)
|
||||
decl_part = parts[0]
|
||||
comment_part = f";{parts[1]}" if len(parts) > 1 else ";"
|
||||
if ':=' in decl_part:
|
||||
mod_decl = re.sub(r'\s*:=\s*[^;]+', '', decl_part.rstrip())
|
||||
output_decl_block_lines[line_idx_in_sec] = mod_decl + comment_part
|
||||
s_decl_ptr += 1
|
||||
else: pass
|
||||
|
||||
target_init_block_info = analyze_target_assignments(target_init_sec)
|
||||
output_init_block_lines = target_init_sec.split('\n')
|
||||
|
||||
for target_info in target_init_block_info:
|
||||
line_idx_in_sec = target_info["line_index_in_section"]
|
||||
if target_info["assign_idx"] == -1: continue
|
||||
|
||||
current_target_assign_idx = target_info["assign_idx"]
|
||||
original_target_line_ws = target_info["original_line"]
|
||||
|
||||
if current_target_assign_idx < len(source_init_values):
|
||||
src_data = source_init_values[current_target_assign_idx]
|
||||
src_val_str = src_data["value"]
|
||||
src_comment = src_data["comment"]
|
||||
|
||||
if src_val_str is not None:
|
||||
parts = original_target_line_ws.split(';',1)
|
||||
assign_part_target = parts[0]
|
||||
comment_part_target = f";{parts[1]}" if len(parts) > 1 else ";"
|
||||
|
||||
mod_assign = re.sub(r':=\s*.*$', f':= {src_val_str}', assign_part_target.rstrip())
|
||||
|
||||
final_comment = comment_part_target
|
||||
if comment_part_target == ";" and src_comment: final_comment = f"; {src_comment}"
|
||||
output_init_block_lines[line_idx_in_sec] = mod_assign + final_comment
|
||||
init_values_transferred_count += 1
|
||||
|
||||
final_parts = []
|
||||
if target_udt_defs_updated.strip(): final_parts.append(target_udt_defs_updated.rstrip('\n') + '\n\n') # Ensure space after UDTs
|
||||
elif target_udt_defs: final_parts.append(target_udt_defs)
|
||||
|
||||
|
||||
if target_header.strip() : final_parts.append(target_header) # Header already includes its spacing
|
||||
elif target_header and not target_udt_defs_updated.strip(): # if header has only newlines but no UDTs before it
|
||||
final_parts.append(target_header)
|
||||
|
||||
|
||||
if target_decl_sec.strip():
|
||||
final_parts.append("STRUCT\n")
|
||||
final_parts.append('\n'.join(output_decl_block_lines))
|
||||
final_parts.append("\n")
|
||||
elif target_decl_sec:
|
||||
final_parts.append(target_decl_sec)
|
||||
|
||||
final_parts.append("BEGIN\n")
|
||||
final_parts.append('\n'.join(output_init_block_lines))
|
||||
# Ensure END_DATA_BLOCK is on its own line or correctly spaced
|
||||
final_parts.append("\nEND_DATA_BLOCK")
|
||||
if target_footer: final_parts.append(target_footer.rstrip('\n') + '\n' if target_footer.strip() else target_footer)
|
||||
|
||||
|
||||
final_content = "".join(final_parts)
|
||||
# Ensure there's a newline at the end of the file
|
||||
if not final_content.endswith('\n'): final_content += '\n'
|
||||
# Remove potential multiple blank lines at the end, keep one
|
||||
final_content = re.sub(r'\n\s*\n$', '\n', final_content)
|
||||
|
||||
|
||||
with open(output_file_path, 'w', encoding='utf-8') as f:
|
||||
f.write(final_content)
|
||||
|
||||
print(f"\nSuccessfully transferred {decl_values_transferred_count} initial values and {init_values_transferred_count} current values.")
|
||||
print(f"Output file created: {output_file_path}")
|
||||
return True
|
||||
|
||||
except FileNotFoundError:
|
||||
print(f"Error: File not found. Source: '{source_file_path}', Target: '{target_file_path}'")
|
||||
return False
|
||||
except Exception as e:
|
||||
print(f"An error occurred during transfer: {e}")
|
||||
import traceback
|
||||
print(traceback.format_exc())
|
||||
return False
|
||||
|
||||
def main():
|
||||
print("PLC Data Block Adapter - Advanced UDT Handling (Restored Auto File Find)")
|
||||
print("========================================================================")
|
||||
|
||||
working_dir = find_working_directory()
|
||||
print(f"Using working directory: {working_dir}")
|
||||
|
||||
# Using automatic file finding based on patterns
|
||||
# "_data." will match _data.db, _data.awl, _data.db.txt (if .txt is handled in find_data_files)
|
||||
source_f, target_f = find_data_files(working_dir,
|
||||
source_pattern_suffix="_data",
|
||||
target_pattern_suffix="_format")
|
||||
|
||||
if not source_f or not target_f:
|
||||
print("Error: Could not automatically find required source or target files using patterns.")
|
||||
print("Please ensure files ending with e.g., '_data.db' (source) and '_format.db' (target) exist.")
|
||||
return False
|
||||
|
||||
# Construct output name
|
||||
target_basename = os.path.basename(target_f)
|
||||
name_part, first_ext = os.path.splitext(target_basename)
|
||||
if first_ext.lower() == ".txt" and ".db" in name_part.lower(): # Handles .db.txt
|
||||
name_part, second_ext = os.path.splitext(name_part) # name_part is now "xxx_format"
|
||||
output_basename = name_part + "_updated" + second_ext + first_ext # e.g. xxx_format_updated.db.txt
|
||||
elif first_ext.lower() in ['.db', '.awl']:
|
||||
output_basename = name_part + "_updated" + first_ext
|
||||
else: # Fallback for other extensions or no extension
|
||||
output_basename = target_basename.rsplit('.',1)[0] if '.' in target_basename else target_basename
|
||||
output_basename += "_updated" + ('.' + target_basename.rsplit('.',1)[1] if '.' in target_basename else ".db")
|
||||
|
||||
|
||||
output_f = os.path.join(working_dir, output_basename)
|
||||
|
||||
print(f"\nProcessing:")
|
||||
print(f" Source: {os.path.basename(source_f)}")
|
||||
print(f" Target: {os.path.basename(target_f)}")
|
||||
print(f" Output: {os.path.basename(output_f)}")
|
||||
|
||||
success = transfer_values_by_position(source_f, target_f, output_f)
|
||||
|
||||
if success:
|
||||
print(f"\nSUCCESS: Script finished. Output: '{os.path.basename(output_f)}'")
|
||||
else:
|
||||
print(f"\nERROR: Script failed. Please check messages above.")
|
||||
|
||||
return success
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
|
@@ -1,335 +0,0 @@
|
|||
import re
|
||||
import os
|
||||
import sys # Not strictly needed by this version but often kept from original
|
||||
import glob
|
||||
import pandas as pd # For Excel writing
|
||||
|
||||
# --- Functions for script operation ---
|
||||
|
||||
def find_working_directory():
|
||||
"""
|
||||
Finds the working directory.
|
||||
Defaults to current directory. Adapt if specific configuration is needed.
|
||||
"""
|
||||
print("Info: `find_working_directory_from_x1` is using the current directory.")
|
||||
return os.getcwd()
|
||||
|
||||
def extract_sections(content):
|
||||
"""
|
||||
Extracts UDT definitions, main declaration section, and initialization section from S7 AWL/DB content.
|
||||
Uses re.IGNORECASE and re.DOTALL (via ?is) for matching keywords across different casings and newlines.
|
||||
"""
|
||||
content = content.replace('\r\n', '\n') # Normalize line endings
|
||||
|
||||
udt_definitions_content = ""
|
||||
# Regex to find TYPE...END_TYPE blocks (UDT definitions)
|
||||
udt_matches = list(re.finditer(r'(?is)(TYPE\s+.*?\s+END_TYPE\s*\n?)', content))
|
||||
|
||||
content_after_udts = content
|
||||
if udt_matches:
|
||||
udt_definitions_content = "".join(match.group(0) for match in udt_matches)
|
||||
# Get content after the last UDT definition
|
||||
last_udt_end = udt_matches[-1].end()
|
||||
content_after_udts = content[last_udt_end:]
|
||||
|
||||
header_text = "" # Placeholder, not actively used in this script's comparison logic
|
||||
rest_of_content_for_struct = content_after_udts
|
||||
|
||||
# Try to find the main DATA_BLOCK header and the start of its STRUCT
|
||||
header_match = re.search(r'(?is)^(.*?(?:DATA_BLOCK.*?VERSION.*?\n))(.*?STRUCT)', content_after_udts)
|
||||
if header_match:
|
||||
# Content for further parsing starts at "STRUCT"
|
||||
rest_of_content_for_struct = content_after_udts[header_match.start(2):]
|
||||
else:
|
||||
# Fallback: find the first "STRUCT" if the specific header pattern isn't met
|
||||
header_fallback_match = re.search(r'(?is)(.*?)(STRUCT)', content_after_udts)
|
||||
if header_fallback_match:
|
||||
rest_of_content_for_struct = content_after_udts[header_fallback_match.start(2):]
|
||||
else:
|
||||
# If no STRUCT is found, declaration section will be empty
|
||||
print(f"Warning: No 'STRUCT' keyword found for main DB declarations in a content block.")
|
||||
|
||||
# Declaration section: from the found STRUCT up to BEGIN
|
||||
decl_match = re.search(r'(?is)STRUCT\s*(.*?)BEGIN', rest_of_content_for_struct)
|
||||
decl_section = decl_match.group(1).strip() if decl_match else ""
|
||||
|
||||
# Initialization section: from BEGIN up to END_DATA_BLOCK
|
||||
init_match = re.search(r'(?is)BEGIN\s*(.*?)END_DATA_BLOCK', rest_of_content_for_struct)
|
||||
init_section = init_match.group(1).strip() if init_match else ""
|
||||
|
||||
# Footer after END_DATA_BLOCK isn't used
|
||||
return udt_definitions_content, header_text, decl_section, init_section, ""
|
||||
|
||||
|
||||
def find_comparison_files_detailed(working_dir, data_suffix="_data", format_suffix="_format", updated_suffix_part="_updated"):
|
||||
"""Finds data, format, and _updated files based on naming conventions."""
|
||||
all_files_in_dir = []
|
||||
for ext_pattern in ["*.db", "*.awl", "*.txt"]: # Common S7 export extensions
|
||||
all_files_in_dir.extend(glob.glob(os.path.join(working_dir, ext_pattern)))
|
||||
# Normalize paths for consistent comparisons and ensure uniqueness
|
||||
all_files_in_dir = sorted(list(set(os.path.normpath(f) for f in all_files_in_dir)))
|
||||
|
||||
found_paths = {'data': None, 'format': None, 'updated': None}
|
||||
|
||||
def select_best_file(file_list):
|
||||
if not file_list: return None
|
||||
# Prioritize: .db, then .awl, then .txt
|
||||
file_list.sort(key=lambda x: ('.db' not in x.lower(), '.awl' not in x.lower(), '.txt' not in x.lower()))
|
||||
return file_list[0]
|
||||
|
||||
# Find _data file: contains data_suffix, does not contain updated_suffix_part
|
||||
data_candidates = [f for f in all_files_in_dir if data_suffix in os.path.basename(f).lower() and updated_suffix_part not in os.path.basename(f).lower()]
|
||||
found_paths['data'] = select_best_file(data_candidates)
|
||||
|
||||
# Find _format file: contains format_suffix, does not contain updated_suffix_part
|
||||
format_candidates = [f for f in all_files_in_dir if format_suffix in os.path.basename(f).lower() and updated_suffix_part not in os.path.basename(f).lower()]
|
||||
if found_paths['data'] and format_candidates: # Ensure it's not the same as _data file
|
||||
format_candidates = [f for f in format_candidates if f != found_paths['data']]
|
||||
found_paths['format'] = select_best_file(format_candidates)
|
||||
|
||||
# Find _updated file:
|
||||
# Strategy 1: Based on format_file name (most reliable if format_file found)
|
||||
if found_paths['format']:
|
||||
format_basename = os.path.basename(found_paths['format'])
|
||||
name_part, first_ext = os.path.splitext(format_basename)
|
||||
updated_basename_candidate = ""
|
||||
# Handle double extensions like ".db.txt" or ".awl.txt"
|
||||
if first_ext.lower() == ".txt" and ('.db' in name_part.lower() or '.awl' in name_part.lower()):
|
||||
base_name_for_main_ext, second_ext = os.path.splitext(name_part)
|
||||
updated_basename_candidate = base_name_for_main_ext + updated_suffix_part + second_ext + first_ext
|
||||
else: # Single extension
|
||||
updated_basename_candidate = name_part + updated_suffix_part + first_ext
|
||||
|
||||
potential_updated_path = os.path.join(working_dir, updated_basename_candidate)
|
||||
if os.path.exists(potential_updated_path) and potential_updated_path in all_files_in_dir:
|
||||
found_paths['updated'] = potential_updated_path
|
||||
|
||||
# Strategy 2: If not found by deriving from format_file, search more broadly
|
||||
if not found_paths['updated']:
|
||||
updated_candidates = [f for f in all_files_in_dir if updated_suffix_part in os.path.basename(f).lower()]
|
||||
if found_paths['format'] and updated_candidates: # Prefer updated file related to format file's base name
|
||||
format_base = os.path.basename(found_paths['format']).split(format_suffix)[0]
|
||||
updated_candidates = [f for f in updated_candidates if format_base in os.path.basename(f)]
|
||||
|
||||
# Exclude already identified data and format files
|
||||
if found_paths['data'] and updated_candidates: updated_candidates = [f for f in updated_candidates if f != found_paths['data']]
|
||||
if found_paths['format'] and updated_candidates: updated_candidates = [f for f in updated_candidates if f != found_paths['format']]
|
||||
found_paths['updated'] = select_best_file(updated_candidates)
|
||||
|
||||
print("Identified files for comparison:")
|
||||
for key, val in found_paths.items():
|
||||
print(f" {key.capitalize()} file: {os.path.basename(val) if val else 'Not found'}")
|
||||
return found_paths['data'], found_paths['format'], found_paths['updated']
|
||||
|
||||
|
||||
def get_variables_from_section_content(section_str, section_type="declaration"):
|
||||
""" Parses a declaration or initialization section string and returns a list of variable dicts. """
|
||||
variables = []
|
||||
idx = 0
|
||||
lines = section_str.replace('\r\n', '\n').split('\n')
|
||||
|
||||
for line_content in lines:
|
||||
line = line_content.strip()
|
||||
if not line or line.startswith('//'): continue # Skip empty or comment lines
|
||||
|
||||
line_upper = line.upper()
|
||||
# Skip lines that are purely structural (STRUCT, TYPE, END_STRUCT)
|
||||
# unless they also contain a full declaration/assignment on the same line.
|
||||
if (line_upper == 'STRUCT' or line_upper.startswith('TYPE ') or line_upper == 'END_STRUCT' or line_upper == 'BEGIN' or line_upper == 'END_DATA_BLOCK'):
|
||||
if not (':' in line and ';' in line or ':=' in line and ';' in line ): # if not also a var line
|
||||
continue
|
||||
|
||||
var_name, var_type, value = None, None, None
|
||||
|
||||
if section_type == "declaration": # Expect: VarName : VarType [:= InitialValue] ;
|
||||
if ':' in line and ';' in line:
|
||||
# Name: part before ':' (handles simple and "quoted" names)
|
||||
name_match = re.match(r'^\s*(\"(?:\\\"|[^\"])*\"|[a-zA-Z_][\w]*)', line, re.IGNORECASE)
|
||||
var_name = name_match.group(1).strip().replace('"', "") if name_match else None
|
||||
|
||||
# Type: part between ':' and potential ':=' or ';' (handles "UDT", simple, ARRAY)
|
||||
type_match = re.search(r':\s*(\"[^\"]+\"|[^:=;]+)', line, re.IGNORECASE)
|
||||
var_type = type_match.group(1).strip().replace('"', "") if type_match else None
|
||||
|
||||
# Value: part between ':=' and ';'
|
||||
assign_match = re.search(r':=\s*([^;]+)', line, re.IGNORECASE)
|
||||
if assign_match: value = assign_match.group(1).strip()
|
||||
|
||||
if not var_name or not var_type: continue # Must have name and type for a declaration
|
||||
else: continue # Not a declaration line by this rule
|
||||
|
||||
elif section_type == "initialization": # Expect: VarNameOrPath := Value ;
|
||||
if ':=' in line and ';' in line:
|
||||
# Name/Path: part before ':=' (handles "Quoted.Path", Simple.Path, Array[1].Path)
|
||||
name_match = re.match(r'^\s*(\"(?:\\\"|[^\"])*\"|[a-zA-Z_][\w"\[\],\.]*(?:\[.*?\]|\.[a-zA-Z_][\w"\[\],\.]*)*)\s*:=', line, re.IGNORECASE)
|
||||
var_name = name_match.group(1).strip().replace('"', "") if name_match else None
|
||||
|
||||
# Value: part between ':=' and ';'
|
||||
value_match = re.search(r':=\s*([^;]+)', line, re.IGNORECASE)
|
||||
value = value_match.group(1).strip() if value_match else None
|
||||
|
||||
if not var_name or value is None : continue # Must have name and value for assignment
|
||||
else: continue # Not an assignment line
|
||||
|
||||
if var_name is not None: # If a name was captured (and other conditions met), record it
|
||||
variables.append({
|
||||
"index": idx, "name": var_name, "type": var_type, "value": value,
|
||||
"original_line": line_content
|
||||
})
|
||||
idx += 1
|
||||
return variables
|
||||
|
||||
def process_file_for_vars(file_path):
|
||||
"""
|
||||
Reads a file, extracts main STRUCT declarations and BEGIN block initializations.
|
||||
UDT definitions themselves are not included in the returned `main_struct_decl_vars`.
|
||||
"""
|
||||
if not file_path or not os.path.exists(file_path):
|
||||
return [], [] # Return empty lists if file not found
|
||||
|
||||
try:
|
||||
with open(file_path, 'r', encoding='utf-8-sig') as f: # utf-8-sig handles BOM
|
||||
content = f.read()
|
||||
except Exception as e:
|
||||
print(f"Error reading file {file_path}: {e}")
|
||||
return [], []
|
||||
|
||||
# udt_definitions_content is extracted but not directly used for the comparison lists below
|
||||
_udt_definitions_content, _header, decl_content_main, init_content, _footer = extract_sections(content)
|
||||
|
||||
# "main_struct_decl_vars" are from the main DATA_BLOCK's STRUCT section (initial values).
|
||||
main_struct_decl_vars = get_variables_from_section_content(decl_content_main, "declaration")
|
||||
|
||||
# "begin_block_init_vars" are from the BEGIN...END_DATA_BLOCK section (current values).
|
||||
begin_block_init_vars = get_variables_from_section_content(init_content, "initialization")
|
||||
|
||||
return main_struct_decl_vars, begin_block_init_vars
|
||||
|
||||
|
||||
def generate_excel_comparison(data_file, format_file, updated_file, output_excel_path):
|
||||
"""Generates an Excel file with two sheets comparing variables from three source files."""
|
||||
|
||||
print(f"\nProcessing _data file: {os.path.basename(data_file) if data_file else 'N/A'}")
|
||||
data_decl_vars, data_init_vars = process_file_for_vars(data_file)
|
||||
print(f" Found {len(data_decl_vars)} declaration vars, {len(data_init_vars)} initialization vars in _data file.")
|
||||
|
||||
print(f"Processing _format file: {os.path.basename(format_file) if format_file else 'N/A'}")
|
||||
format_decl_vars, format_init_vars = process_file_for_vars(format_file)
|
||||
print(f" Found {len(format_decl_vars)} declaration vars, {len(format_init_vars)} initialization vars in _format file.")
|
||||
|
||||
print(f"Processing _updated file: {os.path.basename(updated_file) if updated_file else 'N/A'}")
|
||||
updated_decl_vars, updated_init_vars = process_file_for_vars(updated_file)
|
||||
print(f" Found {len(updated_decl_vars)} declaration vars, {len(updated_init_vars)} initialization vars in _updated file.")
|
||||
|
||||
|
||||
placeholder_var = {"name": "", "type": "", "value": "", "original_line": ""}
|
||||
# Define column order once, will be used for both sheets
|
||||
column_order = ["Variable Name (_data / _format)", "Data Type", "Value (_data)", "Value (_format)", "Value (_updated)"]
|
||||
|
||||
# --- Prepare data for "Declarations (Initial Values)" sheet ---
|
||||
decl_excel_rows = []
|
||||
# Determine max length for declaration rows based on non-empty lists
|
||||
decl_lengths = [len(lst) for lst in [data_decl_vars, format_decl_vars, updated_decl_vars] if lst is not None]
|
||||
max_decl_len = max(decl_lengths) if decl_lengths else 0
|
||||
|
||||
print(f"\nComparing {max_decl_len} positional declaration entries (STRUCT section)...")
|
||||
for i in range(max_decl_len):
|
||||
var_d = data_decl_vars[i] if data_decl_vars and i < len(data_decl_vars) else placeholder_var
|
||||
var_f = format_decl_vars[i] if format_decl_vars and i < len(format_decl_vars) else placeholder_var
|
||||
var_u = updated_decl_vars[i] if updated_decl_vars and i < len(updated_decl_vars) else placeholder_var
|
||||
|
||||
# Construct combined name
|
||||
name_d_str = var_d['name'] if var_d['name'] else ""
|
||||
name_f_str = var_f['name'] if var_f['name'] else ""
|
||||
combined_name = f"{name_d_str} / {name_f_str}".strip(" /")
|
||||
if not combined_name: combined_name = var_u['name'] or name_d_str or name_f_str # Fallback
|
||||
|
||||
# Determine Data Type: Priority: format, then updated, then data
|
||||
type_to_use = var_f['type'] or var_u['type'] or var_d['type'] or "N/A"
|
||||
|
||||
decl_excel_rows.append({
|
||||
"Variable Name (_data / _format)": combined_name,
|
||||
"Data Type": type_to_use,
|
||||
"Value (_data)": str(var_d['value']) if var_d['value'] is not None else "",
|
||||
"Value (_format)": str(var_f['value']) if var_f['value'] is not None else "",
|
||||
"Value (_updated)": str(var_u['value']) if var_u['value'] is not None else ""
|
||||
})
|
||||
df_declarations = pd.DataFrame(decl_excel_rows)
|
||||
if not df_declarations.empty: # Apply column order if DataFrame is not empty
|
||||
for col in column_order:
|
||||
if col not in df_declarations.columns: df_declarations[col] = "" # Ensure all columns exist
|
||||
df_declarations = df_declarations[column_order]
|
||||
|
||||
|
||||
# --- Prepare data for "Initializations (Current Values)" sheet ---
|
||||
init_excel_rows = []
|
||||
init_lengths = [len(lst) for lst in [data_init_vars, format_init_vars, updated_init_vars] if lst is not None]
|
||||
max_init_len = max(init_lengths) if init_lengths else 0
|
||||
|
||||
print(f"Comparing {max_init_len} positional initialization entries (BEGIN block)...")
|
||||
for i in range(max_init_len):
|
||||
var_d = data_init_vars[i] if data_init_vars and i < len(data_init_vars) else placeholder_var
|
||||
var_f = format_init_vars[i] if format_init_vars and i < len(format_init_vars) else placeholder_var
|
||||
var_u = updated_init_vars[i] if updated_init_vars and i < len(updated_init_vars) else placeholder_var
|
||||
|
||||
name_d_str = var_d['name'] if var_d['name'] else ""
|
||||
name_f_str = var_f['name'] if var_f['name'] else ""
|
||||
combined_name = f"{name_d_str} / {name_f_str}".strip(" /")
|
||||
if not combined_name: combined_name = var_u['name'] or name_d_str or name_f_str
|
||||
|
||||
init_excel_rows.append({
|
||||
"Variable Name (_data / _format)": combined_name,
|
||||
"Data Type": "N/A", # Type is not usually re-declared in initialization lines
|
||||
"Value (_data)": str(var_d['value']) if var_d['value'] is not None else "",
|
||||
"Value (_format)": str(var_f['value']) if var_f['value'] is not None else "",
|
||||
"Value (_updated)": str(var_u['value']) if var_u['value'] is not None else ""
|
||||
})
|
||||
df_initializations = pd.DataFrame(init_excel_rows)
|
||||
if not df_initializations.empty: # Apply column order
|
||||
for col in column_order:
|
||||
if col not in df_initializations.columns: df_initializations[col] = ""
|
||||
df_initializations = df_initializations[column_order]
|
||||
|
||||
# --- Write to Excel with two sheets ---
|
||||
try:
|
||||
with pd.ExcelWriter(output_excel_path, engine='openpyxl') as writer:
|
||||
if not df_declarations.empty:
|
||||
df_declarations.to_excel(writer, sheet_name='Declarations (Initial Values)', index=False)
|
||||
print(f"Written 'Declarations (Initial Values)' sheet with {len(df_declarations)} rows.")
|
||||
else:
|
||||
print("No data for 'Declarations (Initial Values)' sheet.")
|
||||
|
||||
if not df_initializations.empty:
|
||||
df_initializations.to_excel(writer, sheet_name='Initializations (Current Values)', index=False)
|
||||
print(f"Written 'Initializations (Current Values)' sheet with {len(df_initializations)} rows.")
|
||||
else:
|
||||
print("No data for 'Initializations (Current Values)' sheet.")
|
||||
|
||||
if df_declarations.empty and df_initializations.empty:
|
||||
print("No data written to Excel as both datasets are empty.")
|
||||
else:
|
||||
print(f"\nSuccessfully generated Excel comparison: {output_excel_path}")
|
||||
|
||||
except Exception as e:
|
||||
print(f"Error writing Excel file {output_excel_path}: {e}")
|
||||
|
||||
|
||||
def main_comparator():
|
||||
print("S7 Data Block Comparator to Excel (Multi-Sheet)")
|
||||
print("==============================================")
|
||||
working_dir = find_working_directory()
|
||||
print(f"Using working directory: {working_dir}")
|
||||
|
||||
data_f, format_f, updated_f = find_comparison_files_detailed(working_dir)
|
||||
|
||||
if not any([data_f, format_f, updated_f]): # Check if at least one relevant file was found
|
||||
print("\nError: Could not find a sufficient set of input files (_data, _format, _updated). Exiting.")
|
||||
return
|
||||
|
||||
output_filename = "S7_DB_Comparison_MultiSheet.xlsx"
|
||||
output_excel_file = os.path.join(working_dir, output_filename)
|
||||
|
||||
generate_excel_comparison(data_f, format_f, updated_f, output_excel_file)
|
||||
|
||||
if __name__ == "__main__":
|
||||
main_comparator()
|
|
@@ -48,6 +48,7 @@ class VariableInfo:
     children: List['VariableInfo'] = field(default_factory=list)
     is_udt_expanded_member: bool = False
     current_element_values: Optional[Dict[str, str]] = None
+    element_type: str = "SIMPLE_VAR"  # New field with default value
 
 @dataclass
 class UdtInfo:
@@ -147,9 +148,9 @@ class S7Parser:
             S7Parser._adjust_children_offsets(child.children, base_offset_add)
 
     def _parse_struct_members(self, lines: List[str], current_line_idx: int,
-                              parent_members_list: List[VariableInfo],
-                              active_context: OffsetContext,
-                              is_top_level_struct_in_block: bool = False) -> int:
+                              parent_members_list: List[VariableInfo],
+                              active_context: OffsetContext,
+                              is_top_level_struct_in_block: bool = False) -> int:
         idx_to_process = current_line_idx
         while idx_to_process < len(lines):
             original_line_text = lines[idx_to_process].strip()
@@ -166,9 +167,9 @@ class S7Parser:
             is_nested_end_struct = self.end_struct_regex.match(line_to_parse) and not is_top_level_struct_in_block
             is_main_block_end_struct = self.end_struct_regex.match(line_to_parse) and is_top_level_struct_in_block
             is_block_terminator = is_top_level_struct_in_block and \
-                                  (self.end_type_regex.match(line_to_parse) or \
-                                   self.end_db_regex.match(line_to_parse) or \
-                                   self.begin_regex.match(line_to_parse))
+                                  (self.end_type_regex.match(line_to_parse) or \
+                                   self.end_db_regex.match(line_to_parse) or \
+                                   self.begin_regex.match(line_to_parse))
 
             if is_nested_end_struct:
                 active_context.align_to_byte()
@@ -191,6 +192,17 @@ class S7Parser:
                                         data_type=clean_data_type,
                                         byte_offset=0, size_in_bytes=0,
                                         udt_source_name=udt_source_name_val)
+
+                # Set element_type based on what we know about the variable
+                if var_data['arraydims']:
+                    var_info.element_type = "ARRAY"
+                elif clean_data_type.upper() == "STRUCT":
+                    var_info.element_type = "STRUCT"
+                elif udt_source_name_val:
+                    var_info.element_type = "UDT_INSTANCE"
+                else:
+                    var_info.element_type = "SIMPLE_VAR"
+
                 if var_data.get('initval'): var_info.initial_value = var_data['initval'].strip()
                 if line_comment: var_info.comment = line_comment
                 num_array_elements = 1
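Editor's note: the classification added in the hunk above reads as a small decision table: array dimensions take precedence over an explicit STRUCT type, which takes precedence over a UDT reference, with SIMPLE_VAR as the fallback. A standalone restatement of that rule (an editorial sketch; `classify_element_type` is a hypothetical helper, the real code sets the field directly on the VariableInfo instance as shown above):

```python
def classify_element_type(array_dims, data_type: str, udt_source_name) -> str:
    """Mirror of the element_type rules introduced in this diff (sketch only)."""
    if array_dims:
        return "ARRAY"
    if data_type.upper() == "STRUCT":
        return "STRUCT"
    if udt_source_name:
        return "UDT_INSTANCE"
    return "SIMPLE_VAR"
```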
@@ -245,10 +257,10 @@ class S7Parser:
                     var_info.children.append(expanded_member)
                 parent_members_list.append(var_info)
             elif line_to_parse and \
-                 not self.struct_start_regex.match(line_to_parse) and \
-                 not is_main_block_end_struct and \
-                 not is_nested_end_struct and \
-                 not is_block_terminator :
+                 not self.struct_start_regex.match(line_to_parse) and \
+                 not is_main_block_end_struct and \
+                 not is_nested_end_struct and \
+                 not is_block_terminator :
                 print(f"DEBUG (_parse_struct_members): Line not parsed: Original='{original_line_text}' | Processed='{line_to_parse}'")
         return idx_to_process
 
@@ -633,20 +645,25 @@ def calculate_array_element_offset(var: VariableInfo, indices_str: str) -> float
     # Para tipos regulares, simplemente sumar el offset lineal * tamaño elemento
     return var.byte_offset + (linear_index * element_size)
 
 
 def flatten_db_structure(db_info: Dict[str, Any]) -> List[Dict[str, Any]]:
     """
-    Función genérica que aplana completamente una estructura de DB/UDT,
+    Función que aplana completamente una estructura de DB/UDT,
     expandiendo todas las variables anidadas, UDTs y elementos de array.
     Garantiza ordenamiento estricto por offset (byte.bit).
+    Añade punteros jerárquicos para acceso directo a las variables originales.
 
     Returns:
-        List[Dict]: Lista de variables aplanadas con todos sus atributos
-                    y un path completo, ordenada por offset estricto.
+        List[Dict]: Lista de variables aplanadas con todos sus atributos,
+                    rutas completas y punteros jerárquicos, ordenada por offset.
     """
     flat_variables = []
     processed_ids = set()  # Para evitar duplicados
 
-    def process_variable(var: Dict[str, Any], path_prefix: str = "", is_expansion: bool = False):
+    def process_variable(var: Dict[str, Any], path_prefix: str = "", is_expansion: bool = False, hierarchy_path=None):
+        # Inicializar hierarchy_path si es None
+        if hierarchy_path is None:
+            hierarchy_path = []
+
         # Identificador único para esta variable en este contexto
         var_id = f"{path_prefix}{var['name']}_{var['byte_offset']}"
@@ -661,7 +678,22 @@ def flatten_db_structure(db_info: Dict[str, Any]) -> List[Dict[str, Any]]:
        flat_var["full_path"] = f"{path_prefix}{var['name']}"
        flat_var["is_array_element"] = False  # Por defecto no es elemento de array

        # Determinar si es array con valores específicos
        # NUEVO: Guardar el camino jerárquico para acceso directo
        flat_var["_hierarchy_path"] = copy.deepcopy(hierarchy_path)

        # Preservar o inferir element_type
        if "element_type" not in flat_var:
            # Inferir tipo para compatibilidad hacia atrás
            if var.get("array_dimensions"):
                flat_var["element_type"] = "ARRAY"
            elif var.get("children") and var["data_type"].upper() == "STRUCT":
                flat_var["element_type"] = "STRUCT"
            elif var.get("udt_source_name"):
                flat_var["element_type"] = "UDT_INSTANCE"
            else:
                flat_var["element_type"] = "SIMPLE_VAR"

        # Determinar si es un array con valores específicos
        is_array = bool(var.get("array_dimensions"))
        has_array_values = is_array and var.get("current_element_values")
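For reference, each dict appended by process_variable ends up with roughly the fields sketched below; the values are invented, only the key names come from this function.

# Invented example of one flattened entry (shape only).
flat_var_example = {
    "name": "Speed",
    "full_path": "Motor.Speed",
    "data_type": "REAL",
    "byte_offset": 2.0,
    "address_display": "2.0",
    "element_type": "SIMPLE_VAR",
    "is_array_element": False,
    "_hierarchy_path": [
        {"type": "members", "index": 0},   # Motor within the DB
        {"type": "children", "index": 1},  # Speed within Motor
    ],
}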
@@ -671,7 +703,7 @@ def flatten_db_structure(db_info: Dict[str, Any]) -> List[Dict[str, Any]]:
        flat_var["address_display"] = format_address_for_display(var["byte_offset"], var.get("bit_size", 0))
        flat_variables.append(flat_var)

        # Si es array con valores específicos, expandir cada elemento como variable individual
        # Si es un array con valores específicos, expandir cada elemento como variable individual
        if has_array_values:
            for idx, element_data in var.get("current_element_values", {}).items():
                # Extraer valor y offset del elemento
@@ -692,6 +724,11 @@ def flatten_db_structure(db_info: Dict[str, Any]) -> List[Dict[str, Any]]:
                array_element["current_value"] = value
                array_element["byte_offset"] = element_offset  # Usar offset calculado
                array_element["address_display"] = format_address_for_display(element_offset, var.get("bit_size", 0))
                array_element["element_type"] = "ARRAY_ELEMENT"

                # Para elementos de array, guardamos el camino al array + índice
                array_element["_hierarchy_path"] = copy.deepcopy(hierarchy_path)
                array_element["_array_index"] = idx  # Para acceso directo al elemento específico

                # Eliminar current_element_values para evitar redundancia
                if "current_element_values" in array_element:
@@ -701,16 +738,19 @@ def flatten_db_structure(db_info: Dict[str, Any]) -> List[Dict[str, Any]]:

        # Procesar recursivamente todos los hijos
        if var.get("children"):
            for child in var.get("children", []):
            for i, child in enumerate(var.get("children", [])):
                child_hierarchy = copy.deepcopy(hierarchy_path)
                child_hierarchy.append({"type": "children", "index": i})
                process_variable(
                    child,
                    f"{path_prefix}{var['name']}.",
                    is_expansion=bool(var.get("udt_source_name"))
                    is_expansion=bool(var.get("udt_source_name")),
                    hierarchy_path=child_hierarchy
                )

    # Procesar todos los miembros desde el nivel superior
    for member in db_info.get("members", []):
        process_variable(member)
    for i, member in enumerate(db_info.get("members", [])):
        process_variable(member, hierarchy_path=[{"type": "members", "index": i}])

    # Ordenar estrictamente por offset byte.bit
    flat_variables.sort(key=lambda x: (
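The body of the sort key is cut off in this hunk; a plausible reconstruction, assuming the bit number is carried in the fractional part of byte_offset, is sketched below for illustration only.

# Hypothetical byte.bit sort key (the real lambda is truncated above).
flat_variables = [{"byte_offset": 2.3}, {"byte_offset": 0.1}, {"byte_offset": 2.0}]
flat_variables.sort(key=lambda x: (
    int(x["byte_offset"]),                                        # byte part
    int(round((x["byte_offset"] - int(x["byte_offset"])) * 10)),  # bit part
))
print([v["byte_offset"] for v in flat_variables])  # [0.1, 2.0, 2.3]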
@@ -720,6 +760,43 @@ def flatten_db_structure(db_info: Dict[str, Any]) -> List[Dict[str, Any]]:

    return flat_variables


def access_by_hierarchy_path(root_obj: Dict[str, Any], hierarchy_path: List[Dict[str, Any]]) -> Optional[Dict[str, Any]]:
    """
    Accede directamente a un elemento usando su ruta jerárquica.

    Args:
        root_obj: Objeto raíz (generalmente un DB) donde comienza la navegación
        hierarchy_path: Lista de pasos jerárquicos, cada uno con "type" e "index"

    Returns:
        El objeto encontrado o None si no se puede acceder
    """
    if not hierarchy_path:
        return None

    current = root_obj

    for path_step in hierarchy_path:
        container_type = path_step["type"]  # "members" o "children"
        index = path_step["index"]

        # Verificar que el contenedor existe
        if container_type not in current:
            print(f"Error: No se encontró el contenedor '{container_type}' en la ruta jerárquica")
            return None

        container = current[container_type]

        # Verificar que el índice es válido
        if not isinstance(container, list) or len(container) <= index:
            print(f"Error: Índice {index} fuera de rango en la ruta jerárquica")
            return None

        # Navegar al siguiente nivel
        current = container[index]

    return current


if __name__ == "__main__":
    working_dir = find_working_directory()
    print(f"Using working directory: {working_dir}")
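A self-contained sketch of what these hierarchy paths buy: the toy DB below is navigated with the same two-key steps access_by_hierarchy_path expects, and the returned reference can be mutated in place (names and values are invented).

# Toy structure in the same members/children shape used above (invented data).
db = {
    "name": "DB_Example",
    "members": [
        {"name": "Motor", "data_type": "STRUCT",
         "children": [{"name": "Speed", "data_type": "REAL"}]},
    ],
}

def follow(root, hierarchy_path):
    # Same navigation loop as access_by_hierarchy_path, without the error reporting.
    current = root
    for step in hierarchy_path:
        current = current[step["type"]][step["index"]]
    return current

path = [{"type": "members", "index": 0}, {"type": "children", "index": 0}]
node = follow(db, path)
node["current_value"] = "1450.0"  # the original nested dict is updated in place
print(db["members"][0]["children"][0]["current_value"])  # 1450.0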
@@ -147,7 +147,8 @@ def main():

    if data_from_json.get("dbs"):
        for db_to_document in data_from_json["dbs"]:
            excel_output_filename = os.path.join(documentation_dir, f"{current_json_filename}_{db_to_document['name'].replace('"', '')}.xlsx")
            ## excel_output_filename = os.path.join(documentation_dir, f"{current_json_filename}_{db_to_document['name'].replace('"', '')}.xlsx")
            excel_output_filename = os.path.join(documentation_dir, f"{current_json_filename}.xlsx")

            print(f"Generando documentación Excel para DB: '{db_to_document['name']}' (desde {current_json_filename}) -> {excel_output_filename}")
            try:
@@ -15,7 +15,7 @@ sys.path.append(script_root)
from backend.script_utils import load_configuration

# Importar desde x3
from x3 import S7Parser, find_working_directory, custom_json_serializer, flatten_db_structure, format_address_for_display
from x3 import S7Parser, find_working_directory, custom_json_serializer, flatten_db_structure, format_address_for_display, access_by_hierarchy_path
from x4 import format_data_type_for_source

# Importar desde x4 para generar archivos
@@ -132,174 +132,6 @@ def compare_structures_by_offset(data_vars: List[Dict], format_vars: List[Dict])
    return len(issues) == 0, issues


def create_updated_json(data_json: Dict, format_json: Dict) -> Dict:
    """
    Crea JSON actualizado basado en la estructura de _format con valores de _data.
    Utiliza offset como clave principal para encontrar variables correspondientes.
    Reporta errores si no se encuentra un offset correspondiente.
    """
    # Copia profunda de format_json para no modificar el original
    updated_json = copy.deepcopy(format_json)

    # Procesar cada DB
    for db_idx, format_db in enumerate(format_json.get("dbs", [])):
        # Buscar el DB correspondiente en data_json
        data_db = next((db for db in data_json.get("dbs", []) if db["name"] == format_db["name"]), None)
        if not data_db:
            print(f"Error: No se encontró DB '{format_db['name']}' en data_json")
            continue  # No hay DB correspondiente en data_json

        # Aplanar variables de ambos DBs
        flat_data_vars = flatten_db_structure(data_db)
        flat_format_vars = flatten_db_structure(format_db)

        # Crear mapa de offset a variable para data
        data_by_offset = {var["byte_offset"]: var for var in flat_data_vars}

        # Para cada variable en format, buscar su correspondiente en data por offset
        for format_var in flat_format_vars:
            offset = format_var["byte_offset"]
            path = format_var["full_path"]

            # Buscar la variable correspondiente en data_json por offset
            if offset in data_by_offset:
                data_var = data_by_offset[offset]

                # Encontrar la variable original en la estructura jerárquica
                path_parts = format_var["full_path"].split('.')
                current_node = updated_json["dbs"][db_idx]

                # Variable para rastrear si se encontró la ruta
                path_found = True

                # Navegar la jerarquía hasta encontrar el nodo padre
                for i in range(len(path_parts) - 1):
                    if "members" in current_node:
                        # Buscar el miembro correspondiente
                        member_name = path_parts[i]
                        matching_members = [m for m in current_node["members"] if m["name"] == member_name]
                        if matching_members:
                            current_node = matching_members[0]
                        else:
                            print(f"Error: No se encontró el miembro '{member_name}' en la ruta '{path}'")
                            path_found = False
                            break  # No se encontró la ruta
                    elif "children" in current_node:
                        # Buscar el hijo correspondiente
                        child_name = path_parts[i]
                        matching_children = [c for c in current_node["children"] if c["name"] == child_name]
                        if matching_children:
                            current_node = matching_children[0]
                        else:
                            print(f"Error: No se encontró el hijo '{child_name}' en la ruta '{path}'")
                            path_found = False
                            break  # No se encontró la ruta
                    else:
                        print(f"Error: No se puede navegar más en la ruta '{path}', nodo actual no tiene members ni children")
                        path_found = False
                        break  # No se puede navegar más

                # Si encontramos el nodo padre, actualizar el hijo
                if path_found and ("members" in current_node or "children" in current_node):
                    target_list = current_node.get("members", current_node.get("children", []))
                    target_name = path_parts[-1]

                    # Si es un elemento de array, extraer el nombre base y el índice
                    if '[' in target_name and ']' in target_name:
                        base_name = target_name.split('[')[0]
                        index_str = target_name[target_name.find('[')+1:target_name.find(']')]

                        # Buscar el array base
                        array_var = next((var for var in target_list if var["name"] == base_name), None)
                        if array_var:
                            # Asegurarse que existe current_element_values
                            if "current_element_values" not in array_var:
                                array_var["current_element_values"] = {}

                            # Copiar el valor del elemento del array
                            if "current_value" in data_var:
                                array_var["current_element_values"][index_str] = {
                                    "value": data_var["current_value"],
                                    "offset": data_var["byte_offset"]
                                }
                    else:
                        # Buscar la variable a actualizar
                        target_var_found = False
                        for target_var in target_list:
                            if target_var["name"] == target_name:
                                target_var_found = True

                                # Limpiar y copiar initial_value si existe
                                if "initial_value" in target_var:
                                    del target_var["initial_value"]
                                if "initial_value" in data_var and data_var["initial_value"] is not None:
                                    target_var["initial_value"] = data_var["initial_value"]

                                # Limpiar y copiar current_value si existe
                                if "current_value" in target_var:
                                    del target_var["current_value"]
                                if "current_value" in data_var and data_var["current_value"] is not None:
                                    target_var["current_value"] = data_var["current_value"]

                                # Limpiar y copiar current_element_values si existe
                                if "current_element_values" in target_var:
                                    del target_var["current_element_values"]
                                if "current_element_values" in data_var and data_var["current_element_values"]:
                                    target_var["current_element_values"] = copy.deepcopy(data_var["current_element_values"])

                                break

                        if not target_var_found and not ('[' in target_name and ']' in target_name):
                            print(f"Error: No se encontró la variable '{target_name}' en la ruta '{path}'")
            else:
                # El offset no existe en data_json, reportar error
                print(f"Error: Offset {offset} (para '{path}') no encontrado en los datos source (_data)")

                # Eliminar valores si es una variable que no es elemento de array
                if '[' not in path or ']' not in path:
                    # Encontrar la variable original en la estructura jerárquica
                    path_parts = path.split('.')
                    current_node = updated_json["dbs"][db_idx]

                    # Navegar hasta el nodo padre para limpiar valores
                    path_found = True
                    for i in range(len(path_parts) - 1):
                        if "members" in current_node:
                            member_name = path_parts[i]
                            matching_members = [m for m in current_node["members"] if m["name"] == member_name]
                            if matching_members:
                                current_node = matching_members[0]
                            else:
                                path_found = False
                                break
                        elif "children" in current_node:
                            child_name = path_parts[i]
                            matching_children = [c for c in current_node["children"] if c["name"] == child_name]
                            if matching_children:
                                current_node = matching_children[0]
                            else:
                                path_found = False
                                break
                        else:
                            path_found = False
                            break

                    if path_found and ("members" in current_node or "children" in current_node):
                        target_list = current_node.get("members", current_node.get("children", []))
                        target_name = path_parts[-1]

                        for target_var in target_list:
                            if target_var["name"] == target_name:
                                # Eliminar valores iniciales y actuales
                                if "initial_value" in target_var:
                                    del target_var["initial_value"]
                                if "current_value" in target_var:
                                    del target_var["current_value"]
                                if "current_element_values" in target_var:
                                    del target_var["current_element_values"]
                                break

    return updated_json


def process_updated_json(updated_json: Dict, updated_json_path: str, working_dir: str, documentation_dir: str, original_format_file: str):
    """
@@ -347,11 +179,138 @@ def process_updated_json(updated_json: Dict, updated_json_path: str, working_dir
    except Exception as e:
        print(f"Error al generar archivo S7 para {base_name}: {e}")


def create_updated_json(data_json: Dict, format_json: Dict) -> Dict:
    """
    Crea un JSON actualizado basado en la estructura de _format con valores de _data.
    Usa punteros jerárquicos para acceso directo a variables, evitando errores de tipo.

    Args:
        data_json: JSON con los datos fuente (_data)
        format_json: JSON con la estructura y nombres (_format)

    Returns:
        JSON actualizado con estructura de format_json y valores de data_json
    """
    # Copia profunda para no modificar el original
    updated_json = copy.deepcopy(format_json)

    # Procesar cada DB
    for db_idx, format_db in enumerate(format_json.get("dbs", [])):
        # Buscar el DB correspondiente en data_json
        data_db = next((db for db in data_json.get("dbs", []) if db["name"] == format_db["name"]), None)
        if not data_db:
            print(f"Error: No se encontró DB '{format_db['name']}' en data_json")
            continue

        # Aplanar variables de ambos DBs
        flat_data_vars = flatten_db_structure(data_db)
        flat_format_vars = flatten_db_structure(format_db)

        print(f"Procesando DB '{format_db['name']}': {len(flat_format_vars)} variables en _format, {len(flat_data_vars)} variables en _data")

        # Crear mapa de offset a variable para data - solo incluir tipos de valor
        data_by_offset = {
            var["byte_offset"]: var for var in flat_data_vars
            if var.get("element_type") in ["SIMPLE_VAR", "ARRAY_ELEMENT"]
        }

        # Contar variables actualizadas para estadísticas
        variables_updated = 0
        variables_missing = 0

        # Para cada variable en format, buscar su correspondiente en data por offset
        for format_var in flat_format_vars:
            # Solo procesar tipos de valor (variables y elementos de array)
            if format_var.get("element_type") not in ["SIMPLE_VAR", "ARRAY_ELEMENT"]:
                continue

            offset = format_var["byte_offset"]
            path = format_var["full_path"]

            # Encontrar la variable correspondiente en data_json por offset
            if offset in data_by_offset:
                data_var = data_by_offset[offset]

                # Verificar compatibilidad de tipos
                format_element_type = format_var.get("element_type")
                data_element_type = data_var.get("element_type")

                if format_element_type != data_element_type and not (
                    format_element_type in ["SIMPLE_VAR", "ARRAY_ELEMENT"] and
                    data_element_type in ["SIMPLE_VAR", "ARRAY_ELEMENT"]
                ):
                    print(f"Advertencia: Tipos no compatibles en offset {offset}: {format_element_type} vs {data_element_type}")
                    variables_missing += 1
                    continue

                # Usar el puntero jerárquico para acceso directo
                hierarchy_path = format_var.get("_hierarchy_path")
                if not hierarchy_path:
                    print(f"Error: No se encontró ruta jerárquica para {path}")
                    variables_missing += 1
                    continue

                # Caso especial para elementos de array
                if format_var.get("is_array_element") and "_array_index" in format_var:
                    # Obtener el array padre
                    array_var = access_by_hierarchy_path(updated_json["dbs"][db_idx], hierarchy_path)
                    if array_var and array_var.get("element_type") == "ARRAY":
                        # Asegurar que current_element_values existe
                        if "current_element_values" not in array_var:
                            array_var["current_element_values"] = {}

                        # Copiar el valor del elemento
                        array_index = format_var["_array_index"]
                        if "current_value" in data_var:
                            array_var["current_element_values"][array_index] = {
                                "value": data_var["current_value"],
                                "offset": data_var["byte_offset"]
                            }
                        variables_updated += 1
                    else:
                        print(f"Error: El nodo padre para el elemento de array {path} no es un array válido")
                        variables_missing += 1
                else:
                    # Para variables normales, acceder directamente
                    target_var = access_by_hierarchy_path(updated_json["dbs"][db_idx], hierarchy_path)
                    if target_var and target_var.get("element_type") in ["SIMPLE_VAR", "ARRAY"]:
                        # Copiar initial_value si existe
                        if "initial_value" in target_var:
                            del target_var["initial_value"]
                        if "initial_value" in data_var and data_var["initial_value"] is not None:
                            target_var["initial_value"] = data_var["initial_value"]

                        # Copiar current_value si existe
                        if "current_value" in target_var:
                            del target_var["current_value"]
                        if "current_value" in data_var and data_var["current_value"] is not None:
                            target_var["current_value"] = data_var["current_value"]

                        # Para variables tipo ARRAY, también copiar current_element_values
                        if target_var.get("element_type") == "ARRAY" and "current_element_values" in data_var:
                            if "current_element_values" in target_var:
                                del target_var["current_element_values"]
                            if data_var["current_element_values"]:
                                target_var["current_element_values"] = copy.deepcopy(data_var["current_element_values"])

                        variables_updated += 1
                    else:
                        type_info = f" (tipo: {target_var.get('element_type')})" if target_var else ""
                        print(f"Error: No se pudo acceder o actualizar la variable {path}{type_info}")
                        variables_missing += 1
            else:
                # Offset no encontrado en data_json, reportar error
                print(f"Error: Offset {offset} (para '{path}') no encontrado en los datos source (_data)")
                variables_missing += 1

        print(f"Estadísticas para DB '{format_db['name']}': {variables_updated} variables actualizadas, {variables_missing} no encontradas")

    return updated_json


def generate_comparison_excel(format_json: Dict, data_json: Dict, updated_json: Dict, excel_filename: str):
    """
    Genera un archivo Excel con dos hojas que comparan los valores iniciales y actuales
    entre los archivos format_json, data_json y updated_json.
    Filtra STRUCTs y solo compara variables con valores reales.
    Genera un archivo Excel comparando valores entre format, data y updated JSONs.
    Usa flatten_db_structure con punteros jerárquicos y filtra por tipos de elemento.

    Args:
        format_json: JSON con la estructura y nombres de formato
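Both the updater above and the Excel comparison that follows build the same offset-keyed lookup; the standalone illustration below (invented records) shows why only SIMPLE_VAR and ARRAY_ELEMENT entries are indexed: container entries share offsets with their first member.

# Invented records: a STRUCT container and its first member share offset 0.0.
flat_data_vars = [
    {"full_path": "Motor", "byte_offset": 0.0, "element_type": "STRUCT"},
    {"full_path": "Motor.Speed", "byte_offset": 0.0, "element_type": "SIMPLE_VAR", "current_value": "1450.0"},
    {"full_path": "Alarms[1]", "byte_offset": 4.0, "element_type": "ARRAY_ELEMENT", "current_value": "TRUE"},
]
data_by_offset = {
    var["byte_offset"]: var for var in flat_data_vars
    if var.get("element_type") in ["SIMPLE_VAR", "ARRAY_ELEMENT"]
}
print(data_by_offset[0.0]["full_path"])  # Motor.Speed, the STRUCT row was never indexed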
@@ -361,217 +320,269 @@ def generate_comparison_excel(format_json: Dict, data_json: Dict, updated_json:
    """
    import openpyxl
    from openpyxl.utils import get_column_letter
    from openpyxl.styles import PatternFill, Font
    from openpyxl.styles import PatternFill, Font, Alignment, Border, Side

    # Crear un nuevo libro de Excel
    workbook = openpyxl.Workbook()
    sheet = workbook.active
    sheet.title = "Value_Comparison"

    # Definir estilos para resaltar diferencias
    diff_fill = PatternFill(start_color="FFFF00", end_color="FFFF00", fill_type="solid")  # Amarillo
    # Definir estilos
    diff_fill = PatternFill(start_color="FFFF00", end_color="FFFF00", fill_type="solid")
    type_mismatch_fill = PatternFill(start_color="FF9999", end_color="FF9999", fill_type="solid")  # Light red
    header_font = Font(bold=True)
    header_fill = PatternFill(start_color="DDDDDD", end_color="DDDDDD", fill_type="solid")
    thin_border = Border(left=Side(style='thin'), right=Side(style='thin'),
                         top=Side(style='thin'), bottom=Side(style='thin'))

    # Configurar encabezados
    headers = ["Address", "Name", "Type", "Element Type",
               "Format Initial", "Data Initial", "Updated Initial",
               "Format Current", "Data Current", "Updated Current",
               "Type Match", "Value Differences"]

    for col_num, header in enumerate(headers, 1):
        cell = sheet.cell(row=1, column=col_num, value=header)
        cell.font = header_font
        cell.fill = header_fill
        cell.border = thin_border
        cell.alignment = Alignment(horizontal='center')

    # Congelar primera fila
    sheet.freeze_panes = "A2"

    current_row = 2

    # Procesar cada DB
    for db_idx, format_db in enumerate(format_json.get("dbs", [])):
        # Buscar los DBs correspondientes
        db_name = format_db["name"]
        data_db = next((db for db in data_json.get("dbs", []) if db["name"] == db_name), None)
        updated_db = next((db for db in updated_json.get("dbs", []) if db["name"] == db_name), None)

        if not data_db or not updated_db:
            print(f"Error: No se encontró el DB '{db_name}' en alguno de los archivos JSON")
            print(f"Error: DB '{db_name}' no encontrado en alguno de los archivos JSON")
            continue

        # Crear hojas para valores iniciales y actuales para este DB
        initial_sheet = workbook.active if db_idx == 0 else workbook.create_sheet()
        initial_sheet.title = f"{db_name}_Initial"[:31]  # Limitar longitud del nombre de hoja
        # Añadir título con el nombre del DB
        sheet.merge_cells(f'A{current_row}:L{current_row}')
        header_cell = sheet.cell(row=current_row, column=1, value=f"DB: {db_name}")
        header_cell.font = Font(bold=True, size=12)
        header_cell.fill = PatternFill(start_color="CCCCFF", end_color="CCCCFF", fill_type="solid")  # Light blue
        header_cell.alignment = Alignment(horizontal='center')
        current_row += 1

        current_sheet = workbook.create_sheet()
        current_sheet.title = f"{db_name}_Current"[:31]

        # Aplanar variables de los tres DBs
        # Obtener variables aplanadas de todas las fuentes
        flat_format_vars = flatten_db_structure(format_db)
        flat_data_vars = flatten_db_structure(data_db)
        flat_updated_vars = flatten_db_structure(updated_db)

        # Filtrar STRUCTs - solo trabajamos con variables que tienen valores reales
        flat_format_vars = [var for var in flat_format_vars
                            if var["data_type"].upper() != "STRUCT" and not var.get("children")]
        # Crear mapas por offset para búsqueda rápida - FILTRAR por element_type
        data_by_offset = {
            var["byte_offset"]: var for var in flat_data_vars
            if var.get("element_type") in ["SIMPLE_VAR", "ARRAY_ELEMENT"]
        }

        # Crear mapas de offset a variable para búsqueda rápida
        data_by_offset = {var["byte_offset"]: var for var in flat_data_vars
                          if var["data_type"].upper() != "STRUCT" and not var.get("children")}
        updated_by_offset = {var["byte_offset"]: var for var in flat_updated_vars
                             if var["data_type"].upper() != "STRUCT" and not var.get("children")}

        # Configurar encabezados para la hoja de valores iniciales
        headers_initial = ["Address", "Name", "Type", "Format Initial", "Data Initial", "Updated Initial", "Difference"]
        for col_num, header in enumerate(headers_initial, 1):
            cell = initial_sheet.cell(row=1, column=col_num, value=header)
            cell.font = header_font

        # Configurar encabezados para la hoja de valores actuales
        headers_current = ["Address", "Name", "Type", "Format Current", "Data Current", "Updated Current", "Difference"]
        for col_num, header in enumerate(headers_current, 1):
            cell = current_sheet.cell(row=1, column=col_num, value=header)
            cell.font = header_font

        # Llenar las hojas con datos
        initial_row = 2
        current_row = 2
        updated_by_offset = {
            var["byte_offset"]: var for var in flat_updated_vars
            if var.get("element_type") in ["SIMPLE_VAR", "ARRAY_ELEMENT"]
        }

        # Procesar cada variable desde format_json
        for format_var in flat_format_vars:
            # Omitir tipos que no son valores (STRUCT, UDT_INSTANCE)
            element_type = format_var.get("element_type", "UNKNOWN")
            if element_type not in ["SIMPLE_VAR", "ARRAY_ELEMENT"]:
                continue

            offset = format_var["byte_offset"]
            path = format_var["full_path"]
            data_type = format_data_type_for_source(format_var)
            address = format_var.get("address_display", format_address_for_display(offset, format_var.get("bit_size", 0)))

            # Obtener variables correspondientes por offset
            # Encontrar variables correspondientes por offset
            data_var = data_by_offset.get(offset)
            updated_var = updated_by_offset.get(offset)

            # Procesar valores iniciales (solo si la variable puede tener initial_value)
            format_initial = format_var.get("initial_value", "")
            data_initial = data_var.get("initial_value", "") if data_var else ""
            updated_initial = updated_var.get("initial_value", "") if updated_var else ""
            # Comparar tipos de elemento
            data_element_type = data_var.get("element_type", "MISSING") if data_var else "MISSING"
            updated_element_type = updated_var.get("element_type", "MISSING") if updated_var else "MISSING"

            # Solo incluir en la hoja de valores iniciales si al menos uno tiene valor inicial
            if format_initial or data_initial or updated_initial:
                # Determinar si hay diferencias en valores iniciales
                has_initial_diff = (format_initial != data_initial or
                                    format_initial != updated_initial or
                                    data_initial != updated_initial)

                # Escribir datos de valores iniciales
                initial_sheet.cell(row=initial_row, column=1, value=address)
                initial_sheet.cell(row=initial_row, column=2, value=path)
                initial_sheet.cell(row=initial_row, column=3, value=data_type)
                initial_sheet.cell(row=initial_row, column=4, value=str(format_initial))
                initial_sheet.cell(row=initial_row, column=5, value=str(data_initial))
                initial_sheet.cell(row=initial_row, column=6, value=str(updated_initial))

                # Resaltar diferencias en valores iniciales
                if has_initial_diff:
                    initial_sheet.cell(row=initial_row, column=7, value="Sí")
                    for col in range(4, 7):
                        initial_sheet.cell(row=initial_row, column=col).fill = diff_fill
            # Determinar compatibilidad de tipos
            type_match = "Yes"
            if data_var and element_type != data_element_type:
                # Verificar tipos compatibles
                if (element_type in ["SIMPLE_VAR", "ARRAY_ELEMENT"] and
                        data_element_type in ["SIMPLE_VAR", "ARRAY_ELEMENT"]):
                    type_match = "Compatible"
                else:
                    initial_sheet.cell(row=initial_row, column=7, value="No")
                    type_match = "No"
            elif not data_var:
                type_match = "Missing"

                initial_row += 1
            # Obtener valores (con cadenas vacías por defecto)
            format_initial = str(format_var.get("initial_value", ""))
            data_initial = str(data_var.get("initial_value", "")) if data_var else ""
            updated_initial = str(updated_var.get("initial_value", "")) if updated_var else ""

            # Procesar valores actuales
            format_current = format_var.get("current_value", "")
            data_current = data_var.get("current_value", "") if data_var else ""
            updated_current = updated_var.get("current_value", "") if updated_var else ""
            format_current = str(format_var.get("current_value", ""))
            data_current = str(data_var.get("current_value", "")) if data_var else ""
            updated_current = str(updated_var.get("current_value", "")) if updated_var else ""

            # Solo incluir en la hoja de valores actuales si al menos uno tiene valor actual
            if format_current or data_current or updated_current:
                # Determinar si hay diferencias en valores actuales
                has_current_diff = (format_current != data_current or
                                    format_current != updated_current or
                                    data_current != updated_current)
            # Verificar diferencias
            has_initial_diff = (format_initial != data_initial or
                                format_initial != updated_initial or
                                data_initial != updated_initial)

                # Escribir datos de valores actuales
                current_sheet.cell(row=current_row, column=1, value=address)
                current_sheet.cell(row=current_row, column=2, value=path)
                current_sheet.cell(row=current_row, column=3, value=data_type)
                current_sheet.cell(row=current_row, column=4, value=str(format_current))
                current_sheet.cell(row=current_row, column=5, value=str(data_current))
                current_sheet.cell(row=current_row, column=6, value=str(updated_current))
            has_current_diff = (format_current != data_current or
                                format_current != updated_current or
                                data_current != updated_current)

                # Resaltar diferencias en valores actuales
                if has_current_diff:
                    current_sheet.cell(row=current_row, column=7, value="Sí")
                    for col in range(4, 7):
                        current_sheet.cell(row=current_row, column=col).fill = diff_fill
                else:
                    current_sheet.cell(row=current_row, column=7, value="No")
            # Crear descripción detallada de diferencias
            diff_desc = []
            if has_initial_diff:
                diff_desc.append("Initial values differ")
            if has_current_diff:
                diff_desc.append("Current values differ")
            if not diff_desc:
                diff_desc.append("None")

                current_row += 1
            # Escribir datos
            sheet.cell(row=current_row, column=1, value=address)
            sheet.cell(row=current_row, column=2, value=path)
            sheet.cell(row=current_row, column=3, value=data_type)
            sheet.cell(row=current_row, column=4, value=element_type)
            sheet.cell(row=current_row, column=5, value=format_initial)
            sheet.cell(row=current_row, column=6, value=data_initial)
            sheet.cell(row=current_row, column=7, value=updated_initial)
            sheet.cell(row=current_row, column=8, value=format_current)
            sheet.cell(row=current_row, column=9, value=data_current)
            sheet.cell(row=current_row, column=10, value=updated_current)
            sheet.cell(row=current_row, column=11, value=type_match)
            sheet.cell(row=current_row, column=12, value=", ".join(diff_desc))

            # Si es un array, procesamos también sus elementos
            if format_var.get("current_element_values") or (data_var and data_var.get("current_element_values")) or (updated_var and updated_var.get("current_element_values")):
                format_elements = format_var.get("current_element_values", {})
                data_elements = data_var.get("current_element_values", {}) if data_var else {}
                updated_elements = updated_var.get("current_element_values", {}) if updated_var else {}
            # Añadir bordes a todas las celdas
            for col in range(1, 13):
                sheet.cell(row=current_row, column=col).border = thin_border

                # Unir todos los índices disponibles
                all_indices = set(list(format_elements.keys()) +
                                  list(data_elements.keys()) +
                                  list(updated_elements.keys()))
            # Resaltar diferencias
            if has_initial_diff:
                for col in range(5, 8):
                    sheet.cell(row=current_row, column=col).fill = diff_fill

                # Ordenar índices numéricamente
                sorted_indices = sorted(all_indices, key=lambda x: [int(i) for i in x.split(',')]) if all_indices else []
            if has_current_diff:
                for col in range(8, 11):
                    sheet.cell(row=current_row, column=col).fill = diff_fill

                for idx in sorted_indices:
                    elem_path = f"{path}[{idx}]"
            # Resaltar incompatibilidades de tipo
            if type_match == "No" or type_match == "Missing":
                sheet.cell(row=current_row, column=11).fill = type_mismatch_fill

                    # Valores actuales para elementos de array
                    format_elem_val = ""
                    if idx in format_elements:
                        if isinstance(format_elements[idx], dict) and "value" in format_elements[idx]:
                            format_elem_val = format_elements[idx]["value"]
                        else:
                            format_elem_val = format_elements[idx]
            current_row += 1

                    data_elem_val = ""
                    if idx in data_elements:
                        if isinstance(data_elements[idx], dict) and "value" in data_elements[idx]:
                            data_elem_val = data_elements[idx]["value"]
                        else:
                            data_elem_val = data_elements[idx]
    # Añadir filtro a los encabezados
    sheet.auto_filter.ref = f"A1:L{current_row-1}"

                    updated_elem_val = ""
                    if idx in updated_elements:
                        if isinstance(updated_elements[idx], dict) and "value" in updated_elements[idx]:
                            updated_elem_val = updated_elements[idx]["value"]
                        else:
                            updated_elem_val = updated_elements[idx]
    # Auto-ajustar anchos de columna
    for col_idx, column_cells in enumerate(sheet.columns, 1):
        max_length = 3
        column = get_column_letter(col_idx)
        for cell in column_cells:
            try:
                if len(str(cell.value)) > max_length:
                    max_length = len(str(cell.value))
            except:
                pass
        adjusted_width = min(max_length, 20)  # Limitar ancho máximo
        sheet.column_dimensions[column].width = adjusted_width

                    # Determinar si hay diferencias
                    has_elem_diff = (str(format_elem_val) != str(data_elem_val) or
                                     str(format_elem_val) != str(updated_elem_val) or
                                     str(data_elem_val) != str(updated_elem_val))
    # Añadir hoja de resumen
    summary_sheet = workbook.create_sheet(title="Summary")
    summary_sheet.column_dimensions['A'].width = 30
    summary_sheet.column_dimensions['B'].width = 15
    summary_sheet.column_dimensions['C'].width = 50

                    # Escribir datos de elementos de array (solo en hoja de valores actuales)
                    current_sheet.cell(row=current_row, column=1, value=address)
                    current_sheet.cell(row=current_row, column=2, value=elem_path)
                    current_sheet.cell(row=current_row, column=3, value=data_type.replace("ARRAY", "").strip())
                    current_sheet.cell(row=current_row, column=4, value=str(format_elem_val))
                    current_sheet.cell(row=current_row, column=5, value=str(data_elem_val))
                    current_sheet.cell(row=current_row, column=6, value=str(updated_elem_val))
    # Añadir encabezado al resumen
    summary_headers = ["Database", "Value Variables", "Notes"]
    for col_num, header in enumerate(summary_headers, 1):
        cell = summary_sheet.cell(row=1, column=col_num, value=header)
        cell.font = header_font
        cell.fill = header_fill

                    # Resaltar diferencias
                    if has_elem_diff:
                        current_sheet.cell(row=current_row, column=7, value="Sí")
                        for col in range(4, 7):
                            current_sheet.cell(row=current_row, column=col).fill = diff_fill
                    else:
                        current_sheet.cell(row=current_row, column=7, value="No")
    # Añadir datos de resumen
    summary_row = 2
    for db_idx, format_db in enumerate(format_json.get("dbs", [])):
        db_name = format_db["name"]
        data_db = next((db for db in data_json.get("dbs", []) if db["name"] == db_name), None)
        updated_db = next((db for db in updated_json.get("dbs", []) if db["name"] == db_name), None)

                    current_row += 1
        if not data_db or not updated_db:
            continue

        # Auto-ajustar anchos de columna
        for sheet in [initial_sheet, current_sheet]:
            for col_idx, column_cells in enumerate(sheet.columns, 1):
                max_length = 0
                column = get_column_letter(col_idx)
                for cell in column_cells:
                    try:
                        if len(str(cell.value)) > max_length:
                            max_length = len(str(cell.value))
                    except:
                        pass
                adjusted_width = min(max_length + 2, 100)  # Limitar ancho máximo
                sheet.column_dimensions[column].width = adjusted_width
        flat_format_vars = flatten_db_structure(format_db)
        flat_data_vars = flatten_db_structure(data_db)

        # Filtrar solo variables de valor (SIMPLE_VAR y ARRAY_ELEMENT)
        format_value_vars = [var for var in flat_format_vars
                             if var.get("element_type") in ["SIMPLE_VAR", "ARRAY_ELEMENT"]]

        # Contar por tipo de elemento
        format_type_counts = {}
        for var in flat_format_vars:
            element_type = var.get("element_type", "UNKNOWN")
            format_type_counts[element_type] = format_type_counts.get(element_type, 0) + 1

        # Crear mapa filtrado de offset de datos
        data_by_offset = {
            var["byte_offset"]: var for var in flat_data_vars
            if var.get("element_type") in ["SIMPLE_VAR", "ARRAY_ELEMENT"]
        }

        # Contar diferencias de valor y desajustes de tipo
        diff_count = 0
        type_mismatch_count = 0
        missing_count = 0

        for format_var in format_value_vars:
            offset = format_var["byte_offset"]
            data_var = data_by_offset.get(offset)

            if data_var:
                # Verificar desajuste de tipo
                if format_var.get("element_type") != data_var.get("element_type"):
                    type_mismatch_count += 1

                # Verificar diferencias de valor
                format_initial = str(format_var.get("initial_value", ""))
                data_initial = str(data_var.get("initial_value", ""))
                format_current = str(format_var.get("current_value", ""))
                data_current = str(data_var.get("current_value", ""))

                if format_initial != data_initial or format_current != data_current:
                    diff_count += 1
            else:
                missing_count += 1

        # Escribir en el resumen
        summary_sheet.cell(row=summary_row, column=1, value=db_name)
        summary_sheet.cell(row=summary_row, column=2, value=len(format_value_vars))

        notes = []
        for element_type, count in format_type_counts.items():
            notes.append(f"{element_type}: {count}")
        notes.append(f"Value differences: {diff_count}")
        notes.append(f"Type mismatches: {type_mismatch_count}")
        notes.append(f"Missing in data: {missing_count}")

        summary_sheet.cell(row=summary_row, column=3, value=", ".join(notes))
        summary_row += 1

    # Guardar el archivo Excel
    try:
        workbook.save(excel_filename)
        print(f"Archivo de comparación Excel generado: {excel_filename}")
        print(f"Archivo Excel de comparación generado: {excel_filename}")
    except Exception as e:
        print(f"Error al escribir el archivo Excel {excel_filename}: {e}")


def main():
    working_dir = find_working_directory()
    print(f"Using working directory: {working_dir}")
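Put together, the main() that starts above drives roughly the flow sketched below; the file names are assumptions, only the function names come from this module.

# Illustrative driver (assumed file names; run inside this module's context).
import json, os

working_dir = find_working_directory()
with open(os.path.join(working_dir, "db1001_data.json"), encoding="utf-8") as f:    # assumed name
    data_json = json.load(f)
with open(os.path.join(working_dir, "db1001_format.json"), encoding="utf-8") as f:  # assumed name
    format_json = json.load(f)

updated_json = create_updated_json(data_json, format_json)
generate_comparison_excel(format_json, data_json, updated_json,
                          os.path.join(working_dir, "db1001_comparison.xlsx"))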
@@ -1,5 +1,5 @@
{
    "name": "Procesador de XML LAD-SCL-AWL exportado de TIA a SCL / Markdown",
    "name": "Siemens-Tia : 03 : Procesador de XML LAD-SCL-AWL exportado de TIA a SCL / Markdown",
    "description": "Conjunto de scripts que procesan archivos XML exportados de TIA, conviertiendo los objetos LAD a SCL y generando documentación en formato Markdown. ",
    "version": "1.0",
    "author": "Miguel"
@@ -24,7 +24,7 @@
        "hidden": true
    },
    "x4_cross_reference.py": {
        "display_name": "4: Generar Cross References",
        "display_name": "2: Procesar un archivo individual usando x4",
        "short_description": "LadderToSCL - Conversor de Siemens LAD/FUP XML a SCL",
        "long_description": "",
        "hidden": false
@@ -1,5 +1,5 @@
{
    "name": "Grupo de Ejemplo Auto",
    "name": "Code : 00 : Grupo de Ejemplo Auto",
    "description": "Scripts de demostración que muestran las funcionalidades básicas del sistema.",
    "version": "1.0",
    "author": "Admin"
data/log.txt (1489 lines changed): File diff suppressed because it is too large
@@ -913,6 +913,12 @@ async function initializeApp() {
    // Agregar el nuevo manejador de eventos
    selectElement.addEventListener('change', handleGroupChange);

    // Event listener para el nuevo botón de abrir en explorador
    const openInExplorerButton = document.getElementById('open-in-explorer-btn');
    if (openInExplorerButton) {
        openInExplorerButton.addEventListener('click', openCurrentWorkingDirectoryInExplorer);
    }

    // Cargar datos iniciales
    updateGroupDescription();
    await initWorkingDirectory();
@@ -1315,3 +1321,39 @@ function openMinicondaConsole() {
            showNotification('Error al comunicarse con el servidor', 'error');
        });
}

async function openCurrentWorkingDirectoryInExplorer() {
    const group = currentGroup; // Asumiendo que currentGroup está disponible globalmente
    const wdInput = document.getElementById('working-directory');
    const path = wdInput.value;

    if (!group) {
        showToast("Por favor, selecciona un grupo primero.", "warning"); // O usa alert() si showToast no está definida
        // alert("Por favor, selecciona un grupo primero.");
        return;
    }
    if (!path || path.trim() === "") {
        showToast("El directorio de trabajo no está establecido.", "warning");
        // alert("El directorio de trabajo no está establecido.");
        return;
    }

    try {
        const response = await fetch('/api/open-explorer', {
            method: 'POST',
            headers: {
                'Content-Type': 'application/json',
            },
            body: JSON.stringify({ path: path, group: group })
        });
        const result = await response.json();
        if (result.status === "success") {
            // No es necesario un toast para éxito, la acción es visible
        } else {
            showToast(result.message || "Error al intentar abrir el explorador.", "error");
        }
    } catch (error) {
        console.error("Error de red al abrir en explorador:", error);
        showToast("Error de red al intentar abrir el explorador.", "error");
    }
}
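The handler above only wraps a POST to /api/open-explorer; the same request can be reproduced outside the browser, for example with the requests library (host, port and values below are assumptions).

# Minimal sketch of the same call the frontend makes (assumed address and values).
import requests

resp = requests.post(
    "http://127.0.0.1:5000/api/open-explorer",                         # assumed host/port
    json={"path": "D:/Proyectos/Scripts", "group": "example_group"},   # assumed values
    timeout=5,
)
print(resp.json())  # e.g. {"status": "success", "message": "..."} or an error payload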
@@ -119,6 +119,9 @@
    <button class="bg-gray-500 text-white px-4 py-2 rounded" onclick="browseDirectory()">
        Explorar
    </button>
    <button id="open-in-explorer-btn" class="bg-indigo-500 hover:bg-indigo-600 text-white px-4 py-2 rounded" title="Abrir directorio actual en el explorador de archivos">
        Abrir Carpeta
    </button>
</div>
<button class="bg-blue-500 text-white px-4 py-2 rounded" onclick="setWorkingDirectory()">
    Confirmar