#!/usr/bin/env python3
"""
TSNet Phase 2 - Performance Benchmark Test
Detailed performance measurements of the TSNet system.
"""

import requests
import time
import statistics
from datetime import datetime


class TSNetBenchmarkSuite:
    def __init__(self, base_url="http://localhost:5006"):
        self.base_url = base_url
        self.benchmarks = []
    def send_request(self, method, params=None, timeout=30):
        """Send a JSON-RPC request and measure the elapsed time."""
        start_time = time.time()
        try:
            payload = {
                "jsonrpc": "2.0",
                "method": method,
                "id": int(time.time() * 1000),
            }
            if params:
                payload["params"] = params

            response = requests.post(self.base_url, json=payload, timeout=timeout)
            elapsed = time.time() - start_time

            if response.status_code == 200:
                result = response.json()
                success = "error" not in result
                return success, result, elapsed
            return False, {"error": f"HTTP {response.status_code}"}, elapsed

        except Exception as e:
            elapsed = time.time() - start_time
            return False, {"error": f"Exception: {str(e)}"}, elapsed
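    # Response shapes assumed by the parsing above and by the benchmarks below
    # (inferred from this script's own checks, not taken from CtrEditor docs):
    #   success: {"jsonrpc": "2.0", "id": <int>, "result": {...}}
    #   failure: {"jsonrpc": "2.0", "id": <int>, "error": {"code": ..., "message": "..."}}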
def log_benchmark(self, test_name, elapsed_time, success, details=""):
|
|
"""Registrar resultado de benchmark"""
|
|
status = "✅" if success else "❌"
|
|
print(f"{status} {test_name}: {elapsed_time:.3f}s")
|
|
if details:
|
|
print(f" {details}")
|
|
|
|
self.benchmarks.append(
|
|
{
|
|
"test": test_name,
|
|
"elapsed": elapsed_time,
|
|
"success": success,
|
|
"details": details,
|
|
"timestamp": datetime.now(),
|
|
}
|
|
)
|
|
|
|
    def benchmark_object_creation(self, iterations=5):
        """Benchmark: creation of hydraulic objects."""
        print("\n⏱️ Benchmarking Object Creation...")

        times = []
        object_types = ["osHydTank", "osHydPump", "osHydPipe"]

        for i in range(iterations):
            start_time = time.time()
            created_objects = []

            for j, obj_type in enumerate(object_types):
                success, result, _ = self.send_request(
                    "create_object",
                    {"type": obj_type, "x": float(i * 3 + j), "y": float(i)},
                )
                if success:
                    created_objects.append(obj_type)

            elapsed = time.time() - start_time
            times.append(elapsed)

            # Clean up after each iteration. Note that this deletes every object
            # currently listed in the workspace, not only the ones created above.
            if created_objects:
                success, result, _ = self.send_request("list_objects")
                if success and "result" in result:
                    objects = result["result"].get("objects", [])
                    if objects:
                        object_ids = [str(obj["id"]["Value"]) for obj in objects]
                        self.send_request("delete_objects", {"ids": object_ids})

        avg_time = statistics.mean(times)
        std_dev = statistics.stdev(times) if len(times) > 1 else 0

        self.log_benchmark(
            f"Object Creation ({iterations} iterations)",
            avg_time,
            True,
            f"Avg: {avg_time:.3f}s ± {std_dev:.3f}s",
        )
    def benchmark_tsnet_registration(self, iterations=3):
        """Benchmark: registering objects with TSNet."""
        print("\n⏱️ Benchmarking TSNet Registration...")

        # Create test objects first
        test_objects = []
        for i, obj_type in enumerate(["osHydTank", "osHydPump", "osHydPipe"]):
            success, result, _ = self.send_request(
                "create_object", {"type": obj_type, "x": float(i), "y": 0.0}
            )
            if success:
                test_objects.append(obj_type)

        if not test_objects:
            self.log_benchmark(
                "TSNet Registration", 0, False, "No test objects created"
            )
            return

        times = []

        for i in range(iterations):
            # Plain (non-f) string: the braces below belong to the remote
            # snippet's own f-string and must reach it unmodified.
            code = """
import time
start_time = time.time()

try:
    # Reset manager
    app.tsnetSimulationManager.ResetAllCalculatedValues()

    # Register all hydraulic objects
    registered_count = 0
    for obj in app.ObjetosSimulables:
        if obj.GetType().Name.startswith("osHyd"):
            obj.CheckData()
            app.tsnetSimulationManager.RegisterHydraulicObject(obj)
            registered_count += 1

    elapsed = time.time() - start_time
    result = f"{elapsed:.4f},{registered_count}"

except Exception as e:
    elapsed = time.time() - start_time
    result = f"{elapsed:.4f},0,ERROR:{str(e)}"

print(result)
"""

            success, response, _ = self.send_request("execute_python", {"code": code})

            if success and "result" in response:
                output = str(response["result"])
                try:
                    parts = output.strip().split(",")
                    elapsed = float(parts[0])
                    times.append(elapsed)
                except (ValueError, IndexError):
                    pass

        if times:
            avg_time = statistics.mean(times)
            std_dev = statistics.stdev(times) if len(times) > 1 else 0
            self.log_benchmark(
                f"TSNet Registration ({iterations} iterations)",
                avg_time,
                True,
                f"Avg: {avg_time:.3f}s ± {std_dev:.3f}s, {len(test_objects)} objects",
            )
        else:
            self.log_benchmark("TSNet Registration", 0, False, "No valid measurements")
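    # Output convention assumed for the remote snippets in this and the next two
    # benchmarks (inferred from the local parsing, not from server documentation):
    # the code sent via execute_python prints a single comma-separated line such as
    #   "0.0123,3"              -> elapsed seconds, item count
    #   "0.0123,0,ERROR:<msg>"  -> elapsed seconds plus an error marker
    # Only the first field is read for timing, so commas inside an error message
    # cannot corrupt the measurement.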
    def benchmark_configuration_validation(self, iterations=5):
        """Benchmark: configuration validation."""
        print("\n⏱️ Benchmarking Configuration Validation...")

        times = []

        for i in range(iterations):
            # Plain (non-f) string for the same reason as above.
            code = """
import time
start_time = time.time()

try:
    # Ensure objects are registered
    for obj in app.ObjetosSimulables:
        if obj.GetType().Name.startswith("osHyd"):
            obj.CheckData()
            app.tsnetSimulationManager.RegisterHydraulicObject(obj)

    # Validate configurations
    config_errors = app.tsnetSimulationManager.ValidateAllConfigurations()

    elapsed = time.time() - start_time
    result = f"{elapsed:.4f},{len(config_errors)}"

except Exception as e:
    elapsed = time.time() - start_time
    result = f"{elapsed:.4f},-1,ERROR:{str(e)}"

print(result)
"""

            success, response, _ = self.send_request("execute_python", {"code": code})

            if success and "result" in response:
                output = str(response["result"])
                try:
                    parts = output.strip().split(",")
                    elapsed = float(parts[0])
                    times.append(elapsed)
                except (ValueError, IndexError):
                    pass

        if times:
            avg_time = statistics.mean(times)
            std_dev = statistics.stdev(times) if len(times) > 1 else 0
            self.log_benchmark(
                f"Configuration Validation ({iterations} iterations)",
                avg_time,
                True,
                f"Avg: {avg_time:.3f}s ± {std_dev:.3f}s",
            )
    def benchmark_network_rebuild(self, iterations=3):
        """Benchmark: network rebuild."""
        print("\n⏱️ Benchmarking Network Rebuild...")

        times = []

        for i in range(iterations):
            # Plain (non-f) string for the same reason as above.
            code = """
import time
start_time = time.time()

try:
    # Rebuild network
    app.tsnetSimulationManager.RebuildNetwork()

    elapsed = time.time() - start_time
    result = f"{elapsed:.4f}"

except Exception as e:
    elapsed = time.time() - start_time
    result = f"{elapsed:.4f},ERROR:{str(e)}"

print(result)
"""

            success, response, _ = self.send_request("execute_python", {"code": code})

            if success and "result" in response:
                output = str(response["result"])
                try:
                    elapsed = float(output.strip().split(",")[0])
                    times.append(elapsed)
                except (ValueError, IndexError):
                    pass

        if times:
            avg_time = statistics.mean(times)
            std_dev = statistics.stdev(times) if len(times) > 1 else 0
            self.log_benchmark(
                f"Network Rebuild ({iterations} iterations)",
                avg_time,
                True,
                f"Avg: {avg_time:.3f}s ± {std_dev:.3f}s",
            )
    def benchmark_full_simulation_cycle(self, iterations=3):
        """Benchmark: full simulation cycle."""
        print("\n⏱️ Benchmarking Full Simulation Cycle...")

        times = []

        for i in range(iterations):
            start_time = time.time()

            success, response, _ = self.send_request(
                "execute_python", {"code": "app.RunTSNetSimulationSync()"}, timeout=60
            )

            elapsed = time.time() - start_time

            if success:
                times.append(elapsed)

        if times:
            avg_time = statistics.mean(times)
            std_dev = statistics.stdev(times) if len(times) > 1 else 0
            self.log_benchmark(
                f"Full Simulation Cycle ({iterations} iterations)",
                avg_time,
                True,
                f"Avg: {avg_time:.3f}s ± {std_dev:.3f}s",
            )
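    # Unlike the previous benchmarks, the full-cycle time above is measured on the
    # client side around send_request, so it includes HTTP round-trip and JSON-RPC
    # dispatch overhead in addition to the simulation itself.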
    def benchmark_memory_usage(self):
        """Benchmark: memory usage."""
        print("\n⏱️ Benchmarking Memory Usage...")

        code = """
import gc
import sys

try:
    # Force garbage collection
    gc.collect()

    # Get object counts before
    before_objects = len(gc.get_objects())

    # Perform TSNet operations
    app.tsnetSimulationManager.ResetAllCalculatedValues()

    for obj in app.ObjetosSimulables:
        if obj.GetType().Name.startswith("osHyd"):
            obj.CheckData()
            app.tsnetSimulationManager.RegisterHydraulicObject(obj)

    app.tsnetSimulationManager.ValidateAllConfigurations()
    app.tsnetSimulationManager.RebuildNetwork()

    # Get object counts after
    after_objects = len(gc.get_objects())

    # Clean up
    app.tsnetSimulationManager.ResetAllCalculatedValues()
    gc.collect()

    final_objects = len(gc.get_objects())

    result = f"Before: {before_objects}, After: {after_objects}, Final: {final_objects}"

except Exception as e:
    result = f"ERROR: {str(e)}"

print(result)
"""

        success, response, _ = self.send_request("execute_python", {"code": code})

        if success and "result" in response:
            output = str(response["result"])
            self.log_benchmark("Memory Usage Analysis", 0, True, output)
        else:
            self.log_benchmark("Memory Usage Analysis", 0, False, "Failed to analyze")
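    # Note: len(gc.get_objects()) counts Python-level objects only. If `app` is a
    # .NET object exposed through an interop layer (suggested by the GetType().Name
    # calls), managed-heap allocations are not visible here, so treat these figures
    # as a Python-side leak indicator rather than total process memory.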
    def run_benchmarks(self):
        """Run all benchmarks."""
        print("🏁 TSNet Phase 2 - Performance Benchmark Suite")
        print("=" * 60)
        print(f"Started at: {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}")

        # Verify connectivity
        success, _, _ = self.send_request("get_ctreditor_status")
        if not success:
            print("❌ Cannot connect to CtrEditor MCP server")
            return

        # Clean the workspace
        print("\n🧹 Preparing test environment...")
        success, result, _ = self.send_request("list_objects")
        if success and "result" in result:
            objects = result["result"].get("objects", [])
            if objects:
                object_ids = [str(obj["id"]["Value"]) for obj in objects]
                self.send_request("delete_objects", {"ids": object_ids})
                print(f"   Cleaned {len(object_ids)} existing objects")

        # Run benchmarks
        benchmarks = [
            lambda: self.benchmark_object_creation(5),
            lambda: self.benchmark_tsnet_registration(3),
            lambda: self.benchmark_configuration_validation(5),
            lambda: self.benchmark_network_rebuild(3),
            lambda: self.benchmark_full_simulation_cycle(2),
            self.benchmark_memory_usage,
        ]

        for benchmark_func in benchmarks:
            try:
                benchmark_func()
                time.sleep(1)  # Pause between benchmarks
            except Exception as e:
                print(f"❌ Benchmark failed: {str(e)}")

        # Generate report
        self.generate_performance_report()
    def generate_performance_report(self):
        """Generate the performance report."""
        print("\n" + "=" * 60)
        print("📊 PERFORMANCE BENCHMARK REPORT")
        print("=" * 60)

        # Statistics by category
        successful_benchmarks = [b for b in self.benchmarks if b["success"]]

        if successful_benchmarks:
            print(
                f"Successful Benchmarks: {len(successful_benchmarks)}/{len(self.benchmarks)}"
            )

            # Top 3 fastest
            timed_benchmarks = [b for b in successful_benchmarks if b["elapsed"] > 0]
            if timed_benchmarks:
                fastest = sorted(timed_benchmarks, key=lambda x: x["elapsed"])[:3]
                print("\n🚀 Fastest Operations:")
                for i, bench in enumerate(fastest, 1):
                    print(f"   {i}. {bench['test']}: {bench['elapsed']:.3f}s")

                # Slowest
                slowest = sorted(
                    timed_benchmarks, key=lambda x: x["elapsed"], reverse=True
                )[:3]
                print("\n⏳ Slowest Operations:")
                for i, bench in enumerate(slowest, 1):
                    print(f"   {i}. {bench['test']}: {bench['elapsed']:.3f}s")

            # Performance analysis
            total_time = sum(b["elapsed"] for b in timed_benchmarks)
            avg_time = total_time / len(timed_benchmarks) if timed_benchmarks else 0

            print("\n📈 Performance Summary:")
            print(f"   Total Benchmark Time: {total_time:.3f}s")
            print(f"   Average Operation Time: {avg_time:.3f}s")

            # Performance rating
            if avg_time < 0.1:
                performance_rating = "🚀 Excellent"
            elif avg_time < 0.5:
                performance_rating = "✅ Good"
            elif avg_time < 2.0:
                performance_rating = "⚠️ Acceptable"
            else:
                performance_rating = "❌ Needs Optimization"

            print(f"   Performance Rating: {performance_rating}")

        print(f"\nCompleted at: {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}")


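# Example usage (assumes a CtrEditor MCP server is reachable; the alternate host
# on the second line is illustrative only, not part of the original setup):
#
#   suite = TSNetBenchmarkSuite()                                     # default http://localhost:5006
#   suite = TSNetBenchmarkSuite(base_url="http://192.168.1.50:5006")  # hypothetical remote editor
#   suite.run_benchmarks()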
def main():
    benchmark_suite = TSNetBenchmarkSuite()
    benchmark_suite.run_benchmarks()


if __name__ == "__main__":
    main()