Refactor disk usage retrieval in PLCDataStreamer for improved Windows compatibility, removing the reliance on psutil.

- Enhance the polling mechanism in useCoordinatedConnection to back off on consecutive errors (10 s, growing to a 30 s cap) instead of stopping.
- Update system_state.json to reorder the active datasets, refresh the last-update timestamp, and drop the plotjuggler_path entry.
- Add a test_disk_status.py script to validate disk_space functionality via the /api/status endpoint.
commit cc4888fa18
parent 9dae98bfdc

One file diff is suppressed because it is too large.
```diff
@@ -639,61 +639,39 @@ class PLCDataStreamer:
         return None
 
     def _get_disk_usage_safe(self, path):
-        """Safely get disk usage with Windows compatibility fallbacks"""
-        import shutil
+        """Get disk usage using shutil.disk_usage() - robust for Windows Python 3.12+"""
         from collections import namedtuple
         import os
+        import shutil
 
         DiskUsage = namedtuple("DiskUsage", ["total", "used", "free", "percent"])
 
         try:
-            # Method 1: Try psutil with absolute path
-            abs_path = os.path.abspath(path)
-            usage = psutil.disk_usage(abs_path)
+            # For Windows, always use the drive root (e.g., "D:\") for maximum compatibility
+            # Extract drive letter from the given path
+            drive = os.path.splitdrive(os.path.abspath(path))[0]
+
+            # Ensure we have a valid drive (e.g., "D:")
+            if not drive or len(drive) < 2:
+                raise ValueError(f"Invalid drive extracted from path: {path}")
+
+            # Create proper drive root path for Windows (e.g., "D:\")
+            drive_root = drive + "\\"
+
+            # Use shutil.disk_usage() instead of psutil - avoids Python 3.12 Unicode API issues
+            usage = shutil.disk_usage(drive_root)
+
             return DiskUsage(
                 total=usage.total,
                 used=usage.used,
                 free=usage.free,
                 percent=round((usage.used / usage.total) * 100, 1),
             )
-        except (SystemError, OSError, Exception) as e:
+        except Exception as e:
             if hasattr(self, "logger"):
-                self.logger.warning(f"psutil.disk_usage failed for {path}: {e}")
-
-            try:
-                # Method 2: Try psutil with drive root
-                drive = os.path.splitdrive(os.path.abspath(path))[0]
-                if drive:
-                    drive_root = drive + os.sep  # e.g., "D:\"
-                    if hasattr(self, "logger"):
-                        self.logger.info(f"Trying psutil with drive root: {drive_root}")
-                    usage = psutil.disk_usage(drive_root)
-                    return DiskUsage(
-                        total=usage.total,
-                        used=usage.used,
-                        free=usage.free,
-                        percent=round((usage.used / usage.total) * 100, 1),
-                    )
-            except Exception as e2:
-                if hasattr(self, "logger"):
-                    self.logger.warning(
-                        f"psutil.disk_usage with drive root failed: {e2}"
-                    )
-
-            try:
-                # Method 3: Use shutil.disk_usage (Python 3.3+)
-                if hasattr(self, "logger"):
-                    self.logger.info("Trying shutil.disk_usage as fallback")
-                total, used, free = shutil.disk_usage(path)
-                return DiskUsage(
-                    total=total,
-                    used=used,
-                    free=free,
-                    percent=round((used / total) * 100, 1),
-                )
-            except Exception as e3:
-                if hasattr(self, "logger"):
-                    self.logger.error(f"All disk usage methods failed: {e3}")
-            return None
+                self.logger.error(f"Failed to get disk usage for drive root: {e}")
+            return None
 
     def _estimate_csv_size_per_hour(self) -> float:
         """Estimate CSV file size per hour based on active datasets and variables"""
```
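The drive-root approach also works standalone, outside the class. Below is a minimal sketch of the same technique, assuming Windows-style paths; the `D:\plc_records` directory is hypothetical:

```python
import os
import shutil
from collections import namedtuple

DiskUsage = namedtuple("DiskUsage", ["total", "used", "free", "percent"])


def get_disk_usage(path):
    """Resolve the Windows drive root for `path` and query it with shutil."""
    drive = os.path.splitdrive(os.path.abspath(path))[0]  # e.g. "D:"
    if not drive or len(drive) < 2:
        raise ValueError(f"Invalid drive extracted from path: {path}")
    usage = shutil.disk_usage(drive + "\\")  # query the root, e.g. "D:\"
    return DiskUsage(
        total=usage.total,
        used=usage.used,
        free=usage.free,
        percent=round((usage.used / usage.total) * 100, 1),
    )


if __name__ == "__main__":
    # Hypothetical records directory; any path on the target drive works.
    print(get_disk_usage(r"D:\plc_records"))
```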
```diff
@@ -112,7 +112,15 @@ export function useCoordinatedPolling(source, fetchFunction, interval = 5000, de
   let intervalId = null
   let isActive = true
   let consecutiveErrors = 0
   const maxConsecutiveErrors = 3
+  let currentInterval = interval
+
+  const scheduleNextPoll = (delay) => {
+    if (!isActive) return
+    if (intervalId) {
+      clearTimeout(intervalId)
+    }
+    intervalId = setTimeout(poll, delay)
+  }
 
   const poll = async () => {
     if (!isActive) return
@@ -120,35 +128,43 @@ export function useCoordinatedPolling(source, fetchFunction, interval = 5000, de
       const data = await fetchFunction()
       if (isActive) {
         consecutiveErrors = 0
+        currentInterval = interval // Reset to normal interval
         setConnectionError(null)
         onData(data)
+        // Schedule next poll at the normal interval
+        scheduleNextPoll(interval)
       }
     } catch (error) {
       console.error(`Polling error for ${source}:`, error)
       consecutiveErrors++
 
-      if (consecutiveErrors >= maxConsecutiveErrors) {
+      if (consecutiveErrors >= 3) {
         setConnectionError(error)
-        // Stop polling after too many consecutive errors
-        if (intervalId) {
-          clearInterval(intervalId)
-          intervalId = null
-        }
+        // Back off instead of stopping completely:
+        // start at 10 seconds, max out at 30 seconds
+        currentInterval = Math.min(10000 + (consecutiveErrors - 3) * 5000, 30000)
+        console.log(`${source}: Using backoff interval ${currentInterval}ms after ${consecutiveErrors} errors`)
+      } else {
+        // For the first few errors, keep the normal interval
+        currentInterval = interval
       }
+
+      // Always continue polling, even with errors
+      if (isActive) {
+        scheduleNextPoll(currentInterval)
+      }
     }
   }
 
   // Initial poll
   poll()
 
-  // Set up the polling interval
-  intervalId = setInterval(poll, interval)
-
   return {
     close: () => {
       isActive = false
       if (intervalId) {
-        clearInterval(intervalId)
+        clearTimeout(intervalId)
         intervalId = null
       }
     }
  }
```
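One note on the wording: despite the "exponential backoff" label, the schedule grows linearly, starting at 10 s, stepping up by 5 s per additional error, and capping at 30 s. A small Python sketch of the same arithmetic (the 5000 ms default mirrors the hook's `interval = 5000`):

```python
def backoff_ms(consecutive_errors, normal_interval_ms=5000):
    """Mirror of the JS schedule: normal interval below 3 errors,
    then 10 s growing by 5 s per extra error, capped at 30 s."""
    if consecutive_errors < 3:
        return normal_interval_ms
    return min(10000 + (consecutive_errors - 3) * 5000, 30000)


# errors 1..8 -> [5000, 5000, 10000, 15000, 20000, 25000, 30000, 30000]
print([backoff_ms(n) for n in range(1, 9)])
```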
```diff
@@ -3,12 +3,11 @@
     "should_connect": true,
     "should_stream": false,
     "active_datasets": [
-      "Fast",
       "DAR",
-      "Test"
+      "Test",
+      "Fast"
     ]
   },
   "auto_recovery_enabled": true,
-  "last_update": "2025-08-22T16:39:17.693082",
-  "plotjuggler_path": "C:\\Program Files\\PlotJuggler\\plotjuggler.exe"
+  "last_update": "2025-08-22T17:31:16.229568"
 }
```
```diff
@@ -0,0 +1,51 @@
```

New file, in full:

```python
#!/usr/bin/env python3
"""
Test script to verify disk_space functionality after the psutil fix
"""
import requests
import json


def test_disk_status():
    """Test the /api/status endpoint to check if disk_space_info works correctly"""
    try:
        print("🧪 Testing /api/status endpoint...")
        response = requests.get("http://localhost:5050/api/status", timeout=10)

        if response.status_code == 200:
            data = response.json()
            print("✅ Status endpoint responded successfully")

            # Check if disk_space_info is present and valid
            if "disk_space_info" in data:
                disk_info = data["disk_space_info"]
                print("✅ Disk space info retrieved successfully:")
                print(f"   📁 Free space: {disk_info.get('free_space', 'Unknown')}")
                print(f"   📁 Total space: {disk_info.get('total_space', 'Unknown')}")
                print(f"   📁 Used space: {disk_info.get('used_space', 'Unknown')}")
                print(
                    f"   📊 Percent used: {disk_info.get('percent_used', 'Unknown')}%"
                )
                print(
                    f"   ⏱️ Recording time left: {disk_info.get('recording_time_left', 'Unknown')}"
                )

                if disk_info.get("error"):
                    print(f"❌ Error in disk_space_info: {disk_info['error']}")
                else:
                    print("✅ No errors in disk_space_info")
            else:
                print("❌ disk_space_info not found in response")

        else:
            print(f"❌ Status endpoint failed: {response.status_code}")
            print(f"Response: {response.text}")

    except requests.exceptions.ConnectionError:
        print("❌ Cannot connect to backend server at http://localhost:5050")
    except Exception as e:
        print(f"❌ Error testing disk status: {e}")


if __name__ == "__main__":
    test_disk_status()
```
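With the backend listening on localhost:5050, the script runs directly via `python test_disk_status.py`. It assumes `disk_space_info` is shaped roughly as below; the keys come from the probes above, while the values are illustrative only, not real endpoint output:

```python
# Illustrative shape only - values are made up for documentation purposes.
expected_disk_space_info = {
    "free_space": "412.5 GB",
    "total_space": "931.5 GB",
    "used_space": "519.0 GB",
    "percent_used": 55.7,
    "recording_time_left": "82.5 hours",
    # an "error" key appears only when disk usage retrieval failed
}
```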