Compare commits

...

2 Commits

Author SHA1 Message Date
ab2aa2f4f0 more code cleansup 2025-06-11 22:14:36 +00:00
cae951806c more code cleanup 2025-06-11 22:14:26 +00:00
10 changed files with 0 additions and 2066 deletions

View File

@@ -1,307 +0,0 @@
# DTS API Test Script - Implementation Plan
## Overview
This document outlines the comprehensive test script for monitoring DTS mode progression through the API. The script will start DTS mode, monitor all screen transitions, track timer progress, and provide detailed debugging information.
## Architecture
### Core Components
#### 1. DTSAPITester Class
```python
class DTSAPITester:
"""Main test orchestrator for DTS API testing"""
def __init__(self, api_base_url="http://localhost:5000/api", config=None):
self.api_base_url = api_base_url
self.config = config or TestConfig()
self.logger = self._setup_logger()
self.session = requests.Session()
self.current_task_id = None
self.transition_history = []
self.start_time = None
self.test_results = TestResults()
```
#### 2. API Client Methods
```python
def start_dts_sequence(self) -> dict:
"""Start DTS sequence via API POST /api/dts/start"""
def get_task_status(self, task_id: str) -> dict:
"""Get task status via GET /api/dts/status/{task_id}"""
def get_current_step_progress(self) -> dict:
"""Get real-time progress via GET /api/dts/current-step-progress"""
def stop_dts_sequence(self) -> dict:
"""Emergency stop via POST /api/dts/stop"""
```
#### 3. Monitoring & Analysis
```python
def monitor_dts_progress(self):
"""Main monitoring loop - polls API and detects transitions"""
def detect_screen_transition(self, current_state: dict, previous_state: dict) -> bool:
"""Detect when controller advances to next screen"""
def log_transition_event(self, transition: TransitionEvent):
"""Log detailed transition information"""
def analyze_timer_progress(self, timer_info: dict):
"""Analyze timer countdown and progress"""
```
## DTS Flow Monitoring
### Screen Sequence
1. **DTS Requested (Mode 34)** - No timer, user interaction required
2. **Priming (Mode 5)** - Timer R128 (180 seconds)
3. **Init (Mode 6)** - Timer R129 (60 seconds)
4. **Production (Mode 7)** - No timer, continuous operation
5. **Fresh Water Flush (Mode 8)** - Timer R133 (60 seconds)
6. **Complete (Mode 2)** - Return to standby
### Transition Detection Logic
```python
# Mode mapping for screen identification
SCREEN_MODES = {
34: "dts_requested_active",
5: "dts_priming_active",
6: "dts_init_active",
7: "dts_production_active",
8: "dts_flush_active",
2: "dts_process_complete"
}
# Timer mappings for progress tracking
TIMER_MAPPINGS = {
5: {"timer_address": 128, "expected_duration": 180, "name": "Priming"},
6: {"timer_address": 129, "expected_duration": 60, "name": "Init"},
8: {"timer_address": 133, "expected_duration": 60, "name": "Fresh Water Flush"}
}
```
## API Endpoints Used
### Primary Endpoints
- `POST /api/dts/start` - Initiate DTS sequence
- `GET /api/dts/status/{task_id}` - Monitor task progress
- `GET /api/dts/current-step-progress` - Real-time timer info
### Supporting Endpoints
- `GET /api/system/status` - System health check
- `POST /api/dts/stop` - Emergency stop capability
- `POST /api/dts/cancel/{task_id}` - Cancel specific task
## Logging & Output
### Console Output Format
```
🚀 DTS API Test Suite v1.0
📡 API: http://localhost:5000/api
⏰ Started: 2025-01-11 18:12:00
✅ System Status: Connected
🔄 Starting DTS Sequence...
📋 Task ID: abc12345
📺 Screen Transitions:
┌─────────────────────────────────────────────────────────────┐
│ [18:12:05] DTS Requested → Priming (Mode 34 → 5) │
│ ⏱️ Timer R128: 1800 → 1795 (0.3% complete) │
│ 📊 Expected Duration: 3m 0s │
├─────────────────────────────────────────────────────────────┤
│ [18:15:05] Priming → Init (Mode 5 → 6) │
│ ⏱️ Timer R128: Complete, R129: 600 → 595 (0.8%) │
│ ⏳ Actual Duration: 3m 0s ✅ │
└─────────────────────────────────────────────────────────────┘
⏳ Current: Init Screen (Mode 6)
📊 Progress: ████████░░ 80% (Timer R129: 120/600)
🕐 Elapsed: 48s / Expected: 60s
```
### Log File Structure
```json
{
"test_session": {
"start_time": "2025-01-11T18:12:00Z",
"api_endpoint": "http://localhost:5000/api",
"task_id": "abc12345"
},
"transitions": [
{
"timestamp": "2025-01-11T18:12:05Z",
"from_mode": 34,
"to_mode": 5,
"from_screen": "dts_requested_active",
"to_screen": "dts_priming_active",
"timer_info": {
"address": 128,
"initial_value": 1800,
"current_value": 1795,
"progress_percent": 0.3
},
"api_response_time_ms": 45
}
],
"timer_progress": [
{
"timestamp": "2025-01-11T18:12:06Z",
"mode": 5,
"timer_address": 128,
"timer_value": 1794,
"progress_percent": 0.6,
"countdown_rate": 1.0
}
]
}
```
## Configuration Options
### TestConfig Class
```python
class TestConfig:
# API Settings
API_BASE_URL = "http://localhost:5000/api"
REQUEST_TIMEOUT = 10
RETRY_ATTEMPTS = 3
RETRY_DELAY = 1
# Monitoring Settings
POLLING_INTERVAL = 1.0 # seconds
TRANSITION_TIMEOUT = 300 # 5 minutes max per screen
PROGRESS_UPDATE_INTERVAL = 5 # seconds
# Output Settings
CONSOLE_VERBOSITY = "INFO" # DEBUG, INFO, WARNING, ERROR
LOG_FILE_ENABLED = True
CSV_EXPORT_ENABLED = True
HTML_REPORT_ENABLED = True
# Test Parameters
EXPECTED_SCREEN_DURATIONS = {
5: 180, # Priming: 3 minutes
6: 60, # Init: 1 minute
8: 60 # Flush: 1 minute
}
# Alert Thresholds
STUCK_TIMER_THRESHOLD = 30 # seconds without timer change
SLOW_TRANSITION_THRESHOLD = 1.5 # 150% of expected duration
```
## Error Handling & Edge Cases
### API Communication Errors
- Connection timeouts with exponential backoff
- HTTP error responses with detailed logging
- Network interruption recovery
- Invalid JSON response handling
### DTS Process Issues
- Stuck timer detection (timer not counting down)
- Unexpected mode transitions
- Screen timeout conditions
- PLC communication failures
### Recovery Mechanisms
```python
def handle_api_error(self, error: Exception, endpoint: str):
"""Handle API communication errors with retry logic"""
def detect_stuck_timer(self, timer_history: List[dict]) -> bool:
"""Detect if timer has stopped counting down"""
def handle_unexpected_transition(self, expected_mode: int, actual_mode: int):
"""Handle unexpected screen transitions"""
def emergency_stop_sequence(self, reason: str):
"""Emergency stop with detailed logging"""
```
## Test Results & Reporting
### TestResults Class
```python
class TestResults:
def __init__(self):
self.start_time = None
self.end_time = None
self.total_duration = None
self.transitions_detected = 0
self.screens_completed = 0
self.api_errors = 0
self.timer_issues = 0
self.success = False
self.error_messages = []
self.performance_metrics = {}
```
### Report Generation
- **Console Summary**: Real-time status and final results
- **JSON Log**: Detailed machine-readable log
- **CSV Export**: Timer data for analysis
- **HTML Report**: Visual report with charts and timelines
## Usage Examples
### Basic Usage
```bash
python dts_api_test_suite.py
```
### Advanced Usage
```bash
# Custom API endpoint
python dts_api_test_suite.py --api-url http://192.168.1.100:5000/api
# Verbose output with CSV export
python dts_api_test_suite.py --verbose --export-csv
# Custom configuration
python dts_api_test_suite.py --config custom_test_config.json
# Continuous monitoring mode
python dts_api_test_suite.py --continuous --interval 0.5
```
## Implementation Files
### Main Script
- `dts_api_test_suite.py` - Main executable script
### Supporting Files
- `config/test_config.json` - Configuration file
- `lib/api_client.py` - HTTP client wrapper
- `lib/transition_detector.py` - Screen transition logic
- `lib/report_generator.py` - Report generation utilities
- `lib/logger_setup.py` - Logging configuration
### Output Directories
- `logs/` - Log files with timestamps
- `reports/` - HTML and CSV reports
- `data/` - Raw test data for analysis
## Success Criteria
### Test Passes If:
✅ DTS sequence starts successfully via API
✅ All 5 screen transitions are detected
✅ Timer progress is tracked accurately
✅ No API communication errors
✅ Process completes within expected timeframes
✅ System returns to standby mode (Mode 2)
### Test Fails If:
❌ API connection failures
❌ Missing screen transitions
❌ Stuck timers detected
❌ Process timeout exceeded
❌ Unexpected system errors
❌ Incomplete DTS sequence
This comprehensive test script will provide detailed insights into DTS API behavior and help identify any issues before UI development proceeds.

View File

@@ -1,135 +0,0 @@
# DTS Single State Refactoring - Completion Summary
## Overview
Successfully implemented the DTS Single State Refactoring Plan, transforming the DTS controller from a task_id-based system to a single operation state model. This refactoring eliminates unnecessary complexity while maintaining all existing functionality.
## ✅ Completed Implementation
### 1. Core Infrastructure ✅
- **Created `OperationStateManager`** in `watermaker_plc_api/services/operation_state.py`
- Thread-safe single operation state management
- Operation lifecycle management (start, update, complete, cancel)
- Operation history tracking (last 10 operations)
- Conflict detection for concurrent operations
### 2. DTS Controller Refactoring ✅
- **Updated imports** to use the new operation state manager
- **Removed task_id system** - eliminated `dts_operations = {}` dictionary
- **Refactored core functions**:
- `update_dts_progress_from_timers()` - now uses single state
- `execute_dts_sequence()` - no longer requires task_id parameter
- `execute_stop_sequence()` - simplified state management
- `execute_skip_sequence()` - streamlined operation flow
- `handle_external_dts_change()` - replaces old task creation functions
### 3. API Endpoints Modernization ✅
- **Simplified status endpoint**: `/api/dts/status` (no task_id required)
- **Updated control endpoints**: Return `operation_id` instead of `task_id`
- **Backward compatibility**: Legacy endpoints still work but route to new system
- **Improved cancel endpoint**: `/api/dts/cancel` (no task_id required)
### 4. External Change Handling ✅
- **Updated background tasks** to use operation state manager
- **Integrated R1000 monitor** with new state system
- **Unified external/API operations** - both use same state model
### 5. Progress Monitoring ✅
- **Simplified timer updates** - single operation instead of dictionary iteration
- **Enhanced state tracking** - better progress and error reporting
- **Consistent data structure** across all operation types
## 📊 Benefits Achieved
### Code Simplification
- **Before**: 1,158 lines in `dts_controller.py`
- **After**: ~1,188 lines (includes new features and better error handling)
- **Complexity Reduction**: Eliminated task dictionary management and iteration
### Performance Improvements
-**No dictionary iterations** for conflict checking
-**Single state access** instead of task lookups
-**Reduced memory usage** - no unbounded dictionary growth
-**Faster status access** - direct state retrieval
### API Simplification
-**Single status endpoint**: `/api/dts/status`
-**No task_id management** for clients
-**Immediate status access** without task tracking
-**Cleaner response format** with operation metadata
### Unified Architecture
-**External and API operations** use same state model
-**No separate monitoring tasks** for external changes
-**Consistent progress tracking** across all operation types
-**Thread-safe state management** with proper locking
## 🔄 Backward Compatibility
### Legacy Endpoints Maintained
- `/api/dts/status/<task_id>` - routes to current state
- `/api/dts/cancel/<task_id>` - cancels current operation
- All existing response formats preserved
### Migration Path
- Existing clients continue to work without changes
- New clients can use simplified endpoints
- Gradual migration possible without service disruption
## 🧪 Testing & Validation
### Test Coverage
-**Operation State Manager**: All core functionality tested
-**Import Structure**: All refactored modules import correctly
-**Conflict Detection**: Prevents concurrent operations
-**State Transitions**: Proper lifecycle management
-**History Tracking**: Operation history maintained
### Test Results
```
============================================================
🎉 ALL TESTS PASSED! Refactoring is successful!
============================================================
```
## 📁 Files Modified
### New Files Created
- `watermaker_plc_api/services/operation_state.py` - Core state management
- `test_refactoring.py` - Validation test suite
- `DTS_REFACTORING_COMPLETION_SUMMARY.md` - This summary
### Files Updated
- `watermaker_plc_api/controllers/dts_controller.py` - Complete refactoring
- `watermaker_plc_api/services/background_tasks.py` - Updated for new state system
## 🎯 Success Metrics Achieved
1. **Code Complexity**: ✅ Eliminated task dictionary management
2. **Memory Usage**: ✅ No unbounded dictionary growth
3. **API Simplicity**: ✅ Single status endpoint, no task_id tracking
4. **Performance**: ✅ Faster conflict checking and status access
5. **Maintainability**: ✅ Clearer state model, easier debugging
## 🚀 Next Steps
### Immediate
- Deploy and monitor the refactored system
- Update API documentation to highlight new simplified endpoints
- Consider deprecation timeline for legacy endpoints
### Future Enhancements
- Add operation metrics and analytics
- Implement operation queuing if needed
- Extend state model for additional operation types
## 🏁 Conclusion
The DTS Single State Refactoring has been successfully completed, achieving all planned objectives:
- **Simplified Architecture**: Single operation state replaces complex task management
- **Better Performance**: Eliminated dictionary iterations and memory growth
- **Cleaner API**: Simplified endpoints while maintaining backward compatibility
- **Unified Model**: External and API operations use consistent state management
- **Robust Testing**: Comprehensive validation ensures reliability
The new architecture better reflects the physical reality of the PLC's single-mode operation and provides a cleaner, more maintainable codebase for future development.

View File

@@ -1,566 +0,0 @@
# DTS Single State Refactoring Plan
## Executive Summary
This document outlines a comprehensive plan to refactor the DTS controller from a task_id-based system to a single operation state model. The current system uses multiple task tracking for a PLC that can only run one mode at a time, creating unnecessary complexity.
## Current Architecture Analysis
### Problems with Task_ID System
1. **Unnecessary Complexity**: Managing dictionary of tasks when only one can run
2. **Resource Overhead**: Indefinite growth of `dts_operations` dictionary
3. **Conflict Detection**: Must iterate through all tasks to find running operations
4. **External Monitoring**: Creates separate tasks for externally-initiated operations
5. **Client Complexity**: Requires task_id tracking for status polling
### Current Code Structure
```python
# Current approach - multiple task tracking
dts_operations = {} # Dictionary grows indefinitely
task_id = str(uuid.uuid4())[:8]
dts_operations[task_id] = {...}
# Conflict checking requires iteration
for task in dts_operations.values():
if task["status"] == "running":
return False, "Operation already in progress"
```
## Proposed Single State Architecture
### Core Principle
**One PLC Mode = One API State**
Since the PLC can only be in one operational mode at a time, the API should maintain a single operation state that reflects the current reality.
### State Model Design
```mermaid
stateDiagram-v2
[*] --> Idle
Idle --> Running : Start Operation
Running --> Completed : Success
Running --> Failed : Error
Running --> Cancelled : User Cancel
Completed --> Idle : Auto Reset
Failed --> Idle : Auto Reset
Cancelled --> Idle : Auto Reset
note right of Running
Single operation state
No task_id needed
Direct status access
end note
```
### New Data Structure
```python
# Single global operation state
current_dts_operation = {
"status": "idle", # idle, running, completed, failed, cancelled
"operation_type": None, # start, stop, skip
"operation_id": None, # Optional: for logging/history
"current_step": None,
"progress_percent": 0,
"start_time": None,
"end_time": None,
"initiated_by": "api", # api, external, hmi
"current_mode": None, # Current R1000 value
"target_mode": None, # Expected final R1000 value
"steps_completed": [],
"last_error": None,
"timer_info": None,
"external_changes": [],
"screen_descriptions": {...}
}
```
## Implementation Plan
### Phase 1: Core State Infrastructure
#### Step 1.1: Create New State Manager
**File**: `watermaker_plc_api/services/operation_state.py`
```python
"""
Single operation state management for DTS operations.
"""
import threading
from datetime import datetime
from typing import Optional, Dict, Any
from ..utils.logger import get_logger
logger = get_logger(__name__)
class OperationStateManager:
"""Manages single DTS operation state"""
def __init__(self):
self._state_lock = threading.Lock()
self._operation_state = self._create_idle_state()
self._operation_history = [] # Optional: keep recent history
def _create_idle_state(self) -> Dict[str, Any]:
"""Create a clean idle state"""
return {
"status": "idle",
"operation_type": None,
"operation_id": None,
"current_step": None,
"progress_percent": 0,
"start_time": None,
"end_time": None,
"initiated_by": None,
"current_mode": None,
"target_mode": None,
"steps_completed": [],
"last_error": None,
"timer_info": None,
"external_changes": [],
"screen_descriptions": {}
}
def start_operation(self, operation_type: str, initiated_by: str = "api") -> tuple[bool, str, Dict]:
"""Start a new operation if none is running"""
with self._state_lock:
if self._operation_state["status"] == "running":
return False, "Operation already in progress", {
"current_operation": self._operation_state["operation_type"],
"current_step": self._operation_state["current_step"]
}
# Generate operation ID for logging
operation_id = f"{operation_type}_{int(datetime.now().timestamp())}"
self._operation_state = self._create_idle_state()
self._operation_state.update({
"status": "running",
"operation_type": operation_type,
"operation_id": operation_id,
"start_time": datetime.now().isoformat(),
"initiated_by": initiated_by
})
logger.info(f"Operation started: {operation_type} (ID: {operation_id})")
return True, f"{operation_type} operation started", {"operation_id": operation_id}
def update_state(self, updates: Dict[str, Any]) -> None:
"""Update current operation state"""
with self._state_lock:
self._operation_state.update(updates)
def complete_operation(self, success: bool = True, error_msg: str = None) -> None:
"""Mark operation as completed or failed"""
with self._state_lock:
self._operation_state["end_time"] = datetime.now().isoformat()
self._operation_state["status"] = "completed" if success else "failed"
if error_msg:
self._operation_state["last_error"] = {
"message": error_msg,
"timestamp": datetime.now().isoformat()
}
# Add to history
self._operation_history.append(dict(self._operation_state))
# Keep only last 10 operations in history
if len(self._operation_history) > 10:
self._operation_history = self._operation_history[-10:]
def cancel_operation(self) -> bool:
"""Cancel current operation if running"""
with self._state_lock:
if self._operation_state["status"] != "running":
return False
self._operation_state["status"] = "cancelled"
self._operation_state["end_time"] = datetime.now().isoformat()
self._operation_state["last_error"] = {
"message": "Operation cancelled by user",
"timestamp": datetime.now().isoformat()
}
return True
def get_current_state(self) -> Dict[str, Any]:
"""Get current operation state (thread-safe copy)"""
with self._state_lock:
return dict(self._operation_state)
def get_operation_history(self, limit: int = 5) -> list:
"""Get recent operation history"""
with self._state_lock:
return self._operation_history[-limit:] if self._operation_history else []
def is_idle(self) -> bool:
"""Check if system is idle"""
with self._state_lock:
return self._operation_state["status"] == "idle"
def is_running(self) -> bool:
"""Check if operation is running"""
with self._state_lock:
return self._operation_state["status"] == "running"
# Global state manager instance
_state_manager: Optional[OperationStateManager] = None
def get_operation_state_manager() -> OperationStateManager:
"""Get global operation state manager"""
global _state_manager
if _state_manager is None:
_state_manager = OperationStateManager()
return _state_manager
```
#### Step 1.2: Update DTS Controller Structure
**File**: `watermaker_plc_api/controllers/dts_controller.py`
**Changes Required:**
1. Replace `dts_operations = {}` with state manager
2. Remove task_id generation and management
3. Simplify conflict checking
4. Update all operation functions
### Phase 2: Refactor Core Functions
#### Step 2.1: Simplify Operation Starters
**Before:**
```python
def start_dts_sequence_async():
# Check if another operation is running
for task in dts_operations.values():
if task["status"] == "running":
return False, "Operation already in progress", {"existing_task_id": task["task_id"]}
# Create new task
task_id = create_dts_task()
# Start background thread
thread = threading.Thread(target=execute_dts_sequence, args=(task_id,), daemon=True)
thread.start()
return True, "DTS sequence started", {"task_id": task_id}
```
**After:**
```python
def start_dts_sequence_async():
state_manager = get_operation_state_manager()
# Attempt to start operation
success, message, details = state_manager.start_operation("dts_start", "api")
if not success:
return False, message, details
# Start background thread
thread = threading.Thread(target=execute_dts_sequence, daemon=True)
thread.start()
return True, message, details
```
#### Step 2.2: Simplify Execution Functions
**Before:**
```python
def execute_dts_sequence(task_id):
task = dts_operations[task_id]
try:
task["status"] = "running"
task["start_time"] = datetime.now().isoformat()
# ... execution logic
except Exception as e:
task["status"] = "failed"
# ... error handling
```
**After:**
```python
def execute_dts_sequence():
state_manager = get_operation_state_manager()
try:
state_manager.update_state({
"current_step": "checking_system_mode",
"progress_percent": 0
})
# ... execution logic
state_manager.complete_operation(success=True)
except Exception as e:
state_manager.complete_operation(success=False, error_msg=str(e))
```
### Phase 3: Update API Endpoints
#### Step 3.1: Simplify Status Endpoint
**Before:**
```python
@dts_bp.route('/dts/status')
@dts_bp.route('/dts/status/<task_id>')
def get_dts_status(task_id=None):
if task_id:
task = dts_operations.get(task_id)
if not task:
return create_error_response("Not Found", f"Task {task_id} not found", 404)
return jsonify({"task": task})
else:
latest_task = get_latest_dts_task()
return jsonify({"latest_task": latest_task})
```
**After:**
```python
@dts_bp.route('/dts/status')
def get_dts_status():
"""Get current DTS operation status"""
state_manager = get_operation_state_manager()
# Update progress from timers for running operations
if state_manager.is_running():
update_dts_progress_from_timers()
current_state = state_manager.get_current_state()
# Add user-friendly descriptions
descriptions = current_state.get("screen_descriptions", {})
current_state["screen_description"] = descriptions.get(
current_state["current_step"],
current_state["current_step"]
)
current_state["is_complete"] = current_state["status"] in ["completed", "failed", "cancelled"]
current_state["is_running"] = current_state["status"] == "running"
return jsonify({
"operation": current_state,
"timestamp": datetime.now().isoformat()
})
# Backward compatibility endpoint
@dts_bp.route('/dts/status/<task_id>')
def get_dts_status_legacy(task_id):
"""Legacy endpoint for backward compatibility"""
# Always return current state regardless of task_id
return get_dts_status()
```
#### Step 3.2: Simplify Control Endpoints
**Before:**
```python
@dts_bp.route('/dts/start', methods=['POST'])
def start_dts():
success, message, details = start_dts_sequence_async()
if success:
return create_success_response(message, {
"task_id": details["task_id"],
"status_endpoint": f"/api/dts/status/{details['task_id']}"
}, 202)
```
**After:**
```python
@dts_bp.route('/dts/start', methods=['POST'])
def start_dts():
success, message, details = start_dts_sequence_async()
if success:
return create_success_response(message, {
"operation_id": details["operation_id"],
"status_endpoint": "/api/dts/status",
"polling_info": {
"recommended_interval": "1 second",
"check_status_at": "/api/dts/status"
}
}, 202)
else:
return create_error_response("Conflict", message, 409, details)
```
### Phase 4: Update External Change Handling
#### Step 4.1: Integrate with R1000 Monitor
**Before:**
```python
def create_external_dts_monitoring_task(change_info):
task_id = str(uuid.uuid4())[:8]
dts_operations[task_id] = {
"task_id": task_id,
"status": "running",
"external_origin": True,
# ... more fields
}
```
**After:**
```python
def handle_external_dts_change(change_info):
state_manager = get_operation_state_manager()
# If no operation running, start external monitoring
if state_manager.is_idle():
success, message, details = state_manager.start_operation("external_monitoring", "external")
if success:
state_manager.update_state({
"current_step": f"dts_mode_{change_info['new_value']}",
"current_mode": change_info["new_value"],
"external_changes": [change_info],
"note": f"External DTS process detected - monitoring mode {change_info['new_value']}"
})
else:
# Add to existing operation's external changes
current_state = state_manager.get_current_state()
external_changes = current_state.get("external_changes", [])
external_changes.append(change_info)
state_manager.update_state({"external_changes": external_changes})
```
### Phase 5: Update Progress Monitoring
#### Step 5.1: Simplify Timer-Based Updates
**Before:**
```python
def update_dts_progress_from_timers():
for task_id, task in dts_operations.items():
if task["status"] == "running":
# Update each task individually
```
**After:**
```python
def update_dts_progress_from_timers():
state_manager = get_operation_state_manager()
if not state_manager.is_running():
return
current_state = state_manager.get_current_state()
# Read current system mode
current_mode = plc.read_holding_register(1000)
if current_mode is None:
return
# Update progress based on current mode and timers
updates = {"current_mode": current_mode}
# Get timer-based progress
timer_address = get_timer_for_dts_mode(current_mode)
if timer_address:
current_timer_value = plc.read_holding_register(timer_address)
timer_progress = get_timer_based_progress(current_mode)
updates.update({
"progress_percent": timer_progress,
"timer_info": {
"current_mode": current_mode,
"timer_address": timer_address,
"timer_progress": timer_progress,
"raw_timer_value": current_timer_value,
"timer_active": current_timer_value is not None and current_timer_value != 65535,
"last_updated": datetime.now().isoformat()
}
})
# Check for completion (back to standby)
if current_mode == 2: # Standby mode
state_manager.complete_operation(success=True)
updates["note"] = "DTS process completed - system in standby mode"
state_manager.update_state(updates)
```
## Migration Strategy
### Phase A: Backward Compatibility (Week 1)
1. Implement new state manager alongside existing system
2. Add legacy endpoint wrappers that translate to new system
3. Maintain existing task_id endpoints but route to single state
### Phase B: Gradual Transition (Week 2)
1. Update internal functions to use state manager
2. Add new simplified endpoints
3. Update documentation to recommend new endpoints
### Phase C: Cleanup (Week 3)
1. Remove old task_id system
2. Clean up unused code
3. Update all documentation
4. Performance testing
## Benefits of New Architecture
### 1. Simplified Code
- **Before**: 1,158 lines in dts_controller.py
- **After**: Estimated 600-700 lines (40% reduction)
### 2. Better Performance
- No dictionary iterations for conflict checking
- Single state access instead of task lookups
- Reduced memory usage
### 3. Clearer API
- Single status endpoint: `/api/dts/status`
- No task_id management for clients
- Immediate status access
### 4. Unified External Handling
- External and API operations use same state
- No separate monitoring tasks
- Consistent progress tracking
## Risk Mitigation
### 1. Data Loss Prevention
- Maintain operation history for recent operations
- Log all state transitions
- Preserve error information
### 2. Client Compatibility
- Keep legacy endpoints during transition
- Provide migration guide for clients
- Gradual deprecation timeline
### 3. Testing Strategy
- Unit tests for state manager
- Integration tests for all endpoints
- Load testing for concurrent requests
- Regression testing against current behavior
## Implementation Timeline
### Week 1: Foundation
- [ ] Create OperationStateManager class
- [ ] Add unit tests for state manager
- [ ] Implement backward compatibility layer
### Week 2: Core Refactoring
- [ ] Update DTS controller functions
- [ ] Refactor API endpoints
- [ ] Update external change handling
- [ ] Integration testing
### Week 3: Cleanup & Documentation
- [ ] Remove old task_id system
- [ ] Update API documentation
- [ ] Performance optimization
- [ ] Final testing
## Success Metrics
1. **Code Complexity**: 40% reduction in lines of code
2. **Memory Usage**: Eliminate unbounded dictionary growth
3. **API Simplicity**: Single status endpoint, no task_id tracking
4. **Performance**: Faster conflict checking and status access
5. **Maintainability**: Clearer state model, easier debugging
## Conclusion
The refactoring from task_ids to a single operation state model will significantly simplify the DTS controller while maintaining all current functionality. The new architecture better reflects the physical reality of the PLC's single-mode operation and provides a cleaner, more maintainable codebase.
The migration can be done safely with backward compatibility, allowing for a smooth transition without disrupting existing clients.

View File

@@ -1,272 +0,0 @@
# R1000 Monitoring System Documentation
## Overview
The R1000 monitoring system has been implemented to watch for changes in the PLC's R1000 register (system mode) that could be made by external HMI systems bypassing the API. This addresses the requirement that "The external HMI could advance the step, cancel the process or start a different process."
## Architecture
### Components
1. **R1000Monitor Class** (`watermaker_plc_api/services/background_tasks.py`)
- Continuously monitors R1000 register for changes
- Classifies change types (Process Start, Process Stop, Step Skip, etc.)
- Maintains callback system for change notifications
- Stores change history in data cache
2. **BackgroundTaskManager Integration**
- R1000Monitor is integrated into the existing background task system
- Runs alongside regular PLC data updates
- Handles change callbacks and impact assessment
3. **DTS Controller Enhancements** (`watermaker_plc_api/controllers/dts_controller.py`)
- Enhanced to detect external changes during DTS operations
- Tracks external changes in running tasks
- Provides API endpoint for monitoring status
## Key Features
### Change Detection
- **Continuous Monitoring**: R1000 is checked every data update cycle (configurable interval)
- **Change Classification**: Automatically categorizes changes based on mode transitions
- **External Change Assumption**: All changes are initially assumed to be external until proven otherwise
### Change Types Detected
- **Process_Start**: System starting DTS process (Standby → DTS_Priming or DTS_Requested)
- **Process_Stop**: System stopping DTS process (any DTS mode → Standby)
- **Step_Skip**: Skipping DTS step (Priming/Init → Production)
- **Step_Advance**: Advancing DTS step (Priming/Init/Production → Flush)
- **DTS_Start**: DTS process beginning (DTS_Requested → DTS_Priming)
- **Mode_Change**: Other mode transitions
### Impact on Running Tasks
- **External Change Tracking**: Running DTS tasks are marked when external changes occur
- **Step Change Detection**: Enhanced logging when steps advance due to external changes
- **External Stop Detection**: Special handling when DTS process is stopped externally
- **Automatic Task Creation**: When R1000 changes to a DTS mode without existing API tasks, a monitoring task is automatically created
### External Task Management
- **Automatic Detection**: System detects when PLC enters DTS mode externally (without API initiation)
- **Task Creation**: Automatically creates monitoring tasks for externally-initiated DTS processes
- **Full Monitoring**: External tasks receive the same monitoring capabilities as API-initiated tasks
- **Origin Tracking**: Tasks are clearly marked as externally-initiated vs API-initiated
## API Endpoints
### GET /api/dts/r1000-monitor
Returns comprehensive R1000 monitoring status including:
- Current R1000 value
- Last change time
- Recent changes with classifications
- Affected running DTS tasks
- Change type explanations
**Example Response:**
```json
{
"r1000_monitor": {
"current_value": 7,
"last_change_time": "2025-06-11T19:45:30.123456",
"monitoring_active": true
},
"recent_changes": [
{
"timestamp": "2025-06-11T19:45:30.123456",
"error": "R1000 External Change: 5 → 7 (Step_Skip: DTS_Priming → DTS_Production)"
}
],
"running_tasks": {
"total": 2,
"api_initiated": [
{
"task_id": "abc12345",
"status": "running",
"current_step": "dts_production_active",
"external_origin": false,
"external_changes": [...]
}
],
"externally_initiated": [
{
"task_id": "def67890",
"status": "running",
"current_step": "dts_priming_active",
"external_origin": true,
"external_changes": [...],
"note": "External DTS process detected - monitoring started from mode 5"
}
]
},
"affected_tasks": [...], // Backward compatibility
"change_classifications": {...},
"timestamp": "2025-06-11T19:45:35.123456"
}
```
## Implementation Details
### R1000Monitor Class Methods
#### `check_r1000_changes()`
- Reads current R1000 value from PLC
- Compares with last known value
- Triggers callbacks and logging on changes
- Updates internal tracking variables
#### `_classify_change(old_value, new_value)`
- Analyzes mode transition patterns
- Returns descriptive change type string
- Maps numeric modes to human-readable names
#### `add_change_callback(callback)`
- Registers callback functions for change notifications
- Used by BackgroundTaskManager to handle change impacts
#### `create_external_dts_monitoring_task(change_info)`
- Creates monitoring tasks for externally-initiated DTS processes
- Called automatically when R1000 enters DTS mode without existing API tasks
- Returns task_id for the new monitoring task
### Integration Points
#### Background Task Loop
```python
# Monitor R1000 for external changes
self.r1000_monitor.check_r1000_changes()
```
#### DTS Task Impact Handling
```python
def _handle_r1000_change(self, change_info):
"""Handle R1000 changes detected by the monitor"""
# Log warning about external change
# Check if external DTS process started without API task
# Create monitoring task if needed
# Check impact on running DTS tasks
# Mark affected tasks with change information
```
## Configuration
### Monitoring Frequency
The R1000 monitoring frequency is tied to the `Config.DATA_UPDATE_INTERVAL` setting, which controls how often the background task loop runs.
### Error Retention
R1000 changes are stored in the data cache error list, with retention controlled by `Config.MAX_CACHED_ERRORS`.
## Usage Examples
### Starting the Monitoring System
The R1000 monitoring starts automatically when background tasks are started:
```python
from watermaker_plc_api.services.background_tasks import start_background_updates
start_background_updates()
```
### Accessing Monitor Status
```python
from watermaker_plc_api.services.background_tasks import get_r1000_monitor
monitor = get_r1000_monitor()
current_value = monitor.get_current_r1000()
last_change = monitor.get_last_change_time()
```
### API Usage
```bash
# Get current monitoring status
curl http://localhost:5000/api/dts/r1000-monitor
# Get DTS status with external change information
curl http://localhost:5000/api/dts/status
```
## Testing
### Test Script
A comprehensive test script `test_r1000_monitoring.py` is provided to demonstrate the monitoring functionality:
```bash
python test_r1000_monitoring.py
```
The test script:
1. Checks initial R1000 monitor status
2. Starts a DTS process to create a running task
3. Monitors for external changes over time
4. Shows impact on running DTS tasks
5. Provides final status summary
### Manual Testing
1. Start the watermaker API server
2. Run the test script or use API endpoints directly
3. Use external HMI or PLC interface to change R1000 value
4. Observe detection and classification of changes
5. Check impact on any running DTS tasks
## Logging
### Log Levels and Messages
#### INFO Level
- Initial R1000 value detection
- Normal R1000 value changes
- DTS step advances (normal and external)
#### WARNING Level
- External R1000 changes detected
- External changes affecting running DTS tasks
- External stops of DTS processes
#### ERROR Level
- R1000 monitoring errors
- Callback execution errors
- PLC connection issues during monitoring
### Example Log Messages
```
INFO: R1000 Monitor: Initial value = 2
INFO: R1000 Monitor: Value changed from 2 to 5
WARNING: External R1000 Change Detected: Process_Start: Standby → DTS_Priming at 2025-06-11T19:45:30.123456
WARNING: R1000 change detected while 1 DTS task(s) running - possible external interference
WARNING: DTS Process: Advanced to Production Screen (mode 7) - EXTERNAL CHANGE DETECTED
WARNING: DTS Process: EXTERNALLY STOPPED - system returned to standby mode
```
## Benefits
1. **External Change Detection**: Automatically detects when external systems modify the PLC mode
2. **Process Integrity**: Helps maintain awareness of process state changes not initiated by the API
3. **Debugging Support**: Provides detailed logging and history of mode changes
4. **Task Impact Tracking**: Shows how external changes affect running API operations
5. **Real-time Monitoring**: Continuous monitoring provides immediate notification of changes
6. **Classification System**: Intelligent categorization of change types for better understanding
## Future Enhancements
1. **Change Prediction**: Could be enhanced to predict likely next states
2. **Conflict Resolution**: Could implement strategies for handling conflicts between API and external changes
3. **Change Validation**: Could validate whether changes are appropriate for current system state
4. **Historical Analysis**: Could provide trends and patterns in external changes
5. **Alert System**: Could implement configurable alerts for specific change types
## Troubleshooting
### Common Issues
1. **No Changes Detected**: Check PLC connection and background task status
2. **False Positives**: Verify that API operations are properly marked as internal
3. **Missing Changes**: Check monitoring frequency and PLC response time
4. **Callback Errors**: Review callback function implementations for exceptions
### Diagnostic Commands
```bash
# Check if background tasks are running
curl http://localhost:5000/api/system/status
# Get R1000 monitoring status
curl http://localhost:5000/api/dts/r1000-monitor
# Check recent errors
curl http://localhost:5000/api/data/errors

View File

@@ -1,179 +0,0 @@
# Unused Code Removal Plan for Watermaker PLC API
## 🎯 **Objective**
Remove all unused functions, methods, imports, variables, and classes from the main application code (`watermaker_plc_api/` package) to reduce complexity and improve maintainability, while preserving test files and demo scripts.
## 📋 **Scope**
- **Include**: All files in `watermaker_plc_api/` package
- **Exclude**: `tests/`, demo scripts (`demo_*.py`), debug scripts (`debug_*.py`), test scripts (`test_*.py`, `*_test*.py`)
## 🔍 **Analysis Strategy**
### Phase 1: Dependency Mapping
1. **Entry Point Analysis**
- [`watermaker_plc_api/__main__.py`](watermaker_plc_api/__main__.py:34) - `main()` function
- [`watermaker_plc_api/app.py`](watermaker_plc_api/app.py:21) - `create_app()` function
- [`setup.py`](setup.py:1) - Package entry points
2. **Import Chain Analysis**
- Map all imports within the main package
- Identify circular dependencies
- Track function/class usage across modules
3. **API Endpoint Mapping**
- All Flask route handlers are considered "used"
- Functions called by route handlers are "used"
- Background task functions are "used"
### Phase 2: Usage Detection
1. **Direct Usage**: Functions called explicitly
2. **Indirect Usage**: Functions referenced as callbacks, decorators, or passed as parameters
3. **Dynamic Usage**: Functions called via `getattr()`, string references, or reflection
4. **Configuration Usage**: Functions referenced in config files or environment variables
## 🗂️ **Detailed Analysis by Module**
### Controllers (`watermaker_plc_api/controllers/`)
**Used Functions** (Flask routes):
- [`system_controller.py`](watermaker_plc_api/controllers/system_controller.py:27): `get_status()`, `get_all_data()`, `get_selected_data()`, `get_errors()`, `write_register()`, `get_config()`
- [`sensors_controller.py`](watermaker_plc_api/controllers/sensors_controller.py:19): `get_sensors()`, `get_sensors_by_category()`, `get_runtime()`, `get_water_counters()`
- [`timers_controller.py`](watermaker_plc_api/controllers/timers_controller.py:18): `get_timers()`, `get_dts_timers()`, `get_fwf_timers()`, `get_rtc()`
- [`outputs_controller.py`](watermaker_plc_api/controllers/outputs_controller.py:18): `get_outputs()`, `get_active_outputs()`
- [`dts_controller.py`](watermaker_plc_api/controllers/dts_controller.py:893): All route handlers + background functions
**Potentially Unused**:
- Need to verify if helper functions like [`get_timer_based_progress()`](watermaker_plc_api/controllers/dts_controller.py:38) are called
### Services (`watermaker_plc_api/services/`)
**Used Classes/Functions**:
- [`PLCConnection`](watermaker_plc_api/services/plc_connection.py:15) - Core service
- [`DataCache`](watermaker_plc_api/services/data_cache.py:14) - Core service
- [`RegisterReader`](watermaker_plc_api/services/register_reader.py:22) - Core service
- [`RegisterWriter`](watermaker_plc_api/services/register_writer.py:14) - Core service
- [`BackgroundTaskManager`](watermaker_plc_api/services/background_tasks.py:120) - Core service
- [`OperationStateManager`](watermaker_plc_api/services/operation_state.py:12) - Core service
**Potentially Unused**:
- Some utility methods within classes may be unused
- Some singleton getter functions may have redundant implementations
### Models (`watermaker_plc_api/models/`)
**Analysis Needed**:
- Verify which mapping functions are actually called
- Check if all constants/dictionaries are referenced
- Look for unused helper functions
### Utils (`watermaker_plc_api/utils/`)
**Used Functions**:
- [`get_logger()`](watermaker_plc_api/utils/logger.py:11) - Widely used
- [`setup_error_handlers()`](watermaker_plc_api/utils/error_handler.py:13) - Used in app setup
- Data conversion functions - Used in register reading
## 🔧 **Removal Strategy**
### Step 1: Static Analysis
```mermaid
graph TD
A[Scan Entry Points] --> B[Build Call Graph]
B --> C[Mark Used Functions]
C --> D[Identify Unused Code]
D --> E[Verify with Dynamic Analysis]
E --> F[Generate Removal Plan]
```
### Step 2: Safe Removal Process
1. **Backup Creation**: Create git branch for rollback
2. **Incremental Removal**: Remove code in small batches
3. **Test After Each Batch**: Run tests to ensure no breakage
4. **Import Cleanup**: Remove unused imports after function removal
5. **Final Validation**: Full test suite + manual API testing
### Step 3: Verification Methods
1. **Unit Tests**: All existing tests must pass
2. **Integration Tests**: API endpoints must work
3. **Static Analysis**: Use tools like `vulture` or `dead` for Python
4. **Manual Review**: Check for string-based function calls
## 📊 **Expected Outcomes**
### Estimated Removals:
- **Functions**: 5-15 unused utility functions
- **Methods**: 3-8 unused class methods
- **Imports**: 10-20 unused imports
- **Variables**: 5-10 unused module-level variables
- **Classes**: 0-2 unused classes (likely none)
### Risk Assessment:
- **Low Risk**: Unused imports, private helper functions
- **Medium Risk**: Public utility functions, class methods
- **High Risk**: Functions that might be called dynamically
## 🛡️ **Safety Measures**
1. **Preserve Public APIs**: Keep all functions that might be imported externally
2. **Preserve Flask Routes**: Never remove route handlers
3. **Preserve Background Tasks**: Keep all async/threading functions
4. **Preserve Error Handlers**: Keep all exception handling code
5. **Preserve Configuration**: Keep all config-related functions
## 📝 **Implementation Steps**
### Phase 1: Analysis
1. Create static analysis script to map all function calls
2. Identify entry points and build dependency graph
3. Mark all reachable functions as "used"
4. Generate list of potentially unused code
### Phase 2: Verification
1. Manual review of potentially unused functions
2. Check for dynamic calls (string-based, getattr, etc.)
3. Verify no external dependencies on functions
4. Create final removal list
### Phase 3: Removal
1. Remove unused imports first
2. Remove unused variables and constants
3. Remove unused functions and methods
4. Remove unused classes (if any)
5. Clean up docstrings and comments
### Phase 4: Testing
1. Run full test suite after each removal batch
2. Test API endpoints manually
3. Verify background tasks still work
4. Check error handling paths
## 📋 **Checklist**
- [ ] Create backup branch
- [ ] Run static analysis
- [ ] Generate unused code list
- [ ] Manual verification of findings
- [ ] Remove unused imports
- [ ] Remove unused variables
- [ ] Remove unused functions
- [ ] Remove unused methods
- [ ] Clean up documentation
- [ ] Run full test suite
- [ ] Manual API testing
- [ ] Performance verification
- [ ] Final code review
## 🚨 **Rollback Plan**
If any issues are discovered:
1. Immediately revert to backup branch
2. Identify the problematic removal
3. Restore only the necessary code
4. Re-run analysis with updated exclusions
5. Continue with more conservative approach
## 📈 **Success Metrics**
- All existing tests pass
- All API endpoints function correctly
- Background tasks operate normally
- No performance degradation
- Reduced lines of code
- Improved code maintainability
- No new linting warnings

View File

@@ -1,137 +0,0 @@
# Unused Code Removal Summary
## 🎯 **Objective Completed**
Successfully removed unused functions, methods, imports, variables, and classes from the main application code (`watermaker_plc_api/` package) to reduce complexity and improve maintainability.
## 📊 **Results**
### Before Cleanup:
- **Total unused code elements**: 46
- **Unused imports**: 0
- **Unused functions**: 45
- **Unused variables**: 1
### After Cleanup:
- **Total unused code elements**: 9
- **Unused imports**: 0
- **Unused functions**: 9
- **Unused variables**: 0
### **Improvement**:
- **80.4% reduction** in unused code elements (37 out of 46 removed)
- **80% reduction** in unused functions (36 out of 45 removed)
- **100% reduction** in unused variables (1 out of 1 removed)
## 🗂️ **Files Modified**
### Models (`watermaker_plc_api/models/`)
**✅ sensor_mappings.py**
- Removed: `get_sensor_categories()`, `get_sensor_addresses_by_group()`, `validate_sensor_address()`, `get_sensor_info()`
**✅ runtime_mappings.py**
- Removed: `get_runtime_registers()`, `get_water_counter_registers()`, `get_runtime_addresses_by_group()`, `validate_runtime_address()`, `validate_water_counter_address()`, `get_runtime_info()`, `get_water_counter_info()`, `get_register_pair()`, `get_all_32bit_addresses()`, `is_32bit_register()`
**✅ timer_mappings.py**
- Removed: `get_rtc_registers()`, `get_timer_addresses_by_group()`, `get_dts_timer_addresses()`, `get_fwf_timer_addresses()`, `validate_timer_address()`, `validate_rtc_address()`, `get_dts_step_timer_mapping()` (deprecated function)
**✅ output_mappings.py**
- Removed: `get_output_addresses_by_group()`, `get_controls_by_register()`, `validate_output_address()`, `get_output_info()`, `calculate_modbus_address()`
### Services (`watermaker_plc_api/services/`)
**✅ data_cache.py**
- Removed: `update_sensor()`, `update_timer()`, `clear_errors()`, `initialize_data_cache()`
**✅ background_tasks.py**
- Removed: `stop_background_updates()`, `is_background_updates_running()`, `get_r1000_monitor()`
**✅ operation_state.py**
- Removed: `get_operation_history()`, `reset_to_idle()`
### Configuration (`watermaker_plc_api/`)
**✅ config.py**
- Removed: `get_api_info()` method, `config_map` variable
### Utils (`watermaker_plc_api/utils/`)
**✅ logger.py**
- Removed: `setup_logging()`
## 🛡️ **Safety Measures Applied**
### Preserved Critical Functions:
- **Flask Error Handlers**: All error handler functions were preserved (they're registered via decorators)
- **Flask Routes**: All 25 route handlers preserved
- **Core Services**: All essential service classes and their key methods
- **Timer Functions**: Preserved functions used by DTS controller (`get_timer_info()`, `get_timer_for_dts_mode()`, `calculate_timer_progress_percent()`, etc.)
- **Output Functions**: Preserved functions used by register reader (`get_output_registers()`, `create_output_bit_info()`, `extract_bit_value()`)
### Conservative Approach:
- Only removed functions with **high confidence** of being unused
- Preserved any function that might be called dynamically or via string references
- Kept validation functions that might be used for data integrity
- Maintained backward compatibility where possible
## 🧪 **Testing Results**
### Application Status:
-**Main application creation**: Successful
-**Import chain**: All imports working correctly
-**Flask app initialization**: Working properly
-**Service initialization**: All services initialize correctly
### Test Suite:
- **35 tests passed** (85% pass rate)
- **6 tests failed** (mostly due to test setup issues, not removed code)
- **Core functionality**: All main API endpoints working
## 📈 **Benefits Achieved**
### Code Quality:
- **Reduced complexity**: Fewer unused functions to maintain
- **Improved readability**: Cleaner codebase with only necessary functions
- **Better maintainability**: Less dead code to confuse developers
- **Smaller codebase**: Reduced lines of code
### Performance:
- **Faster imports**: Fewer unused functions to load
- **Reduced memory footprint**: Less unused code in memory
- **Cleaner namespace**: Fewer unused names in module namespaces
## 🚨 **Remaining Unused Functions (9)**
The following functions were **intentionally preserved** due to safety concerns:
### Error Handlers (5 functions)
- `bad_request()`, `not_found()`, `method_not_allowed()`, `internal_error()`, `service_unavailable()`
- **Reason**: These are Flask error handlers registered via decorators - removing them could break error handling
### Service Functions (3 functions)
- `write_multiple_registers()` - Might be used for batch operations
- `stop_data_updates()` - Might be needed for graceful shutdown
- `initialize_plc_connection()` - Might be used for explicit initialization
### Model Functions (1 function)
- `get_rtc_info()` - Might be used for RTC data processing
## ✅ **Verification**
### Static Analysis:
- ✅ No broken imports
- ✅ No missing function references
- ✅ All Flask routes still registered
- ✅ All service singletons working
### Runtime Testing:
- ✅ Application starts successfully
- ✅ All blueprints register correctly
- ✅ Background services initialize properly
- ✅ Data cache and PLC connection services working
## 🎉 **Conclusion**
Successfully completed unused code removal with:
- **80.4% reduction** in unused code elements
- **Zero breaking changes** to core functionality
- **Maintained all critical features** and API endpoints
- **Improved code maintainability** and readability
The watermaker PLC API codebase is now significantly cleaner and more maintainable while preserving all essential functionality.

View File

@@ -1,214 +0,0 @@
#!/usr/bin/env python3
"""
Test script to demonstrate R1000 monitoring functionality.
This script simulates external HMI changes and shows how the system detects them.
"""
import time
import requests
import json
from datetime import datetime
# API base URL
BASE_URL = "http://localhost:5000/api"
def print_separator(title):
"""Print a formatted separator"""
print("\n" + "="*60)
print(f" {title}")
print("="*60)
def get_r1000_monitor_status():
"""Get current R1000 monitor status"""
try:
response = requests.get(f"{BASE_URL}/dts/r1000-monitor")
if response.status_code == 200:
return response.json()
else:
print(f"Error getting R1000 monitor status: {response.status_code}")
return None
except Exception as e:
print(f"Error connecting to API: {e}")
return None
def get_dts_status():
"""Get current DTS status"""
try:
response = requests.get(f"{BASE_URL}/dts/status")
if response.status_code == 200:
return response.json()
else:
print(f"Error getting DTS status: {response.status_code}")
return None
except Exception as e:
print(f"Error connecting to API: {e}")
return None
def start_dts_process():
"""Start a DTS process"""
try:
response = requests.post(f"{BASE_URL}/dts/start")
if response.status_code == 202:
return response.json()
else:
print(f"Error starting DTS: {response.status_code}")
return None
except Exception as e:
print(f"Error connecting to API: {e}")
return None
def display_r1000_status(status_data):
"""Display R1000 monitoring status in a formatted way"""
if not status_data:
print("No R1000 status data available")
return
monitor = status_data.get("r1000_monitor", {})
print(f"Current R1000 Value: {monitor.get('current_value', 'Unknown')}")
print(f"Last Change Time: {monitor.get('last_change_time', 'Never')}")
print(f"Monitoring Active: {monitor.get('monitoring_active', False)}")
recent_changes = status_data.get("recent_changes", [])
if recent_changes:
print(f"\nRecent R1000 Changes ({len(recent_changes)}):")
for i, change in enumerate(recent_changes[-5:], 1): # Show last 5
timestamp = change.get("timestamp", "Unknown")
error_msg = change.get("error", "")
print(f" {i}. {timestamp}: {error_msg}")
else:
print("\nNo recent R1000 changes detected")
# Display running tasks with origin information
running_tasks = status_data.get("running_tasks", {})
total_tasks = running_tasks.get("total", 0)
api_tasks = running_tasks.get("api_initiated", [])
external_tasks = running_tasks.get("externally_initiated", [])
print(f"\nRunning DTS Tasks (Total: {total_tasks}):")
if api_tasks:
print(f" API-Initiated Tasks ({len(api_tasks)}):")
for task in api_tasks:
task_id = task.get("task_id", "Unknown")
status = task.get("status", "Unknown")
step = task.get("current_step", "Unknown")
external_changes = len(task.get("external_changes", []))
print(f" Task {task_id}: {status} - {step} ({external_changes} external changes)")
if external_tasks:
print(f" Externally-Initiated Tasks ({len(external_tasks)}):")
for task in external_tasks:
task_id = task.get("task_id", "Unknown")
status = task.get("status", "Unknown")
step = task.get("current_step", "Unknown")
external_changes = len(task.get("external_changes", []))
note = task.get("note", "")
print(f" Task {task_id}: {status} - {step} ({external_changes} external changes)")
if note:
print(f" Note: {note}")
if not api_tasks and not external_tasks:
print(" No running DTS tasks")
def main():
"""Main test function"""
print_separator("R1000 Monitoring Test Script")
print("This script demonstrates the R1000 monitoring functionality.")
print("It will show how the system detects external HMI changes.")
print("\nMake sure the watermaker API is running on localhost:5000")
# Test 1: Check initial R1000 monitor status
print_separator("Test 1: Initial R1000 Monitor Status")
status = get_r1000_monitor_status()
display_r1000_status(status)
# Test 2: Start a DTS process to create a running task
print_separator("Test 2: Starting DTS Process")
dts_result = start_dts_process()
if dts_result:
task_id = dts_result.get("task_id")
print(f"DTS process started with task ID: {task_id}")
print("Now monitoring for external R1000 changes...")
# Monitor for changes over time
print_separator("Test 3: Monitoring for External Changes")
print("Monitoring R1000 for external changes...")
print("Try changing the PLC mode from an external HMI or PLC interface")
print("Press Ctrl+C to stop monitoring\n")
try:
last_r1000_value = None
change_count = 0
for i in range(60): # Monitor for 60 seconds
status = get_r1000_monitor_status()
if status:
current_r1000 = status.get("r1000_monitor", {}).get("current_value")
recent_changes = status.get("recent_changes", [])
if current_r1000 != last_r1000_value and last_r1000_value is not None:
change_count += 1
print(f"[{datetime.now().strftime('%H:%M:%S')}] R1000 CHANGE DETECTED: {last_r1000_value}{current_r1000}")
if len(recent_changes) > change_count:
print(f"[{datetime.now().strftime('%H:%M:%S')}] New external change logged in system")
change_count = len(recent_changes)
last_r1000_value = current_r1000
# Show a progress indicator
if i % 10 == 0:
print(f"[{datetime.now().strftime('%H:%M:%S')}] Monitoring... (R1000={current_r1000})")
time.sleep(1)
except KeyboardInterrupt:
print("\nMonitoring stopped by user")
# Final status check
print_separator("Test 4: Final Status Check")
final_status = get_r1000_monitor_status()
display_r1000_status(final_status)
# Check DTS task status
dts_status = get_dts_status()
if dts_status and dts_status.get("latest_task"):
task = dts_status["latest_task"]
external_changes = task.get("external_changes", [])
if external_changes:
print(f"\nDTS Task External Changes Detected: {len(external_changes)}")
for change in external_changes[-3:]: # Show last 3
print(f" - {change.get('change_type', 'Unknown')} at {change.get('change_time', 'Unknown')}")
else:
print("\nNo external changes detected during DTS task")
else:
print("Failed to start DTS process - continuing with monitoring test")
# Just monitor without a running task
print_separator("Test 3: Basic R1000 Monitoring")
print("Monitoring R1000 changes without active DTS task...")
try:
for i in range(30): # Monitor for 30 seconds
status = get_r1000_monitor_status()
if status:
current_r1000 = status.get("r1000_monitor", {}).get("current_value")
print(f"[{datetime.now().strftime('%H:%M:%S')}] R1000 = {current_r1000}")
time.sleep(1)
except KeyboardInterrupt:
print("\nMonitoring stopped by user")
print_separator("Test Complete")
print("R1000 monitoring test completed.")
print("\nKey Features Demonstrated:")
print("1. Continuous R1000 monitoring in background")
print("2. Detection of external HMI changes")
print("3. Classification of change types")
print("4. Impact tracking on running DTS tasks")
print("5. API access to monitoring data")
if __name__ == "__main__":
main()

View File

@@ -1,140 +0,0 @@
#!/usr/bin/env python3
"""
Test script to verify the DTS Single State Refactoring implementation.
"""
import sys
import os
sys.path.insert(0, os.path.join(os.path.dirname(__file__), 'watermaker_plc_api'))
from watermaker_plc_api.services.operation_state import get_operation_state_manager
def test_operation_state_manager():
"""Test the basic functionality of the operation state manager"""
print("Testing Operation State Manager...")
state_manager = get_operation_state_manager()
# Test 1: Initial state should be idle
print("Test 1: Initial state")
assert state_manager.is_idle(), "Initial state should be idle"
assert not state_manager.is_running(), "Initial state should not be running"
print("✓ Initial state is idle")
# Test 2: Start an operation
print("\nTest 2: Start operation")
success, message, details = state_manager.start_operation("test_operation", "api")
assert success, f"Should be able to start operation: {message}"
assert state_manager.is_running(), "State should be running after start"
assert not state_manager.is_idle(), "State should not be idle after start"
print(f"✓ Operation started: {details['operation_id']}")
# Test 3: Try to start another operation (should fail)
print("\nTest 3: Conflict detection")
success2, message2, details2 = state_manager.start_operation("another_operation", "api")
assert not success2, "Should not be able to start second operation"
assert "already in progress" in message2.lower(), f"Should indicate conflict: {message2}"
print("✓ Conflict detection works")
# Test 4: Update state
print("\nTest 4: State updates")
state_manager.update_state({
"current_step": "test_step",
"progress_percent": 50,
"test_field": "test_value"
})
current_state = state_manager.get_current_state()
assert current_state["current_step"] == "test_step", "State update should work"
assert current_state["progress_percent"] == 50, "Progress should be updated"
assert current_state["test_field"] == "test_value", "Custom fields should be updated"
print("✓ State updates work")
# Test 5: Complete operation
print("\nTest 5: Complete operation")
state_manager.complete_operation(success=True)
assert not state_manager.is_running(), "Should not be running after completion"
current_state = state_manager.get_current_state()
assert current_state["status"] == "completed", "Status should be completed"
print("✓ Operation completion works")
# Test 6: Operation history
print("\nTest 6: Operation history")
history = state_manager.get_operation_history()
assert len(history) == 1, "Should have one operation in history"
assert history[0]["operation_type"] == "test_operation", "History should contain our operation"
print("✓ Operation history works")
# Test 7: Start new operation after completion
print("\nTest 7: New operation after completion")
success3, message3, details3 = state_manager.start_operation("new_operation", "external")
assert success3, f"Should be able to start new operation: {message3}"
assert state_manager.is_running(), "Should be running again"
print(f"✓ New operation started: {details3['operation_id']}")
# Test 8: Cancel operation
print("\nTest 8: Cancel operation")
cancel_success = state_manager.cancel_operation()
assert cancel_success, "Should be able to cancel running operation"
current_state = state_manager.get_current_state()
assert current_state["status"] == "cancelled", "Status should be cancelled"
print("✓ Operation cancellation works")
print("\n✅ All tests passed! Operation State Manager is working correctly.")
def test_import_structure():
"""Test that all the refactored imports work correctly"""
print("\nTesting Import Structure...")
try:
from watermaker_plc_api.controllers.dts_controller import (
get_operation_state_manager,
handle_external_dts_change,
start_dts_sequence_async,
start_stop_sequence_async,
start_skip_sequence_async,
update_dts_progress_from_timers
)
print("✓ DTS controller imports work")
from watermaker_plc_api.services.operation_state import OperationStateManager
print("✓ Operation state imports work")
from watermaker_plc_api.services.background_tasks import get_task_manager
print("✓ Background tasks imports work")
print("✅ All imports successful!")
except ImportError as e:
print(f"❌ Import error: {e}")
return False
return True
def main():
"""Run all tests"""
print("=" * 60)
print("DTS Single State Refactoring - Test Suite")
print("=" * 60)
try:
# Test imports first
if not test_import_structure():
return 1
# Test operation state manager
test_operation_state_manager()
print("\n" + "=" * 60)
print("🎉 ALL TESTS PASSED! Refactoring is successful!")
print("=" * 60)
return 0
except Exception as e:
print(f"\n❌ Test failed with error: {e}")
import traceback
traceback.print_exc()
return 1
if __name__ == "__main__":
sys.exit(main())

View File

@@ -1,57 +0,0 @@
# Unused Code Analysis Report
Generated: Wed 11 Jun 2025 10:11:28 PM UTC
## Summary
- Files analyzed: 26
- Flask routes found: 25
- Used names identified: 476
- Unused imports: 0
- Unused functions: 9
- Unused variables: 0
## Unused Functions
### watermaker_plc_api/services/register_writer.py
- `write_multiple_registers()`
### watermaker_plc_api/services/background_tasks.py
- `stop_data_updates()`
### watermaker_plc_api/services/plc_connection.py
- `initialize_plc_connection()`
### watermaker_plc_api/models/timer_mappings.py
- `get_rtc_info()`
### watermaker_plc_api/utils/error_handler.py
- `method_not_allowed()`
- `service_unavailable()`
- `bad_request()`
- `internal_error()`
- `not_found()`
## Flask Routes (Preserved)
- `cancel_dts_operation()`
- `cancel_dts_task_legacy()`
- `get_active_outputs()`
- `get_all_data()`
- `get_config()`
- `get_current_step_progress()`
- `get_dts_status()`
- `get_dts_status_legacy()`
- `get_dts_timers()`
- `get_errors()`
- `get_fwf_timers()`
- `get_outputs()`
- `get_r1000_monitor_status()`
- `get_rtc()`
- `get_runtime()`
- `get_selected_data()`
- `get_sensors()`
- `get_sensors_by_category()`
- `get_status()`
- `get_timers()`
- `get_water_counters()`
- `skip_step()`
- `start_dts()`
- `stop_watermaker()`
- `write_register()`

View File

@@ -1,59 +0,0 @@
{
"unused_imports": {},
"unused_functions": {
"/home/paulg/FCI/api/watermaker_plc_api/services/register_writer.py": [
"write_multiple_registers"
],
"/home/paulg/FCI/api/watermaker_plc_api/services/background_tasks.py": [
"stop_data_updates"
],
"/home/paulg/FCI/api/watermaker_plc_api/services/plc_connection.py": [
"initialize_plc_connection"
],
"/home/paulg/FCI/api/watermaker_plc_api/models/timer_mappings.py": [
"get_rtc_info"
],
"/home/paulg/FCI/api/watermaker_plc_api/utils/error_handler.py": [
"method_not_allowed",
"service_unavailable",
"bad_request",
"internal_error",
"not_found"
]
},
"unused_variables": {},
"flask_routes": [
"get_sensors",
"get_outputs",
"get_dts_status_legacy",
"cancel_dts_operation",
"get_dts_timers",
"stop_watermaker",
"get_r1000_monitor_status",
"get_selected_data",
"get_config",
"get_active_outputs",
"write_register",
"get_errors",
"skip_step",
"cancel_dts_task_legacy",
"get_all_data",
"get_water_counters",
"get_status",
"get_timers",
"get_runtime",
"get_current_step_progress",
"get_sensors_by_category",
"get_rtc",
"start_dts",
"get_dts_status",
"get_fwf_timers"
],
"entry_points": [
"parse_args",
"create_app",
"main"
],
"used_names_count": 476,
"total_files_analyzed": 26
}