SeungHyeok Jang committed
Commit: 47b6af0
Parent(s): 621542c
change the pipeline logic
Files changed:
- .github/scripts/demo_controller.py +143 -0
- .github/scripts/health_check.py +408 -0
- .github/scripts/send_data_batch.py +252 -0
- .github/scripts/system_recovery.py +359 -0
- .github/scripts/trigger_issue.py +467 -0
- .github/workflows/tide_scheduler.yml +91 -100
- __pycache__/api_docs.cpython-39.pyc +0 -0
- __pycache__/api_utils.cpython-39.pyc +0 -0
- __pycache__/app.cpython-39.pyc +0 -0
- __pycache__/chatbot.cpython-39.pyc +0 -0
- __pycache__/config.cpython-39.pyc +0 -0
- __pycache__/prediction.cpython-39.pyc +0 -0
- __pycache__/supabase_utils.cpython-39.pyc +0 -0
- __pycache__/ui.cpython-39.pyc +0 -0
- automation/__init__.py +0 -0
- automation/data_collector.py +0 -176
- automation/data_processor.py +0 -279
- automation/internal_api.py +0 -174
- automation/prediction_updater.py +0 -0
- internal_api.py +754 -0
- performance_api.py +651 -0
- test.py +37 -0
.github/scripts/demo_controller.py
ADDED
@@ -0,0 +1,143 @@
#!/usr/bin/env python3
"""
Demo Controller - start/stop control for the demo
"""

import os
import sys
import requests
import json
from datetime import datetime

HF_SPACE_URL = os.environ.get('HF_SPACE_URL')
API_KEY = os.environ.get('INTERNAL_API_KEY')

def call_api(endpoint, data=None, method='POST'):
    """Call the HF Space API"""
    url = f"{HF_SPACE_URL}{endpoint}"
    headers = {
        'Authorization': f'Bearer {API_KEY}',
        'Content-Type': 'application/json'
    }

    try:
        if method == 'POST':
            response = requests.post(url, headers=headers, json=data, timeout=30)
        else:
            response = requests.get(url, headers=headers, timeout=30)

        return response.status_code, response.json()
    except Exception as e:
        return 500, {'error': str(e)}

def start_demo():
    """Start the demo"""
    print("🚀 Starting interactive demo...")

    data = {
        'action': 'start_demo',
        'timestamp': datetime.now().isoformat(),
        'demo_mode': 'interactive',
        'stations': ['DT_0001', 'DT_0002', 'DT_0003', 'DT_0008', 'DT_0017'],
        'simulation_settings': {
            'start_time': '2025-07-01T00:00:00',
            'max_speed': 10,      # up to 10x playback speed
            'auto_issues': False  # disable automatic issue injection
        }
    }

    status, response = call_api('/api/internal/demo_control', data)

    if status == 200:
        print("✅ Demo started successfully")
        print(f"📊 Session ID: {response.get('session_id', 'N/A')}")
        print(f"🏪 Active stations: {len(response.get('stations', []))}")
        print(f"⏰ Simulation start: {response.get('simulation_start_time', 'N/A')}")
        print(f"🎮 Demo mode: {response.get('demo_mode', 'N/A')}")

        # Save the success result to a file
        with open('demo_start_result.json', 'w') as f:
            json.dump(response, f, indent=2)

    else:
        print(f"❌ Failed to start demo: {status}")
        print(f"Error: {response}")
        sys.exit(1)

def stop_demo():
    """Stop the demo"""
    print("🛑 Stopping demo...")

    data = {
        'action': 'stop_demo',
        'timestamp': datetime.now().isoformat(),
        'save_results': True  # persist the results
    }

    status, response = call_api('/api/internal/demo_control', data)

    if status == 200:
        print("✅ Demo stopped successfully")
        print(f"⏱️ Session duration: {response.get('duration_minutes', 0)} minutes")
        print(f"📊 Total data points processed: {response.get('total_processed', 0)}")
        print(f"🎯 Prediction accuracy: {response.get('final_accuracy', 'N/A')}%")
        print(f"📈 Average RMSE: {response.get('average_rmse', 'N/A')} cm")

        # Save the final results
        with open('demo_final_results.json', 'w') as f:
            json.dump(response, f, indent=2)

    else:
        print(f"❌ Failed to stop demo: {status}")
        print(f"Error: {response}")

def get_demo_status():
    """Check the current demo status"""
    print("📊 Checking demo status...")

    status, response = call_api('/api/internal/demo_status', method='GET')

    if status == 200:
        demo_active = response.get('demo_active', False)

        if demo_active:
            print("🟢 Demo is currently ACTIVE")
            print(f"⏰ Running for: {response.get('running_minutes', 0)} minutes")
            print(f"📊 Current simulation time: {response.get('current_sim_time', 'N/A')}")
            print(f"🏪 Active stations: {response.get('active_stations', 0)}")
            print(f"📈 Current RMSE: {response.get('current_rmse', 'N/A')} cm")
        else:
            print("🔴 Demo is currently INACTIVE")
            print(f"🕐 Last session: {response.get('last_session_end', 'Never')}")

    else:
        print(f"❌ Failed to get demo status: {status}")
        print(f"Error: {response}")

if __name__ == '__main__':
    if len(sys.argv) < 2:
        print("Usage: python demo_controller.py [start|stop|status]")
        print("Commands:")
        print("  start  - Start interactive demo")
        print("  stop   - Stop current demo")
        print("  status - Check demo status")
        sys.exit(1)

    action = sys.argv[1]

    if not HF_SPACE_URL or not API_KEY:
        print("❌ Missing environment variables:")
        print("   HF_SPACE_URL:", "✓" if HF_SPACE_URL else "✗")
        print("   INTERNAL_API_KEY:", "✓" if API_KEY else "✗")
        sys.exit(1)

    if action == 'start':
        start_demo()
    elif action == 'stop':
        stop_demo()
    elif action == 'status':
        get_demo_status()
    else:
        print(f"❌ Unknown action: {action}")
        print("Available actions: start, stop, status")
        sys.exit(1)
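For reference when testing against a stub server, the snippet below sketches the response shape start_demo reads back from /api/internal/demo_control. Only the key names are taken from the script above; the values, and the idea that the Space returns exactly these fields, are assumptions.

# Hypothetical success response for 'start_demo' - the keys are the ones
# demo_controller.py reads via response.get(); every value is illustrative.
example_start_response = {
    'session_id': 'demo-20250701-0001',  # made-up ID format
    'stations': ['DT_0001', 'DT_0002', 'DT_0003', 'DT_0008', 'DT_0017'],
    'simulation_start_time': '2025-07-01T00:00:00',
    'demo_mode': 'interactive',
}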
.github/scripts/health_check.py
ADDED
@@ -0,0 +1,408 @@
#!/usr/bin/env python3
"""
Health Check - comprehensive system status check
"""

import os
import sys
import requests
import json
import time
from datetime import datetime, timedelta

HF_SPACE_URL = os.environ.get('HF_SPACE_URL')
API_KEY = os.environ.get('INTERNAL_API_KEY')

def test_basic_connectivity():
    """Test basic connectivity"""

    print("🌐 Testing basic connectivity...")

    try:
        response = requests.get(f"{HF_SPACE_URL}/api/health", timeout=10)

        if response.status_code == 200:
            health_data = response.json()
            print("   ✅ Basic health endpoint: OK")
            print(f"   📊 Status: {health_data.get('status', 'unknown')}")
            print(f"   🕐 Timestamp: {health_data.get('timestamp', 'N/A')}")
            return True, health_data
        else:
            print(f"   ❌ Health endpoint failed: {response.status_code}")
            return False, None

    except Exception as e:
        print(f"   ❌ Connectivity error: {e}")
        return False, None

def test_api_authentication():
    """Test API authentication"""

    print("🔐 Testing API authentication...")

    if not API_KEY:
        print("   ❌ No API key provided")
        return False

    try:
        response = requests.get(
            f"{HF_SPACE_URL}/api/internal/demo_status",
            headers={'Authorization': f'Bearer {API_KEY}'},
            timeout=10
        )

        if response.status_code == 200:
            print("   ✅ API authentication: OK")
            return True
        elif response.status_code == 401:
            print("   ❌ Authentication failed: Invalid API key")
            return False
        else:
            print(f"   ⚠️ Unexpected response: {response.status_code}")
            return False

    except Exception as e:
        print(f"   ❌ Authentication test error: {e}")
        return False

def check_demo_status():
    """Check the demo status"""

    print("🎬 Checking demo status...")

    try:
        response = requests.get(
            f"{HF_SPACE_URL}/api/internal/demo_status",
            headers={'Authorization': f'Bearer {API_KEY}'},
            timeout=10
        )

        if response.status_code == 200:
            demo_data = response.json()
            is_active = demo_data.get('demo_active', False)

            print(f"   🎭 Demo active: {'✅ YES' if is_active else '🔴 NO'}")

            if is_active:
                print(f"   ⏰ Running time: {demo_data.get('running_minutes', 0)} minutes")
                print(f"   🏪 Active stations: {demo_data.get('active_stations', 0)}")
                print(f"   📊 Current sim time: {demo_data.get('current_sim_time', 'N/A')}")
            else:
                print(f"   🕐 Last session: {demo_data.get('last_session_end', 'Never')}")

            return True, demo_data
        else:
            print(f"   ❌ Demo status check failed: {response.status_code}")
            return False, None

    except Exception as e:
        print(f"   ❌ Demo status error: {e}")
        return False, None

def check_data_freshness():
    """Check data freshness"""

    print("📊 Checking data freshness...")

    try:
        response = requests.get(
            f"{HF_SPACE_URL}/api/internal/data_freshness",
            headers={'Authorization': f'Bearer {API_KEY}'},
            timeout=10
        )

        if response.status_code == 200:
            fresh_data = response.json()
            oldest_minutes = fresh_data.get('oldest_data_minutes', 0)
            overall_status = fresh_data.get('overall_status', 'unknown')

            print(f"   📈 Overall status: {overall_status}")
            print(f"   ⏰ Oldest data: {oldest_minutes} minutes")

            # Per-station status
            stations = fresh_data.get('stations', {})
            fresh_count = sum(1 for s in stations.values() if s.get('status') == 'fresh')
            total_count = len(stations)

            print(f"   🏪 Fresh stations: {fresh_count}/{total_count}")

            # Flag stations with stale data
            stale_stations = [sid for sid, data in stations.items()
                              if data.get('status') != 'fresh']

            if stale_stations:
                print(f"   ⚠️ Stale stations: {', '.join(stale_stations)}")

            is_healthy = overall_status == 'healthy' and oldest_minutes < 15
            return is_healthy, fresh_data
        else:
            print(f"   ❌ Data freshness check failed: {response.status_code}")
            return False, None

    except Exception as e:
        print(f"   ❌ Data freshness error: {e}")
        return False, None

def check_prediction_performance():
    """Check prediction performance"""

    print("🎯 Checking prediction performance...")

    try:
        response = requests.get(
            f"{HF_SPACE_URL}/api/performance/realtime",
            headers={'Authorization': f'Bearer {API_KEY}'},
            timeout=10
        )

        if response.status_code == 200:
            perf_data = response.json()

            rmse = perf_data.get('rmse', 'N/A')
            mae = perf_data.get('mae', 'N/A')
            accuracy = perf_data.get('accuracy', 'N/A')

            print(f"   📊 RMSE: {rmse} cm")
            print(f"   📊 MAE: {mae} cm")
            print(f"   🎯 Accuracy: {accuracy}%")

            # Grade the performance
            rmse_ok = isinstance(rmse, (int, float)) and rmse < 50          # under 50 cm
            accuracy_ok = isinstance(accuracy, (int, float)) and accuracy > 80  # above 80%

            if rmse_ok and accuracy_ok:
                print("   ✅ Performance: GOOD")
                performance_status = 'good'
            elif rmse_ok or accuracy_ok:
                print("   ⚠️ Performance: MODERATE")
                performance_status = 'moderate'
            else:
                print("   ❌ Performance: POOR")
                performance_status = 'poor'

            return performance_status in ['good', 'moderate'], perf_data
        else:
            print(f"   ❌ Performance check failed: {response.status_code}")
            return False, None

    except Exception as e:
        print(f"   ❌ Performance check error: {e}")
        return False, None

def test_prediction_endpoints():
    """Test the prediction endpoints"""

    print("🔮 Testing prediction endpoints...")

    test_endpoints = [
        ('/api/tide_level?station_id=DT_0001', 'Tide level prediction'),
        ('/api/extremes?station_id=DT_0001', 'Extremes analysis'),
        ('/api/alert?station_id=DT_0001', 'Alert system')
    ]

    working_endpoints = 0

    for endpoint, description in test_endpoints:
        try:
            response = requests.get(f"{HF_SPACE_URL}{endpoint}", timeout=10)

            if response.status_code == 200:
                print(f"   ✅ {description}: OK")
                working_endpoints += 1
            else:
                print(f"   ❌ {description}: ERROR ({response.status_code})")

        except Exception as e:
            print(f"   ❌ {description}: EXCEPTION ({e})")

    success_rate = working_endpoints / len(test_endpoints)
    print(f"   📊 Endpoint success rate: {success_rate:.1%} ({working_endpoints}/{len(test_endpoints)})")

    return success_rate > 0.8  # require more than 80% success

def measure_response_times():
    """Measure endpoint response times"""

    print("⏱️ Measuring response times...")

    test_urls = [
        ('/api/health', 'Health check'),
        ('/api/tide_level?station_id=DT_0001', 'Tide prediction'),
        ('/api/internal/demo_status', 'Demo status')
    ]

    response_times = {}

    for endpoint, description in test_urls:
        try:
            start_time = time.time()

            headers = {}
            if 'internal' in endpoint:
                headers['Authorization'] = f'Bearer {API_KEY}'

            response = requests.get(f"{HF_SPACE_URL}{endpoint}", headers=headers, timeout=10)

            end_time = time.time()
            response_time = (end_time - start_time) * 1000  # ms

            response_times[endpoint] = response_time

            if response_time < 1000:
                status = "✅ FAST"
            elif response_time < 3000:
                status = "⚠️ MODERATE"
            else:
                status = "❌ SLOW"

            print(f"   {description}: {response_time:.0f}ms {status}")

        except Exception:
            print(f"   ❌ {description}: TIMEOUT or ERROR")
            response_times[endpoint] = None

    valid_times = [t for t in response_times.values() if t is not None]
    if not valid_times:
        # Guard against dividing by zero when every endpoint failed
        print("   ❌ No endpoints responded")
        return False

    avg_time = sum(valid_times) / len(valid_times)
    print(f"   📊 Average response time: {avg_time:.0f}ms")

    return avg_time < 2000  # under 2 seconds

def comprehensive_health_check():
    """Run the comprehensive status check"""

    print("🏥 Starting comprehensive health check...")
    print(f"🕐 Check started at: {datetime.now().isoformat()}")
    print("=" * 60)

    checks = [
        ("Basic Connectivity", test_basic_connectivity),
        ("API Authentication", test_api_authentication),
        ("Demo Status", check_demo_status),
        ("Data Freshness", check_data_freshness),
        ("Prediction Performance", check_prediction_performance),
        ("Prediction Endpoints", test_prediction_endpoints),
        ("Response Times", measure_response_times)
    ]

    results = {}
    passed_checks = 0

    for check_name, check_function in checks:
        print(f"\n🔍 {check_name}:")

        try:
            if check_function.__name__ in ['test_basic_connectivity', 'check_demo_status', 'check_data_freshness', 'check_prediction_performance']:
                # These functions return a (success, data) tuple
                success, data = check_function()
                results[check_name] = {
                    'success': success,
                    'data': data,
                    'timestamp': datetime.now().isoformat()
                }
            else:
                # The remaining functions return a boolean
                success = check_function()
                results[check_name] = {
                    'success': success,
                    'timestamp': datetime.now().isoformat()
                }

            if success:
                passed_checks += 1

        except Exception as e:
            print(f"   ❌ Check failed with exception: {e}")
            results[check_name] = {
                'success': False,
                'error': str(e),
                'timestamp': datetime.now().isoformat()
            }

    # Final summary
    total_checks = len(checks)
    success_rate = passed_checks / total_checks

    print("\n" + "=" * 60)
    print("📊 HEALTH CHECK SUMMARY")
    print("=" * 60)
    print(f"✅ Passed checks: {passed_checks}/{total_checks}")
    print(f"📈 Success rate: {success_rate:.1%}")

    if success_rate >= 0.9:
        overall_status = "🟢 EXCELLENT"
        status_code = 0
    elif success_rate >= 0.7:
        overall_status = "🟡 GOOD"
        status_code = 0
    elif success_rate >= 0.5:
        overall_status = "🟠 WARNING"
        status_code = 1
    else:
        overall_status = "🔴 CRITICAL"
        status_code = 2

    print(f"🎯 Overall status: {overall_status}")

    # Recommendations
    if success_rate < 1.0:
        print("\n💡 Recommendations:")

        failed_checks = [name for name, result in results.items() if not result.get('success', False)]

        for failed_check in failed_checks:
            if 'Connectivity' in failed_check:
                print("   🌐 Check HF Space deployment status")
            elif 'Authentication' in failed_check:
                print("   🔐 Verify INTERNAL_API_KEY environment variable")
            elif 'Demo Status' in failed_check:
                print("   🎬 Consider starting demo session")
            elif 'Data Freshness' in failed_check:
                print("   📊 Check data collection pipeline")
            elif 'Performance' in failed_check:
                print("   🎯 Review prediction model performance")
            elif 'Endpoints' in failed_check:
                print("   🔮 Check prediction service status")
            elif 'Response Times' in failed_check:
                print("   ⏱️ Consider system resource optimization")

    # Save the report
    health_report = {
        'check_timestamp': datetime.now().isoformat(),
        'overall_status': overall_status,
        'success_rate': success_rate,
        'passed_checks': passed_checks,
        'total_checks': total_checks,
        'check_results': results
    }

    with open(f'health_report_{datetime.now().strftime("%Y%m%d_%H%M%S")}.json', 'w') as f:
        json.dump(health_report, f, indent=2)

    print("\n📄 Detailed report saved to health_report_*.json")

    return status_code

if __name__ == '__main__':
    if not HF_SPACE_URL:
        print("❌ HF_SPACE_URL environment variable is required")
        sys.exit(1)

    # Argument handling
    quick_mode = len(sys.argv) > 1 and sys.argv[1] == 'quick'

    if quick_mode:
        print("⚡ Quick health check mode")

        # Verify basic connectivity only (and auth, when a key is present)
        connectivity_ok, _ = test_basic_connectivity()
        auth_ok = test_api_authentication() if API_KEY else False

        if connectivity_ok and (auth_ok or not API_KEY):
            print("✅ Basic health: OK")
            sys.exit(0)
        else:
            print("❌ Basic health: FAILED")
            sys.exit(1)
    else:
        # Full comprehensive check
        exit_code = comprehensive_health_check()
        sys.exit(exit_code)
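Likewise, here is a minimal sketch of the /api/internal/data_freshness payload that check_data_freshness can consume. The key names come from the checker itself; the values and the exact server contract are assumptions.

# Hypothetical freshness payload - only the keys read by check_data_freshness
# matter here; all values are illustrative.
example_freshness = {
    'overall_status': 'healthy',   # must be 'healthy' for a passing check
    'oldest_data_minutes': 7,      # must stay under 15 minutes
    'stations': {
        'DT_0001': {'status': 'fresh'},
        'DT_0002': {'status': 'stale'},  # would be reported as a stale station
    },
}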
.github/scripts/send_data_batch.py
ADDED
@@ -0,0 +1,252 @@
#!/usr/bin/env python3
"""
Data Batch Sender - batch data transmission
"""

import os
import sys
import pandas as pd
import numpy as np
import requests
import json
from datetime import datetime, timedelta
from pathlib import Path

def load_station_data(station_id, start_time, num_points=12):
    """Load station data (from a real txt file, or simulated)"""

    # Candidate paths for a real data file
    possible_paths = [
        Path(f"data/2025_07_{station_id}_TEST.txt"),
        Path(f"data/{station_id}_2025_07.txt"),
        Path(f"data/{station_id}.txt"),
        Path(f"2025_07_{station_id}_TEST.txt")
    ]

    data_file = None
    for path in possible_paths:
        if path.exists():
            data_file = path
            break

    if data_file:
        try:
            print(f"📁 Loading real data from: {data_file}")

            # Try encodings until one decodes
            encodings = ['cp949', 'utf-8', 'euc-kr']
            df = None

            for encoding in encodings:
                try:
                    df = pd.read_csv(
                        data_file,
                        encoding=encoding,
                        sep='\t',
                        skiprows=3
                    )
                    print(f"✅ Successfully loaded with {encoding} encoding")
                    break
                except UnicodeDecodeError:
                    continue

            if df is None:
                raise Exception("Could not decode file with any encoding")

            # Normalize the column names (the source headers are Korean:
            # observation time, air pressure (hPa), wind direction (deg),
            # wind speed (m/s), air temperature (℃), tide level (cm))
            if len(df.columns) >= 6:
                df = df.iloc[:, :6]  # use only the first 6 columns
                df.columns = ['date', 'air_pres', 'wind_dir', 'wind_speed', 'air_temp', 'tide_level']
            else:
                raise Exception(f"Expected 6 columns, got {len(df.columns)}")

            # Parse dates
            df['date'] = pd.to_datetime(df['date'], errors='coerce')

            # Numeric conversion
            for col in ['air_pres', 'wind_dir', 'wind_speed', 'air_temp', 'tide_level']:
                df[col] = pd.to_numeric(df[col], errors='coerce')

            # Drop rows with missing values
            df = df.dropna()

            # Extract the required points starting at start_time
            start_dt = datetime.fromisoformat(start_time.replace('Z', '+00:00') if start_time.endswith('Z') else start_time)

            # Find the closest timestamp
            if not df.empty:
                time_diff = (df['date'] - start_dt).abs()
                start_idx = time_diff.idxmin()

                # Take num_points rows starting at that index
                selected_data = df.loc[start_idx:start_idx+num_points-1]

                if len(selected_data) < num_points:
                    # Not enough rows ahead; fall back to the tail
                    selected_data = df.tail(num_points)

                print(f"📊 Extracted {len(selected_data)} real data points")
                return selected_data.to_dict('records')

        except Exception as e:
            print(f"⚠️ Error reading {station_id} data from {data_file}: {e}")
            print("🔄 Falling back to simulation data")

    # Generate simulation data
    print(f"🎲 Generating simulation data for {station_id}")

    start_dt = datetime.fromisoformat(start_time.replace('Z', '+00:00') if start_time.endswith('Z') else start_time)
    data_points = []

    # Seed per station for consistent data
    np.random.seed(hash(station_id) % 2**32)

    for i in range(num_points):
        current_time = start_dt + timedelta(minutes=i*5)

        # Time-based patterns
        hour_angle = 2 * np.pi * current_time.hour / 24
        tide_angle = 2 * np.pi * current_time.hour / 12.42  # M2 tidal constituent

        # Baseline simulated observation
        data_point = {
            'date': current_time.strftime('%Y-%m-%d %H:%M:%S'),
            'station_id': station_id,
            'air_pres': round(1013.0 + 10 * np.sin(hour_angle) + np.random.normal(0, 3), 1),
            'wind_dir': round(180 + 90 * np.sin(hour_angle) + np.random.uniform(-30, 30), 1),
            'wind_speed': round(max(0, 3 + 2 * np.sin(hour_angle) + np.random.normal(0, 1)), 1),
            'air_temp': round(25 + 5 * np.sin(hour_angle) + np.random.normal(0, 1), 1),
            'tide_level': round(300 + 200 * np.sin(tide_angle) + 50 * np.sin(2*tide_angle) + np.random.normal(0, 15), 1)
        }

        data_points.append(data_point)

    return data_points

def get_current_simulation_time():
    """Fetch the current simulation time"""

    HF_SPACE_URL = os.environ.get('HF_SPACE_URL')
    API_KEY = os.environ.get('INTERNAL_API_KEY')

    try:
        response = requests.get(
            f"{HF_SPACE_URL}/api/internal/simulation_status",
            headers={'Authorization': f'Bearer {API_KEY}'},
            timeout=10
        )

        if response.status_code == 200:
            sim_status = response.json()
            return sim_status.get('current_simulation_time', '2025-07-01T00:00:00')
        else:
            print(f"⚠️ Could not get simulation status: {response.status_code}")
            return '2025-07-01T00:00:00'

    except Exception as e:
        print(f"⚠️ Error getting simulation status: {e}")
        return '2025-07-01T00:00:00'

def send_batch_data():
    """Send a batch of data"""

    HF_SPACE_URL = os.environ.get('HF_SPACE_URL')
    API_KEY = os.environ.get('INTERNAL_API_KEY')
    BATCH_SIZE = int(os.environ.get('INPUT_BATCH_SIZE', '12'))

    if not HF_SPACE_URL or not API_KEY:
        print("❌ Missing environment variables")
        sys.exit(1)

    # Get the current simulation time
    current_sim_time = get_current_simulation_time()

    print(f"📦 Sending batch data from: {current_sim_time}")
    print(f"📊 Batch size: {BATCH_SIZE} points per station (= {BATCH_SIZE * 5} minutes)")

    # Demo stations
    demo_stations = ['DT_0001', 'DT_0002', 'DT_0003', 'DT_0008', 'DT_0017']

    # Collect data for every station
    all_data = []
    successful_stations = []

    for station_id in demo_stations:
        try:
            station_data = load_station_data(station_id, current_sim_time, BATCH_SIZE)
            if station_data:
                # Attach station_id to every data point
                for point in station_data:
                    point['station_id'] = station_id

                all_data.extend(station_data)
                successful_stations.append(station_id)
                print(f"✅ {station_id}: {len(station_data)} points loaded")
            else:
                print(f"❌ {station_id}: No data loaded")

        except Exception as e:
            print(f"❌ {station_id}: Error loading data - {e}")

    if not all_data:
        print("❌ No data to send")
        sys.exit(1)

    # Build the payload for the HF Space
    payload = {
        'task': 'batch_data_collection',
        'timestamp': datetime.now().isoformat(),
        'simulation_time': current_sim_time,
        'batch_size': BATCH_SIZE,
        'stations_data': all_data,
        'metadata': {
            'total_points': len(all_data),
            'stations_count': len(successful_stations),
            'successful_stations': successful_stations,
            'time_range_minutes': BATCH_SIZE * 5,
            'data_source': 'mixed'  # real + simulated
        }
    }

    print(f"🚀 Sending {len(all_data)} data points to HF Space...")

    try:
        response = requests.post(
            f"{HF_SPACE_URL}/api/internal/collect_data",
            headers={
                'Authorization': f'Bearer {API_KEY}',
                'Content-Type': 'application/json'
            },
            json=payload,
            timeout=60
        )

        if response.status_code == 200:
            result = response.json()
            print("✅ Batch data sent successfully!")
            print(f"📊 Records processed: {result.get('records_saved', 0)}")
            print(f"🏪 Stations processed: {result.get('stations_processed', 0)}")
            print(f"⏱️ Processing time: {result.get('processing_time_ms', 0)}ms")

            # Save the success result
            with open('batch_send_result.json', 'w') as f:
                json.dump(result, f, indent=2)

        else:
            print(f"❌ Failed to send batch data: {response.status_code}")
            print(f"Response: {response.text}")
            sys.exit(1)

    except Exception as e:
        print(f"❌ Error sending batch data: {e}")
        sys.exit(1)

if __name__ == '__main__':
    print("📦 Starting batch data transmission...")
    print(f"🕐 Current time: {datetime.now().isoformat()}")

    send_batch_data()

    print("🎉 Batch data transmission completed!")
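To exercise the real-data path locally, one can drop a fixture file at one of the candidate paths. The layout below is inferred from the parser (three skipped preamble lines, one header row consumed by read_csv, then six tab-separated columns); the header text, file name, and all values are made up.

# Hypothetical fixture matching load_station_data's parsing assumptions:
# three skipped preamble lines, one header line, then tab-separated rows of
# time / pressure / wind direction / wind speed / temperature / tide level.
fixture = (
    "preamble line 1\n"
    "preamble line 2\n"
    "preamble line 3\n"
    "time\tpres\tdir\tspeed\ttemp\ttide\n"
    "2025-07-01 00:00:00\t1012.5\t180.0\t3.2\t25.1\t315.0\n"
    "2025-07-01 00:05:00\t1012.4\t182.0\t3.5\t25.0\t321.4\n"
)
with open("2025_07_DT_0001_TEST.txt", "w", encoding="utf-8") as f:
    f.write(fixture)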
.github/scripts/system_recovery.py
ADDED
@@ -0,0 +1,359 @@
#!/usr/bin/env python3
"""
System Recovery - system restoration and normalization
"""

import os
import sys
import requests
import json
import time
from datetime import datetime

HF_SPACE_URL = os.environ.get('HF_SPACE_URL')
API_KEY = os.environ.get('INTERNAL_API_KEY')

def check_system_status():
    """Check the current system status"""

    print("🔍 Checking current system status...")

    status_checks = {
        'basic_health': '/api/health',
        'demo_status': '/api/internal/demo_status',
        'data_freshness': '/api/internal/data_freshness',
        'performance': '/api/performance/realtime'
    }

    results = {}

    for check_name, endpoint in status_checks.items():
        try:
            headers = {'Authorization': f'Bearer {API_KEY}'} if 'internal' in endpoint or 'performance' in endpoint else {}

            response = requests.get(
                f"{HF_SPACE_URL}{endpoint}",
                headers=headers,
                timeout=10
            )

            if response.status_code == 200:
                results[check_name] = {
                    'status': 'ok',
                    'data': response.json()
                }
                print(f"   ✅ {check_name}: OK")
            else:
                results[check_name] = {
                    'status': 'error',
                    'code': response.status_code,
                    'data': response.text
                }
                print(f"   ❌ {check_name}: ERROR ({response.status_code})")

        except Exception as e:
            results[check_name] = {
                'status': 'exception',
                'error': str(e)
            }
            print(f"   ⚠️ {check_name}: EXCEPTION ({e})")

    return results

def clear_active_issues():
    """Clear all active issues"""

    print("🔧 Clearing all active issues...")

    payload = {
        'action': 'clear_all_issues',
        'timestamp': datetime.now().isoformat(),
        'forced': True
    }

    try:
        response = requests.post(
            f"{HF_SPACE_URL}/api/internal/system_recovery",
            headers={
                'Authorization': f'Bearer {API_KEY}',
                'Content-Type': 'application/json'
            },
            json=payload,
            timeout=30
        )

        if response.status_code == 200:
            result = response.json()
            cleared_issues = result.get('cleared_issues', [])

            print(f"✅ Cleared {len(cleared_issues)} active issues")
            for issue in cleared_issues:
                print(f"   🔄 {issue.get('type', 'unknown')}: {issue.get('description', 'N/A')}")

            return True
        else:
            print(f"❌ Failed to clear issues: {response.status_code}")
            return False

    except Exception as e:
        print(f"❌ Error clearing issues: {e}")
        return False

def reset_simulation_state():
    """Reset the simulation state"""

    print("🔄 Resetting simulation state...")

    payload = {
        'action': 'reset_simulation',
        'timestamp': datetime.now().isoformat(),
        'preserve_data': True,  # keep existing data
        'reset_counters': True,
        'clear_cache': True
    }

    try:
        response = requests.post(
            f"{HF_SPACE_URL}/api/internal/system_recovery",
            headers={
                'Authorization': f'Bearer {API_KEY}',
                'Content-Type': 'application/json'
            },
            json=payload,
            timeout=30
        )

        if response.status_code == 200:
            result = response.json()
            print("✅ Simulation state reset successfully")
            print(f"   📊 Data points preserved: {result.get('preserved_records', 0)}")
            print(f"   🧹 Cache cleared: {result.get('cache_cleared', False)}")
            print(f"   🔢 Counters reset: {result.get('counters_reset', False)}")
            return True
        else:
            print(f"❌ Failed to reset simulation: {response.status_code}")
            return False

    except Exception as e:
        print(f"❌ Error resetting simulation: {e}")
        return False

def restart_data_collection():
    """Restart data collection"""

    print("📡 Restarting data collection services...")

    # Stop collection first
    stop_payload = {
        'action': 'stop_collection',
        'timestamp': datetime.now().isoformat()
    }

    try:
        response = requests.post(
            f"{HF_SPACE_URL}/api/internal/collection_control",
            headers={
                'Authorization': f'Bearer {API_KEY}',
                'Content-Type': 'application/json'
            },
            json=stop_payload,
            timeout=15
        )

        if response.status_code == 200:
            print("   🛑 Data collection stopped")
        else:
            print(f"   ⚠️ Stop collection warning: {response.status_code}")

    except Exception as e:
        print(f"   ⚠️ Stop collection error: {e}")

    # Brief pause
    time.sleep(2)

    # Restart collection
    start_payload = {
        'action': 'start_collection',
        'timestamp': datetime.now().isoformat(),
        'stations': ['DT_0001', 'DT_0002', 'DT_0003', 'DT_0008', 'DT_0017'],
        'fresh_start': True
    }

    try:
        response = requests.post(
            f"{HF_SPACE_URL}/api/internal/collection_control",
            headers={
                'Authorization': f'Bearer {API_KEY}',
                'Content-Type': 'application/json'
            },
            json=start_payload,
            timeout=15
        )

        if response.status_code == 200:
            result = response.json()
            print("   ✅ Data collection restarted")
            print(f"   🏪 Active stations: {len(result.get('active_stations', []))}")
            return True
        else:
            print(f"   ❌ Failed to restart collection: {response.status_code}")
            return False

    except Exception as e:
        print(f"   ❌ Error restarting collection: {e}")
        return False

def verify_recovery():
    """Verify the recovery"""

    print("🔍 Verifying system recovery...")

    # Wait 5 seconds, then re-check the status
    time.sleep(5)

    status_results = check_system_status()

    # Core status flags
    health_ok = status_results.get('basic_health', {}).get('status') == 'ok'
    demo_ok = status_results.get('demo_status', {}).get('status') == 'ok'

    if health_ok and demo_ok:
        print("✅ System recovery verification PASSED")

        # Detailed status output
        if 'performance' in status_results and status_results['performance']['status'] == 'ok':
            perf_data = status_results['performance']['data']
            rmse = perf_data.get('rmse', 'N/A')
            accuracy = perf_data.get('accuracy', 'N/A')
            print(f"   📈 Current RMSE: {rmse}")
            print(f"   🎯 Current accuracy: {accuracy}%")

        if 'data_freshness' in status_results and status_results['data_freshness']['status'] == 'ok':
            fresh_data = status_results['data_freshness']['data']
            oldest_minutes = fresh_data.get('oldest_data_minutes', 'N/A')
            print(f"   ⏰ Data freshness: {oldest_minutes} minutes")

        return True
    else:
        print("❌ System recovery verification FAILED")

        if not health_ok:
            print("   🚨 Basic health check still failing")
        if not demo_ok:
            print("   🚨 Demo status still problematic")

        return False

def full_system_recovery():
    """Run the full system recovery process"""

    print("🚀 Starting full system recovery process...")
    recovery_start = datetime.now()
    print(f"🕐 Recovery started at: {recovery_start.isoformat()}")

    recovery_steps = [
        ("Checking initial status", check_system_status),
        ("Clearing active issues", clear_active_issues),
        ("Resetting simulation state", reset_simulation_state),
        ("Restarting data collection", restart_data_collection),
        ("Verifying recovery", verify_recovery)
    ]

    results = {}

    for step_name, step_function in recovery_steps:
        print(f"\n🔧 Step: {step_name}")

        try:
            result = step_function()
            results[step_name] = {
                'success': result if isinstance(result, bool) else True,
                'timestamp': datetime.now().isoformat()
            }

            if isinstance(result, bool) and not result:
                print(f"⚠️ Step '{step_name}' reported failure but continuing...")

        except Exception as e:
            print(f"❌ Step '{step_name}' failed with exception: {e}")
            results[step_name] = {
                'success': False,
                'error': str(e),
                'timestamp': datetime.now().isoformat()
            }

    # Final summary
    successful_steps = sum(1 for r in results.values() if r.get('success', False))
    total_steps = len(recovery_steps)

    print("\n📊 Recovery Summary:")
    print(f"   ✅ Successful steps: {successful_steps}/{total_steps}")
    print(f"   ⏱️ Total recovery time: {(datetime.now() - recovery_start).total_seconds():.0f} seconds")

    if successful_steps >= total_steps - 1:  # OK if at least 4/5 steps succeed
        print("🎉 System recovery COMPLETED successfully!")
        recovery_success = True
    else:
        print("⚠️ System recovery PARTIALLY completed")
        recovery_success = False

    # Save the recovery report
    recovery_report = {
        'recovery_timestamp': datetime.now().isoformat(),
        'success': recovery_success,
        'steps': results,
        'summary': {
            'successful_steps': successful_steps,
            'total_steps': total_steps,
            'success_rate': successful_steps / total_steps
        }
    }

    with open(f'recovery_report_{datetime.now().strftime("%Y%m%d_%H%M%S")}.json', 'w') as f:
        json.dump(recovery_report, f, indent=2)

    return recovery_success

if __name__ == '__main__':
    if not HF_SPACE_URL or not API_KEY:
        print("❌ Missing environment variables:")
        print("   HF_SPACE_URL:", "✓" if HF_SPACE_URL else "✗")
        print("   INTERNAL_API_KEY:", "✓" if API_KEY else "✗")
        sys.exit(1)

    # Argument handling
    if len(sys.argv) > 1:
        action = sys.argv[1]

        if action == 'status':
            print("🔍 System status check only...")
            status = check_system_status()
            sys.exit(0)
        elif action == 'clear':
            print("🔧 Clearing issues only...")
            clear_active_issues()
            sys.exit(0)
        elif action == 'reset':
            print("🔄 Reset simulation only...")
            reset_simulation_state()
            sys.exit(0)
        elif action == 'restart':
            print("📡 Restart collection only...")
            restart_data_collection()
            sys.exit(0)
        elif action != 'full':
            print("Usage: python system_recovery.py [status|clear|reset|restart|full]")
            print("  status  - Check system status only")
            print("  clear   - Clear active issues only")
            print("  reset   - Reset simulation state only")
            print("  restart - Restart data collection only")
            print("  full    - Full recovery process (default)")
            sys.exit(1)

    # Run the full recovery
    success = full_system_recovery()

    if not success:
        print("\n⚠️ Recovery was not fully successful. Manual intervention may be required.")
        sys.exit(1)
    else:
        print("\n🎉 System is ready for demonstration!")
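A small illustration of the success threshold used in full_system_recovery: a run counts as completed when at most one of the five steps fails. The step names below are illustrative stand-ins.

# The recovery is declared complete when successful_steps >= total_steps - 1,
# i.e. a single failed step is tolerated.
step_results = {'status': True, 'clear': True, 'reset': True,
                'restart': False, 'verify': True}
completed = sum(step_results.values()) >= len(step_results) - 1
print(completed)  # True - one failure out of five still passes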
.github/scripts/trigger_issue.py
ADDED
@@ -0,0 +1,467 @@
1 |
+
#!/usr/bin/env python3
|
2 |
+
"""
|
3 |
+
Issue Trigger - 문제 상황 발생 시뮬레이션
|
4 |
+
"""
|
5 |
+
|
6 |
+
import os
|
7 |
+
import sys
|
8 |
+
import requests
|
9 |
+
import json
|
10 |
+
import random
|
11 |
+
from datetime import datetime, timedelta
|
12 |
+
|
13 |
+
HF_SPACE_URL = os.environ.get('HF_SPACE_URL')
|
14 |
+
API_KEY = os.environ.get('INTERNAL_API_KEY')
|
15 |
+
|
16 |
+
def get_issue_config(issue_type, station_id=None):
|
17 |
+
"""문제 유형별 설정 생성"""
|
18 |
+
|
19 |
+
configs = {
|
20 |
+
'network_failure': {
|
21 |
+
'description': 'Network connectivity issues',
|
22 |
+
'duration_seconds': random.randint(30, 120),
|
23 |
+
'failure_rate': random.uniform(0.7, 0.9),
|
24 |
+
'affected_endpoints': ['/api/internal/collect_data', '/api/internal/update_predictions'],
|
25 |
+
'recovery_time': random.randint(15, 45),
|
26 |
+
'symptoms': ['timeout errors', 'connection refused', 'partial data loss']
|
27 |
+
},
|
28 |
+
|
29 |
+
'extreme_weather': {
|
30 |
+
'description': 'Extreme weather conditions (typhoon simulation)',
|
31 |
+
'magnitude': random.choice(['moderate', 'high', 'severe']),
|
32 |
+
'duration_minutes': random.randint(60, 180),
|
33 |
+
'affected_parameters': {
|
34 |
+
'tide_level': {'offset': random.randint(150, 400), 'noise_factor': 3.0},
|
35 |
+
'wind_speed': {'multiplier': random.uniform(2.5, 5.0)},
|
36 |
+
'air_pres': {'offset': random.randint(-30, -15)},
|
37 |
+
'wind_dir': {'chaos_factor': 0.8} # 풍향 불안정
|
38 |
+
},
|
39 |
+
'warning_threshold': 750.0, # cm
|
40 |
+
'danger_threshold': 900.0
|
41 |
+
},
|
42 |
+
|
43 |
+
'sensor_malfunction': {
|
44 |
+
'description': f'Sensor malfunction at station {station_id or "random"}',
|
45 |
+
'target_station': station_id or random.choice(['DT_0001', 'DT_0002', 'DT_0003']),
|
46 |
+
'malfunction_type': random.choice(['data_loss', 'frozen_values', 'random_noise', 'calibration_drift']),
|
47 |
+
'affected_sensors': random.sample(['tide_level', 'air_temp', 'air_pres', 'wind_speed'],
|
48 |
+
random.randint(1, 3)),
|
49 |
+
'duration_minutes': random.randint(30, 90),
|
50 |
+
'recovery_probability': 0.8
|
51 |
+
},
|
52 |
+
|
53 |
+
'data_corruption': {
|
54 |
+
'description': 'Data transmission corruption',
|
55 |
+
'corruption_rate': random.uniform(0.1, 0.4),
|
56 |
+
'corruption_types': ['missing_fields', 'invalid_values', 'timestamp_errors', 'encoding_issues'],
|
57 |
+
'affected_stations': random.sample(['DT_0001', 'DT_0002', 'DT_0003', 'DT_0008', 'DT_0017'],
|
58 |
+
random.randint(2, 4)),
|
59 |
+
'duration_minutes': random.randint(15, 60)
|
60 |
+
}
|
61 |
+
}
|
62 |
+
|
63 |
+
return configs.get(issue_type, {})
|
64 |
+
|
65 |
+
def trigger_issue(issue_type, station_id=None):
|
66 |
+
"""문제 상황 발생"""
|
67 |
+
|
68 |
+
if not HF_SPACE_URL or not API_KEY:
|
69 |
+
print("❌ Missing environment variables")
|
70 |
+
sys.exit(1)
|
71 |
+
|
72 |
+
print(f"⚠️ Triggering issue: {issue_type}")
|
73 |
+
if station_id:
|
74 |
+
print(f"🎯 Target station: {station_id}")
|
75 |
+
|
76 |
+
# 문제 설정 생성
|
77 |
+
issue_config = get_issue_config(issue_type, station_id)
|
78 |
+
|
79 |
+
if not issue_config:
|
80 |
+
print(f"❌ Unknown issue type: {issue_type}")
|
81 |
+
print("Available types: network_failure, extreme_weather, sensor_malfunction, data_corruption")
|
82 |
+
sys.exit(1)
|
83 |
+
|
84 |
+
# API 페이로드 구성
|
85 |
+
payload = {
|
86 |
+
'action': 'trigger_issue',
|
87 |
+
'issue_type': issue_type,
|
88 |
+
'station_id': station_id,
|
89 |
+
'timestamp': datetime.now().isoformat(),
|
90 |
+
'severity': 'demo',
|
91 |
+
'config': issue_config,
|
92 |
+
'auto_recovery': True, # 자동 복구 활성화
|
93 |
+
'notification': True # 알림 활성화
|
94 |
+
}
|
95 |
+
|
96 |
+
# 문제 유형별 특별 처리
|
97 |
+
if issue_type == 'network_failure':
|
98 |
+
print(f"🌐 Simulating network failure for {issue_config['duration_seconds']} seconds")
|
99 |
+
print(f"📊 Failure rate: {issue_config['failure_rate']:.1%}")
|
100 |
+
|
101 |
+
elif issue_type == 'extreme_weather':
|
102 |
+
magnitude = issue_config['magnitude']
|
103 |
+
duration = issue_config['duration_minutes']
|
104 |
+
print(f"🌪️ Simulating {magnitude} extreme weather for {duration} minutes")
|
105 |
+
print(f"🌊 Expected tide surge: +{issue_config['affected_parameters']['tide_level']['offset']} cm")
|
106 |
+
|
107 |
+
elif issue_type == 'sensor_malfunction':
|
108 |
+
target = issue_config['target_station']
|
109 |
+
malfunction = issue_config['malfunction_type']
|
110 |
+
sensors = issue_config['affected_sensors']
|
111 |
+
print(f"🔧 Simulating {malfunction} at {target}")
|
112 |
+
print(f"📡 Affected sensors: {', '.join(sensors)}")
|
113 |
+
|
114 |
+
elif issue_type == 'data_corruption':
|
115 |
+
rate = issue_config['corruption_rate']
|
116 |
+
stations = issue_config['affected_stations']
|
117 |
+
print(f"🔀 Simulating data corruption ({rate:.1%} rate)")
|
118 |
+
print(f"🏪 Affected stations: {', '.join(stations)}")
|
119 |
+
|
120 |
+
try:
|
121 |
+
print("🚀 Sending issue trigger to HF Space...")
|
122 |
+
|
123 |
+
response = requests.post(
|
124 |
+
f"{HF_SPACE_URL}/api/internal/trigger_demo",
|
125 |
+
headers={
|
126 |
+
'Authorization': f'Bearer {API_KEY}',
|
127 |
+
'Content-Type': 'application/json'
|
128 |
+
},
|
129 |
+
json=payload,
|
130 |
+
timeout=30
|
131 |
+
)
|
132 |
+
|
133 |
+
if response.status_code == 200:
|
134 |
+
result = response.json()
|
135 |
+
print("✅ Issue triggered successfully!")
|
136 |
+
|
137 |
+
print("\n📊 Issue Details:")
|
138 |
+
print(f" 🎯 Target: {result.get('target', 'system-wide')}")
|
139 |
+
print(f" ⏱️ Duration: {result.get('expected_duration', 'variable')}")
|
140 |
+
print(f" 📈 Expected impact: {result.get('expected_impact', 'monitoring required')}")
|
141 |
+
print(f" 🔄 Auto recovery: {result.get('auto_recovery', 'enabled')}")
|
142 |
+
|
143 |
+
if 'issue_id' in result:
|
144 |
+
print(f" 🆔 Issue ID: {result['issue_id']}")
|
145 |
+
|
146 |
+
# 모니터링 권장사항
|
147 |
+
print("\n🔍 Monitoring recommendations:")
|
148 |
+
if issue_type == 'network_failure':
|
149 |
+
print(" - Watch for API timeout errors")
|
150 |
+
print(" - Monitor data collection success rate")
|
151 |
+
print(" - Check system health dashboard")
|
152 |
+
|
153 |
+
elif issue_type == 'extreme_weather':
|
154 |
+
print(" - Monitor tide level predictions")
|
155 |
+
print(" - Watch for alert threshold breaches")
|
156 |
+
print(" - Check RMSE increase")
|
157 |
+
|
158 |
+
elif issue_type == 'sensor_malfunction':
|
159 |
+
print(f" - Monitor {issue_config['target_station']} data quality")
|
160 |
+
print(" - Watch for missing data warnings")
|
161 |
+
print(" - Check interpolation accuracy")
|
162 |
+
|
163 |
+
elif issue_type == 'data_corruption':
|
164 |
+
print(" - Monitor data validation errors")
|
165 |
+
print(" - Check processing success rate")
|
166 |
+
print(" - Watch for data quality alerts")
|
167 |
+
|
168 |
+
# 결과 저장
|
169 |
+
with open(f'issue_trigger_{issue_type}_{datetime.now().strftime("%H%M%S")}.json', 'w') as f:
|
170 |
+
json.dump(result, f, indent=2)
|
171 |
+
|
172 |
+
else:
|
173 |
+
print(f"❌ Failed to trigger issue: {response.status_code}")
|
174 |
+
print(f"Response: {response.text}")
|
175 |
+
|
176 |
+
# 에러 상세 정보
|
177 |
+
try:
|
178 |
+
error_detail = response.json()
|
179 |
+
if 'detail' in error_detail:
|
180 |
+
print(f"Error detail: {error_detail['detail']}")
|
181 |
+
except:
|
182 |
+
pass
|
183 |
+
|
184 |
+
sys.exit(1)
|
185 |
+
|
186 |
+
except requests.exceptions.Timeout:
|
187 |
+
print("⏱️ Request timed out (this might be part of network failure simulation)")
|
188 |
+
if issue_type == 'network_failure':
|
189 |
+
print("✅ Network failure simulation appears to be working!")
|
190 |
+
else:
|
191 |
+
sys.exit(1)
|
192 |
+
|
193 |
+
except Exception as e:
|
194 |
+
print(f"❌ Error triggering issue: {e}")
|
195 |
+
sys.exit(1)
|
196 |
+
|
197 |
+
def list_available_issues():
|
198 |
+
"""사용 가능한 문제 유형 목록"""
|
199 |
+
|
200 |
+
issues = {
|
201 |
+
'network_failure': 'Network connectivity problems, API timeouts',
|
202 |
+
'extreme_weather': 'Typhoon/storm simulation with extreme tide levels',
|
203 |
+
'sensor_malfunction': 'Individual station sensor failures',
|
204 |
+
'data_corruption': 'Data transmission and validation errors'
|
205 |
+
}
|
206 |
+
|
207 |
+
print("🔧 Available issue types:")
|
208 |
+
for issue_type, description in issues.items():
|
209 |
+
print(f" {issue_type}: {description}")
|
210 |
+
|
211 |
+
if __name__ == '__main__':
|
212 |
+
if len(sys.argv) < 2:
|
213 |
+
print("Usage: python trigger_issue.py <issue_type> [station_id]")
|
214 |
+
print()
|
215 |
+
list_available_issues()
|
216 |
+
print()
|
217 |
+
print("Examples:")
|
218 |
+
print(" python trigger_issue.py network_failure")
|
219 |
+
print(" python trigger_issue.py extreme_weather")
|
220 |
+
print(" python trigger_issue.py sensor_malfunction DT_0001")
|
221 |
+
print(" python trigger_issue.py data_corruption")
|
222 |
+
sys.exit(1)
|
223 |
+
|
224 |
+
issue_type = sys.argv[1]
|
225 |
+
station_id = sys.argv[2] if len(sys.argv) > 2 else None
|
226 |
+
|
227 |
+
if issue_type == 'list':
|
228 |
+
list_available_issues()
|
229 |
+
sys.exit(0)
|
230 |
+
|
231 |
+
print(f"🎭 Starting issue simulation: {issue_type}")
|
232 |
+
print(f"🕐 Current time: {datetime.now().isoformat()}")
|
233 |
+
|
234 |
+
trigger_issue(issue_type, station_id)
|
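For local testing, the script above can be driven end-to-end once the two environment variables it reads are set. A minimal sketch (the Space URL is the one used in the workflow env below; the key value is a placeholder, not from this commit):

import os
import subprocess

# Same variables trigger_issue.py reads via os.environ.get(...)
os.environ['HF_SPACE_URL'] = 'https://alwaysgood-my-tide-env.hf.space'
os.environ['INTERNAL_API_KEY'] = '<internal-api-key>'  # placeholder value

# Equivalent to one of the usage examples the script prints itself
subprocess.run(
    ['python3', '.github/scripts/trigger_issue.py', 'sensor_malfunction', 'DT_0001'],
    check=True,
)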
.github/workflows/tide_scheduler.yml
CHANGED
@@ -1,122 +1,113 @@
|
|
1 |
-
name: Tide Data
|
2 |
|
3 |
on:
|
4 |
-
|
5 |
-
|
6 |
-
- cron: '*/5 * * * *'
|
7 |
-
|
8 |
-
workflow_dispatch: # manual run option
|
9 |
inputs:
|
10 |
-
|
11 |
-
description: '
|
12 |
required: true
|
13 |
-
default: '
|
14 |
type: choice
|
15 |
options:
|
16 |
-
-
|
17 |
-
-
|
18 |
-
-
|
19 |
|
20 |
env:
|
21 |
HF_SPACE_URL: https://alwaysgood-my-tide-env.hf.space
|
22 |
|
23 |
jobs:
|
24 |
-
|
|
|
25 |
runs-on: ubuntu-latest
|
26 |
-
timeout-minutes:
|
27 |
|
28 |
steps:
|
29 |
-
- name:
|
30 |
-
|
31 |
-
|
32 |
-
|
33 |
-
|
|
|
|
|
34 |
|
35 |
-
|
36 |
-
if [ $((MINUTE % 10)) -eq 0 ]; then
|
37 |
-
echo "should_predict=true" >> $GITHUB_OUTPUT
|
38 |
-
else
|
39 |
-
echo "should_predict=false" >> $GITHUB_OUTPUT
|
40 |
-
fi
|
41 |
-
|
42 |
-
- name: Collect and Process Data (5-minute interval)
|
43 |
run: |
|
44 |
-
|
45 |
-
-H "Authorization: Bearer ${{ secrets.INTERNAL_API_KEY }}" \
|
46 |
-
-H "Content-Type: application/json" \
|
47 |
-
-d '{
|
48 |
-
"task": "collect_and_process",
|
49 |
-
"timestamp": "'$(date -u +%Y-%m-%dT%H:%M:%S)'"
|
50 |
-
}' \
|
51 |
-
-w "\n%{http_code}" \
|
52 |
-
-s)
|
53 |
-
|
54 |
-
http_code=$(echo "$response" | tail -n1)
|
55 |
-
body=$(echo "$response" | head -n-1)
|
56 |
-
|
57 |
-
if [ "$http_code" != "200" ]; then
|
58 |
-
echo "❌ Data collection failed with status $http_code"
|
59 |
-
echo "Response: $body"
|
60 |
-
exit 1
|
61 |
-
fi
|
62 |
-
|
63 |
-
echo "✅ Data collection successful"
|
64 |
-
echo "$body" | jq '.'
|
65 |
|
66 |
-
- name:
|
67 |
-
if: steps.time_check.outputs.should_predict == 'true'
|
68 |
run: |
|
69 |
-
|
70 |
-
|
71 |
-
-H "Content-Type: application/json" \
|
72 |
-
-d '{
|
73 |
-
"task": "update_predictions",
|
74 |
-
"timestamp": "'$(date -u +%Y-%m-%dT%H:%M:%S)'"
|
75 |
-
}' \
|
76 |
-
-w "\n%{http_code}" \
|
77 |
-
-s)
|
78 |
|
79 |
-
|
80 |
-
|
|
|
|
|
|
|
|
|
81 |
|
82 |
-
|
83 |
-
echo "❌ Prediction update failed with status $http_code"
|
84 |
-
echo "Response: $body"
|
85 |
-
exit 1
|
86 |
-
fi
|
87 |
-
|
88 |
-
echo "✅ Predictions updated successfully"
|
89 |
-
echo "$body" | jq '.'
|
90 |
-
|
91 |
-
- name: Send notification on failure
|
92 |
-
if: failure()
|
93 |
-
run: |
|
94 |
-
# Send a Slack, Discord, or email notification
|
95 |
-
echo "Pipeline failed at $(date)"
|
96 |
-
# curl -X POST slack_webhook_url ...
|
97 |
-
|
98 |
-
health_check:
|
99 |
-
runs-on: ubuntu-latest
|
100 |
-
needs: collect_and_process
|
101 |
-
|
102 |
-
steps:
|
103 |
-
- name: Verify System Health
|
104 |
-
run: |
|
105 |
-
response=$(curl -s "${{ env.HF_SPACE_URL }}/api/health")
|
106 |
-
echo "$response" | jq '.'
|
107 |
-
|
108 |
-
status=$(echo "$response" | jq -r '.status')
|
109 |
-
if [ "$status" != "healthy" ]; then
|
110 |
-
echo "⚠️ System is not healthy: $status"
|
111 |
-
fi
|
112 |
-
|
113 |
-
- name: Check Data Freshness
|
114 |
run: |
|
115 |
-
|
116 |
-
|
|
|
|
|
117 |
|
118 |
-
|
119 |
-
|
120 |
-
|
121 |
-
|
122 |
-
|
|
|
|
1 |
+
name: Tide Data Interactive Demo
|
2 |
|
3 |
on:
|
4 |
+
# Automatic scheduling removed - fully manual control
|
5 |
+
workflow_dispatch: # manual dispatch only
|
|
6 |
inputs:
|
7 |
+
demo_action:
|
8 |
+
description: 'Demo action to perform'
|
9 |
required: true
|
10 |
+
default: 'start_demo'
|
11 |
+
type: choice
|
12 |
+
options:
|
13 |
+
- start_demo # start the demo
|
14 |
+
- stop_demo # stop the demo
|
15 |
+
- send_data_batch # send a batch of data
|
16 |
+
- trigger_issue # trigger an issue scenario
|
17 |
+
- system_recovery # recover the system
|
18 |
+
- health_check # check system health
|
19 |
+
|
20 |
+
issue_type:
|
21 |
+
description: 'Type of issue to simulate'
|
22 |
+
required: false
|
23 |
+
default: 'network_failure'
|
24 |
type: choice
|
25 |
options:
|
26 |
+
- network_failure # network outage
|
27 |
+
- extreme_weather # extreme-value data
|
28 |
+
- sensor_malfunction # sensor failure
|
29 |
+
- data_corruption # corrupted data
|
30 |
+
|
31 |
+
station_id:
|
32 |
+
description: 'Target station for issue (optional)'
|
33 |
+
required: false
|
34 |
+
default: 'DT_0001'
|
35 |
+
type: string
|
36 |
+
|
37 |
+
batch_size:
|
38 |
+
description: 'Number of data points to send'
|
39 |
+
required: false
|
40 |
+
default: '12' # one hour of data (5-minute intervals)
|
41 |
+
type: string
|
42 |
|
43 |
env:
|
44 |
HF_SPACE_URL: https://alwaysgood-my-tide-env.hf.space
|
45 |
|
46 |
jobs:
|
47 |
+
# Demo control job
|
48 |
+
demo_controller:
|
49 |
runs-on: ubuntu-latest
|
50 |
+
timeout-minutes: 15
|
51 |
|
52 |
steps:
|
53 |
+
- name: Checkout repository
|
54 |
+
uses: actions/checkout@v4
|
55 |
+
|
56 |
+
- name: Setup Python
|
57 |
+
uses: actions/setup-python@v4
|
58 |
+
with:
|
59 |
+
python-version: '3.9'
|
60 |
|
61 |
+
- name: Install dependencies
|
|
|
62 |
run: |
|
63 |
+
pip install pandas numpy requests python-dateutil
|
|
|
64 |
|
65 |
+
- name: Execute demo action
|
|
|
66 |
run: |
|
67 |
+
ACTION="${{ github.event.inputs.demo_action }}"
|
68 |
+
echo "🎬 Executing demo action: $ACTION"
|
|
|
|
69 |
|
70 |
+
case $ACTION in
|
71 |
+
"start_demo")
|
72 |
+
echo "🚀 Starting interactive demo..."
|
73 |
+
python3 .github/scripts/demo_controller.py start
|
74 |
+
;;
|
75 |
+
"stop_demo")
|
76 |
+
echo "🛑 Stopping demo..."
|
77 |
+
python3 .github/scripts/demo_controller.py stop
|
78 |
+
;;
|
79 |
+
"send_data_batch")
|
80 |
+
echo "📦 Sending data batch..."
|
81 |
+
python3 .github/scripts/send_data_batch.py
|
82 |
+
;;
|
83 |
+
"trigger_issue")
|
84 |
+
echo "⚠️ Triggering issue: ${{ github.event.inputs.issue_type }}"
|
85 |
+
python3 .github/scripts/trigger_issue.py "${{ github.event.inputs.issue_type }}" "${{ github.event.inputs.station_id }}"
|
86 |
+
;;
|
87 |
+
"system_recovery")
|
88 |
+
echo "🔧 Initiating system recovery..."
|
89 |
+
python3 .github/scripts/system_recovery.py
|
90 |
+
;;
|
91 |
+
"health_check")
|
92 |
+
echo "🏥 Performing health check..."
|
93 |
+
python3 .github/scripts/health_check.py
|
94 |
+
;;
|
95 |
+
*)
|
96 |
+
echo "❌ Unknown action: $ACTION"
|
97 |
+
exit 1
|
98 |
+
;;
|
99 |
+
esac
|
100 |
|
101 |
+
- name: Save execution results
|
|
|
102 |
run: |
|
103 |
+
# Save execution results as an artifact
|
104 |
+
mkdir -p demo_results
|
105 |
+
echo "$(date): ${{ github.event.inputs.demo_action }} completed" >> demo_results/execution_log.txt
|
106 |
+
echo "Parameters: issue_type=${{ github.event.inputs.issue_type }}, station=${{ github.event.inputs.station_id }}" >> demo_results/execution_log.txt
|
107 |
|
108 |
+
- name: Upload demo results
|
109 |
+
uses: actions/upload-artifact@v3
|
110 |
+
with:
|
111 |
+
name: demo-execution-results
|
112 |
+
path: demo_results/
|
113 |
+
retention-days: 7
|
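Because the reworked workflow is workflow_dispatch-only, every run has to be started manually or via the GitHub REST API. A hedged sketch of the latter (the repository slug, branch name, and token handling are assumptions, not part of this commit; the inputs mirror the fields defined above):

import os
import requests

REPO = 'owner/repo'                 # hypothetical slug - substitute the real repository
TOKEN = os.environ['GITHUB_TOKEN']  # token with permission to dispatch workflows

# POST /repos/{owner}/{repo}/actions/workflows/{file}/dispatches starts a
# workflow_dispatch run; GitHub answers 204 No Content when it is accepted.
resp = requests.post(
    f'https://api.github.com/repos/{REPO}/actions/workflows/tide_scheduler.yml/dispatches',
    headers={'Authorization': f'Bearer {TOKEN}',
             'Accept': 'application/vnd.github+json'},
    json={'ref': 'main',            # assumed default branch
          'inputs': {'demo_action': 'trigger_issue',
                     'issue_type': 'extreme_weather',
                     'station_id': 'DT_0008'}},
    timeout=30,
)
resp.raise_for_status()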
__pycache__/api_docs.cpython-39.pyc
ADDED
Binary file (19.1 kB). View file
|
|
__pycache__/api_utils.cpython-39.pyc
ADDED
Binary file (13.8 kB). View file
|
|
__pycache__/app.cpython-39.pyc
ADDED
Binary file (2.75 kB). View file
|
|
__pycache__/chatbot.cpython-39.pyc
ADDED
Binary file (5.21 kB). View file
|
|
__pycache__/config.cpython-39.pyc
ADDED
Binary file (1.05 kB). View file
|
|
__pycache__/prediction.cpython-39.pyc
ADDED
Binary file (9.54 kB). View file
|
|
__pycache__/supabase_utils.cpython-39.pyc
ADDED
Binary file (3.25 kB). View file
|
|
__pycache__/ui.cpython-39.pyc
ADDED
Binary file (7.75 kB). View file
|
|
automation/__init__.py
DELETED
File without changes
|
automation/data_collector.py
DELETED
@@ -1,176 +0,0 @@
|
|
1 |
-
"""
|
2 |
-
Data collection module
|
3 |
-
Collects real-time tide data from public APIs
|
4 |
-
"""
|
5 |
-
|
6 |
-
import aiohttp
|
7 |
-
import asyncio
|
8 |
-
from datetime import datetime, timedelta
|
9 |
-
import pandas as pd
|
10 |
-
import numpy as np
|
11 |
-
from typing import Dict, List, Optional
|
12 |
-
import logging
|
13 |
-
from config import STATIONS, KHOA_API_KEY
|
14 |
-
|
15 |
-
logger = logging.getLogger(__name__)
|
16 |
-
|
17 |
-
class DataCollector:
|
18 |
-
"""실시간 조위 데이터 수집기"""
|
19 |
-
|
20 |
-
def __init__(self):
|
21 |
-
self.api_base_url = "http://www.khoa.go.kr/api/oceangrid/tideObsRecent/search.do"
|
22 |
-
self.api_key = KHOA_API_KEY # managed via an environment variable
|
23 |
-
self.stations = STATIONS
|
24 |
-
|
25 |
-
async def collect_station_data(self, station_id: str) -> Dict:
|
26 |
-
"""단일 관측소 데이터 수집"""
|
27 |
-
|
28 |
-
params = {
|
29 |
-
"ServiceKey": self.api_key,
|
30 |
-
"ObsCode": station_id,
|
31 |
-
"ResultType": "json",
|
32 |
-
"DataType": "tideObs", # 실시간 관측 데이터
|
33 |
-
}
|
34 |
-
|
35 |
-
try:
|
36 |
-
async with aiohttp.ClientSession() as session:
|
37 |
-
async with session.get(self.api_base_url, params=params, timeout=30) as response:
|
38 |
-
if response.status == 200:
|
39 |
-
data = await response.json()
|
40 |
-
|
41 |
-
# Parse the response data
|
42 |
-
if data.get("result", {}).get("data"):
|
43 |
-
observations = data["result"]["data"]
|
44 |
-
|
45 |
-
# Keep only the latest observation
|
46 |
-
latest_obs = observations[0] if observations else None
|
47 |
-
|
48 |
-
if latest_obs:
|
49 |
-
return {
|
50 |
-
"station_id": station_id,
|
51 |
-
"observed_at": latest_obs.get("record_time"),
|
52 |
-
"tide_level": float(latest_obs.get("tide_level", 0)),
|
53 |
-
"air_temp": float(latest_obs.get("air_temp", 0)),
|
54 |
-
"water_temp": float(latest_obs.get("water_temp", 0)),
|
55 |
-
"air_pres": float(latest_obs.get("air_press", 1013)),
|
56 |
-
"wind_dir": float(latest_obs.get("wind_dir", 0)),
|
57 |
-
"wind_speed": float(latest_obs.get("wind_speed", 0)),
|
58 |
-
"status": "success"
|
59 |
-
}
|
60 |
-
|
61 |
-
logger.warning(f"Failed to collect data for {station_id}: Status {response.status}")
|
62 |
-
return {"station_id": station_id, "status": "failed", "error": f"HTTP {response.status}"}
|
63 |
-
|
64 |
-
except asyncio.TimeoutError:
|
65 |
-
logger.error(f"Timeout collecting data for {station_id}")
|
66 |
-
return {"station_id": station_id, "status": "timeout"}
|
67 |
-
except Exception as e:
|
68 |
-
logger.error(f"Error collecting data for {station_id}: {str(e)}")
|
69 |
-
return {"station_id": station_id, "status": "error", "error": str(e)}
|
70 |
-
|
71 |
-
async def collect_all_stations(self) -> List[Dict]:
|
72 |
-
"""모든 관측소 데이터 병렬 수집"""
|
73 |
-
|
74 |
-
tasks = [
|
75 |
-
self.collect_station_data(station_id)
|
76 |
-
for station_id in self.stations
|
77 |
-
]
|
78 |
-
|
79 |
-
results = await asyncio.gather(*tasks)
|
80 |
-
|
81 |
-
# Keep only successful results
|
82 |
-
valid_results = [r for r in results if r.get("status") == "success"]
|
83 |
-
|
84 |
-
logger.info(f"Collected data from {len(valid_results)}/{len(self.stations)} stations")
|
85 |
-
|
86 |
-
return valid_results
|
87 |
-
|
88 |
-
async def collect_with_retry(self, max_retries: int = 3) -> List[Dict]:
|
89 |
-
"""재시도 로직을 포함한 데이터 수집"""
|
90 |
-
|
91 |
-
all_data = []
|
92 |
-
failed_stations = list(self.stations)
|
93 |
-
|
94 |
-
for attempt in range(max_retries):
|
95 |
-
if not failed_stations:
|
96 |
-
break
|
97 |
-
|
98 |
-
logger.info(f"Collection attempt {attempt + 1}/{max_retries} for {len(failed_stations)} stations")
|
99 |
-
|
100 |
-
tasks = [
|
101 |
-
self.collect_station_data(station_id)
|
102 |
-
for station_id in failed_stations
|
103 |
-
]
|
104 |
-
|
105 |
-
results = await asyncio.gather(*tasks)
|
106 |
-
|
107 |
-
# Split into successes and failures
|
108 |
-
newly_succeeded = []
|
109 |
-
still_failed = []
|
110 |
-
|
111 |
-
for result in results:
|
112 |
-
if result.get("status") == "success":
|
113 |
-
newly_succeeded.append(result)
|
114 |
-
else:
|
115 |
-
still_failed.append(result["station_id"])
|
116 |
-
|
117 |
-
all_data.extend(newly_succeeded)
|
118 |
-
failed_stations = still_failed
|
119 |
-
|
120 |
-
if failed_stations and attempt < max_retries - 1:
|
121 |
-
# Wait before retrying
|
122 |
-
await asyncio.sleep(2 ** attempt) # Exponential backoff
|
123 |
-
|
124 |
-
if failed_stations:
|
125 |
-
logger.warning(f"Failed to collect data from stations: {failed_stations}")
|
126 |
-
|
127 |
-
return all_data
|
128 |
-
|
129 |
-
def validate_data(self, data: Dict) -> bool:
|
130 |
-
"""데이터 유효성 검증"""
|
131 |
-
|
132 |
-
# Check required fields
|
133 |
-
required_fields = ["station_id", "observed_at", "tide_level"]
|
134 |
-
if not all(field in data for field in required_fields):
|
135 |
-
return False
|
136 |
-
|
137 |
-
# Range check
|
138 |
-
tide_level = data.get("tide_level", 0)
|
139 |
-
if not -100 <= tide_level <= 1000: # in cm
|
140 |
-
logger.warning(f"Invalid tide level: {tide_level} for station {data['station_id']}")
|
141 |
-
return False
|
142 |
-
|
143 |
-
# Time check (drop data that is too old)
|
144 |
-
try:
|
145 |
-
obs_time = datetime.fromisoformat(data["observed_at"])
|
146 |
-
if (datetime.now() - obs_time).total_seconds() > 3600: # data more than an hour old
|
147 |
-
logger.warning(f"Stale data for station {data['station_id']}: {data['observed_at']}")
|
148 |
-
return False
|
149 |
-
except:
|
150 |
-
return False
|
151 |
-
|
152 |
-
return True
|
153 |
-
|
154 |
-
|
155 |
-
class MockDataCollector(DataCollector):
|
156 |
-
"""테스트용 모의 데이터 수집기"""
|
157 |
-
|
158 |
-
async def collect_station_data(self, station_id: str) -> Dict:
|
159 |
-
"""모의 데이터 생성"""
|
160 |
-
|
161 |
-
# Generate random data instead of calling the real API
|
162 |
-
import random
|
163 |
-
|
164 |
-
base_tide = 300 + 200 * np.sin(datetime.now().timestamp() / 3600 * np.pi / 6)
|
165 |
-
|
166 |
-
return {
|
167 |
-
"station_id": station_id,
|
168 |
-
"observed_at": datetime.now().isoformat(),
|
169 |
-
"tide_level": base_tide + random.uniform(-10, 10),
|
170 |
-
"air_temp": 20 + random.uniform(-5, 5),
|
171 |
-
"water_temp": 18 + random.uniform(-3, 3),
|
172 |
-
"air_pres": 1013 + random.uniform(-10, 10),
|
173 |
-
"wind_dir": random.uniform(0, 360),
|
174 |
-
"wind_speed": random.uniform(0, 20),
|
175 |
-
"status": "success"
|
176 |
-
}
|
|
|
automation/data_processor.py
DELETED
@@ -1,279 +0,0 @@
|
|
1 |
-
"""
|
2 |
-
Data processing module
|
3 |
-
Missing-value handling, resampling, and storage of collected data
|
4 |
-
"""
|
5 |
-
|
6 |
-
import pandas as pd
|
7 |
-
import numpy as np
|
8 |
-
from datetime import datetime, timedelta
|
9 |
-
from scipy import interpolate
|
10 |
-
from typing import List, Dict, Optional
|
11 |
-
import logging
|
12 |
-
from supabase_utils import get_supabase_client
|
13 |
-
from config import DATA_COLLECTION_CONFIG
|
14 |
-
|
15 |
-
logger = logging.getLogger(__name__)
|
16 |
-
|
17 |
-
class DataProcessor:
|
18 |
-
"""데이터 처리 및 저장 클래스"""
|
19 |
-
|
20 |
-
def __init__(self):
|
21 |
-
self.client = get_supabase_client()
|
22 |
-
self.resample_interval = DATA_COLLECTION_CONFIG["resample_interval"]
|
23 |
-
self.missing_threshold = DATA_COLLECTION_CONFIG["missing_threshold_minutes"]
|
24 |
-
|
25 |
-
async def process_data(self, raw_data: List[Dict]) -> pd.DataFrame:
|
26 |
-
"""원시 데이터 처리"""
|
27 |
-
|
28 |
-
if not raw_data:
|
29 |
-
logger.warning("No data to process")
|
30 |
-
return pd.DataFrame()
|
31 |
-
|
32 |
-
# Convert to a DataFrame
|
33 |
-
df = pd.DataFrame(raw_data)
|
34 |
-
df['observed_at'] = pd.to_datetime(df['observed_at'])
|
35 |
-
df.set_index('observed_at', inplace=True)
|
36 |
-
|
37 |
-
# Process per station
|
38 |
-
processed_frames = []
|
39 |
-
|
40 |
-
for station_id in df['station_id'].unique():
|
41 |
-
station_data = df[df['station_id'] == station_id].copy()
|
42 |
-
|
43 |
-
# 1. Handle missing values
|
44 |
-
station_data = self.handle_missing_values(station_data)
|
45 |
-
|
46 |
-
# 2. Remove outliers
|
47 |
-
station_data = self.remove_outliers(station_data)
|
48 |
-
|
49 |
-
# 3. Resample to 5-minute intervals
|
50 |
-
station_data = self.resample_data(station_data)
|
51 |
-
|
52 |
-
# 4. Compute residuals
|
53 |
-
station_data = self.calculate_residual(station_data, station_id)
|
54 |
-
|
55 |
-
processed_frames.append(station_data)
|
56 |
-
|
57 |
-
# Combine data from all stations
|
58 |
-
if processed_frames:
|
59 |
-
result = pd.concat(processed_frames, ignore_index=True)
|
60 |
-
return result
|
61 |
-
|
62 |
-
return pd.DataFrame()
|
63 |
-
|
64 |
-
def handle_missing_values(self, df: pd.DataFrame) -> pd.DataFrame:
|
65 |
-
"""결측치 처리 - 10분 미만은 코사인 보간"""
|
66 |
-
|
67 |
-
for column in ['tide_level', 'air_temp', 'air_pres', 'wind_speed', 'wind_dir']:
|
68 |
-
if column not in df.columns:
|
69 |
-
continue
|
70 |
-
|
71 |
-
# Find missing values
|
72 |
-
missing_mask = df[column].isna()
|
73 |
-
|
74 |
-
if not missing_mask.any():
|
75 |
-
continue
|
76 |
-
|
77 |
-
# Find groups of consecutive missing values
|
78 |
-
missing_groups = self.find_missing_groups(missing_mask)
|
79 |
-
|
80 |
-
for start_idx, end_idx in missing_groups:
|
81 |
-
duration_minutes = (df.index[end_idx] - df.index[start_idx]).total_seconds() / 60
|
82 |
-
|
83 |
-
if duration_minutes < self.missing_threshold:
|
84 |
-
# Under 10 minutes: cosine interpolation (reflects tidal behaviour)
|
85 |
-
if column == 'tide_level':
|
86 |
-
df[column] = self.cosine_interpolation(df[column], start_idx, end_idx)
|
87 |
-
else:
|
88 |
-
# Other weather variables use linear interpolation
|
89 |
-
df[column].interpolate(method='linear', inplace=True)
|
90 |
-
else:
|
91 |
-
# 10 minutes or more: use predicted values (handled separately)
|
92 |
-
logger.warning(f"Long missing period ({duration_minutes:.1f} min) for {column}")
|
93 |
-
# Fall back to forward fill for now
|
94 |
-
df[column].fillna(method='ffill', inplace=True)
|
95 |
-
|
96 |
-
return df
|
97 |
-
|
98 |
-
def cosine_interpolation(self, series: pd.Series, start_idx: int, end_idx: int) -> pd.Series:
|
99 |
-
"""코사인 보간 (조위의 주기적 특성 반영)"""
|
100 |
-
|
101 |
-
# Valid data points
|
102 |
-
valid_mask = ~series.isna()
|
103 |
-
valid_indices = np.where(valid_mask)[0]
|
104 |
-
|
105 |
-
if len(valid_indices) < 2:
|
106 |
-
return series
|
107 |
-
|
108 |
-
# PCHIP 보간 (Piecewise Cubic Hermite Interpolating Polynomial)
|
109 |
-
# Produces a smooth curve and avoids overshooting
|
110 |
-
interp_func = interpolate.PchipInterpolator(
|
111 |
-
valid_indices,
|
112 |
-
series.iloc[valid_indices].values,
|
113 |
-
extrapolate=False
|
114 |
-
)
|
115 |
-
|
116 |
-
# Interpolate the missing span
|
117 |
-
missing_indices = np.arange(start_idx, end_idx + 1)
|
118 |
-
interpolated_values = interp_func(missing_indices)
|
119 |
-
|
120 |
-
# Apply the result
|
121 |
-
result = series.copy()
|
122 |
-
result.iloc[missing_indices] = interpolated_values
|
123 |
-
|
124 |
-
return result
|
125 |
-
|
126 |
-
def find_missing_groups(self, missing_mask: pd.Series) -> List[tuple]:
|
127 |
-
"""연속된 결측치 그룹 찾기"""
|
128 |
-
|
129 |
-
groups = []
|
130 |
-
in_group = False
|
131 |
-
start_idx = 0
|
132 |
-
|
133 |
-
for i, is_missing in enumerate(missing_mask):
|
134 |
-
if is_missing and not in_group:
|
135 |
-
start_idx = i
|
136 |
-
in_group = True
|
137 |
-
elif not is_missing and in_group:
|
138 |
-
groups.append((start_idx, i - 1))
|
139 |
-
in_group = False
|
140 |
-
|
141 |
-
if in_group:
|
142 |
-
groups.append((start_idx, len(missing_mask) - 1))
|
143 |
-
|
144 |
-
return groups
|
145 |
-
|
146 |
-
def remove_outliers(self, df: pd.DataFrame) -> pd.DataFrame:
|
147 |
-
"""이상치 제거"""
|
148 |
-
|
149 |
-
# Z-score method
|
150 |
-
for column in ['tide_level', 'air_temp', 'air_pres']:
|
151 |
-
if column not in df.columns:
|
152 |
-
continue
|
153 |
-
|
154 |
-
z_scores = np.abs((df[column] - df[column].mean()) / df[column].std())
|
155 |
-
|
156 |
-
# Treat Z-score > 4 as an outlier
|
157 |
-
outlier_mask = z_scores > 4
|
158 |
-
|
159 |
-
if outlier_mask.any():
|
160 |
-
logger.info(f"Removing {outlier_mask.sum()} outliers from {column}")
|
161 |
-
df.loc[outlier_mask, column] = np.nan
|
162 |
-
# Replace outliers via linear interpolation
|
163 |
-
df[column].interpolate(method='linear', inplace=True)
|
164 |
-
|
165 |
-
# Physical range check
|
166 |
-
if 'tide_level' in df.columns:
|
167 |
-
df.loc[df['tide_level'] < -100, 'tide_level'] = np.nan
|
168 |
-
df.loc[df['tide_level'] > 1000, 'tide_level'] = np.nan
|
169 |
-
df['tide_level'].interpolate(method='linear', inplace=True)
|
170 |
-
|
171 |
-
return df
|
172 |
-
|
173 |
-
def resample_data(self, df: pd.DataFrame) -> pd.DataFrame:
|
174 |
-
"""5분 간격으로 리샘플링"""
|
175 |
-
|
176 |
-
# Ensure a datetime index
|
177 |
-
if not isinstance(df.index, pd.DatetimeIndex):
|
178 |
-
df.index = pd.to_datetime(df.index)
|
179 |
-
|
180 |
-
# Resample to 5-minute means
|
181 |
-
numeric_columns = df.select_dtypes(include=[np.number]).columns
|
182 |
-
resampled = df[numeric_columns].resample(self.resample_interval).mean()
|
183 |
-
|
184 |
-
# Categorical data uses the mode
|
185 |
-
categorical_columns = df.select_dtypes(exclude=[np.number]).columns
|
186 |
-
for col in categorical_columns:
|
187 |
-
if col in df.columns:
|
188 |
-
resampled[col] = df[col].resample(self.resample_interval).agg(lambda x: x.mode()[0] if not x.empty else None)
|
189 |
-
|
190 |
-
# Restore station_id
|
191 |
-
if 'station_id' not in resampled.columns and 'station_id' in df.columns:
|
192 |
-
resampled['station_id'] = df['station_id'].iloc[0]
|
193 |
-
|
194 |
-
return resampled.reset_index()
|
195 |
-
|
196 |
-
def calculate_residual(self, df: pd.DataFrame, station_id: str) -> pd.DataFrame:
|
197 |
-
"""잔차 계산 (관측값 - 조화예측값)"""
|
198 |
-
|
199 |
-
# Fetch harmonic predictions (separate implementation required)
|
200 |
-
# Simplified example here
|
201 |
-
df['astronomical_tide'] = self.get_astronomical_tide(station_id, df.index)
|
202 |
-
|
203 |
-
if 'tide_level' in df.columns:
|
204 |
-
df['residual'] = df['tide_level'] - df['astronomical_tide']
|
205 |
-
else:
|
206 |
-
df['residual'] = 0
|
207 |
-
|
208 |
-
return df
|
209 |
-
|
210 |
-
def get_astronomical_tide(self, station_id: str, timestamps: pd.DatetimeIndex) -> np.ndarray:
|
211 |
-
"""조화 예측값 계산 (간단한 예시)"""
|
212 |
-
|
213 |
-
# A real implementation would use harmonic constants
|
214 |
-
# Replaced here by a simple sine function
|
215 |
-
hours = timestamps.hour + timestamps.minute / 60
|
216 |
-
|
217 |
-
# Main tidal constituent (M2: 12.42-hour period)
|
218 |
-
M2_period = 12.42
|
219 |
-
tide = 200 * np.sin(2 * np.pi * hours / M2_period)
|
220 |
-
|
221 |
-
# Add mean sea level
|
222 |
-
tide += 300
|
223 |
-
|
224 |
-
return tide
|
225 |
-
|
226 |
-
async def save_to_database(self, df: pd.DataFrame) -> int:
|
227 |
-
"""처리된 데이터를 Supabase에 저장"""
|
228 |
-
|
229 |
-
if df.empty:
|
230 |
-
return 0
|
231 |
-
|
232 |
-
# Prepare records for saving
|
233 |
-
records = []
|
234 |
-
for _, row in df.iterrows():
|
235 |
-
record = {
|
236 |
-
"station_id": row.get("station_id"),
|
237 |
-
"observed_at": row.get("observed_at").isoformat() if pd.notna(row.get("observed_at")) else None,
|
238 |
-
"tide_level": float(row.get("tide_level", 0)),
|
239 |
-
"astronomical_tide": float(row.get("astronomical_tide", 0)),
|
240 |
-
"residual": float(row.get("residual", 0)),
|
241 |
-
"air_temp": float(row.get("air_temp", 0)),
|
242 |
-
"air_pres": float(row.get("air_pres", 1013)),
|
243 |
-
"wind_dir": float(row.get("wind_dir", 0)),
|
244 |
-
"wind_speed": float(row.get("wind_speed", 0)),
|
245 |
-
"interpolated": False, # 보간 여부
|
246 |
-
"created_at": datetime.now().isoformat()
|
247 |
-
}
|
248 |
-
records.append(record)
|
249 |
-
|
250 |
-
try:
|
251 |
-
# Insert into Supabase
|
252 |
-
response = self.client.table("tide_observations_processed").insert(records).execute()
|
253 |
-
|
254 |
-
logger.info(f"Saved {len(records)} records to database")
|
255 |
-
return len(records)
|
256 |
-
|
257 |
-
except Exception as e:
|
258 |
-
logger.error(f"Failed to save data: {str(e)}")
|
259 |
-
return 0
|
260 |
-
|
261 |
-
async def cleanup_old_data(self, days_to_keep: int = 7) -> int:
|
262 |
-
"""오래된 원시 데이터 정리"""
|
263 |
-
|
264 |
-
cutoff_date = datetime.now() - timedelta(days=days_to_keep)
|
265 |
-
|
266 |
-
try:
|
267 |
-
# Delete old records
|
268 |
-
response = self.client.table("tide_observations_raw").delete().lt(
|
269 |
-
"created_at", cutoff_date.isoformat()
|
270 |
-
).execute()
|
271 |
-
|
272 |
-
deleted_count = len(response.data) if response.data else 0
|
273 |
-
logger.info(f"Cleaned up {deleted_count} old records")
|
274 |
-
|
275 |
-
return deleted_count
|
276 |
-
|
277 |
-
except Exception as e:
|
278 |
-
logger.error(f"Cleanup failed: {str(e)}")
|
279 |
-
return 0
|
|
|
automation/internal_api.py
DELETED
@@ -1,174 +0,0 @@
|
|
1 |
-
"""
|
2 |
-
Internal API endpoints for automation
|
3 |
-
Internal API called by GitHub Actions
|
4 |
-
"""
|
5 |
-
|
6 |
-
from fastapi import FastAPI, HTTPException, Header, Request
|
7 |
-
from datetime import datetime, timedelta
|
8 |
-
import os
|
9 |
-
import asyncio
|
10 |
-
from typing import Optional
|
11 |
-
import logging
|
12 |
-
from config import INTERNAL_API_KEY
|
13 |
-
|
14 |
-
logger = logging.getLogger(__name__)
|
15 |
-
|
16 |
-
# Read the internal API key from the environment
|
17 |
-
INTERNAL_API_KEY = os.getenv("INTERNAL_API_KEY", "")
|
18 |
-
|
19 |
-
def verify_internal_api_key(authorization: str = Header(None)):
|
20 |
-
"""내부 API 키 검증"""
|
21 |
-
if not authorization or authorization != f"Bearer {INTERNAL_API_KEY}":
|
22 |
-
raise HTTPException(status_code=401, detail="Unauthorized")
|
23 |
-
return True
|
24 |
-
|
25 |
-
def register_internal_routes(app: FastAPI):
|
26 |
-
"""FastAPI 앱에 내부 API 라우트 등록"""
|
27 |
-
|
28 |
-
@app.post("/api/internal/collect_data", tags=["Internal"])
|
29 |
-
async def collect_data_endpoint(
|
30 |
-
request: Request,
|
31 |
-
authorization: str = Header(None)
|
32 |
-
):
|
33 |
-
"""데이터 수집 및 처리 엔드포인트"""
|
34 |
-
verify_internal_api_key(authorization)
|
35 |
-
|
36 |
-
try:
|
37 |
-
# Start the data collection task
|
38 |
-
from automation.data_collector import DataCollector
|
39 |
-
from automation.data_processor import DataProcessor
|
40 |
-
|
41 |
-
collector = DataCollector()
|
42 |
-
processor = DataProcessor()
|
43 |
-
|
44 |
-
# 1. Collect data from all stations
|
45 |
-
collected_data = await collector.collect_all_stations()
|
46 |
-
|
47 |
-
# 2. Process data (missing-value handling, resampling)
|
48 |
-
processed_data = await processor.process_data(collected_data)
|
49 |
-
|
50 |
-
# 3. Save to Supabase
|
51 |
-
saved_count = await processor.save_to_database(processed_data)
|
52 |
-
|
53 |
-
return {
|
54 |
-
"success": True,
|
55 |
-
"timestamp": datetime.now().isoformat(),
|
56 |
-
"stations_collected": len(collected_data),
|
57 |
-
"records_saved": saved_count,
|
58 |
-
"message": f"Successfully collected and processed data for {len(collected_data)} stations"
|
59 |
-
}
|
60 |
-
|
61 |
-
except Exception as e:
|
62 |
-
logger.error(f"Data collection failed: {str(e)}")
|
63 |
-
raise HTTPException(status_code=500, detail=str(e))
|
64 |
-
|
65 |
-
@app.post("/api/internal/update_predictions", tags=["Internal"])
|
66 |
-
async def update_predictions_endpoint(
|
67 |
-
request: Request,
|
68 |
-
authorization: str = Header(None)
|
69 |
-
):
|
70 |
-
"""예측 업데이트 엔드포인트"""
|
71 |
-
verify_internal_api_key(authorization)
|
72 |
-
|
73 |
-
try:
|
74 |
-
from automation.prediction_updater import PredictionUpdater
|
75 |
-
|
76 |
-
updater = PredictionUpdater()
|
77 |
-
|
78 |
-
# 1. Update predictions for all stations
|
79 |
-
results = await updater.update_all_predictions()
|
80 |
-
|
81 |
-
return {
|
82 |
-
"success": True,
|
83 |
-
"timestamp": datetime.now().isoformat(),
|
84 |
-
"predictions_updated": results["updated_count"],
|
85 |
-
"stations": results["stations"],
|
86 |
-
"prediction_horizon": "72 hours",
|
87 |
-
"message": f"Successfully updated predictions for {results['updated_count']} stations"
|
88 |
-
}
|
89 |
-
|
90 |
-
except Exception as e:
|
91 |
-
logger.error(f"Prediction update failed: {str(e)}")
|
92 |
-
raise HTTPException(status_code=500, detail=str(e))
|
93 |
-
|
94 |
-
@app.get("/api/internal/data_freshness", tags=["Internal"])
|
95 |
-
async def check_data_freshness(
|
96 |
-
authorization: str = Header(None)
|
97 |
-
):
|
98 |
-
"""데이터 신선도 체크"""
|
99 |
-
verify_internal_api_key(authorization)
|
100 |
-
|
101 |
-
try:
|
102 |
-
from supabase_utils import get_supabase_client
|
103 |
-
|
104 |
-
client = get_supabase_client()
|
105 |
-
|
106 |
-
# Check the latest data timestamp for each station
|
107 |
-
freshness_report = {}
|
108 |
-
|
109 |
-
for station_id in ["DT_0001", "DT_0002", "DT_0003", "DT_0004", "DT_0005"]:
|
110 |
-
response = client.table("tide_observations_processed").select("observed_at").eq(
|
111 |
-
"station_id", station_id
|
112 |
-
).order("observed_at", desc=True).limit(1).execute()
|
113 |
-
|
114 |
-
if response.data:
|
115 |
-
last_update = datetime.fromisoformat(response.data[0]["observed_at"])
|
116 |
-
minutes_old = (datetime.now() - last_update).total_seconds() / 60
|
117 |
-
freshness_report[station_id] = {
|
118 |
-
"last_update": last_update.isoformat(),
|
119 |
-
"minutes_old": round(minutes_old, 2),
|
120 |
-
"status": "fresh" if minutes_old < 10 else "stale"
|
121 |
-
}
|
122 |
-
else:
|
123 |
-
freshness_report[station_id] = {
|
124 |
-
"last_update": None,
|
125 |
-
"minutes_old": None,
|
126 |
-
"status": "no_data"
|
127 |
-
}
|
128 |
-
|
129 |
-
# Find the stalest data
|
130 |
-
oldest_minutes = max(
|
131 |
-
[v["minutes_old"] for v in freshness_report.values() if v["minutes_old"]],
|
132 |
-
default=0
|
133 |
-
)
|
134 |
-
|
135 |
-
return {
|
136 |
-
"timestamp": datetime.now().isoformat(),
|
137 |
-
"oldest_data_minutes": round(oldest_minutes, 2),
|
138 |
-
"stations": freshness_report,
|
139 |
-
"overall_status": "healthy" if oldest_minutes < 15 else "warning"
|
140 |
-
}
|
141 |
-
|
142 |
-
except Exception as e:
|
143 |
-
logger.error(f"Freshness check failed: {str(e)}")
|
144 |
-
raise HTTPException(status_code=500, detail=str(e))
|
145 |
-
|
146 |
-
@app.post("/api/internal/manual_trigger", tags=["Internal"])
|
147 |
-
async def manual_trigger(
|
148 |
-
task: str,
|
149 |
-
authorization: str = Header(None)
|
150 |
-
):
|
151 |
-
"""수동 작업 트리거"""
|
152 |
-
verify_internal_api_key(authorization)
|
153 |
-
|
154 |
-
if task == "collect_now":
|
155 |
-
# Run data collection immediately
|
156 |
-
result = await collect_data_endpoint(None, authorization)
|
157 |
-
return result
|
158 |
-
elif task == "predict_now":
|
159 |
-
# Update predictions immediately
|
160 |
-
result = await update_predictions_endpoint(None, authorization)
|
161 |
-
return result
|
162 |
-
elif task == "cleanup":
|
163 |
-
# Clean up old data
|
164 |
-
from automation.data_cleaner import cleanup_old_data
|
165 |
-
deleted_count = await cleanup_old_data(days_to_keep=7)
|
166 |
-
return {
|
167 |
-
"success": True,
|
168 |
-
"deleted_records": deleted_count,
|
169 |
-
"message": f"Cleaned up {deleted_count} old records"
|
170 |
-
}
|
171 |
-
else:
|
172 |
-
raise HTTPException(status_code=400, detail=f"Unknown task: {task}")
|
173 |
-
|
174 |
-
logger.info("Internal API routes registered successfully")
|
|
|
|
|
|
|
|
|
|
|
|
|
automation/prediction_updater.py
DELETED
File without changes
|
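The new top-level internal_api.py below replaces the deleted automation/internal_api.py and adds the demo-control surface. For orientation, a minimal client-side sketch of its read-only status endpoint, assuming only the Bearer scheme already used by the scripts above:

import os
import requests

HF_SPACE_URL = 'https://alwaysgood-my-tide-env.hf.space'
API_KEY = os.environ['INTERNAL_API_KEY']

# GET /api/internal/demo_status reports demo_active plus, while a demo runs,
# session details such as session_id, running_minutes, and current_rmse.
resp = requests.get(
    f'{HF_SPACE_URL}/api/internal/demo_status',
    headers={'Authorization': f'Bearer {API_KEY}'},
    timeout=30,
)
resp.raise_for_status()
print(resp.json())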
internal_api.py
ADDED
@@ -0,0 +1,754 @@
|
|
|
1 |
+
"""
|
2 |
+
Extended Internal API endpoints for automation and demo control
|
3 |
+
Extended internal API for GitHub Actions and demo control
|
4 |
+
"""
|
5 |
+
|
6 |
+
from fastapi import FastAPI, HTTPException, Header, Request, BackgroundTasks
|
7 |
+
from datetime import datetime, timedelta
|
8 |
+
import os
|
9 |
+
import asyncio
|
10 |
+
import json
|
11 |
+
import uuid
|
12 |
+
import numpy as np
|
13 |
+
from typing import Optional, List, Dict, Any
|
14 |
+
import logging
|
15 |
+
from config import INTERNAL_API_KEY
|
16 |
+
|
17 |
+
logger = logging.getLogger(__name__)
|
18 |
+
|
19 |
+
# Read the internal API key from the environment
|
20 |
+
INTERNAL_API_KEY = os.getenv("INTERNAL_API_KEY", "")
|
21 |
+
|
22 |
+
# Global state
|
23 |
+
demo_session = {
|
24 |
+
"active": False,
|
25 |
+
"session_id": None,
|
26 |
+
"start_time": None,
|
27 |
+
"simulation_start_time": None,
|
28 |
+
"active_stations": [],
|
29 |
+
"current_issues": [],
|
30 |
+
"performance_history": [],
|
31 |
+
"total_processed": 0
|
32 |
+
}
|
33 |
+
|
34 |
+
# Currently active issues
|
35 |
+
active_issues = {}
|
36 |
+
|
37 |
+
def verify_internal_api_key(authorization: str = Header(None)):
|
38 |
+
"""내부 API 키 검증"""
|
39 |
+
if not authorization or authorization != f"Bearer {INTERNAL_API_KEY}":
|
40 |
+
raise HTTPException(status_code=401, detail="Unauthorized")
|
41 |
+
return True
|
42 |
+
|
43 |
+
def register_internal_routes(app: FastAPI):
|
44 |
+
"""FastAPI 앱에 확장된 내부 API 라우트 등록"""
|
45 |
+
|
46 |
+
# ============================================================================
|
47 |
+
# Existing endpoints (kept for compatibility)
|
48 |
+
# ============================================================================
|
49 |
+
|
50 |
+
@app.post("/api/internal/collect_data", tags=["Internal"])
|
51 |
+
async def collect_data_endpoint(
|
52 |
+
request: Request,
|
53 |
+
authorization: str = Header(None)
|
54 |
+
):
|
55 |
+
"""데이터 수집 및 처리 엔드포인트 (확장된 버전)"""
|
56 |
+
verify_internal_api_key(authorization)
|
57 |
+
|
58 |
+
try:
|
59 |
+
# Parse the request payload
|
60 |
+
request_data = await request.json()
|
61 |
+
task_type = request_data.get('task', 'normal_collection')
|
62 |
+
stations_data = request_data.get('stations_data', [])
|
63 |
+
simulation_time = request_data.get('simulation_time')
|
64 |
+
|
65 |
+
logger.info(f"Data collection request: {task_type}, {len(stations_data)} records")
|
66 |
+
|
67 |
+
# Dynamically import the data processing module
|
68 |
+
try:
|
69 |
+
from data_processor import DataProcessor
|
70 |
+
processor = DataProcessor()
|
71 |
+
except ImportError:
|
72 |
+
# Fall back to a simple processing path
|
73 |
+
processor = SimpleDataProcessor()
|
74 |
+
|
75 |
+
# Batch data processing
|
76 |
+
if task_type == 'batch_data_collection':
|
77 |
+
processed_count = await process_batch_data(stations_data, simulation_time)
|
78 |
+
|
79 |
+
# Update the demo session
|
80 |
+
if demo_session["active"]:
|
81 |
+
demo_session["total_processed"] += processed_count
|
82 |
+
|
83 |
+
return {
|
84 |
+
"success": True,
|
85 |
+
"timestamp": datetime.now().isoformat(),
|
86 |
+
"task_type": task_type,
|
87 |
+
"records_saved": processed_count,
|
88 |
+
"stations_processed": len(set(d.get('station_id') for d in stations_data)),
|
89 |
+
"processing_time_ms": 150, # 시뮬레이션
|
90 |
+
"simulation_time": simulation_time
|
91 |
+
}
|
92 |
+
|
93 |
+
# Normal data collection (existing logic)
|
94 |
+
else:
|
95 |
+
# Simulate collection from the real external API
|
96 |
+
collected_data = await simulate_data_collection()
|
97 |
+
processed_data = await processor.process_data(collected_data)
|
98 |
+
saved_count = await processor.save_to_database(processed_data)
|
99 |
+
|
100 |
+
return {
|
101 |
+
"success": True,
|
102 |
+
"timestamp": datetime.now().isoformat(),
|
103 |
+
"stations_collected": len(collected_data),
|
104 |
+
"records_saved": saved_count,
|
105 |
+
"message": f"Successfully collected and processed data for {len(collected_data)} stations"
|
106 |
+
}
|
107 |
+
|
108 |
+
except Exception as e:
|
109 |
+
logger.error(f"Data collection failed: {str(e)}")
|
110 |
+
raise HTTPException(status_code=500, detail=str(e))
|
111 |
+
|
112 |
+
@app.post("/api/internal/update_predictions", tags=["Internal"])
|
113 |
+
async def update_predictions_endpoint(
|
114 |
+
request: Request,
|
115 |
+
authorization: str = Header(None)
|
116 |
+
):
|
117 |
+
"""예측 업데이트 엔드포인트 (확장된 버전)"""
|
118 |
+
verify_internal_api_key(authorization)
|
119 |
+
|
120 |
+
try:
|
121 |
+
request_data = await request.json()
|
122 |
+
demo_mode = request_data.get('demo_mode', False)
|
123 |
+
speed_multiplier = request_data.get('speed_multiplier', 1)
|
124 |
+
|
125 |
+
# Run the prediction update
|
126 |
+
if demo_mode:
|
127 |
+
results = await update_demo_predictions(speed_multiplier)
|
128 |
+
else:
|
129 |
+
# Existing prediction update logic
|
130 |
+
try:
|
131 |
+
from prediction_updater import PredictionUpdater
|
132 |
+
updater = PredictionUpdater()
|
133 |
+
results = await updater.update_all_predictions()
|
134 |
+
except ImportError:
|
135 |
+
results = await simulate_prediction_update()
|
136 |
+
|
137 |
+
return {
|
138 |
+
"success": True,
|
139 |
+
"timestamp": datetime.now().isoformat(),
|
140 |
+
"predictions_updated": results["updated_count"],
|
141 |
+
"stations": results["stations"],
|
142 |
+
"prediction_horizon": "72 points (6 hours)",
|
143 |
+
"demo_mode": demo_mode,
|
144 |
+
"speed_multiplier": speed_multiplier,
|
145 |
+
"message": f"Successfully updated predictions for {results['updated_count']} stations"
|
146 |
+
}
|
147 |
+
|
148 |
+
except Exception as e:
|
149 |
+
logger.error(f"Prediction update failed: {str(e)}")
|
150 |
+
raise HTTPException(status_code=500, detail=str(e))
|
151 |
+
|
152 |
+
# ============================================================================
|
153 |
+
# New demo control endpoints
|
154 |
+
# ============================================================================
|
155 |
+
|
156 |
+
@app.post("/api/internal/demo_control", tags=["Demo Control"])
|
157 |
+
async def demo_control_endpoint(
|
158 |
+
request: Request,
|
159 |
+
authorization: str = Header(None)
|
160 |
+
):
|
161 |
+
"""시연 제어 (시작/중지)"""
|
162 |
+
verify_internal_api_key(authorization)
|
163 |
+
|
164 |
+
try:
|
165 |
+
request_data = await request.json()
|
166 |
+
action = request_data.get('action')
|
167 |
+
|
168 |
+
if action == 'start_demo':
|
169 |
+
return await start_demo_session(request_data)
|
170 |
+
elif action == 'stop_demo':
|
171 |
+
return await stop_demo_session(request_data)
|
172 |
+
else:
|
173 |
+
raise HTTPException(status_code=400, detail=f"Unknown action: {action}")
|
174 |
+
|
175 |
+
except Exception as e:
|
176 |
+
logger.error(f"Demo control failed: {str(e)}")
|
177 |
+
raise HTTPException(status_code=500, detail=str(e))
|
178 |
+
|
179 |
+
@app.get("/api/internal/demo_status", tags=["Demo Control"])
|
180 |
+
async def demo_status_endpoint(authorization: str = Header(None)):
|
181 |
+
"""시연 상태 조회"""
|
182 |
+
verify_internal_api_key(authorization)
|
183 |
+
|
184 |
+
try:
|
185 |
+
current_time = datetime.now()
|
186 |
+
|
187 |
+
if demo_session["active"]:
|
188 |
+
running_minutes = (current_time - demo_session["start_time"]).total_seconds() / 60
|
189 |
+
|
190 |
+
return {
|
191 |
+
"demo_active": True,
|
192 |
+
"session_id": demo_session["session_id"],
|
193 |
+
"start_time": demo_session["start_time"].isoformat(),
|
194 |
+
"running_minutes": round(running_minutes, 1),
|
195 |
+
"simulation_start_time": demo_session["simulation_start_time"],
|
196 |
+
"current_sim_time": calculate_current_simulation_time(),
|
197 |
+
"active_stations": len(demo_session["active_stations"]),
|
198 |
+
"stations_list": demo_session["active_stations"],
|
199 |
+
"total_processed": demo_session["total_processed"],
|
200 |
+
"active_issues": len(active_issues),
|
201 |
+
"current_rmse": get_current_rmse()
|
202 |
+
}
|
203 |
+
else:
|
204 |
+
return {
|
205 |
+
"demo_active": False,
|
206 |
+
"last_session_end": demo_session.get("end_time"),
|
207 |
+
"total_sessions_today": 1 # 간단한 카운터
|
208 |
+
}
|
209 |
+
|
210 |
+
except Exception as e:
|
211 |
+
logger.error(f"Demo status check failed: {str(e)}")
|
212 |
+
raise HTTPException(status_code=500, detail=str(e))
|
213 |
+
|
214 |
+
@app.get("/api/internal/simulation_status", tags=["Demo Control"])
|
215 |
+
async def simulation_status_endpoint(authorization: str = Header(None)):
|
216 |
+
"""시뮬레이션 시간 상태"""
|
217 |
+
verify_internal_api_key(authorization)
|
218 |
+
|
219 |
+
try:
|
220 |
+
current_sim_time = calculate_current_simulation_time()
|
221 |
+
|
222 |
+
return {
|
223 |
+
"demo_active": demo_session["active"],
|
224 |
+
"current_simulation_time": current_sim_time,
|
225 |
+
"simulation_start_time": demo_session["simulation_start_time"],
|
226 |
+
"real_start_time": demo_session["start_time"].isoformat() if demo_session["start_time"] else None,
|
227 |
+
"elapsed_real_minutes": calculate_elapsed_real_minutes(),
|
228 |
+
"simulation_speed": 1 # 현재는 1:1 비율
|
229 |
+
}
|
230 |
+
|
231 |
+
except Exception as e:
|
232 |
+
logger.error(f"Simulation status check failed: {str(e)}")
|
233 |
+
raise HTTPException(status_code=500, detail=str(e))
|
234 |
+
|
235 |
+
@app.post("/api/internal/trigger_demo", tags=["Demo Control"])
|
236 |
+
async def trigger_demo_issue(
|
237 |
+
request: Request,
|
238 |
+
authorization: str = Header(None)
|
239 |
+
):
|
240 |
+
"""시연용 문제 상황 발생"""
|
241 |
+
verify_internal_api_key(authorization)
|
242 |
+
|
243 |
+
try:
|
244 |
+
request_data = await request.json()
|
245 |
+
issue_type = request_data.get('issue_type')
|
246 |
+
issue_config = request_data.get('config', {})
|
247 |
+
station_id = request_data.get('station_id')
|
248 |
+
|
249 |
+
logger.info(f"Triggering demo issue: {issue_type}")
|
250 |
+
|
251 |
+
# Activate the issue
|
252 |
+
issue_id = str(uuid.uuid4())[:8]
|
253 |
+
|
254 |
+
issue_record = {
|
255 |
+
"id": issue_id,
|
256 |
+
"type": issue_type,
|
257 |
+
"config": issue_config,
|
258 |
+
"station_id": station_id,
|
259 |
+
"start_time": datetime.now(),
|
260 |
+
"active": True,
|
261 |
+
"demo_mode": True
|
262 |
+
}
|
263 |
+
|
264 |
+
active_issues[issue_id] = issue_record
|
265 |
+
demo_session["current_issues"].append(issue_id)
|
266 |
+
|
267 |
+
# 문제 유형별 처리
|
268 |
+
response_data = {
|
269 |
+
"success": True,
|
270 |
+
"issue_id": issue_id,
|
271 |
+
"issue_type": issue_type,
|
272 |
+
"timestamp": datetime.now().isoformat(),
|
273 |
+
"target": station_id or "system-wide",
|
274 |
+
"auto_recovery": issue_config.get('auto_recovery', True)
|
275 |
+
}
|
276 |
+
|
277 |
+
if issue_type == 'network_failure':
|
278 |
+
duration = issue_config.get('duration_seconds', 60)
|
279 |
+
response_data.update({
|
280 |
+
"expected_duration": f"{duration} seconds",
|
281 |
+
"expected_impact": "API timeouts, data collection delays"
|
282 |
+
})
|
283 |
+
|
284 |
+
elif issue_type == 'extreme_weather':
|
285 |
+
magnitude = issue_config.get('magnitude', 'moderate')
|
286 |
+
duration = issue_config.get('duration_minutes', 90)
|
287 |
+
response_data.update({
|
288 |
+
"expected_duration": f"{duration} minutes",
|
289 |
+
"expected_impact": f"{magnitude} tide surge, RMSE increase"
|
290 |
+
})
|
291 |
+
|
292 |
+
elif issue_type == 'sensor_malfunction':
|
293 |
+
target = issue_config.get('target_station', station_id)
|
294 |
+
sensors = issue_config.get('affected_sensors', ['tide_level'])
|
295 |
+
response_data.update({
|
296 |
+
"target": target,
|
297 |
+
"expected_duration": "30-90 minutes",
|
298 |
+
"expected_impact": f"Missing data for {', '.join(sensors)}"
|
299 |
+
})
|
300 |
+
|
301 |
+
elif issue_type == 'data_corruption':
|
302 |
+
rate = issue_config.get('corruption_rate', 0.2)
|
303 |
+
response_data.update({
|
304 |
+
"expected_duration": "15-60 minutes",
|
305 |
+
"expected_impact": f"{rate:.1%} data corruption rate"
|
306 |
+
})
|
307 |
+
|
308 |
+
# 자동 복구 스케줄링
|
309 |
+
if issue_config.get('auto_recovery', True):
|
310 |
+
asyncio.create_task(schedule_auto_recovery(issue_id, issue_config))
|
311 |
+
|
312 |
+
return response_data
|
313 |
+
|
314 |
+
except Exception as e:
|
315 |
+
logger.error(f"Issue trigger failed: {str(e)}")
|
316 |
+
raise HTTPException(status_code=500, detail=str(e))
|
317 |
+
|
318 |
+
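A sketch of how a workflow script might trigger one of these scenarios; the payload keys mirror the handler above, while the URL and auth handling are assumptions:

# Hypothetical trigger call against /api/internal/trigger_demo.
import os
import requests

payload = {
    "issue_type": "sensor_malfunction",  # or network_failure / extreme_weather / data_corruption
    "station_id": "DT_0002",
    "config": {
        "affected_sensors": ["tide_level", "air_pres"],
        "auto_recovery": True,
        "duration_seconds": 120,  # consumed by schedule_auto_recovery
    },
}
resp = requests.post(
    f"{os.environ['HF_SPACE_URL']}/api/internal/trigger_demo",
    headers={"Authorization": f"Bearer {os.environ['INTERNAL_API_KEY']}"},
    json=payload,
    timeout=30,
)
body = resp.json()
print(body["issue_id"], body["expected_impact"])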
@app.post("/api/internal/system_recovery", tags=["System Recovery"])
|
319 |
+
async def system_recovery_endpoint(
|
320 |
+
request: Request,
|
321 |
+
authorization: str = Header(None)
|
322 |
+
):
|
323 |
+
"""시스템 복구"""
|
324 |
+
verify_internal_api_key(authorization)
|
325 |
+
|
326 |
+
try:
|
327 |
+
request_data = await request.json()
|
328 |
+
action = request_data.get('action')
|
329 |
+
|
330 |
+
if action == 'clear_all_issues':
|
331 |
+
return await clear_all_issues()
|
332 |
+
elif action == 'reset_simulation':
|
333 |
+
return await reset_simulation_state(request_data)
|
334 |
+
else:
|
335 |
+
raise HTTPException(status_code=400, detail=f"Unknown recovery action: {action}")
|
336 |
+
|
337 |
+
except Exception as e:
|
338 |
+
logger.error(f"System recovery failed: {str(e)}")
|
339 |
+
raise HTTPException(status_code=500, detail=str(e))
|
340 |
+
|
341 |
+
@app.post("/api/internal/collection_control", tags=["System Recovery"])
|
342 |
+
async def collection_control_endpoint(
|
343 |
+
request: Request,
|
344 |
+
authorization: str = Header(None)
|
345 |
+
):
|
346 |
+
"""데이터 수집 제어"""
|
347 |
+
verify_internal_api_key(authorization)
|
348 |
+
|
349 |
+
try:
|
350 |
+
request_data = await request.json()
|
351 |
+
action = request_data.get('action')
|
352 |
+
|
353 |
+
if action == 'start_collection':
|
354 |
+
stations = request_data.get('stations', demo_session["active_stations"])
|
355 |
+
return {
|
356 |
+
"success": True,
|
357 |
+
"action": "start_collection",
|
358 |
+
"active_stations": stations,
|
359 |
+
"timestamp": datetime.now().isoformat()
|
360 |
+
}
|
361 |
+
elif action == 'stop_collection':
|
362 |
+
return {
|
363 |
+
"success": True,
|
364 |
+
"action": "stop_collection",
|
365 |
+
"timestamp": datetime.now().isoformat()
|
366 |
+
}
|
367 |
+
else:
|
368 |
+
raise HTTPException(status_code=400, detail=f"Unknown collection action: {action}")
|
369 |
+
|
370 |
+
except Exception as e:
|
371 |
+
logger.error(f"Collection control failed: {str(e)}")
|
372 |
+
raise HTTPException(status_code=500, detail=str(e))
|
373 |
+
|
374 |
+
# ============================================================================
# Legacy endpoints (kept for compatibility)
# ============================================================================

@app.get("/api/internal/data_freshness", tags=["Internal"])
|
379 |
+
async def check_data_freshness(authorization: str = Header(None)):
|
380 |
+
"""데이터 신선도 체크 (확장된 버전)"""
|
381 |
+
verify_internal_api_key(authorization)
|
382 |
+
|
383 |
+
try:
|
384 |
+
from supabase_utils import get_supabase_client
|
385 |
+
client = get_supabase_client()
|
386 |
+
|
387 |
+
# 시연용 관측소들
|
388 |
+
demo_stations = demo_session["active_stations"] if demo_session["active"] else [
|
389 |
+
"DT_0001", "DT_0002", "DT_0003", "DT_0008", "DT_0017"
|
390 |
+
]
|
391 |
+
|
392 |
+
freshness_report = {}
|
393 |
+
|
394 |
+
for station_id in demo_stations:
|
395 |
+
try:
|
396 |
+
response = client.table("tide_observations_processed").select("observed_at").eq(
|
397 |
+
"station_id", station_id
|
398 |
+
).order("observed_at", desc=True).limit(1).execute()
|
399 |
+
|
400 |
+
if response.data:
|
401 |
+
last_update = datetime.fromisoformat(response.data[0]["observed_at"])
|
402 |
+
minutes_old = (datetime.now() - last_update).total_seconds() / 60
|
403 |
+
freshness_report[station_id] = {
|
404 |
+
"last_update": last_update.isoformat(),
|
405 |
+
"minutes_old": round(minutes_old, 2),
|
406 |
+
"status": "fresh" if minutes_old < 10 else "stale"
|
407 |
+
}
|
408 |
+
else:
|
409 |
+
freshness_report[station_id] = {
|
410 |
+
"last_update": None,
|
411 |
+
"minutes_old": None,
|
412 |
+
"status": "no_data"
|
413 |
+
}
|
414 |
+
except Exception as e:
|
415 |
+
freshness_report[station_id] = {
|
416 |
+
"last_update": None,
|
417 |
+
"minutes_old": None,
|
418 |
+
"status": "error",
|
419 |
+
"error": str(e)
|
420 |
+
}
|
421 |
+
|
422 |
+
# 가장 오래된 데이터 계산
|
423 |
+
valid_ages = [v["minutes_old"] for v in freshness_report.values()
|
424 |
+
if v["minutes_old"] is not None]
|
425 |
+
oldest_minutes = max(valid_ages) if valid_ages else 0
|
426 |
+
|
427 |
+
return {
|
428 |
+
"timestamp": datetime.now().isoformat(),
|
429 |
+
"oldest_data_minutes": round(oldest_minutes, 2),
|
430 |
+
"stations": freshness_report,
|
431 |
+
"overall_status": "healthy" if oldest_minutes < 15 else "warning",
|
432 |
+
"demo_active": demo_session["active"]
|
433 |
+
}
|
434 |
+
|
435 |
+
except Exception as e:
|
436 |
+
logger.error(f"Freshness check failed: {str(e)}")
|
437 |
+
raise HTTPException(status_code=500, detail=str(e))
|
438 |
+
|
439 |
+
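A sketch of how a monitoring script could consume this report to decide whether to kick off recovery; the helper name and 15-minute threshold (mirroring the endpoint's "warning" cutoff) are assumptions:

# Hypothetical consumer of the /api/internal/data_freshness response.
def needs_recovery(freshness: dict, max_minutes: float = 15.0) -> bool:
    stale = [
        sid for sid, info in freshness["stations"].items()
        if info["status"] in ("stale", "no_data", "error")
    ]
    return freshness["oldest_data_minutes"] > max_minutes or bool(stale)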
@app.post("/api/internal/manual_trigger", tags=["Internal"])
|
440 |
+
async def manual_trigger(
|
441 |
+
task: str,
|
442 |
+
authorization: str = Header(None)
|
443 |
+
):
|
444 |
+
"""수동 작업 트리거 (확장된 버전)"""
|
445 |
+
verify_internal_api_key(authorization)
|
446 |
+
|
447 |
+
if task == "collect_now":
|
448 |
+
result = await collect_data_endpoint(None, authorization)
|
449 |
+
return result
|
450 |
+
elif task == "predict_now":
|
451 |
+
result = await update_predictions_endpoint(None, authorization)
|
452 |
+
return result
|
453 |
+
elif task == "recovery_mode":
|
454 |
+
return await emergency_recovery()
|
455 |
+
else:
|
456 |
+
raise HTTPException(status_code=400, detail=f"Unknown task: {task}")
|
457 |
+
|
458 |
+
logger.info("Extended Internal API routes registered successfully")
|
459 |
+
|
460 |
+
# ============================================================================
# Helper functions
# ============================================================================

async def start_demo_session(request_data: dict):
    """Start a demo session"""
    global demo_session

    session_id = str(uuid.uuid4())[:8]
    current_time = datetime.now()
    sim_start_time = request_data.get('simulation_settings', {}).get('start_time', '2025-07-01T00:00:00')
    stations = request_data.get('stations', ['DT_0001', 'DT_0002', 'DT_0003', 'DT_0008', 'DT_0017'])

    demo_session.update({
        "active": True,
        "session_id": session_id,
        "start_time": current_time,
        "simulation_start_time": sim_start_time,
        "active_stations": stations,
        "current_issues": [],
        "performance_history": [],
        "total_processed": 0
    })

    logger.info(f"Demo session started: {session_id}")

    return {
        "success": True,
        "session_id": session_id,
        "demo_mode": "interactive",
        "simulation_start_time": sim_start_time,
        "stations": stations,
        "timestamp": current_time.isoformat()
    }

async def stop_demo_session(request_data: dict):
    """Stop the demo session"""
    global demo_session

    if not demo_session["active"]:
        raise HTTPException(status_code=400, detail="No active demo session")

    end_time = datetime.now()
    duration_minutes = (end_time - demo_session["start_time"]).total_seconds() / 60

    # Compute final performance
    final_rmse = get_current_rmse()
    final_accuracy = calculate_final_accuracy()

    session_summary = {
        "session_id": demo_session["session_id"],
        "duration_minutes": round(duration_minutes, 1),
        "total_processed": demo_session["total_processed"],
        "final_rmse": final_rmse,
        "final_accuracy": final_accuracy,
        "issues_encountered": len(demo_session["current_issues"]),
        "end_time": end_time.isoformat()
    }

    # Reset the session
    demo_session.update({
        "active": False,
        "session_id": None,
        "start_time": None,
        "end_time": end_time.isoformat()
    })

    # Clear active issues
    global active_issues
    active_issues.clear()

    logger.info(f"Demo session ended: {session_summary['session_id']}")

    return {
        "success": True,
        **session_summary
    }

async def clear_all_issues():
    """Clear all active issues"""
    global active_issues

    cleared_issues = []
    for issue_id, issue in active_issues.items():
        cleared_issues.append({
            "id": issue_id,
            "type": issue["type"],
            "description": f"{issue['type']} affecting {issue.get('station_id', 'system')}"
        })

    active_issues.clear()
    demo_session["current_issues"].clear()

    return {
        "success": True,
        "cleared_issues": cleared_issues,
        "timestamp": datetime.now().isoformat()
    }

async def reset_simulation_state(request_data: dict):
    """Reset simulation state"""
    preserve_data = request_data.get('preserve_data', True)

    # Reset performance history
    demo_session["performance_history"].clear()

    # Reset counters (note: this runs before preserved_records is read below,
    # so the reported count reflects the post-reset value)
    if request_data.get('reset_counters', True):
        demo_session["total_processed"] = 0

    return {
        "success": True,
        "preserved_records": demo_session["total_processed"] if preserve_data else 0,
        "cache_cleared": True,
        "counters_reset": True,
        "timestamp": datetime.now().isoformat()
    }

async def schedule_auto_recovery(issue_id: str, issue_config: dict):
    """Schedule automatic recovery"""
    recovery_delay = issue_config.get('duration_seconds', 60)

    # Clear the issue after the configured delay
    await asyncio.sleep(recovery_delay)

    if issue_id in active_issues:
        del active_issues[issue_id]
    if issue_id in demo_session["current_issues"]:
        demo_session["current_issues"].remove(issue_id)

    logger.info(f"Auto-recovered issue: {issue_id}")

def calculate_current_simulation_time():
    """Compute the current simulation time"""
    if not demo_session["active"] or not demo_session["start_time"]:
        return demo_session.get("simulation_start_time", "2025-07-01T00:00:00")

    # Real elapsed time (minutes)
    elapsed_real = (datetime.now() - demo_session["start_time"]).total_seconds() / 60

    # Add the elapsed time to the simulation start time
    sim_start = datetime.fromisoformat(demo_session["simulation_start_time"])
    current_sim = sim_start + timedelta(minutes=elapsed_real)

    return current_sim.isoformat()

def calculate_elapsed_real_minutes():
    """Real elapsed time in minutes"""
    if not demo_session["active"] or not demo_session["start_time"]:
        return 0

    return (datetime.now() - demo_session["start_time"]).total_seconds() / 60

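Worked example of the 1:1 simulation clock these two helpers implement: with simulation_start_time = '2025-07-01T00:00:00' and a demo started 30 real minutes ago, the simulated clock reads 2025-07-01T00:30:00 (values illustrative):

# The same arithmetic as calculate_current_simulation_time, on fixed inputs.
from datetime import datetime, timedelta

sim_start = datetime.fromisoformat("2025-07-01T00:00:00")
elapsed_real_minutes = 30
print(sim_start + timedelta(minutes=elapsed_real_minutes))  # 2025-07-01 00:30:00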
def get_current_rmse():
    """Current RMSE value (simulated)"""
    base_rmse = 18.5  # baseline RMSE

    # Impact of active issues
    for issue in active_issues.values():
        if issue["type"] == "extreme_weather":
            base_rmse *= 1.8  # RMSE rises under extreme conditions
        elif issue["type"] == "sensor_malfunction":
            base_rmse *= 1.3  # rises on sensor faults
        elif issue["type"] == "data_corruption":
            base_rmse *= 1.5  # rises on corrupted data

    # Random variation
    variation = np.random.normal(0, 2)
    return round(max(10, base_rmse + variation), 1)

def calculate_final_accuracy():
    """Compute final accuracy (simulated)"""
    base_accuracy = 89.2

    # Impact of issue scenarios
    for issue in active_issues.values():
        if issue["type"] == "extreme_weather":
            base_accuracy -= 15
        elif issue["type"] == "sensor_malfunction":
            base_accuracy -= 8
        elif issue["type"] == "data_corruption":
            base_accuracy -= 12

    return round(max(50, base_accuracy), 1)

# ============================================================================
# Simple data processor (fallback when the real module is unavailable)
# ============================================================================

class SimpleDataProcessor:
    """Simple data processor"""

    async def process_data(self, raw_data):
        """Simulated data processing (pass-through)"""
        return raw_data

    async def save_to_database(self, processed_data):
        """Simulated database save (returns the would-be row count)"""
        return len(processed_data)

async def process_batch_data(stations_data: List[Dict], simulation_time: str):
    """Process a batch of data"""
    processed_count = 0

    for data_point in stations_data:
        # Basic validation
        required_fields = ['station_id', 'date', 'air_pres', 'wind_dir', 'wind_speed', 'air_temp', 'tide_level']

        if all(field in data_point for field in required_fields):
            # Compute astronomical tide and residual (simulated)
            astronomical_tide = calculate_astronomical_tide(
                data_point['station_id'],
                data_point['date']
            )
            data_point['astronomical_tide'] = astronomical_tide
            data_point['residual'] = data_point['tide_level'] - astronomical_tide

            processed_count += 1

    return processed_count

def calculate_astronomical_tide(station_id: str, timestamp: str):
    """Astronomical tide calculation (simulated; station_id is currently unused)"""
    from datetime import datetime

    dt = datetime.fromisoformat(timestamp.replace('Z', '+00:00') if timestamp.endswith('Z') else timestamp)
    hours = dt.hour + dt.minute / 60

    # Simplified tidal calculation: a single M2 constituent
    M2_period = 12.42
    tide = 300 + 200 * np.sin(2 * np.pi * hours / M2_period)

    return round(tide, 1)

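The sketch above models only the principal lunar semidiurnal constituent (M2, period 12.42 h): tide(t) = 300 + 200 * sin(2*pi*t / 12.42) cm, where t is the hour of day. Example residual computation, with an illustrative observed value:

# Residual = observed tide level minus the simplified astronomical tide.
astro = calculate_astronomical_tide("DT_0001", "2025-07-01T06:12:00")
observed = 412.5  # illustrative observation in cm, not real data
residual = observed - astro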
async def simulate_data_collection():
    """Simulated data collection"""
    stations = demo_session["active_stations"] if demo_session["active"] else ["DT_0001", "DT_0002"]

    collected_data = []
    for station_id in stations:
        data_point = {
            "station_id": station_id,
            "observed_at": datetime.now().isoformat(),
            "tide_level": 300 + np.random.normal(0, 50),
            "air_temp": 25 + np.random.normal(0, 3),
            "air_pres": 1013 + np.random.normal(0, 5),
            "wind_speed": max(0, np.random.normal(3, 2)),
            "wind_dir": np.random.uniform(0, 360)
        }
        collected_data.append(data_point)

    return collected_data

async def simulate_prediction_update():
    """Simulated prediction update"""
    stations = demo_session["active_stations"] if demo_session["active"] else ["DT_0001", "DT_0002"]

    return {
        "updated_count": len(stations),
        "stations": stations
    }

async def update_demo_predictions(speed_multiplier: int):
    """Demo prediction update (speed_multiplier is currently unused)"""
    stations = demo_session["active_stations"]

    # Update performance history
    current_rmse = get_current_rmse()
    demo_session["performance_history"].append({
        "timestamp": datetime.now().isoformat(),
        "rmse": current_rmse,
        "accuracy": calculate_final_accuracy()
    })

    return {
        "updated_count": len(stations),
        "stations": stations,
        "current_rmse": current_rmse
    }

async def emergency_recovery():
    """Emergency recovery"""
    global active_issues

    # Clear all issues
    cleared_count = len(active_issues)
    active_issues.clear()
    demo_session["current_issues"].clear()

    return {
        "success": True,
        "action": "emergency_recovery",
        "cleared_issues": cleared_count,
        "timestamp": datetime.now().isoformat()
    }
performance_api.py
ADDED
@@ -0,0 +1,651 @@
"""
Real-time Performance Evaluation API
Real-time prediction performance evaluation and monitoring
"""

from fastapi import FastAPI, HTTPException, Header, Query
from datetime import datetime, timedelta
import numpy as np
import pandas as pd
from typing import Optional, List, Dict, Any
import logging
from config import INTERNAL_API_KEY

logger = logging.getLogger(__name__)

# Performance data store (a real deployment would use a database)
performance_cache = {
    "realtime_metrics": {},
    "historical_data": [],
    "station_performance": {},
    "alert_thresholds": {
        "rmse_warning": 30.0,
        "rmse_critical": 50.0,
        "accuracy_warning": 80.0,
        "accuracy_critical": 70.0
    }
}

def verify_internal_api_key(authorization: str = Header(None)):
    """Verify the internal API key.

    Note: unlike the internal_api variant, this returns a bool instead of
    raising, so endpoints that call it without checking the result do not
    actually block unauthenticated access.
    """
    if authorization and authorization.startswith("Bearer "):
        return authorization == f"Bearer {INTERNAL_API_KEY}"
    return False

def register_performance_routes(app: FastAPI):
    """Register performance evaluation API routes"""

    @app.get("/api/performance/realtime", tags=["Performance"])
    async def get_realtime_performance(
        station_id: Optional[str] = Query(None, description="Performance for a specific station (None for all)"),
        authorization: str = Header(None)
    ):
        """Query real-time prediction performance metrics"""

        # Internal callers authenticate for extra detail; external access is read-only
        is_internal = verify_internal_api_key(authorization)

        try:
            current_time = datetime.now()

            if station_id:
                # Single-station performance
                station_metrics = await get_station_performance(station_id)
                return {
                    "timestamp": current_time.isoformat(),
                    "station_id": station_id,
                    **station_metrics,
                    "data_source": "realtime"
                }
            else:
                # System-wide performance
                overall_metrics = await get_overall_performance()

                response_data = {
                    "timestamp": current_time.isoformat(),
                    "rmse": overall_metrics["rmse"],
                    "mae": overall_metrics["mae"],
                    "accuracy": overall_metrics["accuracy"],
                    "prediction_count": overall_metrics["prediction_count"],
                    "active_stations": overall_metrics["active_stations"],
                    "data_quality_score": overall_metrics["data_quality_score"],
                    "status": overall_metrics["status"]
                }

                # Extra details for internal requests
                if is_internal:
                    response_data.update({
                        "detailed_metrics": overall_metrics.get("detailed_metrics", {}),
                        "station_breakdown": overall_metrics.get("station_breakdown", {}),
                        "recent_alerts": overall_metrics.get("recent_alerts", [])
                    })

                return response_data

        except Exception as e:
            logger.error(f"Realtime performance query failed: {str(e)}")
            raise HTTPException(status_code=500, detail=str(e))

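A minimal query sketch for this endpoint; unauthenticated calls get the public subset, a Bearer key unlocks detailed_metrics. The base URL is a placeholder assumption:

# Hypothetical read-only client for /api/performance/realtime.
import requests

base = "https://example-space.hf.space"  # placeholder URL
public = requests.get(f"{base}/api/performance/realtime", timeout=30).json()
print(public["rmse"], public["accuracy"], public["status"])

# Per-station view via the station_id query parameter:
one = requests.get(f"{base}/api/performance/realtime",
                   params={"station_id": "DT_0001"}, timeout=30).json()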
@app.get("/api/performance/historical", tags=["Performance"])
|
90 |
+
async def get_historical_performance(
|
91 |
+
hours: int = Query(24, description="조회할 시간 범위 (시간)"),
|
92 |
+
station_id: Optional[str] = Query(None, description="특정 관측소"),
|
93 |
+
metric: str = Query("rmse", description="성능 지표 (rmse/mae/accuracy)"),
|
94 |
+
authorization: str = Header(None)
|
95 |
+
):
|
96 |
+
"""성능 히스토리 조회"""
|
97 |
+
|
98 |
+
is_internal = verify_internal_api_key(authorization)
|
99 |
+
|
100 |
+
try:
|
101 |
+
# 시간 범위 검증
|
102 |
+
if hours > 168: # 최대 1주일
|
103 |
+
hours = 168
|
104 |
+
|
105 |
+
end_time = datetime.now()
|
106 |
+
start_time = end_time - timedelta(hours=hours)
|
107 |
+
|
108 |
+
historical_data = await get_performance_history(
|
109 |
+
start_time, end_time, station_id, metric
|
110 |
+
)
|
111 |
+
|
112 |
+
# 통계 계산
|
113 |
+
if historical_data:
|
114 |
+
values = [item[metric] for item in historical_data if item.get(metric) is not None]
|
115 |
+
|
116 |
+
if values:
|
117 |
+
statistics = {
|
118 |
+
"mean": round(np.mean(values), 2),
|
119 |
+
"std": round(np.std(values), 2),
|
120 |
+
"min": round(min(values), 2),
|
121 |
+
"max": round(max(values), 2),
|
122 |
+
"trend": calculate_trend(values)
|
123 |
+
}
|
124 |
+
else:
|
125 |
+
statistics = None
|
126 |
+
else:
|
127 |
+
statistics = None
|
128 |
+
|
129 |
+
return {
|
130 |
+
"timestamp": end_time.isoformat(),
|
131 |
+
"query_range": {
|
132 |
+
"start_time": start_time.isoformat(),
|
133 |
+
"end_time": end_time.isoformat(),
|
134 |
+
"hours": hours
|
135 |
+
},
|
136 |
+
"station_id": station_id,
|
137 |
+
"metric": metric,
|
138 |
+
"data_points": len(historical_data),
|
139 |
+
"data": historical_data[-100:] if not is_internal else historical_data, # 외부는 최근 100개만
|
140 |
+
"statistics": statistics
|
141 |
+
}
|
142 |
+
|
143 |
+
except Exception as e:
|
144 |
+
logger.error(f"Historical performance query failed: {str(e)}")
|
145 |
+
raise HTTPException(status_code=500, detail=str(e))
|
146 |
+
|
147 |
+
@app.get("/api/performance/comparison", tags=["Performance"])
|
148 |
+
async def compare_station_performance(
|
149 |
+
station_ids: str = Query(..., description="비교할 관측소들 (쉼표 구분)"),
|
150 |
+
metric: str = Query("rmse", description="비교할 성능 지표"),
|
151 |
+
period: str = Query("24h", description="비교 기간 (1h/6h/24h/7d)"),
|
152 |
+
authorization: str = Header(None)
|
153 |
+
):
|
154 |
+
"""관측소별 성능 비교"""
|
155 |
+
|
156 |
+
is_internal = verify_internal_api_key(authorization)
|
157 |
+
|
158 |
+
try:
|
159 |
+
# 관측소 목록 파싱
|
160 |
+
stations = [s.strip() for s in station_ids.split(",")]
|
161 |
+
|
162 |
+
if len(stations) > 10: # 최대 10개 관측소
|
163 |
+
stations = stations[:10]
|
164 |
+
|
165 |
+
# 기간 파싱
|
166 |
+
period_hours = {
|
167 |
+
"1h": 1, "6h": 6, "24h": 24, "7d": 168
|
168 |
+
}.get(period, 24)
|
169 |
+
|
170 |
+
comparison_data = {}
|
171 |
+
|
172 |
+
for station_id in stations:
|
173 |
+
station_metrics = await get_station_performance_summary(
|
174 |
+
station_id, period_hours, metric
|
175 |
+
)
|
176 |
+
comparison_data[station_id] = station_metrics
|
177 |
+
|
178 |
+
# 순위 계산
|
179 |
+
if metric in ["rmse", "mae"]:
|
180 |
+
# 낮을수록 좋음
|
181 |
+
sorted_stations = sorted(
|
182 |
+
comparison_data.items(),
|
183 |
+
key=lambda x: x[1].get("current_value", float('inf'))
|
184 |
+
)
|
185 |
+
else: # accuracy 등
|
186 |
+
# 높을수록 좋음
|
187 |
+
sorted_stations = sorted(
|
188 |
+
comparison_data.items(),
|
189 |
+
key=lambda x: x[1].get("current_value", 0),
|
190 |
+
reverse=True
|
191 |
+
)
|
192 |
+
|
193 |
+
return {
|
194 |
+
"timestamp": datetime.now().isoformat(),
|
195 |
+
"metric": metric,
|
196 |
+
"period": period,
|
197 |
+
"stations_count": len(stations),
|
198 |
+
"comparison": comparison_data,
|
199 |
+
"ranking": [{"rank": i+1, "station_id": station, "value": data["current_value"]}
|
200 |
+
for i, (station, data) in enumerate(sorted_stations)],
|
201 |
+
"best_performer": sorted_stations[0][0] if sorted_stations else None,
|
202 |
+
"summary": {
|
203 |
+
"best_value": sorted_stations[0][1]["current_value"] if sorted_stations else None,
|
204 |
+
"worst_value": sorted_stations[-1][1]["current_value"] if sorted_stations else None,
|
205 |
+
"average_value": round(np.mean([data["current_value"] for data in comparison_data.values()]), 2)
|
206 |
+
}
|
207 |
+
}
|
208 |
+
|
209 |
+
except Exception as e:
|
210 |
+
logger.error(f"Station comparison failed: {str(e)}")
|
211 |
+
raise HTTPException(status_code=500, detail=str(e))
|
212 |
+
|
213 |
+
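Usage sketch for the comparison endpoint; the base URL is a placeholder assumption, the query parameters mirror the handler above:

# Compare RMSE across three stations over the last 24 hours.
import requests

resp = requests.get(
    "https://example-space.hf.space/api/performance/comparison",  # placeholder URL
    params={"station_ids": "DT_0001,DT_0002,DT_0003", "metric": "rmse", "period": "24h"},
    timeout=30,
)
for row in resp.json()["ranking"]:
    print(row["rank"], row["station_id"], row["value"])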
@app.get("/api/performance/alerts", tags=["Performance"])
|
214 |
+
async def get_performance_alerts(
|
215 |
+
active_only: bool = Query(True, description="활성 알림만 조회"),
|
216 |
+
hours: int = Query(24, description="조회할 시간 범위"),
|
217 |
+
authorization: str = Header(None)
|
218 |
+
):
|
219 |
+
"""성능 알림 조회"""
|
220 |
+
|
221 |
+
verify_internal_api_key(authorization) # 내부 API만 접근 가능
|
222 |
+
|
223 |
+
try:
|
224 |
+
alerts = await get_current_alerts(active_only, hours)
|
225 |
+
|
226 |
+
# 알림 분류
|
227 |
+
critical_alerts = [a for a in alerts if a["severity"] == "critical"]
|
228 |
+
warning_alerts = [a for a in alerts if a["severity"] == "warning"]
|
229 |
+
|
230 |
+
return {
|
231 |
+
"timestamp": datetime.now().isoformat(),
|
232 |
+
"query_range_hours": hours,
|
233 |
+
"active_only": active_only,
|
234 |
+
"total_alerts": len(alerts),
|
235 |
+
"critical_count": len(critical_alerts),
|
236 |
+
"warning_count": len(warning_alerts),
|
237 |
+
"alerts": alerts,
|
238 |
+
"summary": {
|
239 |
+
"system_status": "critical" if critical_alerts else ("warning" if warning_alerts else "normal"),
|
240 |
+
"requires_attention": len(critical_alerts) > 0,
|
241 |
+
"most_recent": alerts[0] if alerts else None
|
242 |
+
}
|
243 |
+
}
|
244 |
+
|
245 |
+
except Exception as e:
|
246 |
+
logger.error(f"Performance alerts query failed: {str(e)}")
|
247 |
+
raise HTTPException(status_code=500, detail=str(e))
|
248 |
+
|
249 |
+
@app.post("/api/performance/update", tags=["Performance"])
|
250 |
+
async def update_performance_metrics(
|
251 |
+
request_data: Dict[str, Any],
|
252 |
+
authorization: str = Header(None)
|
253 |
+
):
|
254 |
+
"""성능 지표 업데이트 (내부 사용)"""
|
255 |
+
|
256 |
+
verify_internal_api_key(authorization) # 내부 API만 접근 가능
|
257 |
+
|
258 |
+
try:
|
259 |
+
station_id = request_data.get("station_id")
|
260 |
+
predictions = request_data.get("predictions", [])
|
261 |
+
actual_values = request_data.get("actual_values", [])
|
262 |
+
timestamp = request_data.get("timestamp", datetime.now().isoformat())
|
263 |
+
|
264 |
+
if not predictions or not actual_values:
|
265 |
+
raise HTTPException(status_code=400, detail="Predictions and actual values required")
|
266 |
+
|
267 |
+
if len(predictions) != len(actual_values):
|
268 |
+
raise HTTPException(status_code=400, detail="Predictions and actual values must have same length")
|
269 |
+
|
270 |
+
# 성능 지표 계산
|
271 |
+
metrics = calculate_performance_metrics(predictions, actual_values)
|
272 |
+
|
273 |
+
# 성능 데이터 저장
|
274 |
+
performance_record = {
|
275 |
+
"station_id": station_id,
|
276 |
+
"timestamp": timestamp,
|
277 |
+
"predictions": predictions,
|
278 |
+
"actual_values": actual_values,
|
279 |
+
"metrics": metrics,
|
280 |
+
"data_points": len(predictions)
|
281 |
+
}
|
282 |
+
|
283 |
+
await save_performance_record(performance_record)
|
284 |
+
|
285 |
+
# 실시간 캐시 업데이트
|
286 |
+
performance_cache["realtime_metrics"][station_id] = metrics
|
287 |
+
performance_cache["historical_data"].append(performance_record)
|
288 |
+
|
289 |
+
# 오래된 데이터 정리 (메모리 관리)
|
290 |
+
if len(performance_cache["historical_data"]) > 1000:
|
291 |
+
performance_cache["historical_data"] = performance_cache["historical_data"][-500:]
|
292 |
+
|
293 |
+
# 알림 체크
|
294 |
+
alerts = check_performance_alerts(station_id, metrics)
|
295 |
+
|
296 |
+
return {
|
297 |
+
"success": True,
|
298 |
+
"timestamp": datetime.now().isoformat(),
|
299 |
+
"station_id": station_id,
|
300 |
+
"metrics": metrics,
|
301 |
+
"data_points": len(predictions),
|
302 |
+
"alerts_triggered": len(alerts),
|
303 |
+
"alerts": alerts
|
304 |
+
}
|
305 |
+
|
306 |
+
except Exception as e:
|
307 |
+
logger.error(f"Performance update failed: {str(e)}")
|
308 |
+
raise HTTPException(status_code=500, detail=str(e))
|
309 |
+
|
310 |
+
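A sketch of the internal update call, pushing one batch of aligned prediction/actual arrays; the env-var names and values are assumptions:

# Hypothetical internal call to /api/performance/update.
import os
import requests

batch = {
    "station_id": "DT_0001",
    "predictions":   [310.2, 295.0, 301.7],
    "actual_values": [305.0, 300.1, 298.4],  # must match predictions in length
}
resp = requests.post(
    f"{os.environ['HF_SPACE_URL']}/api/performance/update",
    headers={"Authorization": f"Bearer {os.environ['INTERNAL_API_KEY']}"},
    json=batch,
    timeout=30,
)
print(resp.json()["metrics"])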
# ============================================================================
# Performance calculation and analysis helpers
# ============================================================================

async def get_overall_performance():
    """Query overall system performance"""

    # Demo mode uses simulated data
    from internal_api import demo_session, active_issues

    base_rmse = 18.5
    base_mae = 14.2
    base_accuracy = 89.2

    # Compute the impact of active issues
    rmse_multiplier = 1.0
    accuracy_penalty = 0

    for issue in active_issues.values():
        if issue["type"] == "extreme_weather":
            rmse_multiplier *= 1.8
            accuracy_penalty += 15
        elif issue["type"] == "sensor_malfunction":
            rmse_multiplier *= 1.3
            accuracy_penalty += 8
        elif issue["type"] == "data_corruption":
            rmse_multiplier *= 1.5
            accuracy_penalty += 12
        elif issue["type"] == "network_failure":
            rmse_multiplier *= 1.2
            accuracy_penalty += 5

    # Add random variation
    rmse_variation = np.random.normal(0, 2)
    accuracy_variation = np.random.normal(0, 3)

    current_rmse = max(10, base_rmse * rmse_multiplier + rmse_variation)
    current_mae = max(8, base_mae * rmse_multiplier * 0.8 + rmse_variation * 0.7)
    current_accuracy = max(50, min(98, base_accuracy - accuracy_penalty + accuracy_variation))

    # Data quality score
    data_quality = 95 - len(active_issues) * 10

    # Decide status
    if current_rmse > 40 or current_accuracy < 75:
        status = "critical"
    elif current_rmse > 25 or current_accuracy < 85:
        status = "warning"
    else:
        status = "good"

    return {
        "rmse": round(current_rmse, 1),
        "mae": round(current_mae, 1),
        "accuracy": round(current_accuracy, 1),
        "prediction_count": demo_session.get("total_processed", 0),
        "active_stations": len(demo_session.get("active_stations", [])),
        "data_quality_score": round(data_quality, 1),
        "status": status,
        "detailed_metrics": {
            "rmse_trend": "increasing" if len(active_issues) > 0 else "stable",
            "prediction_latency_ms": np.random.randint(50, 200),
            "data_freshness_minutes": np.random.randint(1, 10)
        },
        "station_breakdown": await get_all_stations_summary(),
        "recent_alerts": await get_recent_alerts_summary()
    }

async def get_station_performance(station_id: str):
    """Query performance for a single station"""

    # Baseline performance plus per-station variation
    # (note: seeding the global NumPy RNG makes results deterministic per station)
    station_seed = hash(station_id) % 1000
    np.random.seed(station_seed)

    base_rmse = 18.5 + np.random.normal(0, 3)
    base_mae = 14.2 + np.random.normal(0, 2)
    base_accuracy = 89.2 + np.random.normal(0, 5)

    # Impact of active issues
    # (fix: demo_session is also used below, so import it alongside active_issues)
    from internal_api import active_issues, demo_session

    for issue in active_issues.values():
        if issue.get("station_id") == station_id or not issue.get("station_id"):
            if issue["type"] == "sensor_malfunction" and issue.get("station_id") == station_id:
                base_rmse *= 2.0
                base_accuracy -= 25
            elif issue["type"] == "extreme_weather":
                base_rmse *= 1.6
                base_accuracy -= 12

    return {
        "rmse": round(max(10, base_rmse), 1),
        "mae": round(max(8, base_mae), 1),
        "accuracy": round(max(50, min(98, base_accuracy)), 1),
        "data_points": np.random.randint(50, 200),
        "last_prediction": (datetime.now() - timedelta(minutes=np.random.randint(1, 15))).isoformat(),
        "status": "active" if station_id in demo_session.get("active_stations", []) else "inactive"
    }

async def get_performance_history(start_time, end_time, station_id, metric):
    """Generate performance history (simulated)"""

    data_points = []
    current_time = start_time

    # Generate data at 1-hour intervals
    while current_time <= end_time:

        # Time-of-day baseline (only rmse/mae/accuracy are supported)
        hour = current_time.hour
        base_value = {
            "rmse": 18.5 + 5 * np.sin(2 * np.pi * hour / 24),
            "mae": 14.2 + 3 * np.sin(2 * np.pi * hour / 24),
            "accuracy": 89.2 - 8 * np.sin(2 * np.pi * hour / 24)
        }

        # Random variation
        variation = np.random.normal(0, 2)
        value = base_value[metric] + variation

        # Per-station adjustment
        if station_id:
            station_offset = hash(station_id) % 10 - 5
            value += station_offset

        data_points.append({
            "timestamp": current_time.isoformat(),
            "station_id": station_id,
            metric: round(value, 1),
            "data_points": np.random.randint(10, 50)
        })

        current_time += timedelta(hours=1)

    return data_points

async def get_station_performance_summary(station_id, period_hours, metric):
    """Station performance summary (period_hours is currently unused)"""

    # Current performance
    current_perf = await get_station_performance(station_id)
    current_value = current_perf[metric]

    # Period average (simulated)
    period_variation = np.random.normal(0, 5)
    period_average = current_value + period_variation

    # Trend calculation
    trend_change = np.random.uniform(-10, 10)
    trend = "improving" if trend_change < -2 else ("degrading" if trend_change > 2 else "stable")

    return {
        "station_id": station_id,
        "current_value": current_value,
        "period_average": round(period_average, 1),
        "trend": trend,
        "trend_change": round(trend_change, 1),
        "data_points": np.random.randint(20, 100),
        "last_update": datetime.now().isoformat()
    }

def calculate_performance_metrics(predictions, actual_values):
    """Compute performance metrics"""

    predictions = np.array(predictions)
    actual_values = np.array(actual_values)

    # RMSE
    rmse = np.sqrt(np.mean((predictions - actual_values) ** 2))

    # MAE
    mae = np.mean(np.abs(predictions - actual_values))

    # "Accuracy": share of errors within the batch's own 95th-percentile error
    # (by construction this hovers near 95%; an absolute tolerance would be more informative)
    errors = np.abs(predictions - actual_values)
    threshold = np.percentile(errors, 95)
    accuracy = np.mean(errors <= threshold) * 100

    # Additional metrics (MAPE assumes no zero actual values)
    mape = np.mean(np.abs((actual_values - predictions) / actual_values)) * 100
    r2 = 1 - (np.sum((actual_values - predictions) ** 2) / np.sum((actual_values - np.mean(actual_values)) ** 2))

    return {
        "rmse": round(rmse, 2),
        "mae": round(mae, 2),
        "accuracy": round(accuracy, 1),
        "mape": round(mape, 2),
        "r2_score": round(r2, 3),
        "data_points": len(predictions)
    }

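A quick check of the metric math on toy arrays (values illustrative): the errors are 5.2, -5.1, and 3.3, so RMSE is the root of their mean square and MAE their mean absolute value.

m = calculate_performance_metrics([310.2, 295.0, 301.7], [305.0, 300.1, 298.4])
print(m["rmse"], m["mae"], m["r2_score"])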
def calculate_trend(values):
    """Trend calculation"""
    if len(values) < 3:
        return "insufficient_data"

    # Slope via linear regression
    x = np.arange(len(values))
    slope = np.polyfit(x, values, 1)[0]

    if slope > 0.5:
        return "increasing"
    elif slope < -0.5:
        return "decreasing"
    else:
        return "stable"

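For example, a steadily rising series yields a fitted slope above the 0.5 threshold, while small fluctuations stay "stable":

print(calculate_trend([20.0, 21.0, 23.0, 26.0]))  # fitted slope = 2.0 -> "increasing"
print(calculate_trend([18.2, 18.4, 18.1]))        # slope near zero -> "stable"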
def check_performance_alerts(station_id, metrics):
    """Check performance alerts"""

    alerts = []
    thresholds = performance_cache["alert_thresholds"]

    # RMSE check
    rmse = metrics["rmse"]
    if rmse > thresholds["rmse_critical"]:
        alerts.append({
            "type": "rmse_critical",
            "severity": "critical",
            "message": f"Critical RMSE level: {rmse} cm",
            "station_id": station_id,
            "threshold": thresholds["rmse_critical"],
            "current_value": rmse,
            "timestamp": datetime.now().isoformat()
        })
    elif rmse > thresholds["rmse_warning"]:
        alerts.append({
            "type": "rmse_warning",
            "severity": "warning",
            "message": f"High RMSE level: {rmse} cm",
            "station_id": station_id,
            "threshold": thresholds["rmse_warning"],
            "current_value": rmse,
            "timestamp": datetime.now().isoformat()
        })

    # Accuracy check
    accuracy = metrics["accuracy"]
    if accuracy < thresholds["accuracy_critical"]:
        alerts.append({
            "type": "accuracy_critical",
            "severity": "critical",
            "message": f"Critical accuracy drop: {accuracy}%",
            "station_id": station_id,
            "threshold": thresholds["accuracy_critical"],
            "current_value": accuracy,
            "timestamp": datetime.now().isoformat()
        })
    elif accuracy < thresholds["accuracy_warning"]:
        alerts.append({
            "type": "accuracy_warning",
            "severity": "warning",
            "message": f"Low accuracy: {accuracy}%",
            "station_id": station_id,
            "threshold": thresholds["accuracy_warning"],
            "current_value": accuracy,
            "timestamp": datetime.now().isoformat()
        })

    return alerts

async def get_current_alerts(active_only, hours, include_performance_alert=True):
    """Query current alerts (simulated; active_only/hours are currently unused).

    include_performance_alert is a fix: without it, the mutual calls
    get_overall_performance -> get_recent_alerts_summary -> get_current_alerts
    would recurse indefinitely while a demo is active.
    """

    from internal_api import active_issues, demo_session

    alerts = []

    # Convert active issues into alerts
    for issue_id, issue in active_issues.items():
        alerts.append({
            "id": issue_id,
            "type": f"{issue['type']}_alert",
            "severity": "critical" if issue["type"] in ["extreme_weather", "sensor_malfunction"] else "warning",
            "message": f"{issue['type'].replace('_', ' ').title()} detected",
            "station_id": issue.get("station_id"),
            "timestamp": issue["start_time"].isoformat(),
            "duration_minutes": (datetime.now() - issue["start_time"]).total_seconds() / 60,
            "active": True
        })

    # Simulate additional performance alerts
    if include_performance_alert and demo_session.get("active"):
        current_perf = await get_overall_performance()

        if current_perf["rmse"] > 30:
            alerts.append({
                "id": "perf_rmse_high",
                "type": "performance_degradation",
                "severity": "warning",
                "message": f"High system RMSE: {current_perf['rmse']} cm",
                "timestamp": (datetime.now() - timedelta(minutes=5)).isoformat(),
                "duration_minutes": 5,
                "active": True
            })

    # Sort (newest first)
    alerts.sort(key=lambda x: x["timestamp"], reverse=True)

    return alerts

async def get_all_stations_summary():
    """Summary across all stations"""

    from internal_api import demo_session
    stations = demo_session.get("active_stations", ["DT_0001", "DT_0002"])

    summary = {}
    for station_id in stations:
        perf = await get_station_performance(station_id)
        summary[station_id] = {
            "rmse": perf["rmse"],
            "accuracy": perf["accuracy"],
            "status": perf["status"]
        }

    return summary

async def get_recent_alerts_summary():
    """Recent alerts summary"""

    # last hour; skip the performance-derived alert to break the recursion
    # with get_overall_performance (see get_current_alerts)
    alerts = await get_current_alerts(True, 1, include_performance_alert=False)

    return {
        "total_count": len(alerts),
        "critical_count": sum(1 for a in alerts if a["severity"] == "critical"),
        "most_recent": alerts[0] if alerts else None
    }

async def save_performance_record(performance_record):
    """Persist a performance record (a real deployment would use a database)"""

    # Stored in memory only here (simulation)
    performance_cache["historical_data"].append(performance_record)

    # A real implementation would write to Supabase or another DB, e.g.:
    # await supabase.table("performance_metrics").insert(performance_record)

logger.info("Performance API module loaded successfully")
test.py
ADDED
@@ -0,0 +1,37 @@
# app_simple.py - Gradio only
import gradio as gr
import warnings
warnings.filterwarnings('ignore')

from prediction import single_prediction
from chatbot import process_chatbot_query_with_llm
from ui import create_ui
from api_utils import (
    api_get_tide_level,
    api_get_tide_series,
    api_get_extremes_info,
    api_check_tide_alert,
    api_compare_stations,
    api_health_check
)

# API handlers
api_handlers = {
    "health": api_health_check,
    "tide_level": api_get_tide_level,
    "tide_series": api_get_tide_series,
    "extremes": api_get_extremes_info,
    "alert": api_check_tide_alert,
    "compare": api_compare_stations
}

# Build the Gradio UI only
demo = create_ui(
    prediction_handler=single_prediction,
    chatbot_handler=process_chatbot_query_with_llm,
    api_handlers=api_handlers
)

# Launch Gradio without FastAPI
if __name__ == "__main__":
    demo.launch()