alwaysgood committed on
Commit
3f479c4
·
verified ·
1 Parent(s): dc5ed68

Update inference.py

Browse files
Files changed (1) hide show
  1. inference.py +15 -3
inference.py CHANGED
@@ -157,11 +157,18 @@ def evaluate_performance(args, model, scaler, device):
157
  return np.array(preds_unscaled), np.array(trues_unscaled)
158
 
159
 
160
- # --- 5. 메인 로직 ---
161
  if __name__ == '__main__':
 
162
  final_output = {}
 
 
 
 
 
 
163
  try:
164
  model, scaler, device = load_model_and_scaler(args)
 
165
  if args.predict_input_file:
166
  print("--- Running in Single Prediction Mode ---", file=sys.stderr)
167
  prediction = predict_future(args, model, scaler, device)
@@ -171,8 +178,6 @@ if __name__ == '__main__':
171
  print("--- Running in Rolling Evaluation Mode ---", file=sys.stderr)
172
  eval_preds, eval_trues = evaluate_performance(args, model, scaler, device)
173
 
174
- # ⭐️⭐️⭐️ 안전장치 추가 ⭐️⭐️⭐️
175
- # 예측 결과가 비어있는지 확인합니다.
176
  if eval_preds.size == 0 or eval_trues.size == 0:
177
  final_output = {"status": "error", "message": "Evaluation resulted in empty arrays. Check input data length."}
178
  else:
@@ -180,7 +185,14 @@ if __name__ == '__main__':
180
  final_output = {"status": "success", "mode": "rolling_evaluation", "mse": mse, "mae": mae}
181
  else:
182
  final_output = {"status": "error", "message": "No mode selected."}
 
183
  except Exception as e:
 
 
 
 
 
184
  final_output = {"status": "error", "message": str(e)}
185
 
 
186
  print(json.dumps(final_output, indent=2))
 
157
  return np.array(preds_unscaled), np.array(trues_unscaled)
158
 
159
 
 
160
  if __name__ == '__main__':
161
+
162
  final_output = {}
163
+ log_file_path = 'error_log.txt'
164
+
165
+ # 이전 로그 파일이 있다면 삭제
166
+ if os.path.exists(log_file_path):
167
+ os.remove(log_file_path)
168
+
169
  try:
170
  model, scaler, device = load_model_and_scaler(args)
171
+
172
  if args.predict_input_file:
173
  print("--- Running in Single Prediction Mode ---", file=sys.stderr)
174
  prediction = predict_future(args, model, scaler, device)
 
178
  print("--- Running in Rolling Evaluation Mode ---", file=sys.stderr)
179
  eval_preds, eval_trues = evaluate_performance(args, model, scaler, device)
180
 
 
 
181
  if eval_preds.size == 0 or eval_trues.size == 0:
182
  final_output = {"status": "error", "message": "Evaluation resulted in empty arrays. Check input data length."}
183
  else:
 
185
  final_output = {"status": "success", "mode": "rolling_evaluation", "mse": mse, "mae": mae}
186
  else:
187
  final_output = {"status": "error", "message": "No mode selected."}
188
+
189
  except Exception as e:
190
+ # ⭐️ 에러 발생 시, 상세 내용을 파일에 기록합니다.
191
+ error_message = traceback.format_exc()
192
+ with open(log_file_path, 'w') as f:
193
+ f.write(error_message)
194
+ # 최종 출력에도 에러 정보를 담습니다.
195
  final_output = {"status": "error", "message": str(e)}
196
 
197
+ # 메인 로직의 끝에서, 최종 결과를 JSON으로 출력합니다.
198
  print(json.dumps(final_output, indent=2))