alwaysgood committed on
Commit
30cf6c6
·
verified ·
1 Parent(s): 3f479c4

Update inference.py

Browse files
Files changed (1) hide show
  1. inference.py +7 -17
inference.py CHANGED
@@ -62,15 +62,14 @@ def load_model_and_scaler(args):
62
  def predict_future(args, model, scaler, device):
63
  df_input = pd.read_csv(args.predict_input_file)
64
  df_input['date'] = pd.to_datetime(df_input['date'])
65
-
66
  cols_to_scale = ['air_pres', 'wind_dir', 'wind_speed', 'air_temp', 'residual']
67
-
68
  raw_input = df_input[cols_to_scale].tail(args.seq_len).values
69
  input_scaled = scaler.transform(raw_input)
70
  batch_x = torch.from_numpy(input_scaled).float().unsqueeze(0).to(device)
71
 
72
  df_stamp_enc = df_input.tail(args.seq_len)[['date']].reset_index(drop=True)
73
- enc_mark = time_features(df_stamp_enc, timeenc=0, freq=args.freq)
 
74
  batch_x_mark = torch.from_numpy(enc_mark).float().unsqueeze(0).to(device)
75
 
76
  dec_inp_label = input_scaled[-args.label_len:]
@@ -81,7 +80,8 @@ def predict_future(args, model, scaler, device):
81
  last_date = df_stamp_enc['date'].iloc[-1]
82
  future_dates = pd.date_range(start=last_date, periods=args.pred_len + 1, freq='5T')[1:]
83
  df_stamp_dec = pd.DataFrame({'date': list(df_stamp_enc['date'].values[-args.label_len:]) + list(future_dates)})
84
- dec_mark = time_features(df_stamp_dec, timeenc=0, freq=args.freq)
 
85
  batch_y_mark = torch.from_numpy(dec_mark).float().unsqueeze(0).to(device)
86
 
87
  with torch.no_grad():
@@ -89,7 +89,6 @@ def predict_future(args, model, scaler, device):
89
 
90
  prediction_scaled = outputs.detach().cpu().numpy()[0]
91
 
92
- # ⭐️ 이 블록의 들여쓰기가 문제였습니다.
93
  if scaler.n_features_in_ > 1:
94
  padding = np.zeros((prediction_scaled.shape[0], scaler.n_features_in_ - args.c_out))
95
  prediction_padded = np.concatenate((padding, prediction_scaled), axis=1)
@@ -107,56 +106,47 @@ def evaluate_performance(args, model, scaler, device):
107
  raw_data = df_eval[cols_to_scale].values
108
  data_scaled = scaler.transform(raw_data)
109
 
110
- df_stamp = time_features(df_eval[['date']], timeenc=0, freq=args.freq)
 
111
 
112
  preds_unscaled = []
113
  trues_unscaled = []
114
 
115
  num_samples = len(data_scaled) - args.seq_len - args.pred_len + 1
116
  for i in tqdm(range(num_samples), desc="Evaluating", file=sys.stderr):
 
117
  s_begin = i
118
  s_end = s_begin + args.seq_len
119
-
120
  batch_x = data_scaled[s_begin:s_end]
121
  batch_x_mark = df_stamp[s_begin:s_end]
122
-
123
  true_begin = s_end
124
  true_end = true_begin + args.pred_len
125
  true_scaled = data_scaled[true_begin:true_end]
126
-
127
  dec_inp_label = batch_x[-args.label_len:]
128
  dec_inp_pred = np.zeros([args.pred_len, args.enc_in])
129
  batch_y = np.concatenate([dec_inp_label, dec_inp_pred], axis=0)
130
-
131
  dec_mark_label = df_stamp[s_end-args.label_len:s_end]
132
  dec_mark_pred = df_stamp[true_begin:true_end]
133
  batch_y_mark = np.concatenate([dec_mark_label, dec_mark_pred], axis=0)
134
-
135
  batch_x = torch.from_numpy(batch_x).float().unsqueeze(0).to(device)
136
  batch_x_mark = torch.from_numpy(batch_x_mark).float().unsqueeze(0).to(device)
137
  batch_y = torch.from_numpy(batch_y).float().unsqueeze(0).to(device)
138
  batch_y_mark = torch.from_numpy(batch_y_mark).float().unsqueeze(0).to(device)
139
-
140
  with torch.no_grad():
141
  outputs = model(batch_x, batch_x_mark, batch_y, batch_y_mark)
142
-
143
  pred_scaled = outputs.detach().cpu().numpy()[0]
144
-
145
  if scaler.n_features_in_ > 1:
146
  padding = np.zeros((pred_scaled.shape[0], scaler.n_features_in_ - args.c_out))
147
  pred_padded = np.concatenate((padding, pred_scaled), axis=1)
148
  pred_unscaled = scaler.inverse_transform(pred_padded)[:, -args.c_out:]
149
  else:
150
  pred_unscaled = scaler.inverse_transform(pred_scaled)
151
-
152
  true_unscaled = scaler.inverse_transform(true_scaled)[:, -args.c_out:]
153
-
154
  preds_unscaled.append(pred_unscaled)
155
  trues_unscaled.append(true_unscaled)
156
 
157
  return np.array(preds_unscaled), np.array(trues_unscaled)
158
 
159
-
160
  if __name__ == '__main__':
161
 
162
  final_output = {}
 
62
  def predict_future(args, model, scaler, device):
63
  df_input = pd.read_csv(args.predict_input_file)
64
  df_input['date'] = pd.to_datetime(df_input['date'])
 
65
  cols_to_scale = ['air_pres', 'wind_dir', 'wind_speed', 'air_temp', 'residual']
 
66
  raw_input = df_input[cols_to_scale].tail(args.seq_len).values
67
  input_scaled = scaler.transform(raw_input)
68
  batch_x = torch.from_numpy(input_scaled).float().unsqueeze(0).to(device)
69
 
70
  df_stamp_enc = df_input.tail(args.seq_len)[['date']].reset_index(drop=True)
71
+ # ⭐️ 1번 문제 해결: 불필요한 timeenc=0 인자 제거
72
+ enc_mark = time_features(df_stamp_enc, freq=args.freq)
73
  batch_x_mark = torch.from_numpy(enc_mark).float().unsqueeze(0).to(device)
74
 
75
  dec_inp_label = input_scaled[-args.label_len:]
 
80
  last_date = df_stamp_enc['date'].iloc[-1]
81
  future_dates = pd.date_range(start=last_date, periods=args.pred_len + 1, freq='5T')[1:]
82
  df_stamp_dec = pd.DataFrame({'date': list(df_stamp_enc['date'].values[-args.label_len:]) + list(future_dates)})
83
+ # ⭐️ 1번 문제 해결: 불필요한 timeenc=0 인자 제거
84
+ dec_mark = time_features(df_stamp_dec, freq=args.freq)
85
  batch_y_mark = torch.from_numpy(dec_mark).float().unsqueeze(0).to(device)
86
 
87
  with torch.no_grad():
 
89
 
90
  prediction_scaled = outputs.detach().cpu().numpy()[0]
91
 
 
92
  if scaler.n_features_in_ > 1:
93
  padding = np.zeros((prediction_scaled.shape[0], scaler.n_features_in_ - args.c_out))
94
  prediction_padded = np.concatenate((padding, prediction_scaled), axis=1)
 
106
  raw_data = df_eval[cols_to_scale].values
107
  data_scaled = scaler.transform(raw_data)
108
 
109
+ # ⭐️ 1번 문제 해결: 불필요한 timeenc=0 인자 제거
110
+ df_stamp = time_features(df_eval[['date']], freq=args.freq)
111
 
112
  preds_unscaled = []
113
  trues_unscaled = []
114
 
115
  num_samples = len(data_scaled) - args.seq_len - args.pred_len + 1
116
  for i in tqdm(range(num_samples), desc="Evaluating", file=sys.stderr):
117
+ # ... (이하 로직은 이전과 동일, 수정 없음) ...
118
  s_begin = i
119
  s_end = s_begin + args.seq_len
 
120
  batch_x = data_scaled[s_begin:s_end]
121
  batch_x_mark = df_stamp[s_begin:s_end]
 
122
  true_begin = s_end
123
  true_end = true_begin + args.pred_len
124
  true_scaled = data_scaled[true_begin:true_end]
 
125
  dec_inp_label = batch_x[-args.label_len:]
126
  dec_inp_pred = np.zeros([args.pred_len, args.enc_in])
127
  batch_y = np.concatenate([dec_inp_label, dec_inp_pred], axis=0)
 
128
  dec_mark_label = df_stamp[s_end-args.label_len:s_end]
129
  dec_mark_pred = df_stamp[true_begin:true_end]
130
  batch_y_mark = np.concatenate([dec_mark_label, dec_mark_pred], axis=0)
 
131
  batch_x = torch.from_numpy(batch_x).float().unsqueeze(0).to(device)
132
  batch_x_mark = torch.from_numpy(batch_x_mark).float().unsqueeze(0).to(device)
133
  batch_y = torch.from_numpy(batch_y).float().unsqueeze(0).to(device)
134
  batch_y_mark = torch.from_numpy(batch_y_mark).float().unsqueeze(0).to(device)
 
135
  with torch.no_grad():
136
  outputs = model(batch_x, batch_x_mark, batch_y, batch_y_mark)
 
137
  pred_scaled = outputs.detach().cpu().numpy()[0]
 
138
  if scaler.n_features_in_ > 1:
139
  padding = np.zeros((pred_scaled.shape[0], scaler.n_features_in_ - args.c_out))
140
  pred_padded = np.concatenate((padding, pred_scaled), axis=1)
141
  pred_unscaled = scaler.inverse_transform(pred_padded)[:, -args.c_out:]
142
  else:
143
  pred_unscaled = scaler.inverse_transform(pred_scaled)
 
144
  true_unscaled = scaler.inverse_transform(true_scaled)[:, -args.c_out:]
 
145
  preds_unscaled.append(pred_unscaled)
146
  trues_unscaled.append(true_unscaled)
147
 
148
  return np.array(preds_unscaled), np.array(trues_unscaled)
149
 
 
150
  if __name__ == '__main__':
151
 
152
  final_output = {}