Update inference.py
inference.py  +9 -8
CHANGED
@@ -62,14 +62,16 @@ def load_model_and_scaler(args):
 def predict_future(args, model, scaler, device):
     df_input = pd.read_csv(args.predict_input_file)
     df_input['date'] = pd.to_datetime(df_input['date'])
+
     cols_to_scale = ['air_pres', 'wind_dir', 'wind_speed', 'air_temp', 'residual']
+
     raw_input = df_input[cols_to_scale].tail(args.seq_len).values
     input_scaled = scaler.transform(raw_input)
     batch_x = torch.from_numpy(input_scaled).float().unsqueeze(0).to(device)

     df_stamp_enc = df_input.tail(args.seq_len)[['date']].reset_index(drop=True)
-    # ⭐️
-    enc_mark = time_features(df_stamp_enc, freq=args.freq)
+    # ⭐️ Fix: pass the Series ['date'] instead of the DataFrame [['date']]
+    enc_mark = time_features(pd.to_datetime(df_stamp_enc['date']), freq=args.freq)
     batch_x_mark = torch.from_numpy(enc_mark).float().unsqueeze(0).to(device)

     dec_inp_label = input_scaled[-args.label_len:]
@@ -80,8 +82,8 @@ def predict_future(args, model, scaler, device):
     last_date = df_stamp_enc['date'].iloc[-1]
     future_dates = pd.date_range(start=last_date, periods=args.pred_len + 1, freq='5T')[1:]
     df_stamp_dec = pd.DataFrame({'date': list(df_stamp_enc['date'].values[-args.label_len:]) + list(future_dates)})
-    # ⭐️
-    dec_mark = time_features(df_stamp_dec, freq=args.freq)
+    # ⭐️ Fix: pass the Series ['date'] instead of the DataFrame [['date']]
+    dec_mark = time_features(pd.to_datetime(df_stamp_dec['date']), freq=args.freq)
     batch_y_mark = torch.from_numpy(dec_mark).float().unsqueeze(0).to(device)

     with torch.no_grad():
@@ -106,15 +108,14 @@ def evaluate_performance(args, model, scaler, device):
     raw_data = df_eval[cols_to_scale].values
     data_scaled = scaler.transform(raw_data)

-    # ⭐️
-    df_stamp = time_features(df_eval[['date']], freq=args.freq)
+    # ⭐️ Fix: pass the Series ['date'] instead of the DataFrame [['date']]
+    df_stamp = time_features(pd.to_datetime(df_eval['date']), freq=args.freq)

     preds_unscaled = []
     trues_unscaled = []

     num_samples = len(data_scaled) - args.seq_len - args.pred_len + 1
     for i in tqdm(range(num_samples), desc="Evaluating", file=sys.stderr):
-        # ... (logic below is identical to before, no changes) ...
         s_begin = i
         s_end = s_begin + args.seq_len
         batch_x = data_scaled[s_begin:s_end]
@@ -146,7 +147,7 @@ def evaluate_performance(args, model, scaler, device):
         trues_unscaled.append(true_unscaled)

     return np.array(preds_unscaled), np.array(trues_unscaled)
-
+
 if __name__ == '__main__':

     final_output = {}
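
For reference, a minimal, self-contained sketch of the corrected call pattern. It is an illustration rather than part of the commit: the import path utils.timefeatures, the sample timestamps, and the 't' freq code are assumptions, and it presumes time_features behaves as the calls in the diff imply, i.e. it accepts a pandas Series/DatetimeIndex of timestamps plus a freq string and returns a NumPy array.

import pandas as pd
import torch
from utils.timefeatures import time_features  # assumed import path for this repo's helper

# Hypothetical 5-minute 'date' column, mirroring the input files used above.
df = pd.DataFrame({'date': pd.date_range('2024-01-01', periods=96, freq='5T')})

# The fix: pass the Series df['date'] (wrapped in pd.to_datetime), not the
# one-column DataFrame df[['date']], so time_features receives datetimes directly.
freq = 't'  # assumption; the script passes args.freq
mark = time_features(pd.to_datetime(df['date']), freq=freq)

# Batched the same way the script builds batch_x_mark / batch_y_mark.
batch_mark = torch.from_numpy(mark).float().unsqueeze(0)

The same pattern applies to the encoder marks, decoder marks, and evaluation timestamps changed in this commit.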