Update tools/ts_forecast_tool.py
tools/ts_forecast_tool.py  CHANGED  +22 -82
@@ -1,90 +1,30 @@
 # space/tools/ts_forecast_tool.py
-import
+import torch
 import pandas as pd
-from
-
-from utils.tracing import Tracer
-from utils.config import AppConfig
-
-# Granite TTM imports (from tsfm_public)
-from tsfm_public.models.tinytimemixer.modeling_tinytimemixer import TinyTimeMixerForPrediction
-from tsfm_public.toolkit.config import TSPPConfig
-from tsfm_public.toolkit.dataset import TimeSeriesDataset
-from tsfm_public.toolkit.trainer import Trainer
-
+from transformers import AutoModelForTimeSeriesForecasting, AutoTokenizer

 class TimeseriesForecastTool:
     """
-
-
-    - 'timestamp' (datetime64[ns])
-    - one or more numeric series columns to forecast (targets)
-    - optional control/exogenous columns (known-in-future features)
-    You must provide context_length and forecast_length to match a TTM variant.
+    Lightweight wrapper around ibm-granite/granite-timeseries-ttm-r1
+    using the Transformers interface.
     """
-    def __init__(self,
-
-
-
-
-
-
-
-        self.tracer = tracer
-
-        self.context_length = context_length
-        self.forecast_length = forecast_length
-        self.target_cols = target_cols or []
-        self.control_cols = control_cols or []
-
-        # Build TSPP config
-        self.tspp_config = TSPPConfig(
-            context_length=context_length,
-            prediction_length=forecast_length,
-            target_cols=self.target_cols,
-            known_cov_cols=self.control_cols,  # known-in-future exogenous
-            time_col="timestamp",
-            freq=None  # inferred; you can set "H" or "T" if you know it
-        )
-
-        # Load model from HF (r1; try r2 for newer variants if needed)
-        self.model = TinyTimeMixerForPrediction.from_pretrained(
-            hf_model_id, revision=revision
-        )
-
-    def _build_dataset(self, df: pd.DataFrame) -> TimeSeriesDataset:
-        # Minimal build: single item dataset from dataframe (you can batch multiple series)
-        item = df.sort_values("timestamp").reset_index(drop=True)
-        return TimeSeriesDataset.from_pandas(
-            item,
-            tspp_config=self.tspp_config
-        )
-
-    def zeroshot_forecast(self, df: pd.DataFrame) -> Dict[str, pd.DataFrame]:
+    def __init__(self,
+                 model_id="ibm-granite/granite-timeseries-ttm-r1",
+                 device=None):
+        self.device = device or ("cuda" if torch.cuda.is_available() else "cpu")
+        self.model = AutoModelForTimeSeriesForecasting.from_pretrained(model_id).to(self.device)
+        self.tokenizer = AutoTokenizer.from_pretrained(model_id)
+
+    def zeroshot_forecast(self, series: pd.Series, horizon: int = 96):
         """
-
-
+        series: pd.Series indexed by datetime
+        horizon: forecast steps
         """
-
-
-
-
-
-
-
-
-        forecast_df = pd.DataFrame(preds, index=horizon_idx).reset_index(drop=True)
-
-        try:
-            self.tracer.trace_event("ts_forecast", {
-                "targets": self.target_cols,
-                "ctx": self.context_length,
-                "h": self.forecast_length
-            })
-        except Exception:
-            pass
-
-        return {
-            "forecast": forecast_df,
-            "context": df.tail(self.context_length).reset_index(drop=True)
-        }
+        values = series.values.astype("float32")
+        inputs = torch.tensor(values, dtype=torch.float32).unsqueeze(0).to(self.device)
+        with torch.no_grad():
+            preds = self.model(inputs, prediction_length=horizon).predictions
+        return pd.DataFrame(
+            preds.squeeze().cpu().numpy(),
+            columns=["forecast"]
+        )
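
For reference, a minimal usage sketch of the rewritten tool. The import path tools.ts_forecast_tool, the 512-point example series, and the toy data are assumptions added here, not part of the commit; it also assumes the transformers classes imported in the new file resolve in the installed environment.

# Hypothetical usage sketch; import path and series length are assumptions.
import pandas as pd

from tools.ts_forecast_tool import TimeseriesForecastTool

# Toy hourly series: 512 points, matching the context window of the TTM-r1 512/96 variant.
idx = pd.date_range("2024-01-01", periods=512, freq="h")
series = pd.Series(range(512), index=idx, dtype="float32")

tool = TimeseriesForecastTool()                            # defaults to ibm-granite/granite-timeseries-ttm-r1
forecast_df = tool.zeroshot_forecast(series, horizon=96)   # horizon defaults to 96 in the new code
print(forecast_df.head())                                  # single "forecast" column, one row per predicted step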
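For comparison, the removed version loaded the checkpoint through tsfm_public rather than transformers. A minimal loading sketch along those lines, using only the import visible in the removed code and treating everything beyond the checkpoint id as illustrative (the removed code also passed a revision argument), assuming tsfm_public is installed:

# Sketch only: mirrors the tsfm_public import from the removed version.
from tsfm_public.models.tinytimemixer.modeling_tinytimemixer import TinyTimeMixerForPrediction

model = TinyTimeMixerForPrediction.from_pretrained(
    "ibm-granite/granite-timeseries-ttm-r1"  # r1 checkpoint, as referenced in the removed comments
)
model.eval()  # inference only, matching the zero-shot use case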