All code, docs, notebooks, and examples MUST reference this file. When the API changes, update this file FIRST, then propagate to code/docs.
forecast(
data, # str | DataFrame | ndarray | list | tuple | Series | dict
date=None, # str — date column name
value=None, # str — value column name
steps=30, # int — forecast horizon
verbose=False, # bool
models=None, # list[str] | None — model IDs
ensemble=None, # str | None — 'mean', 'weighted', 'median', 'best'
confidence=0.95 # float — 0.80, 0.90, 0.95, 0.99
) -> EasyForecastResult
Available model IDs: 'dot', 'auto_ets', 'auto_arima', 'auto_ces', 'four_theta', 'auto_mstl', 'tbats', 'theta', 'dtsf', 'esn', 'garch', 'croston', 'ets_aan', 'ets_aaa', 'naive', 'mean', 'rwd', 'window_avg', 'egarch', 'gjr_garch', 'seasonal_naive', 'mstl'
analyze(
data, # str | DataFrame | ndarray | list | tuple | Series | dict
date=None, # str
value=None, # str
period=None, # int | None — seasonal period (auto if None)
features=True, # bool
changepoints=True, # bool
anomalies=True, # bool
anomalyThreshold=3.0, # float — z-score threshold
anomaly_threshold=None # float | None — snake_case alias for anomalyThreshold
) -> EasyAnalysisResult
regress(
y=None, # ndarray | Series | str | None (direct mode)
X=None, # ndarray | DataFrame | None (direct mode)
data=None, # DataFrame | None (formula mode)
formula=None, # str | None — "y ~ x1 + x2"
method='ols', # str — 'ols', 'ridge', 'lasso', 'huber', 'quantile'
summary=True, # bool — auto-print summary
alpha=None, # float | None — regularization strength
diagnostics=False # bool — auto-run diagnostics
) -> EasyRegressionResult
compare(
data, # str | DataFrame | ndarray | list | Series | dict
date=None, # str
value=None, # str
steps=30, # int
verbose=False, # bool
models=None # list[str] | None
) -> pd.DataFrame # !! Returns DataFrame directly, NOT a Result object
Returned DataFrame columns: model, mape, rmse, mae, smape, time_ms, selected
quickReport(
data, date=None, value=None, steps=30
) -> dict # !! Returns dict, NOT a Result object
Returned dict keys: 'forecast' (EasyForecastResult), 'analysis' (EasyAnalysisResult), 'summary' (str)
Alias: quick_report = quickReport (backward compatibility)
loadSample(name: str) -> pd.DataFrame
listSamples() -> pd.DataFrame
Available samples: 'airline', 'retail', 'stock', 'temperature', 'energy', 'web', 'intermittent'
Column names per sample:
| Sample | date col | value col |
|---|---|---|
| airline | date | passengers |
| retail | date | sales |
| stock | date | close |
| temperature | date | temperature |
| energy | date | consumption_kwh |
| web | date | pageviews |
| intermittent | date | demand |
Attributes:
| Name | Type | Description |
|---|---|---|
| predictions | np.ndarray | Forecast values |
| dates | list[str] | Forecast dates |
| lower | np.ndarray | Lower CI |
| upper | np.ndarray | Upper CI |
| model | str | Best model name |
| mape | float | MAPE % |
| rmse | float | RMSE |
| mae | float | MAE |
| smape | float | sMAPE |
| models | list[str] | All valid model names (sorted by MAPE) |
Methods:
| Method | Alias | Returns | Description |
|---|---|---|---|
| summary() | — | str | Text summary |
| toDataframe() | to_dataframe() | DataFrame | date, prediction, lower95, upper95 |
| compare() | — | DataFrame | All models ranked by MAPE |
| allForecasts() | all_forecasts() | DataFrame | date + one col per model |
| describe() | — | DataFrame | .describe() style stats |
| toCsv(path) | to_csv(path) | self | Save to CSV |
| toJson(path=None) | to_json(path=None) | str | JSON string or save to file |
| save(path) | — | self | Alias for toJson(path) |
| plot() | — | Figure | matplotlib plot (optional dep) |
NOT available: .table()
Attributes:
| Name | Type | Description |
|---|---|---|
| dna | DNAProfile | DNA profile object |
| changepoints | np.ndarray | Array of int indices (NOT dicts!) |
| anomalies | np.ndarray | Array of int indices (NOT dicts!) |
| features | dict | Statistical features dict |
| characteristics | DataCharacteristics | Data characteristics |
Methods:
| Method | Returns |
|---|---|
| summary() | str |
IMPORTANT — anomalies/changepoints are int arrays, NOT dict lists!
# CORRECT
for idx in analysis.anomalies:
print(f"Anomaly at index {idx}")
# WRONG — will crash
for a in analysis.anomalies:
print(a['index'], a['value']) # TypeError!
Attributes (camelCase is primary, snake_case aliases available):
| Primary | Alias | Type | Description |
|---|---|---|---|
| coefficients | — | np.ndarray | Including intercept |
| pvalues | — | np.ndarray | P-values |
| rSquared | r_squared | float | R² |
| adjRSquared | adj_r_squared | float | Adjusted R² |
| fStat | f_stat | float | F-statistic |
| durbinWatson | durbin_watson | float | Durbin-Watson statistic |
Methods:
| Method | Returns |
|---|---|
| summary() | str |
| diagnose() | str |
| predict(X, interval, alpha) | DataFrame |
Attributes:
| Name | Type | Description |
|---|---|---|
| features | dict[str, float] | 65+ statistical features |
| fingerprint | str | 8-char hex hash |
| difficulty | str | 'easy', 'medium', 'hard', 'very_hard' |
| difficultyScore | float | 0-100 |
| recommendedModels | list[str] | Sorted by fitness |
| category | str | 'trending', 'seasonal', 'stationary', etc. |
| summary | str | Natural language summary |
IMPORTANT — trendStrength etc. are inside features dict, NOT direct attributes!
# CORRECT
dna.features['trendStrength']
dna.features['seasonalStrength']
dna.features['hurstExponent']
dna.features['seasonalPeakPeriod']
# WRONG — will crash
dna.trendStrength # AttributeError!
dna.seasonalStrength # AttributeError!
dna.noiseLevel # AttributeError!
dna.isStationary # AttributeError! (use dna.features['adfStatistic'])
Key feature names (from features dict):
trendStrength, seasonalStrength, seasonalPeakPeriod, hurstExponent, volatility, cv, skewness, kurtosis, adfStatistic, spectralEntropy, approximateEntropy, garchEffect, volatilityClustering, demandDensity, nonlinearAutocorr, forecastability, trendSlope, trendDirection, trendLinearity, trendCurvature
vx = Vectrix(locale='ko_KR', verbose=False, nJobs=-1)
vx.forecast(
df, # DataFrame
dateCol='date', # str (camelCase!)
valueCol='value', # str (camelCase!)
steps=30, # int
trainRatio=0.8, # float
models=None, # list[str] | None
ensembleMethod=None, # str | None (camelCase!)
confidenceLevel=0.95 # float (camelCase!)
) -> ForecastResult # Raw result, NOT EasyForecastResult
vx.analyze(
df, dateCol, valueCol
) -> dict # {'characteristics': DataCharacteristics, 'flatRisk': FlatRiskAssessment}
NOTE: Vectrix class does NOT have: detectRegimes(), fit(), healForecast()
| Name | Type |
|---|---|
| success | bool |
| predictions | np.ndarray |
| dates | list[str] |
| lower95 | np.ndarray |
| upper95 | np.ndarray |
| bestModelId | str |
| bestModelName | str |
| allModelResults | dict[str, ModelResult] |
| characteristics | DataCharacteristics |
All models follow the same interface:
model.fit(y) # y: np.ndarray, returns self
predictions, lower, upper = model.predict(steps) # all np.ndarray
model.refit(newData) # re-fit with cached hyperparams, returns self
from vectrix.engine.ets import AutoETS
from vectrix.engine.arima import AutoARIMA, ARIMAModel
from vectrix.engine.theta import OptimizedTheta
from vectrix.engine.dot import DynamicOptimizedTheta
from vectrix.engine.ces import AutoCES
from vectrix.engine.mstl import AutoMSTL
from vectrix.engine.tbats import AutoTBATS
from vectrix.engine.garch import GARCHModel, EGARCHModel, GJRGARCHModel
from vectrix.engine.croston import AutoCroston
from vectrix.engine.fourTheta import AdaptiveThetaEnsemble
from vectrix.engine.dtsf import DynamicTimeScanForecaster
from vectrix.engine.esn import EchoStateForecaster
from vectrix.engine.baselines import NaiveModel, SeasonalNaiveModel, MeanModel, RandomWalkDrift, WindowAverage
| modelId | Class | needsPeriod | minData | Best for |
|---|---|---|---|---|
| auto_ets | AutoETS | Yes | 20 | Stable patterns, short-term |
| auto_arima | AutoARIMA | No | 30 | Stationary with complex autocorrelation |
| theta | OptimizedTheta | Yes | 10 | Simple trend extrapolation |
| dot | DynamicOptimizedTheta | Yes | 10 | General purpose (M4 OWA 0.848) |
| auto_ces | AutoCES | Yes | 20 | Nonlinear, complex seasonality |
| auto_mstl | AutoMSTL | No | 50 | Multiple seasonality |
| mstl | MSTLDecomposition | Yes | 50 | Multiple seasonality (explicit period) |
| tbats | AutoTBATS | Yes | 30 | Complex multi-seasonal |
| four_theta | AdaptiveThetaEnsemble | Yes | 10 | M4-validated ensemble (Yearly OWA 0.879) |
| dtsf | DynamicTimeScanForecaster | No | 30 | Pattern matching, hourly data |
| esn | EchoStateForecaster | No | 20 | Ensemble diversity (not standalone) |
| garch | GARCHModel | No | 50 | Financial volatility |
| egarch | EGARCHModel | No | 50 | Asymmetric volatility |
| gjr_garch | GJRGARCHModel | No | 50 | Leverage effect |
| croston | AutoCroston | No | 10 | Intermittent/lumpy demand |
| ets_aan | ETSModel('A','A','N') | Yes | 10 | Trending data |
| ets_aaa | ETSModel('A','A','A') | Yes | 20 | Seasonal data |
| naive | NaiveModel | No | 2 | Baseline |
| seasonal_naive | SeasonalNaiveModel | Yes | 14 | Seasonal baseline |
| mean | MeanModel | No | 2 | Baseline |
| rwd | RandomWalkDrift | No | 5 | Trending data baseline |
| window_avg | WindowAverage | Yes | 5 | Stable data baseline |
from vectrix.engine.registry import getRegistry, getModelSpec, listModelIds, createModel, getModelInfo
getRegistry() # -> Dict[str, ModelSpec]
getModelSpec('dot') # -> ModelSpec or None
listModelIds() # -> List[str]
createModel('dot', period=12) # -> model instance
getModelInfo() # -> backward-compatible MODEL_INFO dict
from vectrix.business import (
AnomalyDetector, ForecastExplainer, WhatIfAnalyzer,
Backtester, BusinessMetrics, ReportGenerator, HTMLReportGenerator
)
detector = AnomalyDetector()
result = detector.detect(
y, # np.ndarray
method='auto', # 'zscore', 'iqr', 'seasonal', 'rolling', 'auto'
threshold=3.0, # float
period=1 # int — seasonal period
) -> AnomalyResult
AnomalyResult attributes: indices, scores, method, threshold, nAnomalies, anomalyRatio, details
explainer = ForecastExplainer()
result = explainer.explain(
y, # np.ndarray — historical data
predictions, # np.ndarray — forecast values
period=7, # int
locale='ko' # str — 'ko' or 'en'
) -> dict
Returned dict keys: drivers, narrative, decomposition, confidence, summary
analyzer = WhatIfAnalyzer()
results = analyzer.analyze(
basePredictions, # np.ndarray
historicalData, # np.ndarray
scenarios, # List[dict] — each with 'name', 'trend_change', etc.
period=7 # int
) -> List[ScenarioResult]
summary = analyzer.compareSummary(results) # -> str
Scenario dict keys: name, trend_change, seasonal_multiplier, shock_at, shock_magnitude, shock_duration, level_shift
ScenarioResult attributes: name, predictions, baselinePredictions, difference, percentChange, impact
bt = Backtester(
nFolds=5, # int
horizon=30, # int
strategy='expanding', # 'expanding' or 'sliding'
minTrainSize=50, # int
stepSize=None # int | None
)
result = bt.run(y, modelFactory=AutoETS) # -> BacktestResult
summary = bt.summary(result) # -> str
BacktestResult attributes: nFolds, avgMAPE, avgRMSE, avgMAE, avgSMAPE, avgBias, mapeStd, folds, bestFold, worstFold
metrics = BusinessMetrics()
result = metrics.calculate(actual, predicted) # -> dict
Returned dict keys: bias, biasPercent, trackingSignal, wape, mase, overForecastRatio, underForecastRatio, forecastAccuracy, fillRateImpact
rg = ReportGenerator(locale='ko')
report = rg.generate(historicalData, predictions, lower95, upper95, period=7, modelName='Vectrix', dates=None) # -> dict
html = HTMLReportGenerator()
path = html.generate(historicalData, predictions, lower95, upper95, modelName='Auto', title='Report', outputPath='report.html') # -> str (file path)
from vectrix.intervals import ConformalInterval, BootstrapInterval
from vectrix.intervals.distribution import ForecastDistribution, DistributionFitter, empiricalCRPS
ci = ConformalInterval(
method='split', # 'split' or 'jackknife'
coverageLevel=0.95, # float
calibrationRatio=0.2 # float
)
ci.calibrate(y, modelFactory, steps=1) # returns self
lower, upper = ci.predict(pointPredictions)
bi = BootstrapInterval(
nBoot=100, # int
coverageLevel=0.95 # float
)
bi.calibrate(y, modelFactory, steps=1) # returns self
lower, upper = bi.predict(pointPredictions)
fitter = DistributionFitter()
dist = fitter.fit(residuals) # -> ForecastDistribution
q50 = dist.quantile(0.5)
crps = dist.crps(actual)
score = empiricalCRPS(actual, samples)
from vectrix.hierarchy import BottomUp, TopDown, MinTrace
bu = BottomUp()
reconciled = bu.reconcile(
bottomForecasts, # np.ndarray [nBottom, steps]
summingMatrix # np.ndarray [nTotal, nBottom]
) -> np.ndarray # [nTotal, steps]
td = TopDown(method='proportions') # 'proportions' or 'forecast_proportions'
reconciled = td.reconcile(
topForecast, # np.ndarray [steps]
proportions, # np.ndarray [nBottom]
summingMatrix # np.ndarray [nTotal, nBottom]
) -> np.ndarray # [nTotal, steps]
proportions = TopDown.computeProportions(historicalBottom) # static method
mt = MinTrace(method='ols') # 'ols' or 'wls'
reconciled = mt.reconcile(
forecasts, # np.ndarray [nTotal, steps]
summingMatrix, # np.ndarray [nTotal, nBottom]
residuals=None # np.ndarray [nTotal, T] — required for WLS
) -> np.ndarray # [nTotal, steps]
S = MinTrace.buildSummingMatrix(structure) # static, {parent: [children]}
from vectrix.pipeline import (
ForecastPipeline, Differencer, LogTransformer, BoxCoxTransformer,
Scaler, Deseasonalizer, Detrend, OutlierClipper, MissingValueImputer
)
All transformers implement: fit(y), transform(y), inverseTransform(y), fitTransform(y)
| Transformer | Constructor | Description |
|---|---|---|
| Differencer | Differencer(d=1) | d-th order differencing |
| LogTransformer | LogTransformer(shift=None) | log(1+y), auto-shift for negatives |
| BoxCoxTransformer | BoxCoxTransformer(lmbda=None) | Auto Box-Cox lambda |
| Scaler | Scaler(method='zscore') | 'zscore' or 'minmax' |
| Deseasonalizer | Deseasonalizer(period=7) | Remove seasonal component |
| Detrend | Detrend() | Remove linear trend |
| OutlierClipper | OutlierClipper(factor=3.0) | IQR-based clipping |
| MissingValueImputer | MissingValueImputer(method='linear') | 'linear', 'mean', 'ffill' |
pipe = ForecastPipeline([
("log", LogTransformer()),
("deseason", Deseasonalizer(period=12)),
("model", AutoETS()),
])
pipe.fit(y)
pred, lower, upper = pipe.predict(steps=12)
Methods: fit(y), predict(steps), transform(y), inverseTransform(y), getStep(name), getParams(), listSteps()
from vectrix import loadSample, listSamples
| name | frequency | rows | date col | value col |
|---|---|---|---|---|
| 'airline' | monthly | 144 | date | passengers |
| 'retail' | daily | 730 | date | sales |
| 'stock' | business_daily | 252 | date | close |
| 'temperature' | daily | 1095 | date | temperature |
| 'energy' | hourly | 720 | date | consumption_kwh |
| 'web' | daily | 180 | date | pageviews |
| 'intermittent' | daily | 365 | date | demand |
Returns DataFrame with columns: name, description, valueCol, frequency, rows
| Layer | Parameters | Attributes | Methods |
|---|---|---|---|
| easy.py functions | snake_case (date=, value=, steps=) + camelCase (anomalyThreshold) | — | snake_case (forecast, analyze) + camelCase (quickReport) |
| EasyForecastResult | — | camelCase-ish (predictions, model) | camelCase primary (toDataframe, allForecasts) + snake_case alias |
| EasyRegressionResult | — | camelCase primary (rSquared, adjRSquared, durbinWatson) + snake_case alias | snake_case (summary, diagnose) |
| DNAProfile | — | camelCase (difficultyScore, recommendedModels) | — |
| Vectrix class | camelCase (dateCol, valueCol, ensembleMethod) | camelCase | camelCase (setProgressCallback) |
| Internal types | camelCase | camelCase | — |
from vectrix import RegimeDetector
rd = RegimeDetector(nRegimes=2)
result = rd.detect(values) # np.ndarray
# result.regimeStats: list[dict] with 'mean', 'std', 'size'
# result.labels: np.ndarray
from vectrix import ConstraintAwareForecaster, Constraint
caf = ConstraintAwareForecaster()
result = caf.apply(
predictions, lower95, upper95,
constraints=[Constraint('non_negative', {})]
)
# result.predictions, result.lower95, result.upper95
Constraint types: 'non_negative', 'range', 'sum_constraint', 'yoy_change', 'monotone', 'capacity', 'ratio', 'custom'
from vectrix import ForecastDNA
dna = ForecastDNA()
profile = dna.analyze(values, period=7) # -> DNAProfile
from vectrix import TimeSeriesCrossValidator
cv = TimeSeriesCrossValidator(nSplits=5, horizon=30, strategy='expanding', minTrainSize=50, stepSize=None)
splits = cv.split(y) # -> List[Tuple[ndarray, ndarray]]
result = cv.evaluate(y, modelFactory, period=7) # -> dict
evaluate() return dict keys: 'mape', 'rmse', 'mae', 'smape', 'foldResults', 'nFolds'
Optional dependency. Install with:
pip install vectrix[viz]
(requires Plotly >= 5.0)
from vectrix.viz import forecastChart, dnaRadar, modelHeatmap, scenarioChart, backtestChart, metricsCard
forecastChart(
forecastResult, # EasyForecastResult
historical=None, # pd.DataFrame | None — auto-detects date/value columns
title=None, # str | None — auto: "Forecast — {model} (MAPE {mape}%)"
theme="dark" # str — 'dark' or 'light'
) -> go.Figure
dnaRadar(
analysisResult, # EasyAnalysisResult
title=None, # str | None — auto: "DNA — {category} ({difficulty}, {score}/100)"
theme="dark" # str
) -> go.Figure # Polar chart with 6 features: Trend, Seasonality, Memory, Vol.Clustering, Nonlinear, Forecastability
modelHeatmap(
comparisonDf, # pd.DataFrame — from compare()
top=10, # int — number of top models
title=None, # str | None
theme="dark" # str
) -> go.Figure # Heatmap with min-max normalized errors (green=best, red=worst)
scenarioChart(
scenarios, # list[ScenarioResult] — from WhatIfAnalyzer.analyze()
dates=None, # list | pd.DatetimeIndex | None — if None, uses numeric steps
title=None, # str | None
theme="dark" # str
) -> go.Figure # Baseline=solid, scenarios=dashed
backtestChart(
backtestResult, # BacktestResult — from Backtester.run()
metric="mape", # str — 'mape' or 'rmse'
title=None, # str | None
theme="dark" # str
) -> go.Figure # Bar per fold + average hline, best=green, worst=red
metricsCard(
metricsDict, # dict — from BusinessMetrics.calculate()
title=None, # str | None
thresholds=None, # dict | None — custom thresholds
theme="dark" # str
) -> go.Figure # 4 indicator cards: Accuracy, Bias, WAPE, MASE
Default thresholds: {'accuracy': 95, 'bias': 3, 'wape': 5, 'mase': 1.0}. Values beyond threshold turn red.
from vectrix.viz import forecastReport, analysisReport
forecastReport(
forecastResult, # EasyForecastResult
historical=None, # pd.DataFrame | None
title=None, # str | None
theme="dark" # str
) -> go.Figure # 2-row: forecast line chart (75%) + 4 metric bars MAPE/RMSE/MAE/sMAPE (25%)
analysisReport(
analysisResult, # EasyAnalysisResult
title=None, # str | None
theme="dark" # str
) -> go.Figure # 2x2: DNA radar (top-left) + feature bars (top-right) + difficulty indicator (bottom)
from vectrix.viz import dashboard
dashboard(
forecast=None, # EasyForecastResult | None
analysis=None, # EasyAnalysisResult | None
comparison=None, # pd.DataFrame | None — from compare()
historical=None, # pd.DataFrame | None
title="Vectrix Report", # str — custom report title
theme="dark" # str — 'dark' or 'light'
) -> DashboardResult # Self-contained HTML report
DashboardResult methods:
.show() — display inline (Jupyter) or open browser (terminal)
.save(path) — save to HTML file
.html — raw HTML string
Report sections: Overview KPIs → Data Profile (DNA) → Forecast Results (metrics + model comparison) → Visualizations (forecast chart + DNA radar)
from vectrix.viz import COLORS, LIGHT_COLORS, PALETTE, LAYOUT, HEIGHT, applyTheme
| Export | Type | Description |
|---|---|---|
| COLORS | dict | 10 dark theme colors: primary #6366f1, accent #a855f7, positive #22c55e, negative #ef4444, warning #f59e0b, muted #94a3b8, bg #0f172a, card #1e293b, text #f1f5f9, grid rgba(255,255,255,0.06) |
| LIGHT_COLORS | dict | 10 light theme colors: same keys, adjusted values (bg #ffffff, text #0f172a) |
| PALETTE | list | 10 cycling colors for multi-series charts |
| LAYOUT | dict | Plotly layout defaults (dark theme, Inter font, margins) |
| HEIGHT | dict | Standard heights: chart 450, card 220, report 600, analysis 650, small 350 |
applyTheme(
fig, # go.Figure
title=None, # str | None
height=450, # int
theme="dark" # str — 'dark' or 'light'
) -> go.Figure # Applies brand theme, legend, grid styling
Last updated: 2026-03-05 — When modifying ANY public API, update this file FIRST.