#!/usr/bin/env python
# DO NOT EDIT
# Autogenerated from the notebook predict.ipynb.
# Edit the notebook and then sync the output with this file.
#
# flake8: noqa
# DO NOT EDIT
# # Prediction (out of sample)
import numpy as np
import matplotlib.pyplot as plt
import statsmodels.api as sm
plt.rc("figure", figsize=(16, 8))
plt.rc("font", size=14)
# ## Artificial data
nsample = 50
sig = 0.25
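# Seeding NumPy's global generator makes the simulated data, and therefore
# all printed results below, reproducible across runs; the particular seed
# value here is arbitrary.
np.random.seed(12345)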
x1 = np.linspace(0, 20, nsample)
X = np.column_stack((x1, np.sin(x1), (x1 - 5)**2))
X = sm.add_constant(X)
beta = [5.0, 0.5, 0.5, -0.02]
y_true = np.dot(X, beta)
y = y_true + sig * np.random.normal(size=nsample)
# ## Estimation
olsmod = sm.OLS(y, X)
olsres = olsmod.fit()
print(olsres.summary())
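# Beyond the full summary table, the results object exposes the estimates
# directly; a quick sketch of the commonly used attributes:
print(olsres.params)  # estimated coefficients: const, x1, sin(x1), (x1-5)**2
print(olsres.bse)  # standard errors of the coefficients
print(olsres.conf_int())  # 95% confidence intervals (alpha=0.05 by default)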
# ## In-sample prediction
ypred = olsres.predict(X)
print(ypred)
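# In-sample, predict() with the original design matrix simply reproduces the
# fitted values, so this sanity check should hold:
assert np.allclose(ypred, olsres.fittedvalues)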
# ## Create a new sample of explanatory variables Xnew, predict and plot
x1n = np.linspace(20.5, 25, 10)
Xnew = np.column_stack((x1n, np.sin(x1n), (x1n - 5)**2))
Xnew = sm.add_constant(Xnew)
ynewpred = olsres.predict(Xnew) # predict out of sample
print(ynewpred)
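# predict() returns point predictions only. For the uncertainty around the
# out-of-sample predictions, get_prediction() returns an object whose
# summary_frame() holds standard errors plus confidence and prediction
# intervals; `pred_frame` is a name introduced here for this sketch.
pred = olsres.get_prediction(Xnew)
pred_frame = pred.summary_frame(alpha=0.05)  # mean, mean_se, mean_ci_*, obs_ci_*
print(pred_frame)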
# ## Plot comparison
fig, ax = plt.subplots()
ax.plot(x1, y, "o", label="Data")
ax.plot(x1, y_true, "b-", label="True")
ax.plot(
    np.hstack((x1, x1n)),
    np.hstack((ypred, ynewpred)),
    "r",
    label="OLS prediction",
)
ax.legend(loc="best")
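# Overlaying the 95% prediction interval from the summary frame computed
# above makes the growing out-of-sample uncertainty visible (a sketch; the
# column names are the ones summary_frame() produces for OLS results):
ax.fill_between(
    x1n,
    pred_frame["obs_ci_lower"],
    pred_frame["obs_ci_upper"],
    color="r",
    alpha=0.2,
    label="95% prediction interval",
)
ax.legend(loc="best")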
# ## Predicting with Formulas
# Using formulas can make both estimation and prediction a lot easier.
from statsmodels.formula.api import ols
data = {"x1": x1, "y": y}
res = ols("y ~ x1 + np.sin(x1) + I((x1-5)**2)", data=data).fit()
# We use `I()` to indicate the identity transform, i.e., we do not want
# patsy's formula operators to reinterpret `**2`; inside `I()` the
# expression is evaluated as plain Python/NumPy arithmetic.
print(res.params)
# Now we only have to pass the single variable `x1`; the transformed
# right-hand-side variables are constructed automatically.
print(res.predict(exog=dict(x1=x1n)))
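# The same works with a pandas DataFrame, which is often more convenient
# when several raw variables enter the formula (a sketch; pandas is an
# extra import that the rest of this example does not need):
import pandas as pd

print(res.predict(pd.DataFrame({"x1": x1n})))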