# Splitting out marginal effects to see if they can be generalized
from statsmodels.compat.python import lzip
import numpy as np
from scipy.stats import norm
from statsmodels.tools.decorators import cache_readonly
#### margeff helper functions ####
# NOTE: TODO marginal effects for group 2
# group 2: oprobit, ologit, gologit, mlogit, biprobit
def _check_margeff_args(at, method):
"""
Checks valid options for margeff
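
    Examples
    --------
    A couple of illustrative calls (hypothetical option values):

    >>> _check_margeff_args('overall', 'dydx')  # valid, returns None
    >>> _check_margeff_args('atmean', 'dydx')
    Traceback (most recent call last):
        ...
    ValueError: atmean not a valid option for `at`.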
"""
if at not in ['overall','mean','median','zero','all']:
raise ValueError("%s not a valid option for `at`." % at)
if method not in ['dydx','eyex','dyex','eydx']:
raise ValueError("method is not understood. Got %s" % method)
def _check_discrete_args(at, method):
"""
Checks the arguments for margeff if the exogenous variables are discrete.
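
    Examples
    --------
    An elasticity method is rejected for discrete variables:

    >>> _check_discrete_args('overall', 'dyex')
    Traceback (most recent call last):
        ...
    ValueError: dyex not allowed for discrete variables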
"""
if method in ['dyex','eyex']:
raise ValueError("%s not allowed for discrete variables" % method)
if at in ['median', 'zero']:
raise ValueError("%s not allowed for discrete variables" % at)
def _get_const_index(exog):
"""
    Returns a boolean mask of the non-constant columns of exog and an
    array of the constant column indices, or None if there is no constant.
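
    Examples
    --------
    A minimal sketch with a constant in the first column (hypothetical data):

    >>> exog = np.column_stack((np.ones(5), np.arange(5.)))
    >>> effects_idx, const_idx = _get_const_index(exog)
    >>> effects_idx
    array([False,  True])
    >>> const_idx
    array([0])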
"""
effects_idx = exog.var(0) != 0
if np.any(~effects_idx):
const_idx = np.where(~effects_idx)[0]
else:
const_idx = None
return effects_idx, const_idx
def _isdummy(X):
"""
Given an array X, returns the column indices for the dummy variables.
Parameters
----------
X : array_like
A 1d or 2d array of numbers
Examples
--------
>>> X = np.random.randint(0, 2, size=(15,5)).astype(float)
>>> X[:,1:3] = np.random.randn(15,2)
>>> ind = _isdummy(X)
>>> ind
array([0, 3, 4])
"""
X = np.asarray(X)
    if X.ndim == 1:
        X = X[:, None]
    # a dummy column contains only integer values with min 0 and max 1
    max_is_one = np.max(X, axis=0) == 1
    min_is_zero = np.min(X, axis=0) == 0
    is_integer = np.all(X % 1. == 0, axis=0)
    ind = min_is_zero & max_is_one & is_integer
return np.where(ind)[0]
def _get_dummy_index(X, const_idx):
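    """
    Returns the indices of the dummy columns of X (via _isdummy) and a
    flag; dummy_ind is None and the flag is False when there are none.
    """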
dummy_ind = _isdummy(X)
dummy = True
if dummy_ind.size == 0: # do not waste your time
dummy = False
dummy_ind = None # this gets passed to stand err func
return dummy_ind, dummy
def _iscount(X):
"""
Given an array X, returns the column indices for count variables.
Parameters
----------
X : array_like
A 1d or 2d array of numbers
Examples
--------
>>> X = np.random.randint(0, 10, size=(15,5)).astype(float)
>>> X[:,1:3] = np.random.randn(15,2)
>>> ind = _iscount(X)
>>> ind
array([0, 3, 4])
"""
X = np.asarray(X)
    # count columns are non-negative, non-constant integers
    remainder = (np.all(X % 1. == 0, axis=0) &
                 (X.var(axis=0) != 0) &
                 np.all(X >= 0, axis=0))
    dummy = _isdummy(X)
    remainder = np.where(remainder)[0].tolist()
    # dummy columns also satisfy the count criteria; exclude them
    for idx in dummy:
        remainder.remove(idx)
return np.array(remainder)
def _get_count_index(X, const_idx):
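    """
    Returns the indices of the count columns of X (via _iscount) and a
    flag; count_ind is None and the flag is False when there are none.
    """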
count_ind = _iscount(X)
count = True
if count_ind.size == 0: # do not waste your time
count = False
count_ind = None # for stand err func
return count_ind, count
def _get_margeff_exog(exog, at, atexog, ind):
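    """
    Returns the exog at which to evaluate the marginal effects, as
    determined by `at` and any user-supplied `atexog`; `ind` is the
    boolean mask of non-constant columns and is only used for at='zero'.

    A minimal sketch of the 'mean' case (hypothetical data; the exact
    repr may vary across numpy versions):

    >>> exog = np.column_stack((np.ones(4), np.arange(4.)))
    >>> _get_margeff_exog(exog, 'mean', None, np.array([False, True]))
    array([[1. , 1.5]])
    """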
if atexog is not None: # user supplied
if isinstance(atexog, dict):
            # assumes each value is a scalar or an array of length len(exog)
            for key in atexog:
                exog[:, key] = atexog[key]
elif isinstance(atexog, np.ndarray): #TODO: handle DataFrames
if atexog.ndim == 1:
k_vars = len(atexog)
else:
k_vars = atexog.shape[1]
            if k_vars != exog.shape[1]:
                raise ValueError("atexog does not have the same number "
                                 "of variables as exog")
exog = atexog
#NOTE: we should fill in atexog after we process at
if at == 'mean':
exog = np.atleast_2d(exog.mean(0))
elif at == 'median':
exog = np.atleast_2d(np.median(exog, axis=0))
    elif at == 'zero':
        exog = np.zeros((1, exog.shape[1]))
        exog[0, ~ind] = 1  # the constant columns are set back to one
return exog
def _get_count_effects(effects, exog, count_ind, method, model, params):
"""
    If there's a count variable, the effect is a centered difference: the
    prediction is evaluated with the count decreased by one and increased
    by one, and the forward and backward differences are averaged,
    (F(x + 1) - F(x - 1)) / 2.
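
    Examples
    --------
    A sketch with a stand-in model whose prediction is linear in exog
    (hypothetical; real models supply their own predict):

    >>> class _Toy:
    ...     def predict(self, params, exog):
    ...         return exog @ params
    >>> exog = np.column_stack((np.ones(3), np.array([0., 2., 5.])))
    >>> effects = np.zeros_like(exog)
    >>> _get_count_effects(effects, exog, [1], 'dydx', _Toy(),
    ...                    np.array([0.5, 2.0]))
    array([[0., 2.],
           [0., 2.],
           [0., 2.]])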
"""
# this is the index for the effect and the index for count col in exog
for i in count_ind:
exog0 = exog.copy()
exog0[:, i] -= 1
effect0 = model.predict(params, exog0)
exog0[:, i] += 2
effect1 = model.predict(params, exog0)
        # NOTE: done by analogy with dummy effects but untested because
        # Stata does not handle both count and eydx anywhere
if 'ey' in method:
effect0 = np.log(effect0)
effect1 = np.log(effect1)
effects[:, i] = ((effect1 - effect0)/2)
return effects
def _get_dummy_effects(effects, exog, dummy_ind, method, model, params):
"""
    If there's a dummy variable, the predicted difference is taken between
    the variable set to 1 and set to 0: F(x | d = 1) - F(x | d = 0).
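
    Examples
    --------
    With the same stand-in linear model idea as in _get_count_effects
    (hypothetical), a dummy column with coefficient 2.0 gives a
    difference of 2.0 for every observation:

    >>> class _Toy:
    ...     def predict(self, params, exog):
    ...         return exog @ params
    >>> exog = np.column_stack((np.ones(3), np.array([0., 1., 1.])))
    >>> effects = np.zeros((3, 2))
    >>> _get_dummy_effects(effects, exog, [1], 'dydx', _Toy(),
    ...                    np.array([0.5, 2.0]))[:, 1]
    array([2., 2., 2.])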
"""
# this is the index for the effect and the index for dummy col in exog
for i in dummy_ind:
exog0 = exog.copy() # only copy once, can we avoid a copy?
exog0[:,i] = 0
effect0 = model.predict(params, exog0)
#fittedvalues0 = np.dot(exog0,params)
exog0[:,i] = 1
effect1 = model.predict(params, exog0)
if 'ey' in method:
effect0 = np.log(effect0)
effect1 = np.log(effect1)
effects[:, i] = (effect1 - effect0)
return effects
def _effects_at(effects, at):
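    """
    Reduces the effects array according to `at`: the average over
    observations for 'overall', every row for 'all', and the single
    evaluation row otherwise ('mean', 'median', or 'zero').
    """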
    if at == 'overall':
        effects = effects.mean(0)
    elif at != 'all':  # 'mean', 'median', or 'zero': single evaluation row
        effects = effects[0, :]
return effects
def _margeff_cov_params_dummy(model, cov_margins, params, exog, dummy_ind,
method, J):
r"""
Returns the Jacobian for discrete regressors for use in margeff_cov_params.
For discrete regressors the marginal effect is
\Delta F = F(XB) | d = 1 - F(XB) | d = 0
The row of the Jacobian for this variable is given by
f(XB)*X | d = 1 - f(XB)*X | d = 0
Where F is the default prediction of the model.
"""
for i in dummy_ind:
exog0 = exog.copy()
exog1 = exog.copy()
exog0[:,i] = 0
exog1[:,i] = 1
dfdb0 = model._derivative_predict(params, exog0, method)
dfdb1 = model._derivative_predict(params, exog1, method)
dfdb = (dfdb1 - dfdb0)
if dfdb.ndim >= 2: # for overall
dfdb = dfdb.mean(0)
if J > 1:
K = dfdb.shape[1] // (J-1)
cov_margins[i::K, :] = dfdb
else:
# dfdb could be too short if there are extra params, k_extra > 0
cov_margins[i, :len(dfdb)] = dfdb # how each F changes with change in B
return cov_margins
def _margeff_cov_params_count(model, cov_margins, params, exog, count_ind,
method, J):
r"""
Returns the Jacobian for discrete regressors for use in margeff_cov_params.
For discrete regressors the marginal effect is
\Delta F = F(XB) | d += 1 - F(XB) | d -= 1
The row of the Jacobian for this variable is given by
(f(XB)*X | d += 1 - f(XB)*X | d -= 1) / 2
where F is the default prediction for the model.
"""
for i in count_ind:
exog0 = exog.copy()
exog0[:,i] -= 1
dfdb0 = model._derivative_predict(params, exog0, method)
exog0[:,i] += 2
dfdb1 = model._derivative_predict(params, exog0, method)
dfdb = (dfdb1 - dfdb0)
if dfdb.ndim >= 2: # for overall
dfdb = dfdb.mean(0) / 2
if J > 1:
            K = dfdb.shape[1] // (J - 1)  # integer division; K is a slice step
cov_margins[i::K, :] = dfdb
else:
# dfdb could be too short if there are extra params, k_extra > 0
cov_margins[i, :len(dfdb)] = dfdb # how each F changes with change in B
return cov_margins
def margeff_cov_params(model, params, exog, cov_params, at, derivative,
dummy_ind, count_ind, method, J):
"""
Computes the variance-covariance of marginal effects by the delta method.
Parameters
----------
    model : model instance
        The model that returned the fitted results. Its _derivative_predict
        method is used for computing the Jacobian of the discrete variables
        in dummy_ind and count_ind.
params : array_like
estimated model parameters
exog : array_like
exogenous variables at which to calculate the derivative
cov_params : array_like
The variance-covariance of the parameters
at : str
Options are:
- 'overall', The average of the marginal effects at each
observation.
- 'mean', The marginal effects at the mean of each regressor.
- 'median', The marginal effects at the median of each regressor.
- 'zero', The marginal effects at zero for each regressor.
- 'all', The marginal effects at each observation.
        Only 'overall' has any effect here.
    derivative : function or array_like
        If a function, returns the marginal effects of the model with
        respect to the exogenous variables evaluated at exog, called as
        derivative(params, exog, method). This will be numerically
        differentiated with respect to params. Otherwise, it can be the
        Jacobian of the marginal effects with respect to the parameters.
dummy_ind : array_like
Indices of the columns of exog that contain dummy variables
count_ind : array_like
Indices of the columns of exog that contain count variables
Notes
-----
    For continuous regressors, the variance-covariance is given by

    Asy. Var[MargEff] = [d margeff / d params] V [d margeff / d params]'

    where V is the parameter variance-covariance.
The outer Jacobians are computed via numerical differentiation if
derivative is a function.
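
    Examples
    --------
    A hedged sketch, assuming a fitted binary results instance `res`
    (e.g. from Logit, so J is 1 and there are no discrete regressors):

    >>> me_cov = margeff_cov_params(res.model, res.params, res.model.exog,
    ...                             res.cov_params(), 'overall',
    ...                             res.model._derivative_exog,
    ...                             None, None, 'dydx', 1)  # doctest: +SKIP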
"""
if callable(derivative):
from statsmodels.tools.numdiff import approx_fprime_cs
params = params.ravel('F') # for Multinomial
        try:
            jacobian_mat = approx_fprime_cs(params, derivative,
                                            args=(exog, method))
        except TypeError:  # norm.cdf does not take complex values
            from statsmodels.tools.numdiff import approx_fprime
            jacobian_mat = approx_fprime(params, derivative,
                                         args=(exog, method))
if at == 'overall':
jacobian_mat = np.mean(jacobian_mat, axis=1)
else:
jacobian_mat = jacobian_mat.squeeze() # exog was 2d row vector
if dummy_ind is not None:
jacobian_mat = _margeff_cov_params_dummy(model, jacobian_mat,
params, exog, dummy_ind, method, J)
if count_ind is not None:
jacobian_mat = _margeff_cov_params_count(model, jacobian_mat,
params, exog, count_ind, method, J)
else:
jacobian_mat = derivative
#NOTE: this will not go through for at == 'all'
return np.dot(np.dot(jacobian_mat, cov_params), jacobian_mat.T)
def margeff_cov_with_se(model, params, exog, cov_params, at, derivative,
dummy_ind, count_ind, method, J):
"""
See margeff_cov_params.
Same function but returns both the covariance of the marginal effects
and their standard errors.
"""
cov_me = margeff_cov_params(model, params, exog, cov_params, at,
derivative, dummy_ind,
count_ind, method, J)
return cov_me, np.sqrt(np.diag(cov_me))