# -*- coding: utf-8 -*-
"""Influence and Outlier Measures

Created on Sun Jan 29 11:16:09 2012

Author: Josef Perktold
License: BSD-3
"""
from collections import defaultdict

import numpy as np

from statsmodels.compat.python import lzip
from statsmodels.graphics._regressionplots_doc import _plot_influence_doc
from statsmodels.regression.linear_model import OLS
from statsmodels.stats.multitest import multipletests
from statsmodels.tools.decorators import cache_readonly
from statsmodels.tools.tools import maybe_unwrap_results


# outliers test convenience wrapper

def outlier_test(model_results, method='bonf', alpha=.05, labels=None,
                 order=False, cutoff=None):
    """
    Outlier Tests for RegressionResults instances.

    Parameters
    ----------
    model_results : RegressionResults
        Linear model results
    method : str
        - `bonferroni` : one-step correction
        - `sidak` : one-step correction
        - `holm-sidak` : step-down method using Sidak adjustments
        - `holm` : step-down method using Bonferroni adjustments
        - `simes-hochberg` : step-up method (independent)
        - `hommel` : closed method based on Simes tests (non-negative)
        - `fdr_bh` : Benjamini/Hochberg
        - `fdr_by` : Benjamini/Yekutieli
        See `statsmodels.stats.multitest.multipletests` for details.
    alpha : float
        familywise error rate
    labels : None or array_like
        If `labels` is not None, then it will be used as the index of the
        returned pandas DataFrame. See also Returns below.
    order : bool
        Whether or not to order the results by the absolute value of the
        studentized residuals. If labels are provided they will also be sorted.
    cutoff : None or float in [0, 1]
        If cutoff is not None, then the return only includes observations with
        multiple testing corrected p-values strictly below the cutoff. The
        returned array or dataframe can be empty if there are no outlier
        candidates at the specified cutoff.

    Returns
    -------
    table : ndarray or DataFrame
        Returns either an ndarray or a DataFrame if labels is not None.
        Will attempt to get labels from model_results if available. The
        columns are the Studentized residuals, the unadjusted p-value,
        and the corrected p-value according to method.

    Notes
    -----
    The unadjusted p-value is stats.t.sf(abs(resid), df) * 2 (two-sided),
    where df = df_resid - 1.
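
    Examples
    --------
    A minimal sketch on simulated data; the variable names (``x``, ``y``,
    ``res``) are illustrative only. The last column of the returned table
    holds the multiple-testing corrected p-values:

    >>> import numpy as np
    >>> import statsmodels.api as sm
    >>> from statsmodels.stats.outliers_influence import outlier_test
    >>> np.random.seed(0)
    >>> x = sm.add_constant(np.random.standard_normal((50, 2)))
    >>> y = x @ [1., 2., -1.] + np.random.standard_normal(50)
    >>> y[0] += 10  # plant a single outlier
    >>> res = sm.OLS(y, x).fit()
    >>> table = outlier_test(res, method='bonf', cutoff=0.05)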
    """
    from scipy import stats  # lazy import
    if labels is None:
        labels = getattr(model_results.model.data, 'row_labels', None)
    infl = getattr(model_results, 'get_influence', None)
    if infl is None:
        results = maybe_unwrap_results(model_results)
        raise AttributeError("model_results object %s does not have a "
                             "get_influence "
                             "method." % results.__class__.__name__)
    resid = infl().resid_studentized_external
    if order:
        idx = np.abs(resid).argsort()[::-1]
        resid = resid[idx]
        if labels is not None:
            labels = np.asarray(labels)[idx]
    df = model_results.df_resid - 1
    unadj_p = stats.t.sf(np.abs(resid), df) * 2
    adj_p = multipletests(unadj_p, alpha=alpha, method=method)

    data = np.c_[resid, unadj_p, adj_p[1]]
    if cutoff is not None:
        mask = data[:, -1] < cutoff
        data = data[mask]
    else:
        mask = slice(None)

    if labels is not None:
        from pandas import DataFrame
        return DataFrame(data,
                         columns=['student_resid', 'unadj_p', method + "(p)"],
                         index=np.asarray(labels)[mask])
    return data


# influence measures

def reset_ramsey(res, degree=5):
    """Ramsey's RESET specification test for linear models

    This is a general specification test for additional non-linear effects
    in a model.


    Notes
    -----
    The test fits an auxiliary OLS regression where the design matrix, exog,
    is augmented by powers 2 up to degree of the fitted values. Then it
    performs an F-test of whether these additional terms are jointly
    significant.

    If the p-value of the F-test is below a threshold, e.g. 0.1, then this
    indicates that there might be additional non-linear effects in the model
    and that the linear model is mis-specified.


    References
    ----------
    http://en.wikipedia.org/wiki/Ramsey_RESET_test
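
    Examples
    --------
    A minimal sketch on simulated data with a neglected quadratic term; the
    variable names are illustrative only. A small p-value of the F-test
    suggests that the linear specification is inadequate:

    >>> import numpy as np
    >>> import statsmodels.api as sm
    >>> from statsmodels.stats.outliers_influence import reset_ramsey
    >>> np.random.seed(0)
    >>> x = np.random.uniform(0, 2, size=200)
    >>> y = 1 + x + 0.5 * x**2 + 0.1 * np.random.standard_normal(200)
    >>> res = sm.OLS(y, sm.add_constant(x)).fit()
    >>> ftest = reset_ramsey(res, degree=3)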

    """
    order = degree + 1
    k_vars = res.model.exog.shape[1]
    # Vandermonde matrix of the fitted values; drop the linear and constant columns
    y_fitted_vander = np.vander(res.fittedvalues, order)[:, :-2]
    exog = np.column_stack((res.model.exog, y_fitted_vander))
    res_aux = OLS(res.model.endog, exog).fit()
    # r_matrix = np.eye(degree, exog.shape[1], k_vars)
    r_matrix = np.eye(degree - 1, exog.shape[1], k_vars)
    # df1 = degree - 1
    # df2 = exog.shape[0] - degree - res.df_model  (without constant)
    return res_aux.f_test(r_matrix)  # , r_matrix, res_aux


def variance_inflation_factor(exog, exog_idx):
    """variance inflation factor, VIF, for one exogenous variable

    The variance inflation factor measures the increase in the variance of
    the parameter estimate for the explanatory variable given by exog_idx
    that is due to its collinearity with the other explanatory variables.
    It is a measure of multicollinearity of the design matrix, exog.

    One recommendation is that if VIF is greater than 5, then the explanatory
    variable given by exog_idx is highly collinear with the other explanatory
    variables, and the parameter estimates will have large standard errors
    because of this.

    Parameters
    ----------
    exog : ndarray
        design matrix with all explanatory variables, as for example used in
        regression
    exog_idx : int
        index of the exogenous variable in the columns of exog

    Returns
    -------
    vif : float
        variance inflation factor

    Notes
    -----
    This function does not save the auxiliary regression.

    See Also
    --------
    xxx : class for regression diagnostics  TODO: doesn't exist yet

    References
    ----------
    http://en.wikipedia.org/wiki/Variance_inflation_factor
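
    Examples
    --------
    A minimal sketch on simulated data with two nearly collinear regressors;
    the variable names are illustrative only. The constant column is included
    in ``exog`` and skipped when computing the VIFs:

    >>> import numpy as np
    >>> from statsmodels.tools.tools import add_constant
    >>> from statsmodels.stats.outliers_influence import (
    ...     variance_inflation_factor)
    >>> np.random.seed(0)
    >>> x1 = np.random.standard_normal(100)
    >>> x2 = x1 + 0.1 * np.random.standard_normal(100)
    >>> exog = add_constant(np.column_stack([x1, x2]))
    >>> vifs = [variance_inflation_factor(exog, i)
    ...         for i in range(1, exog.shape[1])]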

    """
    k_vars = exog.shape[1]
    x_i = exog[:, exog_idx]
    mask = np.arange(k_vars) != exog_idx
    x_noti = exog[:, mask]
    r_squared_i = OLS(x_i, x_noti).fit().rsquared
    vif = 1. / (1. - r_squared_i)
    return vif


class _BaseInfluenceMixin(object):
    """common methods between OLSInfluence and MLE/GLMInfluence
    """

    def plot_influence(self, external=None, alpha=.05, criterion="cooks",
                       size=48, plot_alpha=.75, ax=None, **kwargs):

        if external is None:
            external = hasattr(self, '_cache') and 'res_looo' in self._cache
        from statsmodels.graphics.regressionplots import _influence_plot
        res = _influence_plot(self.results, self, external=external,
                              alpha=alpha,
                              criterion=criterion, size=size,
                              plot_alpha=plot_alpha, ax=ax, **kwargs)
        return res

    plot_influence.__doc__ = _plot_influence_doc.format(**{
        'extra_params_doc': ""})

    def _plot_index(self, y, ylabel, threshold=None, title=None, ax=None,
                    **kwds):
        from statsmodels.graphics import utils
        fig, ax = utils.create_mpl_ax(ax)
        if title is None:
            title = "Index Plot"
        nobs = len(self.endog)
        index = np.arange(nobs)
        ax.scatter(index, y, **kwds)

        if threshold == 'all':
            large_points = np.ones(nobs, np.bool_)
        else:
            large_points = np.abs(y) > threshold
        psize = 3 * np.ones(nobs)
        # add point labels
        labels = self.results.model.data.row_labels
        if labels is None:
            labels = np.arange(nobs)
        ax = utils.annotate_axes(np.where(large_points)[0], labels,
                                 lzip(index, y),
                                 lzip(-psize, psize), "large",
                                 ax)

        font = {"fontsize": 16, "color": "black"}
        ax.set_ylabel(ylabel, **font)
        ax.set_xlabel("Observation", **font)
        ax.set_title(title, **font)
        return fig

    def plot_index(self, y_var='cooks', threshold=None, title=None, ax=None,
                   idx=None, **kwds):
        """index plot for influence attributes

        Parameters
        ----------
        y_var : string
            Name of attribute or shortcut for predefined attributes that will
            be plotted on the y-axis.
        threshold : None or float
            Threshold for adding annotation with observation labels.
            Observations for which the absolute value of the y_var is larger
            than the threshold will be annotated. Set to a negative number to
            label all observations or to a large number to have no annotation.
        title : string
            If provided, the title will replace the default "Index Plot" title.
        ax : matplotlib axes instance
            The plot will be added to the `ax` if provided, otherwise a new
            figure is created.
        idx : None or integer
            Some attributes require an additional index to select the y-var.
            In dfbetas this refers to the column index.
        kwds : optional keywords
            Keywords will be used in the call to matplotlib scatter function.
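
        Examples
        --------
        A minimal sketch, assuming a fitted results instance ``res`` is
        already available; observations with Cook's distance above the
        threshold are annotated:

        >>> infl = res.get_influence()
        >>> fig = infl.plot_index(y_var='cooks', threshold=0.5)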

        """
        criterion = y_var  # alias
        if threshold is None:
            # TODO: criterion specific defaults
            threshold = 'all'

        if criterion == 'dfbeta':
            y = self.dfbetas[:, idx]
            ylabel = 'DFBETA for ' + self.results.model.exog_names[idx]
        elif criterion.startswith('cook'):
            y = self.cooks_distance[0]
            ylabel = "Cook's distance"
        elif criterion.startswith('hat') or criterion.startswith('lever'):
            y = self.hat_matrix_diag
            ylabel = "Leverage (diagonal of hat matrix)"
        elif criterion.startswith('resid'):
            y = self.resid_studentized
            ylabel = "Internally Studentized Residuals"
        else:
            # assume we have the name of an attribute
            y = getattr(self, y_var)
            if idx is not None:
                y = y[idx]
            ylabel = y_var

        fig = self._plot_index(y, ylabel, threshold=threshold, title=title,
                               ax=ax, **kwds)
        return fig


class MLEInfluence(_BaseInfluenceMixin):
    """Local Influence and outlier measures (experimental)

    This currently subclasses GLMInfluence instead of the other way.
    No common superclass yet.
    This is another version before checking what is common

    Parameters
    ----------
    results : instance of results class
        This only works for model and results classes that have the necessary
        helper methods.
    other arguments are only to override default behavior and are used instead
    of the corresponding attribute of the results class.
    By default resid_pearson is used as resid.




    Attributes
    ----------
    hat_matrix_diag (hii) : This is the generalized leverage computed as the
        local derivative of fittedvalues (predicted mean) with respect to the
        observed response for each observation.
    d_params : Change in parameters computed with one Newton step using the
        full Hessian corrected by division by (1 - hii).
    dfbetas : change in parameters divided by the standard error of parameters
        from the full model results, ``bse``.
    cooks_distance : quadratic form for change in parameters weighted by
        ``cov_params`` from the full model divided by the number of variables.
        It includes p-values based on the F-distribution which are only
        approximate outside of linear Gaussian models.
    resid_studentized : In the general MLE case resid_studentized are
        computed from the score residuals scaled by hessian factor and
        leverage. This does not use ``cov_params``.
    d_fittedvalues : local change of expected mean given the change in the
        parameters as computed in ``d_params``.
    d_fittedvalues_scaled : same as d_fittedvalues but scaled by the standard
        errors of a predicted mean of the response.
    params_one : is the one step parameter estimate computed as ``params``
        from the full sample minus ``d_params``.

    Notes
    -----
    MLEInfluence produces the same results as GLMInfluence (verified for GLM
    Binomial and Gaussian). There will be some differences for non-canonical
    links or if a robust cov_type is used.

    Warning: This does currently not work for constrained or penalized models,
    e.g. models estimated with fit_constrained or fit_regularized.

    This has not yet been tested for correctness when offset or exposure
    are used, although they should be supported by the code.

    status: experimental,
    This class will need changes to support different kinds of models, e.g.
    extra parameters in discrete.NegativeBinomial or two-part models like
    ZeroInflatedPoisson.
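
    Examples
    --------
    A minimal sketch on simulated data; the variable names are illustrative
    only. The class is instantiated directly with a fitted GLM results
    instance (one of the verified cases mentioned above):

    >>> import numpy as np
    >>> import statsmodels.api as sm
    >>> from statsmodels.stats.outliers_influence import MLEInfluence
    >>> np.random.seed(0)
    >>> x = sm.add_constant(np.random.standard_normal((100, 2)))
    >>> p = 1 / (1 + np.exp(-(x @ [0.25, 1.0, -1.0])))
    >>> y = np.random.binomial(1, p)
    >>> res = sm.GLM(y, x, family=sm.families.Binomial()).fit()
    >>> infl = MLEInfluence(res)
    >>> cooks_d, pvals = infl.cooks_distance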


    """

    def __init__(self, results, resid=None, endog=None, exog=None,
                 hat_matrix_diag=None, cov_params=None, scale=None):
        # I'm not calling super for now, OLS attributes might not be available
        # check which model is allowed
        self.results = results = maybe_unwrap_results(results)
        # TODO: check for extra params in e.g. NegBin
        self.nobs, self.k_vars = results.model.exog.shape
        self.endog = endog if endog is not None else results.model.endog
        self.exog = exog if exog is not None else results.model.exog
        self.resid = resid if resid is not None else results.resid_pearson
        self.scale = scale if scale is not None else results.scale
        self.cov_params = (cov_params if cov_params is not None
                           else results.cov_params())
        self.model_class = results.model.__class__

        self.hessian = self.results.model.hessian(self.results.params)
        self.score_obs = self.results.model.score_obs(self.results.params)
        if hat_matrix_diag is not None:
            self._hat_matrix_diag = hat_matrix_diag

    @cache_readonly
    def hat_matrix_diag(self):
        """Diagonal of the generalized leverage

        This is the analogue of the hat matrix diagonal for general MLE.

        """
        if hasattr(self, '_hat_matrix_diag'):
            return self._hat_matrix_diag

        dmu_dp = self.results.model._deriv_mean_dparams(self.results.params)
        dsdy = self.results.model._deriv_score_obs_dendog(self.results.params)
        # dmu_dp = 1 /
        #      self.results.model.family.link.deriv(self.results.fittedvalues)
        h = (dmu_dp * np.linalg.solve(-self.hessian, dsdy.T).T).sum(1)
        return h

    @cache_readonly
    def d_params(self):
        """Change in parameter estimates

        This uses one-step approximation of the parameter change to deleting
        one observation.
        """
        so_noti = self.score_obs.sum(0) - self.score_obs
        beta_i = np.linalg.solve(self.hessian, so_noti.T).T
        return beta_i / (1 - self.hat_matrix_diag)[:, None]

    @cache_readonly
    def dfbetas(self):
        """Scaled change in parameter estimates

        The one-step change of parameters in d_params is rescaled by dividing
        by the standard error of the parameter estimate given by results.bse.
        """

        beta_i = self.d_params / self.results.bse
        return beta_i

    @cache_readonly
    def params_one(self):
        """Parameter estimate based on one-step approximation

        This is the one-step parameter estimate computed as
        ``params`` from the full sample minus ``d_params``.
        """
        return self.results.params - self.d_params

    @cache_readonly
    def cooks_distance(self):
        """Cook's distance and p-values

        Based on the one-step approximation d_params and on results.cov_params.
        Cook's distance divides by the number of explanatory variables.

        p-values are based on the F-distribution which are only approximate
        outside of linear Gaussian models.

        Warning: The definition of p-values might change if we switch to using
        chi-square distribution instead of F-distribution, or if we make it
        dependent on the fit keyword use_t.

        """
        cooks_d2 = (self.d_params * np.linalg.solve(self.cov_params,
                                                    self.d_params.T).T).sum(1)
        cooks_d2 /= self.k_vars
        from scipy import stats
        # alpha = 0.1
        # print stats.f.isf(1-alpha, n_params, res.df_modelwc)
        # TODO use chi2   # use_f option
        pvals = stats.f.sf(cooks_d2, self.k_vars, self.results.df_resid)

        return cooks_d2, pvals

    @cache_readonly
    def resid_studentized(self):
        """Score residual divided by sqrt of hessian factor

        Experimental; agrees with GLMInfluence for Binomial and Gaussian.
        There is no reference for this definition.
        """
        sf = self.results.model.score_factor(self.results.params)
        hf = self.results.model.hessian_factor(self.results.params)
        return sf / np.sqrt(hf) / np.sqrt(1 - self.hat_matrix_diag)

    @cache_readonly
    def _get_prediction(self):
        # TODO: do we cache this or does it need to be a method
        # we only need unchanging parts, alpha for confint could change
        return self.results.get_prediction()

    @cache_readonly
    def d_fittedvalues(self):
        """Change in expected response, fittedvalues

        Local change of expected mean given the change in the parameters as
        computed in d_params.

        Notes
        -----
        This uses one-step approximation of the parameter change to deleting
        one observation ``d_params``.
        """
        # results.params might be a pandas.Series
        params = np.asarray(self.results.params)
        deriv = self.results.model._deriv_mean_dparams(params)
        return (deriv * self.d_params).sum(1)

    @property
    def d_fittedvalues_scaled(self):
        """
        Change in fittedvalues scaled by standard errors

        This uses one-step approximation of the parameter change to deleting
        one observation ``d_params``, and divides by the standard errors
        for the predicted mean provided by results.get_prediction.
        """
        # Note: this and the previous methods are for the response
        # and not for a weighted response, i.e. not the self.exog, self.endog
        # this will be relevant for WLS comparing fitted endog versus wendog
        return self.d_fittedvalues / self._get_prediction.se_mean

    def summary_frame(self):
        """
        Creates a DataFrame with influence results.

        Returns
        -------
        frame : pandas DataFrame
            A DataFrame with selected results for each observation.
            The index will be the same as provided to the model.

        Notes
        -----
        The resultant DataFrame contains four variables in addition to the
        ``dfbetas``. These are:

        * cooks_d : Cook's Distance defined in ``cooks_distance``
        * standard_resid : Standardized residuals defined in
          ``resid_studentized``
        * hat_diag : The diagonal of the projection, or hat, matrix defined in
          ``hat_matrix_diag``
        * dffits_internal : DFFITS statistics using internally Studentized
          residuals defined in ``d_fittedvalues_scaled``

        """
        from pandas import DataFrame

        # row and column labels
        data = self.results.model.data
        row_labels = data.row_labels
        beta_labels = ['dfb_' + i for i in data.xnames]

        # grab the results
        summary_data = DataFrame(dict(
            cooks_d=self.cooks_distance[0],
            standard_resid=self.resid_studentized,
            hat_diag=self.hat_matrix_diag,
            dffits_internal=self.d_fittedvalues_scaled),
            index=row_labels)
        # NOTE: if we don't give columns, order of above will be arbitrary
        dfbeta = DataFrame(self.dfbetas, columns=beta_labels,
                           index=row_labels)

        return dfbeta.join(summary_data)


class OLSInfluence(_BaseInfluenceMixin):
    """class to calculate outlier and influence measures for OLS result

    Parameters
    ----------
    results : RegressionResults
        currently assumes the results are from an OLS regression

    Notes
    -----
    One part of the results can be calculated without any auxiliary regression
    (some of these have the `_internal` postfix in the name). Other statistics
    require a leave-one-observation-out (LOOO) auxiliary regression, and will
    be slower (mainly results with the `_external` postfix in the name).
    From the auxiliary LOOO regressions only the required results are stored.

    Using the LOOO measures is currently only recommended if the data set
    is not too large. One possible approach for the LOOO measures would be to
    identify possible problem observations with the _internal measures, and
    then run the leave-one-observation-out loop only with observations that
    are possible outliers. (However, this is not yet available in an
    automated way.)

    This should be extended to general least squares.

    The leave-one-variable-out (LOVO) auxiliary regressions are currently not
    used.
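
    Examples
    --------
    A minimal sketch on simulated data; the variable names are illustrative
    only. An instance is usually obtained from ``get_influence`` on an OLS
    results object:

    >>> import numpy as np
    >>> import statsmodels.api as sm
    >>> np.random.seed(0)
    >>> x = sm.add_constant(np.random.standard_normal((50, 2)))
    >>> y = x @ [1., 0.5, -0.5] + np.random.standard_normal(50)
    >>> res = sm.OLS(y, x).fit()
    >>> infl = res.get_influence()
    >>> cooks_d, pvals = infl.cooks_distance   # no LOOO loop needed
    >>> dffits, threshold = infl.dffits        # runs the LOOO loop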

    """

    def __init__(self, results):
        # check which model is allowed
        self.results = maybe_unwrap_results(results)
        self.nobs, self.k_vars = results.model.exog.shape
        self.endog = results.model.endog
        self.exog = results.model.exog
        self.resid = results.resid
        self.model_class = results.model.__class__

        # self.sigma_est = np.sqrt(results.mse_resid)
        self.scale = results.mse_resid

        self.aux_regression_exog = {}
        self.aux_regression_endog = {}

    @cache_readonly
    def hat_matrix_diag(self):
        """Diagonal of the hat_matrix for OLS

        Notes
        -----
        temporarily calculated here, this should go to model class
        """
        return (self.exog * self.results.model.pinv_wexog.T).sum(1)

    @cache_readonly
    def resid_press(self):
        """PRESS residuals
        """
        hii = self.hat_matrix_diag
        return self.resid / (1 - hii)

    @cache_readonly
    def influence(self):
        """Influence measure

        matches the influence measure that gretl reports
        u * h / (1 - h)
        where u are the residuals and h is the diagonal of the hat_matrix
        """
        hii = self.hat_matrix_diag
        return self.resid * hii / (1 - hii)

    @cache_readonly
    def hat_diag_factor(self):
        """Factor of diagonal of hat_matrix used in influence

        this might be useful for internal reuse
        h / (1 - h)
        """
        hii = self.hat_matrix_diag
        return hii / (1 - hii)

    @cache_readonly
    def ess_press(self):
        """Error sum of squares of PRESS residuals
        """
        return np.dot(self.resid_press, self.resid_press)

    @cache_readonly
    def resid_studentized(self):
        """Studentized residuals using variance from OLS

        Alias for resid_studentized_internal for compatibility with
        MLEInfluence. This uses sigma from the original estimate and does
        not require the leave-one-out loop.
        """
        return self.resid_studentized_internal

    @cache_readonly
    def resid_studentized_internal(self):
        """Studentized residuals using variance from OLS

        This uses sigma from the original estimate and does not require the
        leave-one-out loop.
        """
        return self.get_resid_studentized_external(sigma=None)
        # return self.results.resid / self.sigma_est

    @cache_readonly
    def resid_studentized_external(self):
        """Studentized residuals using LOOO variance

        this uses sigma from leave-one-out estimates

        requires leave one out loop for observations
        """
        sigma_looo = np.sqrt(self.sigma2_not_obsi)
        return self.get_resid_studentized_external(sigma=sigma_looo)

    def get_resid_studentized_external(self, sigma=None):
        """calculate studentized residuals

        Parameters
        ----------
        sigma : None or float
            estimate of the standard deviation of the residuals. If None, then
            the estimate from the regression results is used.

        Returns
        -------
        stzd_resid : ndarray
            studentized residuals

        Notes
        -----
        studentized residuals are defined as ::

           resid / sigma / np.sqrt(1 - hii)

        where resid are the residuals from the regression, sigma is an
        estimate of the standard deviation of the residuals, and hii is the
        diagonal of the hat_matrix.

        """
        hii = self.hat_matrix_diag
        if sigma is None:
            sigma2_est = self.scale
            # can be replaced by different estimators of sigma
            sigma = np.sqrt(sigma2_est)

        return self.resid / sigma / np.sqrt(1 - hii)

    # same computation as GLMInfluence
    @cache_readonly
    def cooks_distance(self):
        """Cooks distance

        uses original results, no nobs loop

        """
        hii = self.hat_matrix_diag
        # Eubank p.93, 94
        cooks_d2 = self.resid_studentized ** 2 / self.k_vars
        cooks_d2 *= hii / (1 - hii)

        from scipy import stats
        # alpha = 0.1
        # print stats.f.isf(1-alpha, n_params, res.df_modelwc)
        pvals = stats.f.sf(cooks_d2, self.k_vars, self.results.df_resid)

        return cooks_d2, pvals

    @cache_readonly
    def dffits_internal(self):
        """dffits measure for influence of an observation

        based on resid_studentized_internal
        uses original results, no nobs loop

        """
        # TODO: do I want to use different sigma estimate in
        #      resid_studentized_external
        # -> move definition of sigma_error to the __init__
        hii = self.hat_matrix_diag
        dffits_ = self.resid_studentized_internal * np.sqrt(hii / (1 - hii))
        dffits_threshold = 2 * np.sqrt(self.k_vars * 1. / self.nobs)
        return dffits_, dffits_threshold

    @cache_readonly
    def dffits(self):
        """dffits measure for influence of an observation

        based on resid_studentized_external,
        uses results from leave-one-observation-out loop

        Observations with a dffits value larger than a threshold of
        2 * sqrt(k / n), where k is the number of parameters, should be
        investigated.

        Returns
        -------
        dffits : ndarray
            The dffits value for each observation.
        dffits_threshold : float
            The recommended threshold, 2 * sqrt(k_vars / nobs).

        References
        ----------
        `Wikipedia <http://en.wikipedia.org/wiki/DFFITS>`_

        """
        # TODO: do I want to use different sigma estimate in
        #      resid_studentized_external
        # -> move definition of sigma_error to the __init__
        hii = self.hat_matrix_diag
        dffits_ = self.resid_studentized_external * np.sqrt(hii / (1 - hii))
        dffits_threshold = 2 * np.sqrt(self.k_vars * 1. / self.nobs)
        return dffits_, dffits_threshold

    @cache_readonly
    def dfbetas(self):
        """dfbetas

        uses results from leave-one-observation-out loop
        """
        dfbetas = self.results.params - self.params_not_obsi  # [None,:]
        dfbetas /= np.sqrt(self.sigma2_not_obsi[:, None])
        dfbetas /= np.sqrt(np.diag(self.results.normalized_cov_params))
        return dfbetas

    @cache_readonly
    def dfbeta(self):
        """dfbetas

        uses results from leave-one-observation-out loop
        """
        dfbeta = self.results.params - self.params_not_obsi
        return dfbeta

    @cache_readonly
    def sigma2_not_obsi(self):
        """error variance for all LOOO regressions

        This is 'mse_resid' from each auxiliary regression.

        uses results from leave-one-observation-out loop
        """
        return np.asarray(self._res_looo['mse_resid'])

    @property
    def params_not_obsi(self):
        """parameter estimates for all LOOO regressions

        uses results from leave-one-observation-out loop
        """
        return np.asarray(self._res_looo['params'])

    @property
    def det_cov_params_not_obsi(self):
        """determinant of cov_params of all LOOO regressions

        uses results from leave-one-observation-out loop
        """
        return np.asarray(self._res_looo['det_cov_params'])

    @cache_readonly
    def cov_ratio(self):
        """covariance ratio between LOOO and original

        This uses determinant of the estimate of the parameter covariance
        from leave-one-out estimates.
        requires leave one out loop for observations

        """
        # don't use inplace division / because then we change original
        cov_ratio = (self.det_cov_params_not_obsi
                     / np.linalg.det(self.results.cov_params()))
        return cov_ratio

    @cache_readonly
    def resid_var(self):
        """estimate of variance of the residuals

        ::

           sigma2 = sigma2_OLS * (1 - hii)

        where hii is the diagonal of the hat matrix

        """
        # TODO:check if correct outside of ols
        return self.scale * (1 - self.hat_matrix_diag)

    @cache_readonly
    def resid_std(self):
        """estimate of standard deviation of the residuals

        See Also
        --------
        resid_var

        """
        return np.sqrt(self.resid_var)

    def _ols_xnoti(self, drop_idx, endog_idx='endog', store=True):
        """regression results from LOVO auxiliary regression with cache


        The result instances are stored, which could use a large amount of
        memory if the datasets are large. There are too many combinations to
        store them all, except for small problems.

        Parameters
        ----------
        drop_idx : int
            index of exog that is dropped from the regression
        endog_idx : 'endog' or int
            If 'endog', then the endogenous variable of the result instance
            is regressed on the exogenous variables, excluding the one at
            drop_idx. If endog_idx is an integer, then the exog with that
            index is regressed with OLS on all other exogenous variables.
            (The latter is the auxiliary regression for the variance inflation
            factor.)

        Notes
        -----
        This needs more thought, memory versus speed.
        Not yet used in any other parts; not sufficiently tested.
        """
        # reverse the structure: check the store first, calculate on a miss
        if endog_idx == 'endog':
            stored = self.aux_regression_endog
            if drop_idx in stored:
                return stored[drop_idx]
            x_i = self.results.model.endog

        else:
            # nested dictionary for the auxiliary exog regressions
            stored = self.aux_regression_exog.setdefault(endog_idx, {})
            if drop_idx in stored:
                return stored[drop_idx]

            x_i = self.exog[:, endog_idx]

        k_vars = self.exog.shape[1]
        mask = np.arange(k_vars) != drop_idx
        x_noti = self.exog[:, mask]
        res = OLS(x_i, x_noti).fit()
        if store:
            stored[drop_idx] = res

        return res

    def _get_drop_vari(self, attributes):
        """
        regress endog on exog without one of the variables

        This uses a k_vars loop, only attributes of the OLS instance are
        stored.

        Parameters
        ----------
        attributes : list of strings
           These are the names of the attributes of the auxiliary OLS results
           instance that are stored and returned.

        not yet used
        """
        from statsmodels.sandbox.tools.cross_val import LeaveOneOut

        endog = self.results.model.endog
        exog = self.exog

        cv_iter = LeaveOneOut(self.k_vars)
        res_loo = defaultdict(list)
        for inidx, outidx in cv_iter:
            # fit once per left-out variable and collect all attributes
            res_i = self.model_class(endog, exog[:, inidx]).fit()
            for att in attributes:
                res_loo[att].append(getattr(res_i, att))

        return res_loo

    @cache_readonly
    def _res_looo(self):
        """collect required results from the LOOO loop

        all results will be attached.
        currently only 'params', 'mse_resid', 'det_cov_params' are stored

        regresses endog on exog dropping one observation at a time

        this uses a nobs loop, only attributes of the OLS instance are stored.
        """
        from statsmodels.sandbox.tools.cross_val import LeaveOneOut

        def get_det_cov_params(res):
            return np.linalg.det(res.cov_params())

        endog = self.results.model.endog
        exog = self.results.model.exog

        params = np.zeros(exog.shape, dtype=float)
        mse_resid = np.zeros(endog.shape, dtype=float)
        det_cov_params = np.zeros(endog.shape, dtype=float)

        cv_iter = LeaveOneOut(self.nobs)
        for inidx, outidx in cv_iter:
            res_i = self.model_class(endog[inidx], exog[inidx]).fit()
            params[outidx] = res_i.params
            mse_resid[outidx] = res_i.mse_resid
            det_cov_params[outidx] = get_det_cov_params(res_i)

        return dict(params=params, mse_resid=mse_resid,
                    det_cov_params=det_cov_params)

    def summary_frame(self):
        """
        Creates a DataFrame with all available influence results.

        Returns
        -------
        frame : DataFrame
            A DataFrame with all results.

        Notes
        -----
        The resultant DataFrame contains six variables in addition to the
        DFBETAS. These are:

        * cooks_d : Cook's Distance defined in `Influence.cooks_distance`
        * standard_resid : Standardized residuals defined in
          `Influence.resid_studentized_internal`
        * hat_diag : The diagonal of the projection, or hat, matrix defined in
          `Influence.hat_matrix_diag`
        * dffits_internal : DFFITS statistics using internally Studentized
          residuals defined in `Influence.dffits_internal`
        * dffits : DFFITS statistics using externally Studentized residuals
          defined in `Influence.dffits`
        * student_resid : Externally Studentized residuals defined in
          `Influence.resid_studentized_external`
        """
        from pandas import DataFrame

        # row and column labels
        data = self.results.model.data
        row_labels = data.row_labels
        beta_labels = ['dfb_' + i for i in data.xnames]

        # grab the results
        summary_data = DataFrame(dict(
            cooks_d=self.cooks_distance[0],
            standard_resid=self.resid_studentized_internal,
            hat_diag=self.hat_matrix_diag,
            dffits_internal=self.dffits_internal[0],
            student_resid=self.resid_studentized_external,
            dffits=self.dffits[0],
        ),
            index=row_labels)
        # NOTE: if we don't give columns, order of above will be arbitrary
        dfbeta = DataFrame(self.dfbetas, columns=beta_labels,
                           index=row_labels)

        return dfbeta.join(summary_data)

    def summary_table(self, float_fmt="%6.3f"):
        """create a summary table with all influence and outlier measures

        This currently does not distinguish between statistics that can be
        calculated from the original regression results and those for which a
        leave-one-observation-out loop is needed.

        Returns
        -------
        res : SimpleTable
           SimpleTable instance with the results, can be printed

        Notes
        -----
        This also attaches table_data to the instance.



        """
        # print self.dfbetas

        #        table_raw = [ np.arange(self.nobs),
        #                      self.endog,
        #                      self.fittedvalues,
        #                      self.cooks_distance(),
        #                      self.resid_studentized_internal,
        #                      self.hat_matrix_diag,
        #                      self.dffits_internal,
        #                      self.resid_studentized_external,
        #                      self.dffits,
        #                      self.dfbetas
        #                      ]
        table_raw = [('obs', np.arange(self.nobs)),
                     ('endog', self.endog),
                     ('fitted\nvalue', self.results.fittedvalues),
                     ("Cook's\nd", self.cooks_distance[0]),
                     ("student.\nresidual", self.resid_studentized_internal),
                     ('hat diag', self.hat_matrix_diag),
                     ('dffits \ninternal', self.dffits_internal[0]),
                     ("ext.stud.\nresidual", self.resid_studentized_external),
                     ('dffits', self.dffits[0])
                     ]
        colnames, data = lzip(*table_raw)  # unzip
        data = np.column_stack(data)
        self.table_data = data
        from statsmodels.iolib.table import SimpleTable, default_html_fmt
        from statsmodels.iolib.tableformatting import fmt_base
        from copy import deepcopy
        fmt = deepcopy(fmt_base)
        fmt_html = deepcopy(default_html_fmt)
        fmt['data_fmts'] = ["%4d"] + [float_fmt] * (data.shape[1] - 1)
        # fmt_html['data_fmts'] = fmt['data_fmts']
        return SimpleTable(data, headers=colnames, txt_fmt=fmt,
                           html_fmt=fmt_html)


def summary_table(res, alpha=0.05):
    """
    Generate summary table of outlier and influence similar to SAS

    Parameters
    ----------
    res : RegressionResults
       results instance of an OLS regression
    alpha : float
       significance level for confidence interval

    Returns
    -------
    st : SimpleTable
       table with results that can be printed
    data : ndarray
       calculated measures and statistics for the table
    ss2 : list of strings
       column_names for table (Note: rows of table are observations)
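
    Examples
    --------
    A minimal sketch on simulated data; the variable names are illustrative
    only:

    >>> import numpy as np
    >>> import statsmodels.api as sm
    >>> from statsmodels.stats.outliers_influence import summary_table
    >>> np.random.seed(0)
    >>> x = sm.add_constant(np.random.standard_normal(40))
    >>> y = x @ [1., 2.] + np.random.standard_normal(40)
    >>> res = sm.OLS(y, x).fit()
    >>> st, data, ss2 = summary_table(res, alpha=0.05)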
    """

    from scipy import stats
    from statsmodels.sandbox.regression.predstd import wls_prediction_std

    infl = OLSInfluence(res)

    # standard error for predicted mean
    # Note: using hat_matrix only works for fitted values
    predict_mean_se = np.sqrt(infl.hat_matrix_diag * res.mse_resid)

    tppf = stats.t.isf(alpha / 2., res.df_resid)
    predict_mean_ci = np.column_stack([
        res.fittedvalues - tppf * predict_mean_se,
        res.fittedvalues + tppf * predict_mean_se])

    # standard error for predicted observation
    tmp = wls_prediction_std(res, alpha=alpha)
    predict_se, predict_ci_low, predict_ci_upp = tmp

    predict_ci = np.column_stack((predict_ci_low, predict_ci_upp))

    # standard deviation of residual
    resid_se = np.sqrt(res.mse_resid * (1 - infl.hat_matrix_diag))

    table_sm = np.column_stack([
        np.arange(res.nobs) + 1,
        res.model.endog,
        res.fittedvalues,
        predict_mean_se,
        predict_mean_ci[:, 0],
        predict_mean_ci[:, 1],
        predict_ci[:, 0],
        predict_ci[:, 1],
        res.resid,
        resid_se,
        infl.resid_studentized_internal,
        infl.cooks_distance[0]
    ])

    # colnames, data = lzip(*table_raw) #unzip
    data = table_sm
    ss2 = ['Obs', 'Dep Var\nPopulation', 'Predicted\nValue',
           'Std Error\nMean Predict', 'Mean ci\n95% low', 'Mean ci\n95% upp',
           'Predict ci\n95% low', 'Predict ci\n95% upp', 'Residual',
           'Std Error\nResidual', 'Student\nResidual', "Cook's\nD"]
    colnames = ss2
    # self.table_data = data
    # data = np.column_stack(data)
    from statsmodels.iolib.table import SimpleTable, default_html_fmt
    from statsmodels.iolib.tableformatting import fmt_base
    from copy import deepcopy
    fmt = deepcopy(fmt_base)
    fmt_html = deepcopy(default_html_fmt)
    fmt['data_fmts'] = ["%4d"] + ["%6.3f"] * (data.shape[1] - 1)
    # fmt_html['data_fmts'] = fmt['data_fmts']
    st = SimpleTable(data, headers=colnames, txt_fmt=fmt,
                     html_fmt=fmt_html)

    return st, data, ss2


class GLMInfluence(MLEInfluence):
    """Influence and outlier measures (experimental)

    This partly uses formulas specific to GLM; specifically, cooks_distance
    is based on the hessian, i.e. the observed or expected information matrix,
    and not on cov_params, in contrast to MLEInfluence.
    Standardization for changes in parameters, in fittedvalues and in
    the linear predictor is based on cov_params.

    Parameters
    ----------
    results : instance of results class
        This only works for model and results classes that have the necessary
        helper methods.
    resid, endog, exog, hat_matrix_diag, cov_params, scale : optional
        The remaining arguments only override the default behavior and are
        used instead of the corresponding attribute of the results class.
        By default resid_pearson is used as resid.

    Attributes
    ----------
    dfbetas
        change in parameters divided by the standard error of parameters from
        the full model results, ``bse``.
    d_fittedvalues_scaled
        same as d_fittedvalues but scaled by the standard errors of a
        predicted mean of the response.
    d_linpred
        local change in linear prediction.
    d_linpred_scale
        local change in linear prediction scaled by the standard errors for
        the prediction based on cov_params.

    Notes
    -----
    This has not yet been tested for correctness when offset or exposure
    are used, although they should be supported by the code.

    Some GLM specific measures like d_deviance are still missing.

    Computing an explicit leave-one-observation-out (LOOO) loop is included
    but no influence measures are currently computed from it.
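
    Examples
    --------
    A minimal sketch on simulated data; the variable names are illustrative
    only and ``get_influence`` is assumed to be available on the GLM results
    instance:

    >>> import numpy as np
    >>> import statsmodels.api as sm
    >>> np.random.seed(0)
    >>> x = sm.add_constant(np.random.standard_normal((100, 2)))
    >>> p = 1 / (1 + np.exp(-(x @ [0.25, 1.0, -1.0])))
    >>> y = np.random.binomial(1, p)
    >>> res = sm.GLM(y, x, family=sm.families.Binomial()).fit()
    >>> infl = res.get_influence()
    >>> cooks_d, pvals = infl.cooks_distance
    >>> leverage = infl.hat_matrix_diag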
    """

    @cache_readonly
    def hat_matrix_diag(self):
        """
        Diagonal of the hat_matrix for GLM

        Notes
        -----
        This returns the diagonal of the hat matrix that was provided as
        argument to GLMInfluence or computes it using the results method
        `get_hat_matrix`.
        """
        if hasattr(self, '_hat_matrix_diag'):
            return self._hat_matrix_diag
        else:
            return self.results.get_hat_matrix()

    @cache_readonly
    def d_params(self):
        """Change in parameter estimates

        Notes
        -----
        This uses one-step approximation of the parameter change to deleting
        one observation.
        """

        beta_i = np.linalg.pinv(self.exog) * self.resid_studentized
        beta_i /= np.sqrt(1 - self.hat_matrix_diag)
        return beta_i.T

    # same computation as OLS
    @cache_readonly
    def resid_studentized(self):
        """
        Internally studentized pearson residuals

        Notes
        -----
        residuals / sqrt( scale * (1 - hii))

        where residuals are those provided to GLMInfluence which are
        pearson residuals by default, and
        hii is the diagonal of the hat matrix.
        """
        hii = self.hat_matrix_diag
        return self.resid / np.sqrt(self.scale * (1 - hii))

    # same computation as OLS
    @cache_readonly
    def cooks_distance(self):
        """Cook's distance

        Notes
        -----
        Based on one step approximation using resid_studentized and
        hat_matrix_diag for the computation.

        Cook's distance divides by the number of explanatory variables.

        Computed using formulas for GLM and does not use results.cov_params.
        It includes p-values based on the F-distribution which are only
        approximate outside of linear Gaussian models.
        """
        hii = self.hat_matrix_diag
        # Eubank p.93, 94
        cooks_d2 = self.resid_studentized ** 2 / self.k_vars
        cooks_d2 *= hii / (1 - hii)

        from scipy import stats
        # alpha = 0.1
        # print stats.f.isf(1-alpha, n_params, res.df_modelwc)
        pvals = stats.f.sf(cooks_d2, self.k_vars, self.results.df_resid)

        return cooks_d2, pvals

    @property
    def d_linpred(self):
        """
        Change in linear prediction

        This uses one-step approximation of the parameter change to deleting
        one observation ``d_params``.
        """
        # TODO: This will need adjustment for extra params in Poisson
        # use original model exog not transformed influence exog
        exog = self.results.model.exog
        return (exog * self.d_params).sum(1)

    @property
    def d_linpred_scaled(self):
        """
        Change in linpred scaled by standard errors

        This uses one-step approximation of the parameter change to deleting
        one observation ``d_params``, and divides by the standard errors
        for linpred provided by results.get_prediction.
        """
        # Note: this and the previous methods are for the response
        # and not for a weighted response, i.e. not the self.exog, self.endog
        # this will be relevant for WLS comparing fitted endog versus wendog
        return self.d_linpred / self._get_prediction.linpred.se_mean

    @property
    def _fittedvalues_one(self):
        """experimental code
        """
        import warnings
        warnings.warn('this ignores offset and exposure', UserWarning)
        # TODO: we need to handle offset, exposure and weights
        # use original model exog not transformed influence exog
        exog = self.results.model.exog
        fitted = np.array([self.results.model.predict(pi, exog[i])
                           for i, pi in enumerate(self.params_one)])
        return fitted.squeeze()

    @property
    def _diff_fittedvalues_one(self):
        """experimental code
        """
        # in discrete we cannot reuse results.fittedvalues
        return self.results.predict() - self._fittedvalues_one

    @cache_readonly
    def _res_looo(self):
        """collect required results from the LOOO loop

        all results will be attached.
        currently only 'params', 'mse_resid', 'det_cov_params' are stored

        Reestimates the model with endog and exog dropping one observation
        at a time

        This uses a nobs loop, only attributes of the results instance are
        stored.

        Warning: This will need refactoring and API changes to be able to
        add options.
        """
        from statsmodels.sandbox.tools.cross_val import LeaveOneOut
        def get_det_cov_params(res):
            return np.linalg.det(res.cov_params())

        endog = self.results.model.endog
        exog = self.results.model.exog

        init_kwds = self.results.model._get_init_kwds()
        # We need to drop obs also from extra arrays
        freq_weights = init_kwds.pop('freq_weights')
        var_weights = init_kwds.pop('var_weights')
        offset = offset_ = init_kwds.pop('offset')
        exposure = exposure_ = init_kwds.pop('exposure')
        n_trials = init_kwds.pop('n_trials', None)
        # family Binomial creates `n` i.e. `n_trials`
        # we need to reset it
        # TODO: figure out how to do this properly
        if hasattr(init_kwds['family'], 'initialize'):
            # assume we have Binomial
            is_binomial = True
        else:
            is_binomial = False

        params = np.zeros(exog.shape, dtype=float)
        scale = np.zeros(endog.shape, dtype=float)
        det_cov_params = np.zeros(endog.shape, dtype=float)

        cv_iter = LeaveOneOut(self.nobs)
        for inidx, outidx in cv_iter:
            if offset is not None:
                offset_ = offset[inidx]
            if exposure is not None:
                exposure_ = exposure[inidx]
            if n_trials is not None:
                init_kwds['n_trials'] = n_trials[inidx]

            mod_i = self.model_class(endog[inidx], exog[inidx],
                                     offset=offset_,
                                     exposure=exposure_,
                                     freq_weights=freq_weights[inidx],
                                     var_weights=var_weights[inidx],
                                     **init_kwds)
            if is_binomial:
                mod_i.family.n = init_kwds['n_trials']
            res_i = mod_i.fit(start_params=self.results.params,
                              method='newton')
            params[outidx] = res_i.params.copy()
            scale[outidx] = res_i.scale
            det_cov_params[outidx] = get_det_cov_params(res_i)

        return dict(params=params, scale=scale, mse_resid=scale,
                    # alias for now
                    det_cov_params=det_cov_params)