# Author: Virgile Fritsch <virgile.fritsch@inria.fr>
#
# License: BSD 3 clause
import warnings

import numpy as np

from . import MinCovDet
from ..utils.validation import check_is_fitted, check_array
from ..metrics import accuracy_score
from ..base import OutlierMixin


class EllipticEnvelope(MinCovDet, OutlierMixin):
"""An object for detecting outliers in a Gaussian distributed dataset.
Read more in the :ref:`User Guide <outlier_detection>`.
Parameters
----------
store_precision : boolean, optional (default=True)
Specify if the estimated precision is stored.
assume_centered : boolean, optional (default=False)
If True, the support of robust location and covariance estimates
is computed, and a covariance estimate is recomputed from it,
without centering the data.
Useful to work with data whose mean is significantly equal to
zero but is not exactly zero.
If False, the robust location and covariance are directly computed
with the FastMCD algorithm without additional treatment.
support_fraction : float in (0., 1.), optional (default=None)
The proportion of points to be included in the support of the raw
MCD estimate. If None, the minimum value of support_fraction will
be used within the algorithm: `[n_sample + n_features + 1] / 2`.
contamination : float in (0., 0.5), optional (default=0.1)
The amount of contamination of the data set, i.e. the proportion
of outliers in the data set.
random_state : int, RandomState instance or None, optional (default=None)
The seed of the pseudo random number generator to use when shuffling
the data. If int, random_state is the seed used by the random number
generator; If RandomState instance, random_state is the random number
generator; If None, the random number generator is the RandomState
instance used by `np.random`.
Attributes
----------
location_ : array-like, shape (n_features,)
Estimated robust location
covariance_ : array-like, shape (n_features, n_features)
Estimated robust covariance matrix
precision_ : array-like, shape (n_features, n_features)
Estimated pseudo inverse matrix.
(stored only if store_precision is True)
support_ : array-like, shape (n_samples,)
A mask of the observations that have been used to compute the
robust estimates of location and shape.
offset_ : float
Offset used to define the decision function from the raw scores.
We have the relation: ``decision_function = score_samples - offset_``.
The offset depends on the contamination parameter and is defined in
such a way we obtain the expected number of outliers (samples with
decision function < 0) in training.
See Also
--------
EmpiricalCovariance, MinCovDet
Notes
-----
Outlier detection from covariance estimation may break or not
perform well in high-dimensional settings. In particular, one will
always take care to work with ``n_samples > n_features ** 2``.
References
----------
.. [1] Rousseeuw, P.J., Van Driessen, K. "A fast algorithm for the
minimum covariance determinant estimator" Technometrics 41(3), 212
(1999)
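
    Examples
    --------
    A minimal usage sketch (illustrative only: the synthetic data and the
    variable names below are assumptions made for this example, not part of
    the estimator's API).

    >>> import numpy as np
    >>> from sklearn.covariance import EllipticEnvelope
    >>> rng = np.random.RandomState(0)
    >>> X = rng.randn(100, 2)                    # roughly Gaussian data
    >>> clf = EllipticEnvelope(contamination=0.1,
    ...                        random_state=0).fit(X)
    >>> labels = clf.predict(X)                  # +1 inlier, -1 outlier
    >>> scores = clf.decision_function(X)        # negative for outliers
    >>> bool(np.all((labels == -1) == (scores < 0)))
    True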
"""
def __init__(self, store_precision=True, assume_centered=False,
support_fraction=None, contamination=0.1,
random_state=None):
super(EllipticEnvelope, self).__init__(
store_precision=store_precision,
assume_centered=assume_centered,
support_fraction=support_fraction,
random_state=random_state)
self.contamination = contamination

    def fit(self, X, y=None):
        """Fit the EllipticEnvelope model.

        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)
            Training data.

        y : Ignored
            Not used, present for API consistency by convention.

        Returns
        -------
        self : object
            Fitted estimator.
        """
        super(EllipticEnvelope, self).fit(X)
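        # ``self.dist_`` now holds the Mahalanobis distances of the training
        # points (computed by MinCovDet.fit above); the offset is the
        # (100 * contamination)-th percentile of their opposites, so that
        # roughly ``contamination * n_samples`` training samples end up with
        # a negative decision function, i.e. are flagged as outliers.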
        self.offset_ = np.percentile(-self.dist_, 100. * self.contamination)
        return self

    def decision_function(self, X, raw_values=None):
        """Compute the decision function of the given observations.

        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)

        raw_values : bool, optional
            Whether or not to consider raw Mahalanobis distances as the
            decision function. Must be False (default) for compatibility
            with the other outlier detection tools.

            .. deprecated:: 0.20
               ``raw_values`` has been deprecated in 0.20 and will be
               removed in 0.22.

        Returns
        -------
        decision : array-like, shape (n_samples,)
            Decision function of the samples.
            It is equal to the shifted Mahalanobis distances.
            The threshold for being an outlier is 0, which ensures
            compatibility with other outlier detection algorithms.
        """
        check_is_fitted(self, 'offset_')
        negative_mahal_dist = self.score_samples(X)

        # raw_values deprecation:
        if raw_values is not None:
            warnings.warn("raw_values parameter is deprecated in 0.20 and will"
                          " be removed in 0.22.", DeprecationWarning)
            if not raw_values:
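                # Backwards-compatible path for an explicit raw_values=False:
                # return a cube-root transformed decision,
                # threshold ** 0.33 - mahal_dist ** 0.33, where -self.offset_
                # plays the role of the threshold, instead of the plain
                # shifted distances returned below.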
                return ((-self.offset_) ** 0.33
                        - (-negative_mahal_dist) ** 0.33)

        return negative_mahal_dist - self.offset_

    def score_samples(self, X):
        """Compute the negative Mahalanobis distances.

        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)

        Returns
        -------
        negative_mahal_distances : array-like, shape (n_samples,)
            Opposite of the Mahalanobis distances.
        """
        check_is_fitted(self, 'offset_')
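        # mahalanobis() (inherited through MinCovDet) measures how far each
        # sample lies from the robust location/covariance estimate; the sign
        # is flipped so that, as for other outlier detectors, larger scores
        # correspond to more "normal" samples.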
        return -self.mahalanobis(X)

    def predict(self, X):
        """
        Predict the labels (1 inlier, -1 outlier) of X according to the
        fitted model.

        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)

        Returns
        -------
        is_inlier : array, shape (n_samples,)
            Returns -1 for anomalies/outliers and +1 for inliers.
        """
        X = check_array(X)
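        # Start with every sample labelled as an outlier (-1) and flip to
        # inlier (+1) wherever the decision function is non-negative.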
        is_inlier = np.full(X.shape[0], -1, dtype=int)
        values = self.decision_function(X)
        is_inlier[values >= 0] = 1
        return is_inlier

    def score(self, X, y, sample_weight=None):
        """Returns the mean accuracy on the given test data and labels.

        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)
            Test samples.

        y : array-like, shape (n_samples,)
            True labels for X (+1 for inliers, -1 for outliers).

        sample_weight : array-like, shape (n_samples,), optional
            Sample weights.

        Returns
        -------
        score : float
            Mean accuracy of self.predict(X) w.r.t. y.
        """
        return accuracy_score(y, self.predict(X), sample_weight=sample_weight)

    @property
    def threshold_(self):
        warnings.warn("threshold_ attribute is deprecated in 0.20 and will"
                      " be removed in 0.22.", DeprecationWarning)
        return self.offset_