From c8925be8fc33cb844af3cc11f6636e35aeb2cee1 Mon Sep 17 00:00:00 2001
From: genvalen
Date: Fri, 16 Jul 2021 21:05:43 -0400
Subject: [PATCH 1/2] Remove GraphicalLassoCV from DOCSTRING_IGNORE_LIST.

---
 maint_tools/test_docstrings.py | 1 -
 1 file changed, 1 deletion(-)

diff --git a/maint_tools/test_docstrings.py b/maint_tools/test_docstrings.py
index 587190401c61e..32db4102dfbe3 100644
--- a/maint_tools/test_docstrings.py
+++ b/maint_tools/test_docstrings.py
@@ -41,7 +41,6 @@
     "GaussianRandomProjection",
     "GradientBoostingClassifier",
     "GradientBoostingRegressor",
-    "GraphicalLassoCV",
     "GridSearchCV",
     "HalvingGridSearchCV",
     "HalvingRandomSearchCV",

From 40c2fef2ea25d7aac6abdbe54a053455b5801dc6 Mon Sep 17 00:00:00 2001
From: genvalen
Date: Fri, 16 Jul 2021 22:03:18 -0400
Subject: [PATCH 2/2] Ensure GraphicalLassoCV passes numpydoc validation

---
 sklearn/covariance/_graph_lasso.py | 41 ++++++++++++++++++++-------------------
 1 file changed, 22 insertions(+), 19 deletions(-)

diff --git a/sklearn/covariance/_graph_lasso.py b/sklearn/covariance/_graph_lasso.py
index 642a1dc2a9214..4bab1f8c32719 100644
--- a/sklearn/covariance/_graph_lasso.py
+++ b/sklearn/covariance/_graph_lasso.py
@@ -637,7 +637,7 @@ class GraphicalLassoCV(GraphicalLasso):
         stable.
 
     n_jobs : int, default=None
-        number of jobs to run in parallel.
+        Number of jobs to run in parallel.
         ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
         ``-1`` means using all processors. See :term:`Glossary <n_jobs>`
         for more details.
@@ -710,6 +710,24 @@ class GraphicalLassoCV(GraphicalLasso):
 
         .. versionadded:: 0.24
 
+    See Also
+    --------
+    graphical_lasso : L1-penalized covariance estimator.
+    GraphicalLasso : Sparse inverse covariance estimation
+        with an l1-penalized estimator.
+
+    Notes
+    -----
+    The search for the optimal penalization parameter (alpha) is done on an
+    iteratively refined grid: first the cross-validated scores on a grid are
+    computed, then a new refined grid is centered around the maximum, and so
+    on.
+
+    One of the challenges which is faced here is that the solvers can
+    fail to converge to a well-conditioned estimate. The corresponding
+    values of alpha then come out as missing values, but the optimum may
+    be close to these missing values.
+
     Examples
     --------
     >>> import numpy as np
     >>> from sklearn.covariance import GraphicalLassoCV
     >>> true_cov = np.array([[0.8, 0.0, 0.2, 0.0],
     ...                      [0.0, 0.4, 0.0, 0.0],
     ...                      [0.2, 0.0, 0.3, 0.1],
     ...                      [0.0, 0.0, 0.1, 0.7]])
     >>> np.random.seed(0)
     >>> X = np.random.multivariate_normal(mean=[0, 0, 0, 0],
     ...                                   cov=true_cov,
     ...                                   size=200)
     >>> cov = GraphicalLassoCV().fit(X)
     >>> np.around(cov.covariance_, decimals=3)
     array([[0.069, 0.053, 0.046, 0.017],
            [0.053, 0.22 , 0.13 , 0.036],
            [0.046, 0.13 , 0.322, 0.094],
@@ -730,22 +748,6 @@ class GraphicalLassoCV(GraphicalLasso):
            [0.017, 0.036, 0.094, 0.69 ]])
     >>> np.around(cov.location_, decimals=3)
     array([0.073, 0.04 , 0.038, 0.143])
-
-    See Also
-    --------
-    graphical_lasso, GraphicalLasso
-
-    Notes
-    -----
-    The search for the optimal penalization parameter (alpha) is done on an
-    iteratively refined grid: first the cross-validated scores on a grid are
-    computed, then a new refined grid is centered around the maximum, and so
-    on.
-
-    One of the challenges which is faced here is that the solvers can
-    fail to converge to a well-conditioned estimate. The corresponding
-    values of alpha then come out as missing values, but the optimum may
-    be close to these missing values.
     """
 
     def __init__(
@@ -776,12 +778,12 @@ def __init__(
         self.n_jobs = n_jobs
 
     def fit(self, X, y=None):
-        """Fits the GraphicalLasso covariance model to X.
+        """Fit the GraphicalLasso covariance model to X.
 
         Parameters
         ----------
         X : array-like of shape (n_samples, n_features)
-            Data from which to compute the covariance estimate
+            Data from which to compute the covariance estimate.
 
         y : Ignored
             Not used, present for API consistency by convention.
@@ -789,6 +791,7 @@ def fit(self, X, y=None):
         Returns
         -------
         self : object
+            Returns the instance itself.
         """
         # Covariance does not make sense for a single feature
         X = self._validate_data(X, ensure_min_features=2, estimator=self)
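
A quick local spot-check of the second commit (a sketch, not part of the patch): the repository's own gate is the pytest run over maint_tools/test_docstrings.py once the class is removed from DOCSTRING_IGNORE_LIST (something like "pytest maint_tools/test_docstrings.py -k GraphicalLassoCV", assuming the test ids include the estimator name), but the numpydoc validator can also be invoked directly. The snippet below assumes numpydoc and the patched scikit-learn are installed.

# Sketch: run the numpydoc validator against the docstrings touched by the patch.
# An empty "errors" list means the docstring passes numpydoc validation.
from numpydoc.validate import validate

for name in (
    "sklearn.covariance.GraphicalLassoCV",       # class docstring
    "sklearn.covariance.GraphicalLassoCV.fit",   # fit docstring edited above
):
    report = validate(name)
    print(name, report["errors"])                # list of (error_code, message) pairs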