diff --git a/maint_tools/test_docstrings.py b/maint_tools/test_docstrings.py
index 9b23b1789aeb4..641b2dbb6ff22 100644
--- a/maint_tools/test_docstrings.py
+++ b/maint_tools/test_docstrings.py
@@ -88,7 +88,6 @@
     "LocalOutlierFactor",
     "LocallyLinearEmbedding",
     "MDS",
-    "MLPClassifier",
     "MLPRegressor",
     "MaxAbsScaler",
     "MeanShift",
diff --git a/sklearn/neural_network/_multilayer_perceptron.py b/sklearn/neural_network/_multilayer_perceptron.py
index e07909a2e97ac..9c5baf3e91e99 100644
--- a/sklearn/neural_network/_multilayer_perceptron.py
+++ b/sklearn/neural_network/_multilayer_perceptron.py
@@ -742,7 +742,8 @@ def fit(self, X, y):
 
         Returns
         -------
-        self : returns a trained MLP model.
+        self : object
+            Returns a trained MLP model.
         """
         return self._fit(X, y, incremental=False)
 
@@ -825,7 +826,7 @@ class MLPClassifier(ClassifierMixin, BaseMultilayerPerceptron):
     batch_size : int, default='auto'
         Size of minibatches for stochastic optimizers.
         If the solver is 'lbfgs', the classifier will not use minibatch.
-        When set to "auto", `batch_size=min(200, n_samples)`
+        When set to "auto", `batch_size=min(200, n_samples)`.
 
     learning_rate : {'constant', 'invscaling', 'adaptive'}, default='constant'
         Learning rate schedule for weight updates.
@@ -904,27 +905,27 @@ class MLPClassifier(ClassifierMixin, BaseMultilayerPerceptron):
         If early stopping is False, then the training stops when the training
         loss does not improve by more than tol for n_iter_no_change consecutive
         passes over the training set.
-        Only effective when solver='sgd' or 'adam'
+        Only effective when solver='sgd' or 'adam'.
 
     validation_fraction : float, default=0.1
         The proportion of training data to set aside as validation set for
         early stopping. Must be between 0 and 1.
-        Only used if early_stopping is True
+        Only used if early_stopping is True.
 
     beta_1 : float, default=0.9
         Exponential decay rate for estimates of first moment vector in adam,
-        should be in [0, 1). Only used when solver='adam'
+        should be in [0, 1). Only used when solver='adam'.
 
     beta_2 : float, default=0.999
         Exponential decay rate for estimates of second moment vector in adam,
-        should be in [0, 1). Only used when solver='adam'
+        should be in [0, 1). Only used when solver='adam'.
 
     epsilon : float, default=1e-8
-        Value for numerical stability in adam. Only used when solver='adam'
+        Value for numerical stability in adam. Only used when solver='adam'.
 
     n_iter_no_change : int, default=10
         Maximum number of epochs to not meet ``tol`` improvement.
-        Only effective when solver='sgd' or 'adam'
+        Only effective when solver='sgd' or 'adam'.
 
     .. versionadded:: 0.20
 
@@ -979,21 +980,10 @@ class MLPClassifier(ClassifierMixin, BaseMultilayerPerceptron):
     out_activation_ : str
         Name of the output activation function.
 
-    Examples
+    See Also
     --------
-    >>> from sklearn.neural_network import MLPClassifier
-    >>> from sklearn.datasets import make_classification
-    >>> from sklearn.model_selection import train_test_split
-    >>> X, y = make_classification(n_samples=100, random_state=1)
-    >>> X_train, X_test, y_train, y_test = train_test_split(X, y, stratify=y,
-    ...                                                     random_state=1)
-    >>> clf = MLPClassifier(random_state=1, max_iter=300).fit(X_train, y_train)
-    >>> clf.predict_proba(X_test[:1])
-    array([[0.038..., 0.961...]])
-    >>> clf.predict(X_test[:5, :])
-    array([1, 0, 1, 0, 1])
-    >>> clf.score(X_test, y_test)
-    0.8...
+    MLPRegressor : Multi-layer Perceptron regressor.
+    BernoulliRBM : Bernoulli Restricted Boltzmann Machine (RBM).
 
     Notes
     -----
@@ -1023,6 +1013,22 @@ class MLPClassifier(ClassifierMixin, BaseMultilayerPerceptron):
 
     Kingma, Diederik, and Jimmy Ba. "Adam: A method for stochastic
     optimization." arXiv preprint arXiv:1412.6980 (2014).
+
+    Examples
+    --------
+    >>> from sklearn.neural_network import MLPClassifier
+    >>> from sklearn.datasets import make_classification
+    >>> from sklearn.model_selection import train_test_split
+    >>> X, y = make_classification(n_samples=100, random_state=1)
+    >>> X_train, X_test, y_train, y_test = train_test_split(X, y, stratify=y,
+    ...                                                     random_state=1)
+    >>> clf = MLPClassifier(random_state=1, max_iter=300).fit(X_train, y_train)
+    >>> clf.predict_proba(X_test[:1])
+    array([[0.038..., 0.961...]])
+    >>> clf.predict(X_test[:5, :])
+    array([1, 0, 1, 0, 1])
+    >>> clf.score(X_test, y_test)
+    0.8...
     """
 
     def __init__(
@@ -1134,7 +1140,7 @@ def _validate_input(self, X, y, incremental, reset):
         return X, y
 
     def predict(self, X):
-        """Predict using the multi-layer perceptron classifier
+        """Predict using the multi-layer perceptron classifier.
 
         Parameters
         ----------
@@ -1211,7 +1217,7 @@ def predict_log_proba(self, X):
         log_y_prob : ndarray of shape (n_samples, n_classes)
             The predicted log-probability of the sample for each class
             in the model, where classes are ordered as they are in
-            `self.classes_`. Equivalent to log(predict_proba(X))
+            `self.classes_`. Equivalent to `log(predict_proba(X))`.
         """
         y_prob = self.predict_proba(X)
         return np.log(y_prob, out=y_prob)
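
The `predict_log_proba` hunk above documents that the result is equivalent to `log(predict_proba(X))`, which the method body realizes with an in-place `np.log` on the probability array. A minimal doctest-style sketch of that equivalence, reusing the `make_classification` setup from the docstring's own Examples section (illustrative only, not part of the patch):

>>> import numpy as np
>>> from sklearn.neural_network import MLPClassifier
>>> from sklearn.datasets import make_classification
>>> X, y = make_classification(n_samples=100, random_state=1)
>>> clf = MLPClassifier(random_state=1, max_iter=300).fit(X, y)
>>> np.allclose(clf.predict_log_proba(X), np.log(clf.predict_proba(X)))
True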