Commit adb7ced

jmloyola and glemaitre authored
DOC Ensures that MLPClassifier passes numpydoc validation (#20444)
Co-authored-by: Guillaume Lemaitre <g.lemaitre58@gmail.com>
1 parent cd0759b commit adb7ced

2 files changed: +30 -25 lines


maint_tools/test_docstrings.py

-1 line

@@ -86,7 +86,6 @@
     "LocalOutlierFactor",
     "LocallyLinearEmbedding",
     "MDS",
-    "MLPClassifier",
     "MLPRegressor",
     "MaxAbsScaler",
     "MeanShift",

sklearn/neural_network/_multilayer_perceptron.py

+30 -24 lines

@@ -742,7 +742,8 @@ def fit(self, X, y):

         Returns
         -------
-        self : returns a trained MLP model.
+        self : object
+            Returns a trained MLP model.
         """
         return self._fit(X, y, incremental=False)

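Because `fit` returns the estimator itself (`self : object`), calls can be chained, which is the pattern the relocated Examples section below also uses. A minimal illustration:

    from sklearn.datasets import make_classification
    from sklearn.neural_network import MLPClassifier

    X, y = make_classification(n_samples=100, random_state=1)
    # fit() returns the fitted estimator, so it can be used inline
    clf = MLPClassifier(random_state=1, max_iter=300).fit(X, y)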

@@ -825,7 +826,7 @@ class MLPClassifier(ClassifierMixin, BaseMultilayerPerceptron):
     batch_size : int, default='auto'
         Size of minibatches for stochastic optimizers.
         If the solver is 'lbfgs', the classifier will not use minibatch.
-        When set to "auto", `batch_size=min(200, n_samples)`
+        When set to "auto", `batch_size=min(200, n_samples)`.

     learning_rate : {'constant', 'invscaling', 'adaptive'}, default='constant'
         Learning rate schedule for weight updates.
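
For reference, the 'auto' setting resolves exactly as the docstring states; a minimal sketch of that resolution (the variable names here are illustrative, not the solver's internals):

    batch_size = "auto"
    n_samples = 150  # number of rows in the training set

    if batch_size == "auto":
        # never more than 200 samples per minibatch, and never more
        # than the training set contains
        batch_size = min(200, n_samples)

    print(batch_size)  # 150
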
@@ -904,27 +905,27 @@ class MLPClassifier(ClassifierMixin, BaseMultilayerPerceptron):
         If early stopping is False, then the training stops when the training
         loss does not improve by more than tol for n_iter_no_change consecutive
         passes over the training set.
-        Only effective when solver='sgd' or 'adam'
+        Only effective when solver='sgd' or 'adam'.

     validation_fraction : float, default=0.1
         The proportion of training data to set aside as validation set for
         early stopping. Must be between 0 and 1.
-        Only used if early_stopping is True
+        Only used if early_stopping is True.

     beta_1 : float, default=0.9
         Exponential decay rate for estimates of first moment vector in adam,
-        should be in [0, 1). Only used when solver='adam'
+        should be in [0, 1). Only used when solver='adam'.

     beta_2 : float, default=0.999
         Exponential decay rate for estimates of second moment vector in adam,
-        should be in [0, 1). Only used when solver='adam'
+        should be in [0, 1). Only used when solver='adam'.

     epsilon : float, default=1e-8
-        Value for numerical stability in adam. Only used when solver='adam'
+        Value for numerical stability in adam. Only used when solver='adam'.

     n_iter_no_change : int, default=10
         Maximum number of epochs to not meet ``tol`` improvement.
-        Only effective when solver='sgd' or 'adam'
+        Only effective when solver='sgd' or 'adam'.

         .. versionadded:: 0.20

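These parameters interact: with early_stopping=True, a validation_fraction of the training data is held out and training stops once the validation score fails to improve by tol for n_iter_no_change epochs, while beta_1, beta_2, and epsilon feed the Adam update. A usage sketch, with illustrative (not recommended) values:

    from sklearn.datasets import make_classification
    from sklearn.neural_network import MLPClassifier

    X, y = make_classification(n_samples=500, random_state=1)
    clf = MLPClassifier(
        solver="adam",            # early stopping needs 'sgd' or 'adam'
        early_stopping=True,      # hold out data and monitor validation score
        validation_fraction=0.1,  # 10% of the training data for validation
        n_iter_no_change=10,      # stop after 10 epochs without improvement
        beta_1=0.9,               # decay rate for the first moment estimate
        beta_2=0.999,             # decay rate for the second moment estimate
        epsilon=1e-8,             # numerical stability term in the update
        max_iter=300,
        random_state=1,
    ).fit(X, y)
    print(clf.n_iter_)  # epochs actually run before stopping
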
@@ -979,21 +980,10 @@ class MLPClassifier(ClassifierMixin, BaseMultilayerPerceptron):
     out_activation_ : str
         Name of the output activation function.

-    Examples
+    See Also
     --------
-    >>> from sklearn.neural_network import MLPClassifier
-    >>> from sklearn.datasets import make_classification
-    >>> from sklearn.model_selection import train_test_split
-    >>> X, y = make_classification(n_samples=100, random_state=1)
-    >>> X_train, X_test, y_train, y_test = train_test_split(X, y, stratify=y,
-    ...                                                     random_state=1)
-    >>> clf = MLPClassifier(random_state=1, max_iter=300).fit(X_train, y_train)
-    >>> clf.predict_proba(X_test[:1])
-    array([[0.038..., 0.961...]])
-    >>> clf.predict(X_test[:5, :])
-    array([1, 0, 1, 0, 1])
-    >>> clf.score(X_test, y_test)
-    0.8...
+    MLPRegressor : Multi-layer Perceptron regressor.
+    BernoulliRBM : Bernoulli Restricted Boltzmann Machine (RBM).

     Notes
     -----
@@ -1023,6 +1013,22 @@ class MLPClassifier(ClassifierMixin, BaseMultilayerPerceptron):

     Kingma, Diederik, and Jimmy Ba. "Adam: A method for stochastic
     optimization." arXiv preprint arXiv:1412.6980 (2014).
+
+    Examples
+    --------
+    >>> from sklearn.neural_network import MLPClassifier
+    >>> from sklearn.datasets import make_classification
+    >>> from sklearn.model_selection import train_test_split
+    >>> X, y = make_classification(n_samples=100, random_state=1)
+    >>> X_train, X_test, y_train, y_test = train_test_split(X, y, stratify=y,
+    ...                                                     random_state=1)
+    >>> clf = MLPClassifier(random_state=1, max_iter=300).fit(X_train, y_train)
+    >>> clf.predict_proba(X_test[:1])
+    array([[0.038..., 0.961...]])
+    >>> clf.predict(X_test[:5, :])
+    array([1, 0, 1, 0, 1])
+    >>> clf.score(X_test, y_test)
+    0.8...
     """

     def __init__(
@@ -1134,7 +1140,7 @@ def _validate_input(self, X, y, incremental, reset):
         return X, y

     def predict(self, X):
-        """Predict using the multi-layer perceptron classifier
+        """Predict using the multi-layer perceptron classifier.

         Parameters
         ----------
@@ -1211,7 +1217,7 @@ def predict_log_proba(self, X):
         log_y_prob : ndarray of shape (n_samples, n_classes)
             The predicted log-probability of the sample for each class
             in the model, where classes are ordered as they are in
-            `self.classes_`. Equivalent to log(predict_proba(X))
+            `self.classes_`. Equivalent to `log(predict_proba(X))`.
         """
         y_prob = self.predict_proba(X)
         return np.log(y_prob, out=y_prob)
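
The equivalence stated in this docstring can be checked directly; a minimal sketch reusing the data from the Examples section above:

    import numpy as np
    from sklearn.datasets import make_classification
    from sklearn.neural_network import MLPClassifier

    X, y = make_classification(n_samples=100, random_state=1)
    clf = MLPClassifier(random_state=1, max_iter=300).fit(X, y)

    # predict_log_proba(X) is documented as log(predict_proba(X))
    assert np.allclose(clf.predict_log_proba(X), np.log(clf.predict_proba(X)))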
