@@ -742,7 +742,8 @@ def fit(self, X, y):

         Returns
         -------
-        self : returns a trained MLP model.
+        self : object
+            Returns a trained MLP model.
         """
         return self._fit(X, y, incremental=False)

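Reviewer note: since `fit` documents that it returns the estimator itself, calls can be chained in a single expression. A minimal sketch using only public scikit-learn API:

    from sklearn.datasets import make_classification
    from sklearn.neural_network import MLPClassifier

    # fit() returns the trained estimator, so predict() can be chained directly
    X, y = make_classification(n_samples=100, random_state=1)
    preds = MLPClassifier(random_state=1, max_iter=300).fit(X, y).predict(X[:5])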
@@ -825,7 +826,7 @@ class MLPClassifier(ClassifierMixin, BaseMultilayerPerceptron):
     batch_size : int, default='auto'
         Size of minibatches for stochastic optimizers.
         If the solver is 'lbfgs', the classifier will not use minibatch.
-        When set to "auto", `batch_size=min(200, n_samples)`
+        When set to "auto", `batch_size=min(200, n_samples)`.

     learning_rate : {'constant', 'invscaling', 'adaptive'}, default='constant'
         Learning rate schedule for weight updates.
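Reviewer note: a small sketch of how the documented "auto" rule for `batch_size` resolves to a concrete value. `resolve_batch_size` is an illustrative helper, not the library's internal code:

    def resolve_batch_size(batch_size, n_samples):
        # Documented rule: 'auto' means min(200, n_samples); an explicit
        # int is passed through. (Illustrative helper, not sklearn internals.)
        if batch_size == "auto":
            return min(200, n_samples)
        return batch_size

    print(resolve_batch_size("auto", 50))    # -> 50
    print(resolve_batch_size("auto", 1000))  # -> 200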
@@ -904,27 +905,27 @@ class MLPClassifier(ClassifierMixin, BaseMultilayerPerceptron):
         If early stopping is False, then the training stops when the training
         loss does not improve by more than tol for n_iter_no_change consecutive
         passes over the training set.
-        Only effective when solver='sgd' or 'adam'
+        Only effective when solver='sgd' or 'adam'.

     validation_fraction : float, default=0.1
         The proportion of training data to set aside as validation set for
         early stopping. Must be between 0 and 1.
-        Only used if early_stopping is True
+        Only used if early_stopping is True.

     beta_1 : float, default=0.9
         Exponential decay rate for estimates of first moment vector in adam,
-        should be in [0, 1). Only used when solver='adam'
+        should be in [0, 1). Only used when solver='adam'.

     beta_2 : float, default=0.999
         Exponential decay rate for estimates of second moment vector in adam,
-        should be in [0, 1). Only used when solver='adam'
+        should be in [0, 1). Only used when solver='adam'.

     epsilon : float, default=1e-8
-        Value for numerical stability in adam. Only used when solver='adam'
+        Value for numerical stability in adam. Only used when solver='adam'.

     n_iter_no_change : int, default=10
         Maximum number of epochs to not meet ``tol`` improvement.
-        Only effective when solver='sgd' or 'adam'
+        Only effective when solver='sgd' or 'adam'.

         .. versionadded:: 0.20

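Reviewer note: `beta_1`, `beta_2`, and `epsilon` touched above are the standard Adam coefficients from Kingma & Ba (2014), cited in the References section of this docstring. As a standalone sketch of where each one enters the update (`adam_step` is a hypothetical helper, not the solver's actual implementation):

    import numpy as np

    def adam_step(param, grad, m, v, t, lr=0.001, beta_1=0.9, beta_2=0.999,
                  epsilon=1e-8):
        # Exponentially decayed estimates of the first (m) and second (v)
        # gradient moments, bias-corrected for step count t >= 1.
        m = beta_1 * m + (1 - beta_1) * grad
        v = beta_2 * v + (1 - beta_2) * grad ** 2
        m_hat = m / (1 - beta_1 ** t)  # bias-corrected first moment
        v_hat = v / (1 - beta_2 ** t)  # bias-corrected second moment
        # epsilon keeps the division numerically stable when v_hat is ~0
        return param - lr * m_hat / (np.sqrt(v_hat) + epsilon), m, v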
@@ -979,21 +980,10 @@ class MLPClassifier(ClassifierMixin, BaseMultilayerPerceptron):
     out_activation_ : str
         Name of the output activation function.

-    Examples
+    See Also
     --------
-    >>> from sklearn.neural_network import MLPClassifier
-    >>> from sklearn.datasets import make_classification
-    >>> from sklearn.model_selection import train_test_split
-    >>> X, y = make_classification(n_samples=100, random_state=1)
-    >>> X_train, X_test, y_train, y_test = train_test_split(X, y, stratify=y,
-    ...                                                     random_state=1)
-    >>> clf = MLPClassifier(random_state=1, max_iter=300).fit(X_train, y_train)
-    >>> clf.predict_proba(X_test[:1])
-    array([[0.038..., 0.961...]])
-    >>> clf.predict(X_test[:5, :])
-    array([1, 0, 1, 0, 1])
-    >>> clf.score(X_test, y_test)
-    0.8...
+    MLPRegressor : Multi-layer Perceptron regressor.
+    BernoulliRBM : Bernoulli Restricted Boltzmann Machine (RBM).

     Notes
     -----
@@ -1023,6 +1013,22 @@ class MLPClassifier(ClassifierMixin, BaseMultilayerPerceptron):

     Kingma, Diederik, and Jimmy Ba. "Adam: A method for stochastic
     optimization." arXiv preprint arXiv:1412.6980 (2014).
+
+    Examples
+    --------
+    >>> from sklearn.neural_network import MLPClassifier
+    >>> from sklearn.datasets import make_classification
+    >>> from sklearn.model_selection import train_test_split
+    >>> X, y = make_classification(n_samples=100, random_state=1)
+    >>> X_train, X_test, y_train, y_test = train_test_split(X, y, stratify=y,
+    ...                                                     random_state=1)
+    >>> clf = MLPClassifier(random_state=1, max_iter=300).fit(X_train, y_train)
+    >>> clf.predict_proba(X_test[:1])
+    array([[0.038..., 0.961...]])
+    >>> clf.predict(X_test[:5, :])
+    array([1, 0, 1, 0, 1])
+    >>> clf.score(X_test, y_test)
+    0.8...
     """

     def __init__(
@@ -1134,7 +1140,7 @@ def _validate_input(self, X, y, incremental, reset):
         return X, y

     def predict(self, X):
-        """Predict using the multi-layer perceptron classifier
+        """Predict using the multi-layer perceptron classifier.

         Parameters
         ----------
@@ -1211,7 +1217,7 @@ def predict_log_proba(self, X):
         log_y_prob : ndarray of shape (n_samples, n_classes)
             The predicted log-probability of the sample for each class
             in the model, where classes are ordered as they are in
-            `self.classes_`. Equivalent to log(predict_proba(X))
+            `self.classes_`. Equivalent to `log(predict_proba(X))`.
         """
         y_prob = self.predict_proba(X)
         return np.log(y_prob, out=y_prob)
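Reviewer note on the last context line: passing `out=y_prob` makes NumPy write the logarithms into the probability array's own buffer, so no second array is allocated; the returned array is the same (mutated) object. A minimal standalone illustration of the idiom:

    import numpy as np

    y_prob = np.array([[0.25, 0.75]])
    log_y_prob = np.log(y_prob, out=y_prob)  # in-place: log_y_prob is y_prob
    print(log_y_prob is y_prob)              # True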