From f72e1720ac334575b2bbe82d26766aec89defc74 Mon Sep 17 00:00:00 2001 From: Martin Oywa Date: Sat, 22 Jun 2019 12:01:59 +0300 Subject: [PATCH 01/17] fix convergence warnings --- examples/neural_networks/plot_mlp_training_curves.py | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/examples/neural_networks/plot_mlp_training_curves.py b/examples/neural_networks/plot_mlp_training_curves.py index 323b2348c7342..b1737be855625 100644 --- a/examples/neural_networks/plot_mlp_training_curves.py +++ b/examples/neural_networks/plot_mlp_training_curves.py @@ -14,10 +14,18 @@ """ print(__doc__) + +import timeit +import warnings + import matplotlib.pyplot as plt + from sklearn.neural_network import MLPClassifier from sklearn.preprocessing import MinMaxScaler from sklearn import datasets +from sklearn.exceptions import ConvergenceWarning + +warnings.filterwarnings("ignore", category=ConvergenceWarning, module="sklearn") # different learning rate schedules and momentum parameters params = [{'solver': 'sgd', 'learning_rate': 'constant', 'momentum': 0, From 2e6e36b9d5771f1e6ab77e4beffb393d005b297a Mon Sep 17 00:00:00 2001 From: Martin Oywa Date: Sat, 22 Jun 2019 12:14:10 +0300 Subject: [PATCH 02/17] fix convergence warnings --- examples/neural_networks/plot_mlp_training_curves.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/examples/neural_networks/plot_mlp_training_curves.py b/examples/neural_networks/plot_mlp_training_curves.py index b1737be855625..3e0e4bb2a34f0 100644 --- a/examples/neural_networks/plot_mlp_training_curves.py +++ b/examples/neural_networks/plot_mlp_training_curves.py @@ -25,7 +25,8 @@ from sklearn import datasets from sklearn.exceptions import ConvergenceWarning -warnings.filterwarnings("ignore", category=ConvergenceWarning, module="sklearn") +warnings.filterwarnings("ignore", category=ConvergenceWarning, + module="sklearn") # different learning rate schedules and momentum parameters params = [{'solver': 'sgd', 'learning_rate': 'constant', 'momentum': 0, From 795395a57089946e688110f6d6f4df719ce3bf6b Mon Sep 17 00:00:00 2001 From: Martin Oywa Date: Sat, 22 Jun 2019 12:27:24 +0300 Subject: [PATCH 03/17] PEP8 --- examples/neural_networks/plot_mlp_training_curves.py | 1 - 1 file changed, 1 deletion(-) diff --git a/examples/neural_networks/plot_mlp_training_curves.py b/examples/neural_networks/plot_mlp_training_curves.py index 3e0e4bb2a34f0..a2e951ee18b43 100644 --- a/examples/neural_networks/plot_mlp_training_curves.py +++ b/examples/neural_networks/plot_mlp_training_curves.py @@ -15,7 +15,6 @@ print(__doc__) -import timeit import warnings import matplotlib.pyplot as plt From 91b1fbfa54139e2dd4d5248239a41f64da086bf9 Mon Sep 17 00:00:00 2001 From: Martin Oywa Date: Sat, 22 Jun 2019 12:29:19 +0300 Subject: [PATCH 04/17] PEP8 --- examples/neural_networks/plot_mlp_training_curves.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/examples/neural_networks/plot_mlp_training_curves.py b/examples/neural_networks/plot_mlp_training_curves.py index a2e951ee18b43..cf6a2f3267706 100644 --- a/examples/neural_networks/plot_mlp_training_curves.py +++ b/examples/neural_networks/plot_mlp_training_curves.py @@ -25,7 +25,7 @@ from sklearn.exceptions import ConvergenceWarning warnings.filterwarnings("ignore", category=ConvergenceWarning, - module="sklearn") + module="sklearn") # different learning rate schedules and momentum parameters params = [{'solver': 'sgd', 'learning_rate': 'constant', 'momentum': 0, From bd4a6f210b9032e514a6a8f14d970f54d72b94fa Mon Sep 17 
00:00:00 2001 From: Martin Oywa Date: Sat, 22 Jun 2019 13:33:11 +0300 Subject: [PATCH 05/17] Fix Convergence Warning by changing the Optimization Algorithm --- examples/neural_networks/plot_mlp_alpha.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/examples/neural_networks/plot_mlp_alpha.py b/examples/neural_networks/plot_mlp_alpha.py index 7077f9b2bba74..e345140b3361a 100644 --- a/examples/neural_networks/plot_mlp_alpha.py +++ b/examples/neural_networks/plot_mlp_alpha.py @@ -36,7 +36,7 @@ classifiers = [] for i in alphas: - classifiers.append(MLPClassifier(alpha=i, random_state=1)) + classifiers.append(MLPClassifier(solver='lbfgs', alpha=i, random_state=1, hidden_layer_sizes=[100, 100])) X, y = make_classification(n_features=2, n_redundant=0, n_informative=2, random_state=0, n_clusters_per_class=1) From 8ecd8e75fdcfb4eb054af49f91e9020e1d39cc65 Mon Sep 17 00:00:00 2001 From: Martin Oywa Date: Sat, 22 Jun 2019 13:34:36 +0300 Subject: [PATCH 06/17] PEP8 --- examples/neural_networks/plot_mlp_alpha.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/examples/neural_networks/plot_mlp_alpha.py b/examples/neural_networks/plot_mlp_alpha.py index e345140b3361a..7f718539131d4 100644 --- a/examples/neural_networks/plot_mlp_alpha.py +++ b/examples/neural_networks/plot_mlp_alpha.py @@ -36,7 +36,8 @@ classifiers = [] for i in alphas: - classifiers.append(MLPClassifier(solver='lbfgs', alpha=i, random_state=1, hidden_layer_sizes=[100, 100])) + classifiers.append(MLPClassifier(solver='lbfgs', alpha=i, random_state=1, + hidden_layer_sizes=[100, 100])) X, y = make_classification(n_features=2, n_redundant=0, n_informative=2, random_state=0, n_clusters_per_class=1) From 2c7d05e4d7b63ecc3206bcfb8d23544d23457401 Mon Sep 17 00:00:00 2001 From: Martin Oywa Date: Sat, 22 Jun 2019 15:13:59 +0300 Subject: [PATCH 07/17] Fixed Future Warnings by explicitly defining n_estimators. 
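Background for this change: in scikit-learn 0.20/0.21, constructing a forest without an explicit n_estimators emits a FutureWarning because the default is scheduled to change from 10 to 100 in 0.22, so the example now passes the value explicitly. A minimal sketch of the warning-free call, reusing the example's own max_depth and random_state (the concrete values are only illustrative):

    from sklearn.ensemble import RandomForestRegressor

    # passing n_estimators explicitly avoids the "default will change from
    # 10 to 100 in 0.22" FutureWarning raised by scikit-learn 0.20/0.21
    regr_rf = RandomForestRegressor(n_estimators=100, max_depth=30,
                                    random_state=2)
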
--- .../ensemble/plot_random_forest_regression_multioutput.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/examples/ensemble/plot_random_forest_regression_multioutput.py b/examples/ensemble/plot_random_forest_regression_multioutput.py index 6ae49ad694e9d..8318ec99b11c4 100644 --- a/examples/ensemble/plot_random_forest_regression_multioutput.py +++ b/examples/ensemble/plot_random_forest_regression_multioutput.py @@ -43,11 +43,12 @@ X, y, train_size=400, test_size=200, random_state=4) max_depth = 30 -regr_multirf = MultiOutputRegressor(RandomForestRegressor(max_depth=max_depth, +regr_multirf = MultiOutputRegressor(RandomForestRegressor(n_estimators=100, + max_depth=max_depth, random_state=0)) regr_multirf.fit(X_train, y_train) -regr_rf = RandomForestRegressor(max_depth=max_depth, random_state=2) +regr_rf = RandomForestRegressor(n_estimators=100, max_depth=max_depth, random_state=2) regr_rf.fit(X_train, y_train) # Predict on new data From a0eeedcd814053a7a33781168de72b023dcd319f Mon Sep 17 00:00:00 2001 From: Martin Oywa Date: Sat, 22 Jun 2019 15:16:21 +0300 Subject: [PATCH 08/17] PEP8 --- examples/ensemble/plot_random_forest_regression_multioutput.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/examples/ensemble/plot_random_forest_regression_multioutput.py b/examples/ensemble/plot_random_forest_regression_multioutput.py index 8318ec99b11c4..8b7803361a60a 100644 --- a/examples/ensemble/plot_random_forest_regression_multioutput.py +++ b/examples/ensemble/plot_random_forest_regression_multioutput.py @@ -48,7 +48,8 @@ random_state=0)) regr_multirf.fit(X_train, y_train) -regr_rf = RandomForestRegressor(n_estimators=100, max_depth=max_depth, random_state=2) +regr_rf = RandomForestRegressor(n_estimators=100, max_depth=max_depth, + random_state=2) regr_rf.fit(X_train, y_train) # Predict on new data From 4fff6418bd340a6fe223e3e7087aee041436c1c3 Mon Sep 17 00:00:00 2001 From: Martin Oywa Date: Sat, 22 Jun 2019 16:15:23 +0300 Subject: [PATCH 09/17] deleted all --- ...ot_random_forest_regression_multioutput.py | 77 ------------ examples/neural_networks/plot_mlp_alpha.py | 113 ------------------ .../plot_mlp_training_curves.py | 97 --------------- 3 files changed, 287 deletions(-) delete mode 100644 examples/ensemble/plot_random_forest_regression_multioutput.py delete mode 100644 examples/neural_networks/plot_mlp_alpha.py delete mode 100644 examples/neural_networks/plot_mlp_training_curves.py diff --git a/examples/ensemble/plot_random_forest_regression_multioutput.py b/examples/ensemble/plot_random_forest_regression_multioutput.py deleted file mode 100644 index 8b7803361a60a..0000000000000 --- a/examples/ensemble/plot_random_forest_regression_multioutput.py +++ /dev/null @@ -1,77 +0,0 @@ -""" -============================================================ -Comparing random forests and the multi-output meta estimator -============================================================ - -An example to compare multi-output regression with random forest and -the :ref:`multioutput.MultiOutputRegressor ` meta-estimator. - -This example illustrates the use of the -:ref:`multioutput.MultiOutputRegressor ` meta-estimator -to perform multi-output regression. A random forest regressor is used, -which supports multi-output regression natively, so the results can be -compared. - -The random forest regressor will only ever predict values within the -range of observations or closer to zero for each of the targets. 
As a -result the predictions are biased towards the centre of the circle. - -Using a single underlying feature the model learns both the -x and y coordinate as output. - -""" -print(__doc__) - -# Author: Tim Head -# -# License: BSD 3 clause - -import numpy as np -import matplotlib.pyplot as plt -from sklearn.ensemble import RandomForestRegressor -from sklearn.model_selection import train_test_split -from sklearn.multioutput import MultiOutputRegressor - - -# Create a random dataset -rng = np.random.RandomState(1) -X = np.sort(200 * rng.rand(600, 1) - 100, axis=0) -y = np.array([np.pi * np.sin(X).ravel(), np.pi * np.cos(X).ravel()]).T -y += (0.5 - rng.rand(*y.shape)) - -X_train, X_test, y_train, y_test = train_test_split( - X, y, train_size=400, test_size=200, random_state=4) - -max_depth = 30 -regr_multirf = MultiOutputRegressor(RandomForestRegressor(n_estimators=100, - max_depth=max_depth, - random_state=0)) -regr_multirf.fit(X_train, y_train) - -regr_rf = RandomForestRegressor(n_estimators=100, max_depth=max_depth, - random_state=2) -regr_rf.fit(X_train, y_train) - -# Predict on new data -y_multirf = regr_multirf.predict(X_test) -y_rf = regr_rf.predict(X_test) - -# Plot the results -plt.figure() -s = 50 -a = 0.4 -plt.scatter(y_test[:, 0], y_test[:, 1], edgecolor='k', - c="navy", s=s, marker="s", alpha=a, label="Data") -plt.scatter(y_multirf[:, 0], y_multirf[:, 1], edgecolor='k', - c="cornflowerblue", s=s, alpha=a, - label="Multi RF score=%.2f" % regr_multirf.score(X_test, y_test)) -plt.scatter(y_rf[:, 0], y_rf[:, 1], edgecolor='k', - c="c", s=s, marker="^", alpha=a, - label="RF score=%.2f" % regr_rf.score(X_test, y_test)) -plt.xlim([-6, 6]) -plt.ylim([-6, 6]) -plt.xlabel("target 1") -plt.ylabel("target 2") -plt.title("Comparing random forests and the multi-output meta estimator") -plt.legend() -plt.show() diff --git a/examples/neural_networks/plot_mlp_alpha.py b/examples/neural_networks/plot_mlp_alpha.py deleted file mode 100644 index 7f718539131d4..0000000000000 --- a/examples/neural_networks/plot_mlp_alpha.py +++ /dev/null @@ -1,113 +0,0 @@ -""" -================================================ -Varying regularization in Multi-layer Perceptron -================================================ - -A comparison of different values for regularization parameter 'alpha' on -synthetic datasets. The plot shows that different alphas yield different -decision functions. - -Alpha is a parameter for regularization term, aka penalty term, that combats -overfitting by constraining the size of the weights. Increasing alpha may fix -high variance (a sign of overfitting) by encouraging smaller weights, resulting -in a decision boundary plot that appears with lesser curvatures. -Similarly, decreasing alpha may fix high bias (a sign of underfitting) by -encouraging larger weights, potentially resulting in a more complicated -decision boundary. -""" -print(__doc__) - - -# Author: Issam H. 
Laradji -# License: BSD 3 clause - -import numpy as np -from matplotlib import pyplot as plt -from matplotlib.colors import ListedColormap -from sklearn.model_selection import train_test_split -from sklearn.preprocessing import StandardScaler -from sklearn.datasets import make_moons, make_circles, make_classification -from sklearn.neural_network import MLPClassifier - -h = .02 # step size in the mesh - -alphas = np.logspace(-5, 3, 5) -names = ['alpha ' + str(i) for i in alphas] - -classifiers = [] -for i in alphas: - classifiers.append(MLPClassifier(solver='lbfgs', alpha=i, random_state=1, - hidden_layer_sizes=[100, 100])) - -X, y = make_classification(n_features=2, n_redundant=0, n_informative=2, - random_state=0, n_clusters_per_class=1) -rng = np.random.RandomState(2) -X += 2 * rng.uniform(size=X.shape) -linearly_separable = (X, y) - -datasets = [make_moons(noise=0.3, random_state=0), - make_circles(noise=0.2, factor=0.5, random_state=1), - linearly_separable] - -figure = plt.figure(figsize=(17, 9)) -i = 1 -# iterate over datasets -for X, y in datasets: - # preprocess dataset, split into training and test part - X = StandardScaler().fit_transform(X) - X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=.4) - - x_min, x_max = X[:, 0].min() - .5, X[:, 0].max() + .5 - y_min, y_max = X[:, 1].min() - .5, X[:, 1].max() + .5 - xx, yy = np.meshgrid(np.arange(x_min, x_max, h), - np.arange(y_min, y_max, h)) - - # just plot the dataset first - cm = plt.cm.RdBu - cm_bright = ListedColormap(['#FF0000', '#0000FF']) - ax = plt.subplot(len(datasets), len(classifiers) + 1, i) - # Plot the training points - ax.scatter(X_train[:, 0], X_train[:, 1], c=y_train, cmap=cm_bright) - # and testing points - ax.scatter(X_test[:, 0], X_test[:, 1], c=y_test, cmap=cm_bright, alpha=0.6) - ax.set_xlim(xx.min(), xx.max()) - ax.set_ylim(yy.min(), yy.max()) - ax.set_xticks(()) - ax.set_yticks(()) - i += 1 - - # iterate over classifiers - for name, clf in zip(names, classifiers): - ax = plt.subplot(len(datasets), len(classifiers) + 1, i) - clf.fit(X_train, y_train) - score = clf.score(X_test, y_test) - - # Plot the decision boundary. For that, we will assign a color to each - # point in the mesh [x_min, x_max]x[y_min, y_max]. 
- if hasattr(clf, "decision_function"): - Z = clf.decision_function(np.c_[xx.ravel(), yy.ravel()]) - else: - Z = clf.predict_proba(np.c_[xx.ravel(), yy.ravel()])[:, 1] - - # Put the result into a color plot - Z = Z.reshape(xx.shape) - ax.contourf(xx, yy, Z, cmap=cm, alpha=.8) - - # Plot also the training points - ax.scatter(X_train[:, 0], X_train[:, 1], c=y_train, cmap=cm_bright, - edgecolors='black', s=25) - # and testing points - ax.scatter(X_test[:, 0], X_test[:, 1], c=y_test, cmap=cm_bright, - alpha=0.6, edgecolors='black', s=25) - - ax.set_xlim(xx.min(), xx.max()) - ax.set_ylim(yy.min(), yy.max()) - ax.set_xticks(()) - ax.set_yticks(()) - ax.set_title(name) - ax.text(xx.max() - .3, yy.min() + .3, ('%.2f' % score).lstrip('0'), - size=15, horizontalalignment='right') - i += 1 - -figure.subplots_adjust(left=.02, right=.98) -plt.show() diff --git a/examples/neural_networks/plot_mlp_training_curves.py b/examples/neural_networks/plot_mlp_training_curves.py deleted file mode 100644 index cf6a2f3267706..0000000000000 --- a/examples/neural_networks/plot_mlp_training_curves.py +++ /dev/null @@ -1,97 +0,0 @@ -""" -======================================================== -Compare Stochastic learning strategies for MLPClassifier -======================================================== - -This example visualizes some training loss curves for different stochastic -learning strategies, including SGD and Adam. Because of time-constraints, we -use several small datasets, for which L-BFGS might be more suitable. The -general trend shown in these examples seems to carry over to larger datasets, -however. - -Note that those results can be highly dependent on the value of -``learning_rate_init``. -""" - -print(__doc__) - -import warnings - -import matplotlib.pyplot as plt - -from sklearn.neural_network import MLPClassifier -from sklearn.preprocessing import MinMaxScaler -from sklearn import datasets -from sklearn.exceptions import ConvergenceWarning - -warnings.filterwarnings("ignore", category=ConvergenceWarning, - module="sklearn") - -# different learning rate schedules and momentum parameters -params = [{'solver': 'sgd', 'learning_rate': 'constant', 'momentum': 0, - 'learning_rate_init': 0.2}, - {'solver': 'sgd', 'learning_rate': 'constant', 'momentum': .9, - 'nesterovs_momentum': False, 'learning_rate_init': 0.2}, - {'solver': 'sgd', 'learning_rate': 'constant', 'momentum': .9, - 'nesterovs_momentum': True, 'learning_rate_init': 0.2}, - {'solver': 'sgd', 'learning_rate': 'invscaling', 'momentum': 0, - 'learning_rate_init': 0.2}, - {'solver': 'sgd', 'learning_rate': 'invscaling', 'momentum': .9, - 'nesterovs_momentum': True, 'learning_rate_init': 0.2}, - {'solver': 'sgd', 'learning_rate': 'invscaling', 'momentum': .9, - 'nesterovs_momentum': False, 'learning_rate_init': 0.2}, - {'solver': 'adam', 'learning_rate_init': 0.01}] - -labels = ["constant learning-rate", "constant with momentum", - "constant with Nesterov's momentum", - "inv-scaling learning-rate", "inv-scaling with momentum", - "inv-scaling with Nesterov's momentum", "adam"] - -plot_args = [{'c': 'red', 'linestyle': '-'}, - {'c': 'green', 'linestyle': '-'}, - {'c': 'blue', 'linestyle': '-'}, - {'c': 'red', 'linestyle': '--'}, - {'c': 'green', 'linestyle': '--'}, - {'c': 'blue', 'linestyle': '--'}, - {'c': 'black', 'linestyle': '-'}] - - -def plot_on_dataset(X, y, ax, name): - # for each dataset, plot learning for each learning strategy - print("\nlearning on dataset %s" % name) - ax.set_title(name) - X = MinMaxScaler().fit_transform(X) - mlps 
= [] - if name == "digits": - # digits is larger but converges fairly quickly - max_iter = 15 - else: - max_iter = 400 - - for label, param in zip(labels, params): - print("training: %s" % label) - mlp = MLPClassifier(verbose=0, random_state=0, - max_iter=max_iter, **param) - mlp.fit(X, y) - mlps.append(mlp) - print("Training set score: %f" % mlp.score(X, y)) - print("Training set loss: %f" % mlp.loss_) - for mlp, label, args in zip(mlps, labels, plot_args): - ax.plot(mlp.loss_curve_, label=label, **args) - - -fig, axes = plt.subplots(2, 2, figsize=(15, 10)) -# load / generate some toy datasets -iris = datasets.load_iris() -digits = datasets.load_digits() -data_sets = [(iris.data, iris.target), - (digits.data, digits.target), - datasets.make_circles(noise=0.2, factor=0.5, random_state=1), - datasets.make_moons(noise=0.3, random_state=0)] - -for ax, data, name in zip(axes.ravel(), data_sets, ['iris', 'digits', - 'circles', 'moons']): - plot_on_dataset(*data, ax=ax, name=name) - -fig.legend(ax.get_lines(), labels, ncol=3, loc="upper center") -plt.show() From b459c8991a8fea1afae99ebeeed9838d546537ed Mon Sep 17 00:00:00 2001 From: Martin Oywa Date: Sat, 22 Jun 2019 16:17:46 +0300 Subject: [PATCH 10/17] Fixed Convergence Warnings --- .../plot_mlp_training_curves.py | 97 +++++++++++++++++++ 1 file changed, 97 insertions(+) create mode 100644 examples/neural_networks/plot_mlp_training_curves.py diff --git a/examples/neural_networks/plot_mlp_training_curves.py b/examples/neural_networks/plot_mlp_training_curves.py new file mode 100644 index 0000000000000..cf6a2f3267706 --- /dev/null +++ b/examples/neural_networks/plot_mlp_training_curves.py @@ -0,0 +1,97 @@ +""" +======================================================== +Compare Stochastic learning strategies for MLPClassifier +======================================================== + +This example visualizes some training loss curves for different stochastic +learning strategies, including SGD and Adam. Because of time-constraints, we +use several small datasets, for which L-BFGS might be more suitable. The +general trend shown in these examples seems to carry over to larger datasets, +however. + +Note that those results can be highly dependent on the value of +``learning_rate_init``. 
+""" + +print(__doc__) + +import warnings + +import matplotlib.pyplot as plt + +from sklearn.neural_network import MLPClassifier +from sklearn.preprocessing import MinMaxScaler +from sklearn import datasets +from sklearn.exceptions import ConvergenceWarning + +warnings.filterwarnings("ignore", category=ConvergenceWarning, + module="sklearn") + +# different learning rate schedules and momentum parameters +params = [{'solver': 'sgd', 'learning_rate': 'constant', 'momentum': 0, + 'learning_rate_init': 0.2}, + {'solver': 'sgd', 'learning_rate': 'constant', 'momentum': .9, + 'nesterovs_momentum': False, 'learning_rate_init': 0.2}, + {'solver': 'sgd', 'learning_rate': 'constant', 'momentum': .9, + 'nesterovs_momentum': True, 'learning_rate_init': 0.2}, + {'solver': 'sgd', 'learning_rate': 'invscaling', 'momentum': 0, + 'learning_rate_init': 0.2}, + {'solver': 'sgd', 'learning_rate': 'invscaling', 'momentum': .9, + 'nesterovs_momentum': True, 'learning_rate_init': 0.2}, + {'solver': 'sgd', 'learning_rate': 'invscaling', 'momentum': .9, + 'nesterovs_momentum': False, 'learning_rate_init': 0.2}, + {'solver': 'adam', 'learning_rate_init': 0.01}] + +labels = ["constant learning-rate", "constant with momentum", + "constant with Nesterov's momentum", + "inv-scaling learning-rate", "inv-scaling with momentum", + "inv-scaling with Nesterov's momentum", "adam"] + +plot_args = [{'c': 'red', 'linestyle': '-'}, + {'c': 'green', 'linestyle': '-'}, + {'c': 'blue', 'linestyle': '-'}, + {'c': 'red', 'linestyle': '--'}, + {'c': 'green', 'linestyle': '--'}, + {'c': 'blue', 'linestyle': '--'}, + {'c': 'black', 'linestyle': '-'}] + + +def plot_on_dataset(X, y, ax, name): + # for each dataset, plot learning for each learning strategy + print("\nlearning on dataset %s" % name) + ax.set_title(name) + X = MinMaxScaler().fit_transform(X) + mlps = [] + if name == "digits": + # digits is larger but converges fairly quickly + max_iter = 15 + else: + max_iter = 400 + + for label, param in zip(labels, params): + print("training: %s" % label) + mlp = MLPClassifier(verbose=0, random_state=0, + max_iter=max_iter, **param) + mlp.fit(X, y) + mlps.append(mlp) + print("Training set score: %f" % mlp.score(X, y)) + print("Training set loss: %f" % mlp.loss_) + for mlp, label, args in zip(mlps, labels, plot_args): + ax.plot(mlp.loss_curve_, label=label, **args) + + +fig, axes = plt.subplots(2, 2, figsize=(15, 10)) +# load / generate some toy datasets +iris = datasets.load_iris() +digits = datasets.load_digits() +data_sets = [(iris.data, iris.target), + (digits.data, digits.target), + datasets.make_circles(noise=0.2, factor=0.5, random_state=1), + datasets.make_moons(noise=0.3, random_state=0)] + +for ax, data, name in zip(axes.ravel(), data_sets, ['iris', 'digits', + 'circles', 'moons']): + plot_on_dataset(*data, ax=ax, name=name) + +fig.legend(ax.get_lines(), labels, ncol=3, loc="upper center") +plt.show() From 66205c2f6891ef77d7acf8da522593532507e896 Mon Sep 17 00:00:00 2001 From: Martin Oywa Date: Sat, 22 Jun 2019 23:59:42 +0300 Subject: [PATCH 11/17] removed changes on unrelated examples --- ...ot_random_forest_regression_multioutput.py | 75 ++++++++++++ examples/neural_networks/plot_mlp_alpha.py | 112 ++++++++++++++++++ 2 files changed, 187 insertions(+) create mode 100644 examples/ensemble/plot_random_forest_regression_multioutput.py create mode 100644 examples/neural_networks/plot_mlp_alpha.py diff --git a/examples/ensemble/plot_random_forest_regression_multioutput.py 
b/examples/ensemble/plot_random_forest_regression_multioutput.py new file mode 100644 index 0000000000000..6ae49ad694e9d --- /dev/null +++ b/examples/ensemble/plot_random_forest_regression_multioutput.py @@ -0,0 +1,75 @@ +""" +============================================================ +Comparing random forests and the multi-output meta estimator +============================================================ + +An example to compare multi-output regression with random forest and +the :ref:`multioutput.MultiOutputRegressor ` meta-estimator. + +This example illustrates the use of the +:ref:`multioutput.MultiOutputRegressor ` meta-estimator +to perform multi-output regression. A random forest regressor is used, +which supports multi-output regression natively, so the results can be +compared. + +The random forest regressor will only ever predict values within the +range of observations or closer to zero for each of the targets. As a +result the predictions are biased towards the centre of the circle. + +Using a single underlying feature the model learns both the +x and y coordinate as output. + +""" +print(__doc__) + +# Author: Tim Head +# +# License: BSD 3 clause + +import numpy as np +import matplotlib.pyplot as plt +from sklearn.ensemble import RandomForestRegressor +from sklearn.model_selection import train_test_split +from sklearn.multioutput import MultiOutputRegressor + + +# Create a random dataset +rng = np.random.RandomState(1) +X = np.sort(200 * rng.rand(600, 1) - 100, axis=0) +y = np.array([np.pi * np.sin(X).ravel(), np.pi * np.cos(X).ravel()]).T +y += (0.5 - rng.rand(*y.shape)) + +X_train, X_test, y_train, y_test = train_test_split( + X, y, train_size=400, test_size=200, random_state=4) + +max_depth = 30 +regr_multirf = MultiOutputRegressor(RandomForestRegressor(max_depth=max_depth, + random_state=0)) +regr_multirf.fit(X_train, y_train) + +regr_rf = RandomForestRegressor(max_depth=max_depth, random_state=2) +regr_rf.fit(X_train, y_train) + +# Predict on new data +y_multirf = regr_multirf.predict(X_test) +y_rf = regr_rf.predict(X_test) + +# Plot the results +plt.figure() +s = 50 +a = 0.4 +plt.scatter(y_test[:, 0], y_test[:, 1], edgecolor='k', + c="navy", s=s, marker="s", alpha=a, label="Data") +plt.scatter(y_multirf[:, 0], y_multirf[:, 1], edgecolor='k', + c="cornflowerblue", s=s, alpha=a, + label="Multi RF score=%.2f" % regr_multirf.score(X_test, y_test)) +plt.scatter(y_rf[:, 0], y_rf[:, 1], edgecolor='k', + c="c", s=s, marker="^", alpha=a, + label="RF score=%.2f" % regr_rf.score(X_test, y_test)) +plt.xlim([-6, 6]) +plt.ylim([-6, 6]) +plt.xlabel("target 1") +plt.ylabel("target 2") +plt.title("Comparing random forests and the multi-output meta estimator") +plt.legend() +plt.show() diff --git a/examples/neural_networks/plot_mlp_alpha.py b/examples/neural_networks/plot_mlp_alpha.py new file mode 100644 index 0000000000000..7077f9b2bba74 --- /dev/null +++ b/examples/neural_networks/plot_mlp_alpha.py @@ -0,0 +1,112 @@ +""" +================================================ +Varying regularization in Multi-layer Perceptron +================================================ + +A comparison of different values for regularization parameter 'alpha' on +synthetic datasets. The plot shows that different alphas yield different +decision functions. + +Alpha is a parameter for regularization term, aka penalty term, that combats +overfitting by constraining the size of the weights. 
Increasing alpha may fix +high variance (a sign of overfitting) by encouraging smaller weights, resulting +in a decision boundary plot that appears with lesser curvatures. +Similarly, decreasing alpha may fix high bias (a sign of underfitting) by +encouraging larger weights, potentially resulting in a more complicated +decision boundary. +""" +print(__doc__) + + +# Author: Issam H. Laradji +# License: BSD 3 clause + +import numpy as np +from matplotlib import pyplot as plt +from matplotlib.colors import ListedColormap +from sklearn.model_selection import train_test_split +from sklearn.preprocessing import StandardScaler +from sklearn.datasets import make_moons, make_circles, make_classification +from sklearn.neural_network import MLPClassifier + +h = .02 # step size in the mesh + +alphas = np.logspace(-5, 3, 5) +names = ['alpha ' + str(i) for i in alphas] + +classifiers = [] +for i in alphas: + classifiers.append(MLPClassifier(alpha=i, random_state=1)) + +X, y = make_classification(n_features=2, n_redundant=0, n_informative=2, + random_state=0, n_clusters_per_class=1) +rng = np.random.RandomState(2) +X += 2 * rng.uniform(size=X.shape) +linearly_separable = (X, y) + +datasets = [make_moons(noise=0.3, random_state=0), + make_circles(noise=0.2, factor=0.5, random_state=1), + linearly_separable] + +figure = plt.figure(figsize=(17, 9)) +i = 1 +# iterate over datasets +for X, y in datasets: + # preprocess dataset, split into training and test part + X = StandardScaler().fit_transform(X) + X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=.4) + + x_min, x_max = X[:, 0].min() - .5, X[:, 0].max() + .5 + y_min, y_max = X[:, 1].min() - .5, X[:, 1].max() + .5 + xx, yy = np.meshgrid(np.arange(x_min, x_max, h), + np.arange(y_min, y_max, h)) + + # just plot the dataset first + cm = plt.cm.RdBu + cm_bright = ListedColormap(['#FF0000', '#0000FF']) + ax = plt.subplot(len(datasets), len(classifiers) + 1, i) + # Plot the training points + ax.scatter(X_train[:, 0], X_train[:, 1], c=y_train, cmap=cm_bright) + # and testing points + ax.scatter(X_test[:, 0], X_test[:, 1], c=y_test, cmap=cm_bright, alpha=0.6) + ax.set_xlim(xx.min(), xx.max()) + ax.set_ylim(yy.min(), yy.max()) + ax.set_xticks(()) + ax.set_yticks(()) + i += 1 + + # iterate over classifiers + for name, clf in zip(names, classifiers): + ax = plt.subplot(len(datasets), len(classifiers) + 1, i) + clf.fit(X_train, y_train) + score = clf.score(X_test, y_test) + + # Plot the decision boundary. For that, we will assign a color to each + # point in the mesh [x_min, x_max]x[y_min, y_max]. 
+ if hasattr(clf, "decision_function"): + Z = clf.decision_function(np.c_[xx.ravel(), yy.ravel()]) + else: + Z = clf.predict_proba(np.c_[xx.ravel(), yy.ravel()])[:, 1] + + # Put the result into a color plot + Z = Z.reshape(xx.shape) + ax.contourf(xx, yy, Z, cmap=cm, alpha=.8) + + # Plot also the training points + ax.scatter(X_train[:, 0], X_train[:, 1], c=y_train, cmap=cm_bright, + edgecolors='black', s=25) + # and testing points + ax.scatter(X_test[:, 0], X_test[:, 1], c=y_test, cmap=cm_bright, + alpha=0.6, edgecolors='black', s=25) + + ax.set_xlim(xx.min(), xx.max()) + ax.set_ylim(yy.min(), yy.max()) + ax.set_xticks(()) + ax.set_yticks(()) + ax.set_title(name) + ax.text(xx.max() - .3, yy.min() + .3, ('%.2f' % score).lstrip('0'), + size=15, horizontalalignment='right') + i += 1 + +figure.subplots_adjust(left=.02, right=.98) +plt.show() From 1dffe4fd114794c226569c06831cd537da00b35b Mon Sep 17 00:00:00 2001 From: Martin Oywa Date: Mon, 24 Jun 2019 19:48:00 +0300 Subject: [PATCH 12/17] add comment and with statement --- .../plot_mlp_training_curves.py | 141 +++++++++--------- 1 file changed, 72 insertions(+), 69 deletions(-) diff --git a/examples/neural_networks/plot_mlp_training_curves.py b/examples/neural_networks/plot_mlp_training_curves.py index cf6a2f3267706..4e0ce7dee0aee 100644 --- a/examples/neural_networks/plot_mlp_training_curves.py +++ b/examples/neural_networks/plot_mlp_training_curves.py @@ -24,74 +24,77 @@ from sklearn import datasets from sklearn.exceptions import ConvergenceWarning -warnings.filterwarnings("ignore", category=ConvergenceWarning, - module="sklearn") - -# different learning rate schedules and momentum parameters -params = [{'solver': 'sgd', 'learning_rate': 'constant', 'momentum': 0, - 'learning_rate_init': 0.2}, - {'solver': 'sgd', 'learning_rate': 'constant', 'momentum': .9, - 'nesterovs_momentum': False, 'learning_rate_init': 0.2}, - {'solver': 'sgd', 'learning_rate': 'constant', 'momentum': .9, - 'nesterovs_momentum': True, 'learning_rate_init': 0.2}, - {'solver': 'sgd', 'learning_rate': 'invscaling', 'momentum': 0, - 'learning_rate_init': 0.2}, - {'solver': 'sgd', 'learning_rate': 'invscaling', 'momentum': .9, - 'nesterovs_momentum': True, 'learning_rate_init': 0.2}, - {'solver': 'sgd', 'learning_rate': 'invscaling', 'momentum': .9, - 'nesterovs_momentum': False, 'learning_rate_init': 0.2}, - {'solver': 'adam', 'learning_rate_init': 0.01}] - -labels = ["constant learning-rate", "constant with momentum", - "constant with Nesterov's momentum", - "inv-scaling learning-rate", "inv-scaling with momentum", - "inv-scaling with Nesterov's momentum", "adam"] - -plot_args = [{'c': 'red', 'linestyle': '-'}, - {'c': 'green', 'linestyle': '-'}, - {'c': 'blue', 'linestyle': '-'}, - {'c': 'red', 'linestyle': '--'}, - {'c': 'green', 'linestyle': '--'}, - {'c': 'blue', 'linestyle': '--'}, - {'c': 'black', 'linestyle': '-'}] - - -def plot_on_dataset(X, y, ax, name): - # for each dataset, plot learning for each learning strategy - print("\nlearning on dataset %s" % name) - ax.set_title(name) - X = MinMaxScaler().fit_transform(X) - mlps = [] - if name == "digits": - # digits is larger but converges fairly quickly - max_iter = 15 - else: - max_iter = 400 - - for label, param in zip(labels, params): - print("training: %s" % label) - mlp = MLPClassifier(verbose=0, random_state=0, - max_iter=max_iter, **param) - mlp.fit(X, y) - mlps.append(mlp) - print("Training set score: %f" % mlp.score(X, y)) - print("Training set loss: %f" % mlp.loss_) - for mlp, label, args in zip(mlps, 
labels, plot_args): - ax.plot(mlp.loss_curve_, label=label, **args) - - -fig, axes = plt.subplots(2, 2, figsize=(15, 10)) -# load / generate some toy datasets -iris = datasets.load_iris() -digits = datasets.load_digits() -data_sets = [(iris.data, iris.target), - (digits.data, digits.target), - datasets.make_circles(noise=0.2, factor=0.5, random_state=1), - datasets.make_moons(noise=0.3, random_state=0)] - -for ax, data, name in zip(axes.ravel(), data_sets, ['iris', 'digits', +# some parameter combinations will not converge as can be seen on the +# plots so they are ignored here +with warnings.catch_warnings(): + warnings.filterwarnings("ignore", category=ConvergenceWarning, + module="sklearn") + + # different learning rate schedules and momentum parameters + params = [{'solver': 'sgd', 'learning_rate': 'constant', 'momentum': 0, + 'learning_rate_init': 0.2}, + {'solver': 'sgd', 'learning_rate': 'constant', 'momentum': .9, + 'nesterovs_momentum': False, 'learning_rate_init': 0.2}, + {'solver': 'sgd', 'learning_rate': 'constant', 'momentum': .9, + 'nesterovs_momentum': True, 'learning_rate_init': 0.2}, + {'solver': 'sgd', 'learning_rate': 'invscaling', 'momentum': 0, + 'learning_rate_init': 0.2}, + {'solver': 'sgd', 'learning_rate': 'invscaling', 'momentum': .9, + 'nesterovs_momentum': True, 'learning_rate_init': 0.2}, + {'solver': 'sgd', 'learning_rate': 'invscaling', 'momentum': .9, + 'nesterovs_momentum': False, 'learning_rate_init': 0.2}, + {'solver': 'adam', 'learning_rate_init': 0.01}] + + labels = ["constant learning-rate", "constant with momentum", + "constant with Nesterov's momentum", + "inv-scaling learning-rate", "inv-scaling with momentum", + "inv-scaling with Nesterov's momentum", "adam"] + + plot_args = [{'c': 'red', 'linestyle': '-'}, + {'c': 'green', 'linestyle': '-'}, + {'c': 'blue', 'linestyle': '-'}, + {'c': 'red', 'linestyle': '--'}, + {'c': 'green', 'linestyle': '--'}, + {'c': 'blue', 'linestyle': '--'}, + {'c': 'black', 'linestyle': '-'}] + + + def plot_on_dataset(X, y, ax, name): + # for each dataset, plot learning for each learning strategy + print("\nlearning on dataset %s" % name) + ax.set_title(name) + X = MinMaxScaler().fit_transform(X) + mlps = [] + if name == "digits": + # digits is larger but converges fairly quickly + max_iter = 15 + else: + max_iter = 400 + + for label, param in zip(labels, params): + print("training: %s" % label) + mlp = MLPClassifier(verbose=0, random_state=0, + max_iter=max_iter, **param) + mlp.fit(X, y) + mlps.append(mlp) + print("Training set score: %f" % mlp.score(X, y)) + print("Training set loss: %f" % mlp.loss_) + for mlp, label, args in zip(mlps, labels, plot_args): + ax.plot(mlp.loss_curve_, label=label, **args) + + + fig, axes = plt.subplots(2, 2, figsize=(15, 10)) + # load / generate some toy datasets + iris = datasets.load_iris() + digits = datasets.load_digits() + data_sets = [(iris.data, iris.target), + (digits.data, digits.target), + datasets.make_circles(noise=0.2, factor=0.5, random_state=1), + datasets.make_moons(noise=0.3, random_state=0)] + + for ax, data, name in zip(axes.ravel(), data_sets, ['iris', 'digits', 'circles', 'moons']): - plot_on_dataset(*data, ax=ax, name=name) + plot_on_dataset(*data, ax=ax, name=name) -fig.legend(ax.get_lines(), labels, ncol=3, loc="upper center") -plt.show() + fig.legend(ax.get_lines(), labels, ncol=3, loc="upper center") + plt.show() From b09820baccce81a379786d805f906cf18b04039d Mon Sep 17 00:00:00 2001 From: Martin Oywa Date: Mon, 24 Jun 2019 20:40:03 +0300 Subject: [PATCH 13/17] PEP8 
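Beyond the indentation clean-up, note that warnings.catch_warnings() saves the warning-filter state on entry and restores it on exit, so an "ignore" filter set inside the block has no effect outside it; the following commits rely on this to narrow the suppression down to the fit call. A minimal self-contained sketch of that final pattern, built only from pieces already used in the example (the particular dataset/hyper-parameter combination is chosen purely for illustration):

    import warnings

    from sklearn.datasets import make_moons
    from sklearn.exceptions import ConvergenceWarning
    from sklearn.neural_network import MLPClassifier

    X, y = make_moons(noise=0.3, random_state=0)
    mlp = MLPClassifier(solver='sgd', learning_rate_init=0.2, max_iter=15,
                        random_state=0)
    with warnings.catch_warnings():
        # the filter applies only inside this block; the previous filter
        # state is restored as soon as the block exits
        warnings.filterwarnings("ignore", category=ConvergenceWarning,
                                module="sklearn")
        mlp.fit(X, y)  # any ConvergenceWarning raised by fit() is ignored here
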
--- .../plot_mlp_training_curves.py | 104 +++++++++--------- 1 file changed, 52 insertions(+), 52 deletions(-) diff --git a/examples/neural_networks/plot_mlp_training_curves.py b/examples/neural_networks/plot_mlp_training_curves.py index 4e0ce7dee0aee..874884df4beaa 100644 --- a/examples/neural_networks/plot_mlp_training_curves.py +++ b/examples/neural_networks/plot_mlp_training_curves.py @@ -24,45 +24,45 @@ from sklearn import datasets from sklearn.exceptions import ConvergenceWarning -# some parameter combinations will not converge as can be seen on the -# plots so they are ignored here -with warnings.catch_warnings(): - warnings.filterwarnings("ignore", category=ConvergenceWarning, - module="sklearn") - - # different learning rate schedules and momentum parameters - params = [{'solver': 'sgd', 'learning_rate': 'constant', 'momentum': 0, - 'learning_rate_init': 0.2}, - {'solver': 'sgd', 'learning_rate': 'constant', 'momentum': .9, - 'nesterovs_momentum': False, 'learning_rate_init': 0.2}, - {'solver': 'sgd', 'learning_rate': 'constant', 'momentum': .9, - 'nesterovs_momentum': True, 'learning_rate_init': 0.2}, - {'solver': 'sgd', 'learning_rate': 'invscaling', 'momentum': 0, - 'learning_rate_init': 0.2}, - {'solver': 'sgd', 'learning_rate': 'invscaling', 'momentum': .9, - 'nesterovs_momentum': True, 'learning_rate_init': 0.2}, - {'solver': 'sgd', 'learning_rate': 'invscaling', 'momentum': .9, - 'nesterovs_momentum': False, 'learning_rate_init': 0.2}, - {'solver': 'adam', 'learning_rate_init': 0.01}] - - labels = ["constant learning-rate", "constant with momentum", - "constant with Nesterov's momentum", - "inv-scaling learning-rate", "inv-scaling with momentum", - "inv-scaling with Nesterov's momentum", "adam"] - - plot_args = [{'c': 'red', 'linestyle': '-'}, - {'c': 'green', 'linestyle': '-'}, - {'c': 'blue', 'linestyle': '-'}, - {'c': 'red', 'linestyle': '--'}, - {'c': 'green', 'linestyle': '--'}, - {'c': 'blue', 'linestyle': '--'}, - {'c': 'black', 'linestyle': '-'}] - - - def plot_on_dataset(X, y, ax, name): - # for each dataset, plot learning for each learning strategy - print("\nlearning on dataset %s" % name) - ax.set_title(name) +# different learning rate schedules and momentum parameters +params = [{'solver': 'sgd', 'learning_rate': 'constant', 'momentum': 0, + 'learning_rate_init': 0.2}, + {'solver': 'sgd', 'learning_rate': 'constant', 'momentum': .9, + 'nesterovs_momentum': False, 'learning_rate_init': 0.2}, + {'solver': 'sgd', 'learning_rate': 'constant', 'momentum': .9, + 'nesterovs_momentum': True, 'learning_rate_init': 0.2}, + {'solver': 'sgd', 'learning_rate': 'invscaling', 'momentum': 0, + 'learning_rate_init': 0.2}, + {'solver': 'sgd', 'learning_rate': 'invscaling', 'momentum': .9, + 'nesterovs_momentum': True, 'learning_rate_init': 0.2}, + {'solver': 'sgd', 'learning_rate': 'invscaling', 'momentum': .9, + 'nesterovs_momentum': False, 'learning_rate_init': 0.2}, + {'solver': 'adam', 'learning_rate_init': 0.01}] + +labels = ["constant learning-rate", "constant with momentum", + "constant with Nesterov's momentum", + "inv-scaling learning-rate", "inv-scaling with momentum", + "inv-scaling with Nesterov's momentum", "adam"] + +plot_args = [{'c': 'red', 'linestyle': '-'}, + {'c': 'green', 'linestyle': '-'}, + {'c': 'blue', 'linestyle': '-'}, + {'c': 'red', 'linestyle': '--'}, + {'c': 'green', 'linestyle': '--'}, + {'c': 'blue', 'linestyle': '--'}, + {'c': 'black', 'linestyle': '-'}] + + +def plot_on_dataset(X, y, ax, name): + # for each dataset, plot learning for each 
learning strategy + print("\nlearning on dataset %s" % name) + ax.set_title(name) + + # some parameter combinations will not converge as can be seen on the + # plots so they are ignored here + with warnings.catch_warnings(): + warnings.filterwarnings("ignore", category=ConvergenceWarning, + module="sklearn") X = MinMaxScaler().fit_transform(X) mlps = [] if name == "digits": @@ -80,21 +80,21 @@ def plot_on_dataset(X, y, ax, name): print("Training set score: %f" % mlp.score(X, y)) print("Training set loss: %f" % mlp.loss_) for mlp, label, args in zip(mlps, labels, plot_args): - ax.plot(mlp.loss_curve_, label=label, **args) + ax.plot(mlp.loss_curve_, label=label, **args) - fig, axes = plt.subplots(2, 2, figsize=(15, 10)) - # load / generate some toy datasets - iris = datasets.load_iris() - digits = datasets.load_digits() - data_sets = [(iris.data, iris.target), - (digits.data, digits.target), - datasets.make_circles(noise=0.2, factor=0.5, random_state=1), - datasets.make_moons(noise=0.3, random_state=0)] +fig, axes = plt.subplots(2, 2, figsize=(15, 10)) +# load / generate some toy datasets +iris = datasets.load_iris() +digits = datasets.load_digits() +data_sets = [(iris.data, iris.target), + (digits.data, digits.target), + datasets.make_circles(noise=0.2, factor=0.5, random_state=1), + datasets.make_moons(noise=0.3, random_state=0)] - for ax, data, name in zip(axes.ravel(), data_sets, ['iris', 'digits', +for ax, data, name in zip(axes.ravel(), data_sets, ['iris', 'digits', 'circles', 'moons']): - plot_on_dataset(*data, ax=ax, name=name) + plot_on_dataset(*data, ax=ax, name=name) - fig.legend(ax.get_lines(), labels, ncol=3, loc="upper center") - plt.show() +fig.legend(ax.get_lines(), labels, ncol=3, loc="upper center") +plt.show() From 45651760a0f69d7b9734392aeea1b4f9fc27d3da Mon Sep 17 00:00:00 2001 From: Martin Oywa Date: Mon, 24 Jun 2019 21:08:52 +0300 Subject: [PATCH 14/17] context manager fix --- .../plot_mlp_training_curves.py | 42 ++++++++++--------- 1 file changed, 22 insertions(+), 20 deletions(-) diff --git a/examples/neural_networks/plot_mlp_training_curves.py b/examples/neural_networks/plot_mlp_training_curves.py index 874884df4beaa..197bc54f5b0d4 100644 --- a/examples/neural_networks/plot_mlp_training_curves.py +++ b/examples/neural_networks/plot_mlp_training_curves.py @@ -58,27 +58,29 @@ def plot_on_dataset(X, y, ax, name): print("\nlearning on dataset %s" % name) ax.set_title(name) - # some parameter combinations will not converge as can be seen on the - # plots so they are ignored here - with warnings.catch_warnings(): - warnings.filterwarnings("ignore", category=ConvergenceWarning, - module="sklearn") - X = MinMaxScaler().fit_transform(X) - mlps = [] - if name == "digits": - # digits is larger but converges fairly quickly - max_iter = 15 - else: - max_iter = 400 - - for label, param in zip(labels, params): - print("training: %s" % label) - mlp = MLPClassifier(verbose=0, random_state=0, - max_iter=max_iter, **param) + X = MinMaxScaler().fit_transform(X) + mlps = [] + if name == "digits": + # digits is larger but converges fairly quickly + max_iter = 15 + else: + max_iter = 400 + + for label, param in zip(labels, params): + print("training: %s" % label) + mlp = MLPClassifier(verbose=0, random_state=0, + max_iter=max_iter, **param) + + # some parameter combinations will not converge as can be seen on the + # plots so they are ignored here + with warnings.catch_warnings(): + warnings.filterwarnings("ignore", category=ConvergenceWarning, + module="sklearn") mlp.fit(X, y) - 
mlps.append(mlp) - print("Training set score: %f" % mlp.score(X, y)) - print("Training set loss: %f" % mlp.loss_) + + mlps.append(mlp) + print("Training set score: %f" % mlp.score(X, y)) + print("Training set loss: %f" % mlp.loss_) for mlp, label, args in zip(mlps, labels, plot_args): ax.plot(mlp.loss_curve_, label=label, **args) From 9a6f615b979437948609c1b4787a4647236cb705 Mon Sep 17 00:00:00 2001 From: Martin Oywa Date: Mon, 24 Jun 2019 21:10:52 +0300 Subject: [PATCH 15/17] fixed indentation --- examples/neural_networks/plot_mlp_training_curves.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/examples/neural_networks/plot_mlp_training_curves.py b/examples/neural_networks/plot_mlp_training_curves.py index 197bc54f5b0d4..6aeb8be935f51 100644 --- a/examples/neural_networks/plot_mlp_training_curves.py +++ b/examples/neural_networks/plot_mlp_training_curves.py @@ -81,8 +81,8 @@ def plot_on_dataset(X, y, ax, name): mlps.append(mlp) print("Training set score: %f" % mlp.score(X, y)) print("Training set loss: %f" % mlp.loss_) - for mlp, label, args in zip(mlps, labels, plot_args): - ax.plot(mlp.loss_curve_, label=label, **args) + for mlp, label, args in zip(mlps, labels, plot_args): + ax.plot(mlp.loss_curve_, label=label, **args) fig, axes = plt.subplots(2, 2, figsize=(15, 10)) From 62ee2a3b91a0c35ea6779f48c15fb58c8b0ddf97 Mon Sep 17 00:00:00 2001 From: Martin Oywa Date: Mon, 24 Jun 2019 21:17:43 +0300 Subject: [PATCH 16/17] PEP8 --- examples/neural_networks/plot_mlp_training_curves.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/examples/neural_networks/plot_mlp_training_curves.py b/examples/neural_networks/plot_mlp_training_curves.py index 6aeb8be935f51..e7fb5c845c132 100644 --- a/examples/neural_networks/plot_mlp_training_curves.py +++ b/examples/neural_networks/plot_mlp_training_curves.py @@ -70,7 +70,7 @@ def plot_on_dataset(X, y, ax, name): print("training: %s" % label) mlp = MLPClassifier(verbose=0, random_state=0, max_iter=max_iter, **param) - + # some parameter combinations will not converge as can be seen on the # plots so they are ignored here with warnings.catch_warnings(): From 533cc72d988059bd3533f7dc117f647e873eb141 Mon Sep 17 00:00:00 2001 From: Martin Oywa Date: Mon, 24 Jun 2019 21:45:22 +0300 Subject: [PATCH 17/17] flake8 --- examples/neural_networks/plot_mlp_training_curves.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/examples/neural_networks/plot_mlp_training_curves.py b/examples/neural_networks/plot_mlp_training_curves.py index e7fb5c845c132..c00bd9a6f452d 100644 --- a/examples/neural_networks/plot_mlp_training_curves.py +++ b/examples/neural_networks/plot_mlp_training_curves.py @@ -70,7 +70,7 @@ def plot_on_dataset(X, y, ax, name): print("training: %s" % label) mlp = MLPClassifier(verbose=0, random_state=0, max_iter=max_iter, **param) - + # some parameter combinations will not converge as can be seen on the # plots so they are ignored here with warnings.catch_warnings():
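
A closing note on the other example touched by this series: commit 05 addresses the ConvergenceWarning in plot_mlp_alpha.py by switching the MLPClassifier from the default 'adam' solver to 'lbfgs', a full-batch quasi-Newton solver that tends to reach its convergence tolerance within the default iteration budget on small datasets like the ones used there. An illustrative, self-contained sketch of that configuration (the dataset below is only a stand-in, taken from the same example):

    from sklearn.datasets import make_moons
    from sklearn.neural_network import MLPClassifier

    X, y = make_moons(noise=0.3, random_state=0)
    # 'lbfgs' typically converges on small problems like this one, so no
    # ConvergenceWarning is expected for this configuration
    clf = MLPClassifier(solver='lbfgs', alpha=1e-5, random_state=1,
                        hidden_layer_sizes=[100, 100]).fit(X, y)
    print(clf.score(X, y))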