
Commit adab236

Pushing the docs to dev/ for branch: master, commit 7b0b6d73441c2a29e455992c3f1e306a4feab07e
1 parent 5254a54 commit adab236

File tree: 914 files changed (+2700 / -2697 lines)


dev/_downloads/plot_confusion_matrix.ipynb

Lines changed: 1 addition & 1 deletion
@@ -24,7 +24,7 @@
     "execution_count": null,
     "cell_type": "code",
     "source": [
-      "print(__doc__)\n\nimport itertools\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nfrom sklearn import svm, datasets\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import confusion_matrix\n\n# import some data to play with\niris = datasets.load_iris()\nX = iris.data\ny = iris.target\nclass_names = iris.target_names\n\n# Split the data into a training set and a test set\nX_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)\n\n# Run classifier, using a model that is too regularized (C too low) to see\n# the impact on the results\nclassifier = svm.SVC(kernel='linear', C=0.01)\ny_pred = classifier.fit(X_train, y_train).predict(X_test)\n\n\ndef plot_confusion_matrix(cm, classes,\n normalize=False,\n title='Confusion matrix',\n cmap=plt.cm.Blues):\n \"\"\"\n This function prints and plots the confusion matrix.\n Normalization can be applied by setting `normalize=True`.\n \"\"\"\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title)\n plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=45)\n plt.yticks(tick_marks, classes)\n\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n\n print(cm)\n\n thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, cm[i, j],\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n\n plt.tight_layout()\n plt.ylabel('True label')\n plt.xlabel('Predicted label')\n\n# Compute confusion matrix\ncnf_matrix = confusion_matrix(y_test, y_pred)\nnp.set_printoptions(precision=2)\n\n# Plot non-normalized confusion matrix\nplt.figure()\nplot_confusion_matrix(cnf_matrix, classes=class_names,\n title='Confusion matrix, without normalization')\n\n# Plot normalized confusion matrix\nplt.figure()\nplot_confusion_matrix(cnf_matrix, classes=class_names, normalize=True,\n title='Normalized confusion matrix')\n\nplt.show()"
+      "print(__doc__)\n\nimport itertools\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nfrom sklearn import svm, datasets\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import confusion_matrix\n\n# import some data to play with\niris = datasets.load_iris()\nX = iris.data\ny = iris.target\nclass_names = iris.target_names\n\n# Split the data into a training set and a test set\nX_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)\n\n# Run classifier, using a model that is too regularized (C too low) to see\n# the impact on the results\nclassifier = svm.SVC(kernel='linear', C=0.01)\ny_pred = classifier.fit(X_train, y_train).predict(X_test)\n\n\ndef plot_confusion_matrix(cm, classes,\n normalize=False,\n title='Confusion matrix',\n cmap=plt.cm.Blues):\n \"\"\"\n This function prints and plots the confusion matrix.\n Normalization can be applied by setting `normalize=True`.\n \"\"\"\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n\n print(cm)\n\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title)\n plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=45)\n plt.yticks(tick_marks, classes)\n\n fmt = '.2f' if normalize else 'd'\n thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, format(cm[i, j], fmt),\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n\n plt.tight_layout()\n plt.ylabel('True label')\n plt.xlabel('Predicted label')\n\n# Compute confusion matrix\ncnf_matrix = confusion_matrix(y_test, y_pred)\nnp.set_printoptions(precision=2)\n\n# Plot non-normalized confusion matrix\nplt.figure()\nplot_confusion_matrix(cnf_matrix, classes=class_names,\n title='Confusion matrix, without normalization')\n\n# Plot normalized confusion matrix\nplt.figure()\nplot_confusion_matrix(cnf_matrix, classes=class_names, normalize=True,\n title='Normalized confusion matrix')\n\nplt.show()"
     ],
     "outputs": [],
     "metadata": {

dev/_downloads/plot_confusion_matrix.py

Lines changed: 9 additions & 8 deletions
@@ -57,13 +57,6 @@ def plot_confusion_matrix(cm, classes,
     This function prints and plots the confusion matrix.
     Normalization can be applied by setting `normalize=True`.
     """
-    plt.imshow(cm, interpolation='nearest', cmap=cmap)
-    plt.title(title)
-    plt.colorbar()
-    tick_marks = np.arange(len(classes))
-    plt.xticks(tick_marks, classes, rotation=45)
-    plt.yticks(tick_marks, classes)
-
     if normalize:
         cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
         print("Normalized confusion matrix")
@@ -72,9 +65,17 @@ def plot_confusion_matrix(cm, classes,

     print(cm)

+    plt.imshow(cm, interpolation='nearest', cmap=cmap)
+    plt.title(title)
+    plt.colorbar()
+    tick_marks = np.arange(len(classes))
+    plt.xticks(tick_marks, classes, rotation=45)
+    plt.yticks(tick_marks, classes)
+
+    fmt = '.2f' if normalize else 'd'
     thresh = cm.max() / 2.
     for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
-        plt.text(j, i, cm[i, j],
+        plt.text(j, i, format(cm[i, j], fmt),
                  horizontalalignment="center",
                  color="white" if cm[i, j] > thresh else "black")

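This reordering is the substance of the change: previously plt.imshow ran before the normalize branch, so the heatmap always displayed raw counts even when normalize=True (only the printed matrix was normalized), and the cell annotations were unformatted. Plotting after normalization and formatting each cell with fmt = '.2f' if normalize else 'd' keeps the image, the printed matrix, and the annotations consistent. A minimal standalone sketch of the corrected order, using a made-up 2x2 count matrix rather than the example's iris data:

    import numpy as np
    import matplotlib.pyplot as plt

    cm = np.array([[13, 0], [2, 10]])  # hypothetical raw confusion counts
    normalize = True

    # Normalize *before* plotting so the image matches the printed values
    if normalize:
        cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
    print(cm)

    plt.imshow(cm, interpolation='nearest', cmap=plt.cm.Blues)
    plt.colorbar()

    # '.2f' for normalized floats, 'd' for integer counts
    fmt = '.2f' if normalize else 'd'
    thresh = cm.max() / 2.
    for i in range(cm.shape[0]):
        for j in range(cm.shape[1]):
            plt.text(j, i, format(cm[i, j], fmt),
                     horizontalalignment="center",
                     color="white" if cm[i, j] > thresh else "black")
    plt.show()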

dev/_downloads/scikit-learn-docs.pdf

12.6 KB (binary file not shown)

dev/_sources/auto_examples/applications/plot_model_complexity_influence.txt

Lines changed: 14 additions & 14 deletions
@@ -226,53 +226,53 @@ main code
        learning_rate='optimal', loss='modified_huber', n_iter=5, n_jobs=1,
        penalty='elasticnet', power_t=0.5, random_state=None, shuffle=True,
        verbose=0, warm_start=False)
-Complexity: 4454 | Hamming Loss (Misclassification Ratio): 0.2501 | Pred. Time: 0.027280s
+Complexity: 4454 | Hamming Loss (Misclassification Ratio): 0.2501 | Pred. Time: 0.026130s

 Benchmarking SGDClassifier(alpha=0.001, average=False, class_weight=None, epsilon=0.1,
        eta0=0.0, fit_intercept=True, l1_ratio=0.5, learning_rate='optimal',
        loss='modified_huber', n_iter=5, n_jobs=1, penalty='elasticnet',
        power_t=0.5, random_state=None, shuffle=True, verbose=0,
        warm_start=False)
-Complexity: 1624 | Hamming Loss (Misclassification Ratio): 0.2923 | Pred. Time: 0.020358s
+Complexity: 1624 | Hamming Loss (Misclassification Ratio): 0.2923 | Pred. Time: 0.020463s

 Benchmarking SGDClassifier(alpha=0.001, average=False, class_weight=None, epsilon=0.1,
        eta0=0.0, fit_intercept=True, l1_ratio=0.75,
        learning_rate='optimal', loss='modified_huber', n_iter=5, n_jobs=1,
        penalty='elasticnet', power_t=0.5, random_state=None, shuffle=True,
        verbose=0, warm_start=False)
-Complexity: 873 | Hamming Loss (Misclassification Ratio): 0.3191 | Pred. Time: 0.016954s
+Complexity: 873 | Hamming Loss (Misclassification Ratio): 0.3191 | Pred. Time: 0.016073s

 Benchmarking SGDClassifier(alpha=0.001, average=False, class_weight=None, epsilon=0.1,
        eta0=0.0, fit_intercept=True, l1_ratio=0.9, learning_rate='optimal',
        loss='modified_huber', n_iter=5, n_jobs=1, penalty='elasticnet',
        power_t=0.5, random_state=None, shuffle=True, verbose=0,
        warm_start=False)
-Complexity: 655 | Hamming Loss (Misclassification Ratio): 0.3252 | Pred. Time: 0.014675s
+Complexity: 655 | Hamming Loss (Misclassification Ratio): 0.3252 | Pred. Time: 0.014042s

 Benchmarking NuSVR(C=1000.0, cache_size=200, coef0=0.0, degree=3, gamma=3.0517578125e-05,
        kernel='rbf', max_iter=-1, nu=0.1, shrinking=True, tol=0.001,
        verbose=False)
-Complexity: 69 | MSE: 31.8133 | Pred. Time: 0.000371s
+Complexity: 69 | MSE: 31.8133 | Pred. Time: 0.000365s

 Benchmarking NuSVR(C=1000.0, cache_size=200, coef0=0.0, degree=3, gamma=3.0517578125e-05,
        kernel='rbf', max_iter=-1, nu=0.25, shrinking=True, tol=0.001,
        verbose=False)
-Complexity: 136 | MSE: 25.6140 | Pred. Time: 0.000650s
+Complexity: 136 | MSE: 25.6140 | Pred. Time: 0.000654s

 Benchmarking NuSVR(C=1000.0, cache_size=200, coef0=0.0, degree=3, gamma=3.0517578125e-05,
        kernel='rbf', max_iter=-1, nu=0.5, shrinking=True, tol=0.001,
        verbose=False)
-Complexity: 243 | MSE: 22.3315 | Pred. Time: 0.001114s
+Complexity: 243 | MSE: 22.3315 | Pred. Time: 0.001113s

 Benchmarking NuSVR(C=1000.0, cache_size=200, coef0=0.0, degree=3, gamma=3.0517578125e-05,
        kernel='rbf', max_iter=-1, nu=0.75, shrinking=True, tol=0.001,
        verbose=False)
-Complexity: 350 | MSE: 21.3679 | Pred. Time: 0.001567s
+Complexity: 350 | MSE: 21.3679 | Pred. Time: 0.001583s

 Benchmarking NuSVR(C=1000.0, cache_size=200, coef0=0.0, degree=3, gamma=3.0517578125e-05,
        kernel='rbf', max_iter=-1, nu=0.9, shrinking=True, tol=0.001,
        verbose=False)
-Complexity: 404 | MSE: 21.0915 | Pred. Time: 0.001803s
+Complexity: 404 | MSE: 21.0915 | Pred. Time: 0.001810s

 Benchmarking GradientBoostingRegressor(alpha=0.9, criterion='friedman_mse', init=None,
        learning_rate=0.1, loss='ls', max_depth=3, max_features=None,
@@ -288,7 +288,7 @@ main code
        min_samples_leaf=1, min_samples_split=2,
        min_weight_fraction_leaf=0.0, n_estimators=50, presort='auto',
        random_state=None, subsample=1.0, verbose=0, warm_start=False)
-Complexity: 50 | MSE: 8.3398 | Pred. Time: 0.000207s
+Complexity: 50 | MSE: 8.3398 | Pred. Time: 0.000199s

 Benchmarking GradientBoostingRegressor(alpha=0.9, criterion='friedman_mse', init=None,
        learning_rate=0.1, loss='ls', max_depth=3, max_features=None,
@@ -297,7 +297,7 @@ main code
        min_weight_fraction_leaf=0.0, n_estimators=100,
        presort='auto', random_state=None, subsample=1.0, verbose=0,
        warm_start=False)
-Complexity: 100 | MSE: 7.0096 | Pred. Time: 0.000282s
+Complexity: 100 | MSE: 7.0096 | Pred. Time: 0.000283s

 Benchmarking GradientBoostingRegressor(alpha=0.9, criterion='friedman_mse', init=None,
        learning_rate=0.1, loss='ls', max_depth=3, max_features=None,
@@ -306,7 +306,7 @@ main code
        min_weight_fraction_leaf=0.0, n_estimators=200,
        presort='auto', random_state=None, subsample=1.0, verbose=0,
        warm_start=False)
-Complexity: 200 | MSE: 6.1836 | Pred. Time: 0.000447s
+Complexity: 200 | MSE: 6.1836 | Pred. Time: 0.000451s

 Benchmarking GradientBoostingRegressor(alpha=0.9, criterion='friedman_mse', init=None,
        learning_rate=0.1, loss='ls', max_depth=3, max_features=None,
@@ -315,10 +315,10 @@ main code
        min_weight_fraction_leaf=0.0, n_estimators=500,
        presort='auto', random_state=None, subsample=1.0, verbose=0,
        warm_start=False)
-Complexity: 500 | MSE: 6.3426 | Pred. Time: 0.000977s
+Complexity: 500 | MSE: 6.3426 | Pred. Time: 0.000985s


-**Total running time of the script:** ( 0 minutes 25.380 seconds)
+**Total running time of the script:** ( 0 minutes 24.747 seconds)

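Only the measured prediction times and the total runtime differ between the two doc builds; the complexity and error figures are unchanged, as expected for timing jitter across runs. For context, the "Hamming Loss (Misclassification Ratio)" column above is computed with sklearn.metrics.hamming_loss, the fraction of labels predicted incorrectly. A minimal sketch with toy labels, not the benchmark's actual data:

    from sklearn.metrics import hamming_loss

    y_true = [0, 1, 1, 0, 1]
    y_pred = [0, 1, 0, 0, 0]

    # 2 mismatches out of 5 labels -> 0.4
    print(hamming_loss(y_true, y_pred))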
