Commit e2a2b4d

shota authored and agramfort committed
Fix typos (scikit-learn#6942)
1 parent aec7f25 commit e2a2b4d

10 files changed, +16 −16 lines changed

benchmarks/bench_plot_randomized_svd.py

Lines changed: 1 addition & 1 deletion
@@ -373,7 +373,7 @@ def bench_b(power_list):
     if enable_spectral_norm:
         title = "%s: spectral norm diff vs n power iteration" % (dataset_name)
         plot_power_iter_vs_s(power_iter, all_spectral, title)
-    title = "%s: frobenius norm diff vs n power iteration" % (dataset_name)
+    title = "%s: Frobenius norm diff vs n power iteration" % (dataset_name)
     plot_power_iter_vs_s(power_iter, all_frobenius, title)
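For context, a minimal standalone sketch (synthetic matrix and illustrative variable names, not taken from the benchmark) of the two error norms these plot titles refer to:

    import numpy as np
    from sklearn.utils.extmath import randomized_svd

    rng = np.random.RandomState(0)
    X = rng.randn(200, 100)
    U, s, Vt = randomized_svd(X, n_components=10, n_iter=4, random_state=0)
    residual = X - np.dot(U * s, Vt)
    frobenius_diff = np.linalg.norm(residual, ord='fro')  # Frobenius norm diff
    spectral_diff = np.linalg.norm(residual, ord=2)       # spectral norm diff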
examples/calibration/plot_calibration_multiclass.py

Lines changed: 1 addition & 1 deletion
@@ -145,7 +145,7 @@ class of an instance (red: class 1, green: class 2, blue: class 3).
                            zip(calibrated_classifier.calibrators_, p.T)]).T
 prediction /= prediction.sum(axis=1)[:, None]
 
-# Ploit modifications of calibrator
+# Plot modifications of calibrator
 for i in range(prediction.shape[0]):
     plt.arrow(p[i, 0], p[i, 1],
               prediction[i, 0] - p[i, 0], prediction[i, 1] - p[i, 1],
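The division by prediction.sum(axis=1)[:, None] renormalizes each row so the per-sample class probabilities sum to one again after calibration. A tiny standalone illustration (made-up numbers):

    import numpy as np

    prediction = np.array([[0.2, 0.5, 0.1],
                           [0.3, 0.3, 0.6]])
    prediction /= prediction.sum(axis=1)[:, None]
    print(prediction.sum(axis=1))  # [ 1.  1.]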

examples/ensemble/plot_ensemble_oob.py

Lines changed: 1 addition & 1 deletion
@@ -41,7 +41,7 @@
                              random_state=RANDOM_STATE)
 
 # NOTE: Setting the `warm_start` construction parameter to `True` disables
-# support for paralellised ensembles but is necessary for tracking the OOB
+# support for parallelized ensembles but is necessary for tracking the OOB
 # error trajectory during training.
 ensemble_clfs = [
     ("RandomForestClassifier, max_features='sqrt'",

examples/hetero_feature_union.py

Lines changed: 1 addition & 1 deletion
@@ -165,7 +165,7 @@ def transform(self, posts):
     ('svc', SVC(kernel='linear')),
 ])
 
-# limit the list of categories to make running this exmaple faster.
+# limit the list of categories to make running this example faster.
 categories = ['alt.atheism', 'talk.religion.misc']
 train = fetch_20newsgroups(random_state=1,
                            subset='train',
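Restricting `categories` is what keeps the example quick: fetch_20newsgroups then loads only the two listed newsgroups instead of all twenty. A small usage sketch along the same lines:

    from sklearn.datasets import fetch_20newsgroups

    categories = ['alt.atheism', 'talk.religion.misc']
    train = fetch_20newsgroups(subset='train', categories=categories,
                               random_state=1)
    print(len(train.data))  # far fewer posts than the full corpus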

examples/linear_model/plot_ard.py

Lines changed: 1 addition & 1 deletion
@@ -39,7 +39,7 @@
 relevant_features = np.random.randint(0, n_features, 10)
 for i in relevant_features:
     w[i] = stats.norm.rvs(loc=0, scale=1. / np.sqrt(lambda_))
-# Create noite with a precision alpha of 50.
+# Create noise with a precision alpha of 50.
 alpha_ = 50.
 noise = stats.norm.rvs(loc=0, scale=1. / np.sqrt(alpha_), size=n_samples)
 # Create the target
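The corrected comment relies on precision being the inverse variance: a Gaussian with precision alpha has standard deviation 1 / sqrt(alpha). A quick standalone check (synthetic, not part of the example):

    import numpy as np
    from scipy import stats

    alpha_ = 50.
    noise = stats.norm.rvs(loc=0, scale=1. / np.sqrt(alpha_), size=100000)
    print(noise.var())  # close to 1 / alpha_ = 0.02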

examples/linear_model/plot_lasso_coordinate_descent_path.py

Lines changed: 1 addition & 1 deletion
@@ -40,7 +40,7 @@
 alphas_enet, coefs_enet, _ = enet_path(
     X, y, eps=eps, l1_ratio=0.8, fit_intercept=False)
 
-print("Computing regularization path using the positve elastic net...")
+print("Computing regularization path using the positive elastic net...")
 alphas_positive_enet, coefs_positive_enet, _ = enet_path(
     X, y, eps=eps, l1_ratio=0.8, positive=True, fit_intercept=False)
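With positive=True, enet_path constrains every coefficient along the regularization path to be non-negative. A compact sketch of the call (illustrative data; the keyword arguments mirror the example above):

    from sklearn.datasets import make_regression
    from sklearn.linear_model import enet_path

    X, y = make_regression(n_samples=50, n_features=10, random_state=0)
    alphas, coefs, _ = enet_path(X, y, eps=5e-3, l1_ratio=0.8, positive=True,
                                 fit_intercept=False)
    print((coefs >= 0).all())  # True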

sklearn/linear_model/randomized_l1.py

Lines changed: 2 additions & 2 deletions
@@ -546,7 +546,7 @@ def _lasso_stability_path(X, y, mask, weights, eps):
                              alpha_min=alpha_min)
     # Scale alpha by alpha_max
     alphas /= alphas[0]
-    # Sort alphas in assending order
+    # Sort alphas in ascending order
     alphas = alphas[::-1]
     coefs = coefs[:, ::-1]
     # Get rid of the alphas that are too small

@@ -564,7 +564,7 @@ def lasso_stability_path(X, y, scaling=0.5, random_state=None,
                          sample_fraction=0.75,
                          eps=4 * np.finfo(np.float).eps, n_jobs=1,
                          verbose=False):
-    """Stabiliy path based on randomized Lasso estimates
+    """Stability path based on randomized Lasso estimates
 
     Read more in the :ref:`User Guide <randomized_l1>`.
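The corrected comment matches what the surrounding code does: lasso_path returns alphas in decreasing order, so they are first scaled by the largest value (alpha_max) and then reversed into ascending order. A toy illustration:

    import numpy as np

    alphas = np.array([4., 2., 1., .5])  # as returned: decreasing
    alphas /= alphas[0]                  # scale by alpha_max
    alphas = alphas[::-1]                # ascending order
    print(alphas)                        # [ 0.125  0.25   0.5    1.   ]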

sklearn/linear_model/sag.py

Lines changed: 2 additions & 2 deletions
@@ -110,10 +110,10 @@ def sag_solver(X, y, sample_weight=None, loss='log', alpha=1.,
 
     max_iter: int, optional
         The max number of passes over the training data if the stopping
-        criterea is not reached. Defaults to 1000.
+        criteria is not reached. Defaults to 1000.
 
     tol: double, optional
-        The stopping criterea for the weights. The iterations will stop when
+        The stopping criteria for the weights. The iterations will stop when
         max(change in weights) / max(weights) < tol. Defaults to .001
 
     verbose: integer, optional
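The docstring spells out the stopping rule exactly: iterate until max(change in weights) / max(weights) < tol. A schematic paraphrase in plain Python (illustrative helper, not the solver's actual code):

    import numpy as np

    def should_stop(weights, previous_weights, tol=.001):
        max_change = np.abs(weights - previous_weights).max()
        max_weight = np.abs(weights).max()
        return max_weight != 0 and max_change / max_weight < tol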

sklearn/linear_model/sag_fast.pyx

Lines changed: 2 additions & 2 deletions
@@ -264,9 +264,9 @@ def sag(SequentialDataset dataset,
     # the index (row number) of the current sample
     cdef int sample_ind
 
-    # the maximum change in weights, used to compute stopping criterea
+    # the maximum change in weights, used to compute stopping criteria
     cdef double max_change
-    # a holder variable for the max weight, used to compute stopping criterea
+    # a holder variable for the max weight, used to compute stopping criteria
     cdef double max_weight
 
     # the start time of the fit

sklearn/linear_model/sgd_fast.pyx

Lines changed: 4 additions & 4 deletions
@@ -482,8 +482,8 @@ def average_sgd(np.ndarray[double, ndim=1, mode='c'] weights,
         (1) constant, eta = eta0
         (2) optimal, eta = 1.0/(alpha * t).
         (3) inverse scaling, eta = eta0 / pow(t, power_t)
-        (4) Passive Agressive-I, eta = min(alpha, loss/norm(x))
-        (5) Passive Agressive-II, eta = 1.0 / (norm(x) + 0.5*alpha)
+        (4) Passive Aggressive-I, eta = min(alpha, loss/norm(x))
+        (5) Passive Aggressive-II, eta = 1.0 / (norm(x) + 0.5*alpha)
     eta0 : double
         The initial learning rate.
     power_t : double

@@ -503,9 +503,9 @@ def average_sgd(np.ndarray[double, ndim=1, mode='c'] weights,
     intercept : float
         The fitted intercept term.
     average_weights : array shape=[n_features]
-        The averaged weights accross iterations
+        The averaged weights across iterations
     average_intercept : float
-        The averaged intercept accross iterations
+        The averaged intercept across iterations
     """
     return _plain_sgd(weights,
                       intercept,
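The five learning-rate schedules listed in this docstring are easy to state in plain Python. A paraphrase of the formulas above (illustrative function and schedule names, not the Cython implementation):

    import numpy as np

    def learning_rate(schedule, eta0, alpha, t, power_t, loss=0., x=None):
        if schedule == 'constant':
            return eta0                                      # (1)
        if schedule == 'optimal':
            return 1.0 / (alpha * t)                         # (2)
        if schedule == 'invscaling':
            return eta0 / pow(t, power_t)                    # (3)
        if schedule == 'pa1':                                # Passive Aggressive-I
            return min(alpha, loss / np.linalg.norm(x))      # (4)
        if schedule == 'pa2':                                # Passive Aggressive-II
            return 1.0 / (np.linalg.norm(x) + 0.5 * alpha)   # (5)
        raise ValueError("unknown schedule: %s" % schedule)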
