
[MRG] Simplify super() calls #12812

Merged
merged 14 commits into from
Jan 10, 2019
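
This PR replaces the Python 2 compatible two-argument form super(ClassName, self) with the zero-argument super() form available since Python 3, presumably following scikit-learn's drop of Python 2 support. A minimal sketch of the equivalence, using a hypothetical Base/Child pair rather than classes from the diff:

    class Base:
        def __init__(self, n_clusters):
            self.n_clusters = n_clusters


    class Child(Base):
        def __init__(self, n_clusters=3):
            # Python 2 compatible spelling: name the class and instance explicitly.
            #   super(Child, self).__init__(n_clusters)
            # Python 3 zero-argument spelling: the compiler supplies the enclosing
            # class (via the implicit __class__ cell) and the current instance.
            super().__init__(n_clusters)


    print(Child().n_clusters)  # prints: 3

Both spellings resolve the same entry in Child's method resolution order; only the boilerplate differs.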
2 changes: 1 addition & 1 deletion doc/modules/feature_extraction.rst
@@ -961,7 +961,7 @@ Some tips and tricks:
 ...
 >>> class CustomVectorizer(CountVectorizer):
 ...     def build_tokenizer(self):
-...         tokenize = super(CustomVectorizer, self).build_tokenizer()
+...         tokenize = super().build_tokenizer()
 ...         return lambda doc: list(to_british(tokenize(doc)))
 ...
 >>> print(CustomVectorizer().build_analyzer()(u"color colour"))  # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
2 changes: 1 addition & 1 deletion examples/bicluster/plot_bicluster_newsgroups.py
@@ -51,7 +51,7 @@ def number_normalizer(tokens):

 class NumberNormalizingVectorizer(TfidfVectorizer):
     def build_tokenizer(self):
-        tokenize = super(NumberNormalizingVectorizer, self).build_tokenizer()
+        tokenize = super().build_tokenizer()
         return lambda doc: list(number_normalizer(tokenize(doc)))


4 changes: 2 additions & 2 deletions sklearn/base.py
@@ -244,7 +244,7 @@ def __repr__(self):

     def __getstate__(self):
         try:
-            state = super(BaseEstimator, self).__getstate__()
+            state = super().__getstate__()
         except AttributeError:
             state = self.__dict__.copy()

@@ -264,7 +264,7 @@ def __setstate__(self, state):
                     self.__class__.__name__, pickle_version, __version__),
                 UserWarning)
         try:
-            super(BaseEstimator, self).__setstate__(state)
+            super().__setstate__(state)
         except AttributeError:
             self.__dict__.update(state)

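A caveat worth keeping in mind here (a general Python 3 note, not something raised in this diff): zero-argument super() only works in functions compiled inside a class body, because that is where the compiler creates the implicit __class__ cell. A function attached to a class after the fact still needs the explicit two-argument form. A small sketch with hypothetical classes A and B:

    class A:
        def greet(self):
            return "A"


    class B(A):
        def greet(self):
            # Defined in a class body, so zero-argument super() works here.
            return super().greet() + "B"


    def greet_patched(self):
        # Defined outside any class body: no __class__ cell exists, so a
        # zero-argument super() here would raise RuntimeError. The explicit
        # two-argument form still resolves correctly.
        return super(B, self).greet() + " (patched)"


    B.greet = greet_patched
    print(B().greet())  # prints: A (patched)

None of the call sites touched by this PR are of that monkey-patched kind, which is why the blanket simplification is safe.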
34 changes: 17 additions & 17 deletions sklearn/cluster/bicluster.py
@@ -281,14 +281,14 @@ class SpectralCoclustering(BaseSpectral):
     def __init__(self, n_clusters=3, svd_method='randomized',
                  n_svd_vecs=None, mini_batch=False, init='k-means++',
                  n_init=10, n_jobs=None, random_state=None):
-        super(SpectralCoclustering, self).__init__(n_clusters,
-                                                   svd_method,
-                                                   n_svd_vecs,
-                                                   mini_batch,
-                                                   init,
-                                                   n_init,
-                                                   n_jobs,
-                                                   random_state)
+        super().__init__(n_clusters,
+                         svd_method,
+                         n_svd_vecs,
+                         mini_batch,
+                         init,
+                         n_init,
+                         n_jobs,
+                         random_state)

     def _fit(self, X):
         normalized_data, row_diag, col_diag = _scale_normalize(X)
@@ -429,20 +429,20 @@ def __init__(self, n_clusters=3, method='bistochastic',
                  n_components=6, n_best=3, svd_method='randomized',
                  n_svd_vecs=None, mini_batch=False, init='k-means++',
                  n_init=10, n_jobs=None, random_state=None):
-        super(SpectralBiclustering, self).__init__(n_clusters,
-                                                   svd_method,
-                                                   n_svd_vecs,
-                                                   mini_batch,
-                                                   init,
-                                                   n_init,
-                                                   n_jobs,
-                                                   random_state)
+        super().__init__(n_clusters,
+                         svd_method,
+                         n_svd_vecs,
+                         mini_batch,
+                         init,
+                         n_init,
+                         n_jobs,
+                         random_state)
         self.method = method
         self.n_components = n_components
         self.n_best = n_best

     def _check_parameters(self):
-        super(SpectralBiclustering, self)._check_parameters()
+        super()._check_parameters()
         legal_methods = ('bistochastic', 'scale', 'log')
         if self.method not in legal_methods:
             raise ValueError("Unknown method: '{0}'. method must be"
2 changes: 1 addition & 1 deletion sklearn/cluster/hierarchical.py
@@ -929,7 +929,7 @@ def __init__(self, n_clusters=2, affinity="euclidean",
                  memory=None,
                  connectivity=None, compute_full_tree='auto',
                  linkage='ward', pooling_func=np.mean):
-        super(FeatureAgglomeration, self).__init__(
+        super().__init__(
             n_clusters=n_clusters, memory=memory, connectivity=connectivity,
             compute_full_tree=compute_full_tree, linkage=linkage,
             affinity=affinity)
2 changes: 1 addition & 1 deletion sklearn/cluster/k_means_.py
@@ -1458,7 +1458,7 @@ def __init__(self, n_clusters=8, init='k-means++', max_iter=100,
                  random_state=None, tol=0.0, max_no_improvement=10,
                  init_size=None, n_init=3, reassignment_ratio=0.01):

-        super(MiniBatchKMeans, self).__init__(
+        super().__init__(
             n_clusters=n_clusters, init=init, max_iter=max_iter,
             verbose=verbose, random_state=random_state, tol=tol, n_init=n_init)

4 changes: 2 additions & 2 deletions sklearn/compose/tests/test_target.py
@@ -246,11 +246,11 @@ class DummyCheckerListRegressor(DummyRegressor):

     def fit(self, X, y, sample_weight=None):
         assert isinstance(X, list)
-        return super(DummyCheckerListRegressor, self).fit(X, y, sample_weight)
+        return super().fit(X, y, sample_weight)

     def predict(self, X):
         assert isinstance(X, list)
-        return super(DummyCheckerListRegressor, self).predict(X)
+        return super().predict(X)


 def test_transform_target_regressor_ensure_y_array():
4 changes: 2 additions & 2 deletions sklearn/covariance/elliptic_envelope.py
@@ -88,7 +88,7 @@ class EllipticEnvelope(MinCovDet, OutlierMixin):
     def __init__(self, store_precision=True, assume_centered=False,
                  support_fraction=None, contamination=0.1,
                  random_state=None):
-        super(EllipticEnvelope, self).__init__(
+        super().__init__(
             store_precision=store_precision,
             assume_centered=assume_centered,
             support_fraction=support_fraction,
@@ -107,7 +107,7 @@ def fit(self, X, y=None):
             not used, present for API consistency by convention.

         """
-        super(EllipticEnvelope, self).fit(X)
+        super().fit(X)
         self.offset_ = np.percentile(-self.dist_, 100. * self.contamination)
         return self

4 changes: 2 additions & 2 deletions sklearn/covariance/graph_lasso_.py
@@ -337,7 +337,7 @@ class GraphicalLasso(EmpiricalCovariance):

     def __init__(self, alpha=.01, mode='cd', tol=1e-4, enet_tol=1e-4,
                  max_iter=100, verbose=False, assume_centered=False):
-        super(GraphicalLasso, self).__init__(assume_centered=assume_centered)
+        super().__init__(assume_centered=assume_centered)
         self.alpha = alpha
         self.mode = mode
         self.tol = tol
@@ -581,7 +581,7 @@ class GraphicalLassoCV(GraphicalLasso):
     def __init__(self, alphas=4, n_refinements=4, cv='warn', tol=1e-4,
                  enet_tol=1e-4, max_iter=100, mode='cd', n_jobs=None,
                  verbose=False, assume_centered=False):
-        super(GraphicalLassoCV, self).__init__(
+        super().__init__(
             mode=mode, tol=tol, verbose=verbose, enet_tol=enet_tol,
             max_iter=max_iter, assume_centered=assume_centered)
         self.alphas = alphas
8 changes: 4 additions & 4 deletions sklearn/covariance/shrunk_covariance_.py
@@ -126,8 +126,8 @@ class ShrunkCovariance(EmpiricalCovariance):
     """
     def __init__(self, store_precision=True, assume_centered=False,
                  shrinkage=0.1):
-        super(ShrunkCovariance, self).__init__(store_precision=store_precision,
-                                               assume_centered=assume_centered)
+        super().__init__(store_precision=store_precision,
+                         assume_centered=assume_centered)
         self.shrinkage = shrinkage

     def fit(self, X, y=None):
@@ -401,8 +401,8 @@ class LedoitWolf(EmpiricalCovariance):
     """
     def __init__(self, store_precision=True, assume_centered=False,
                  block_size=1000):
-        super(LedoitWolf, self).__init__(store_precision=store_precision,
-                                         assume_centered=assume_centered)
+        super().__init__(store_precision=store_precision,
+                         assume_centered=assume_centered)
         self.block_size = block_size

     def fit(self, X, y=None):
8 changes: 4 additions & 4 deletions sklearn/cross_decomposition/cca_.py
@@ -101,7 +101,7 @@ class CCA(_PLS):

     def __init__(self, n_components=2, scale=True,
                  max_iter=500, tol=1e-06, copy=True):
-        super(CCA, self).__init__(n_components=n_components, scale=scale,
-                                  deflation_mode="canonical", mode="B",
-                                  norm_y_weights=True, algorithm="nipals",
-                                  max_iter=max_iter, tol=tol, copy=copy)
+        super().__init__(n_components=n_components, scale=scale,
+                         deflation_mode="canonical", mode="B",
+                         norm_y_weights=True, algorithm="nipals",
+                         max_iter=max_iter, tol=tol, copy=copy)
4 changes: 2 additions & 2 deletions sklearn/cross_decomposition/pls_.py
@@ -588,7 +588,7 @@ class PLSRegression(_PLS):

     def __init__(self, n_components=2, scale=True,
                  max_iter=500, tol=1e-06, copy=True):
-        super(PLSRegression, self).__init__(
+        super().__init__(
             n_components=n_components, scale=scale,
             deflation_mode="regression", mode="A",
             norm_y_weights=False, max_iter=max_iter, tol=tol,
@@ -734,7 +734,7 @@ class PLSCanonical(_PLS):

     def __init__(self, n_components=2, scale=True, algorithm="nipals",
                  max_iter=500, tol=1e-06, copy=True):
-        super(PLSCanonical, self).__init__(
+        super().__init__(
             n_components=n_components, scale=scale,
             deflation_mode="canonical", mode="A",
             norm_y_weights=True, algorithm=algorithm,
2 changes: 1 addition & 1 deletion sklearn/decomposition/fastica_.py
@@ -463,7 +463,7 @@ def my_g(x):
     def __init__(self, n_components=None, algorithm='parallel', whiten=True,
                  fun='logcosh', fun_args=None, max_iter=200, tol=1e-4,
                  w_init=None, random_state=None):
-        super(FastICA, self).__init__()
+        super().__init__()
         if max_iter < 1:
             raise ValueError("max_iter should be greater than 1, got "
                              "(max_iter={})".format(max_iter))
2 changes: 1 addition & 1 deletion sklearn/decomposition/sparse_pca.py
@@ -348,7 +348,7 @@ def __init__(self, n_components=None, alpha=1, ridge_alpha=0.01,
                  n_iter=100, callback=None, batch_size=3, verbose=False,
                  shuffle=True, n_jobs=None, method='lars', random_state=None,
                  normalize_components=False):
-        super(MiniBatchSparsePCA, self).__init__(
+        super().__init__(
             n_components=n_components, alpha=alpha, verbose=verbose,
             ridge_alpha=ridge_alpha, n_jobs=n_jobs, method=method,
             random_state=random_state,
4 changes: 2 additions & 2 deletions sklearn/dummy.py
@@ -349,7 +349,7 @@ def score(self, X, y, sample_weight=None):
         """
         if X is None:
             X = np.zeros(shape=(len(y), 1))
-        return super(DummyClassifier, self).score(X, y, sample_weight)
+        return super().score(X, y, sample_weight)


 class DummyRegressor(BaseEstimator, RegressorMixin):
@@ -548,4 +548,4 @@ def score(self, X, y, sample_weight=None):
         """
         if X is None:
             X = np.zeros(shape=(len(y), 1))
-        return super(DummyRegressor, self).score(X, y, sample_weight)
+        return super().score(X, y, sample_weight)
10 changes: 5 additions & 5 deletions sklearn/ensemble/bagging.py
@@ -202,7 +202,7 @@ def __init__(self,
                  n_jobs=None,
                  random_state=None,
                  verbose=0):
-        super(BaseBagging, self).__init__(
+        super().__init__(
             base_estimator=base_estimator,
             n_estimators=n_estimators)

@@ -565,7 +565,7 @@ def __init__(self,
                  random_state=None,
                  verbose=0):

-        super(BaggingClassifier, self).__init__(
+        super().__init__(
             base_estimator,
             n_estimators=n_estimators,
             max_samples=max_samples,
@@ -580,7 +580,7 @@ def __init__(self,

     def _validate_estimator(self):
         """Check the estimator and set the base_estimator_ attribute."""
-        super(BaggingClassifier, self)._validate_estimator(
+        super()._validate_estimator(
             default=DecisionTreeClassifier())

     def _set_oob_score(self, X, y):
@@ -935,7 +935,7 @@ def __init__(self,
                  n_jobs=None,
                  random_state=None,
                  verbose=0):
-        super(BaggingRegressor, self).__init__(
+        super().__init__(
             base_estimator,
             n_estimators=n_estimators,
             max_samples=max_samples,
@@ -990,7 +990,7 @@ def predict(self, X):

     def _validate_estimator(self):
         """Check the estimator and set the base_estimator_ attribute."""
-        super(BaggingRegressor, self)._validate_estimator(
+        super()._validate_estimator(
             default=DecisionTreeRegressor())

     def _set_oob_score(self, X, y):
19 changes: 9 additions & 10 deletions sklearn/ensemble/forest.py
@@ -141,7 +141,7 @@ def __init__(self,
                  verbose=0,
                  warm_start=False,
                  class_weight=None):
-        super(BaseForest, self).__init__(
+        super().__init__(
             base_estimator=base_estimator,
             n_estimators=n_estimators,
             estimator_params=estimator_params)
@@ -411,7 +411,7 @@ def __init__(self,
                  verbose=0,
                  warm_start=False,
                  class_weight=None):
-        super(ForestClassifier, self).__init__(
+        super().__init__(
             base_estimator,
             n_estimators=n_estimators,
             estimator_params=estimator_params,
@@ -651,7 +651,7 @@ def __init__(self,
                  random_state=None,
                  verbose=0,
                  warm_start=False):
-        super(ForestRegressor, self).__init__(
+        super().__init__(
             base_estimator,
             n_estimators=n_estimators,
             estimator_params=estimator_params,
@@ -1008,7 +1008,7 @@ def __init__(self,
                  verbose=0,
                  warm_start=False,
                  class_weight=None):
-        super(RandomForestClassifier, self).__init__(
+        super().__init__(
             base_estimator=DecisionTreeClassifier(),
             n_estimators=n_estimators,
             estimator_params=("criterion", "max_depth", "min_samples_split",
@@ -1267,7 +1267,7 @@ def __init__(self,
                  random_state=None,
                  verbose=0,
                  warm_start=False):
-        super(RandomForestRegressor, self).__init__(
+        super().__init__(
             base_estimator=DecisionTreeRegressor(),
             n_estimators=n_estimators,
             estimator_params=("criterion", "max_depth", "min_samples_split",
@@ -1523,7 +1523,7 @@ def __init__(self,
                  verbose=0,
                  warm_start=False,
                  class_weight=None):
-        super(ExtraTreesClassifier, self).__init__(
+        super().__init__(
             base_estimator=ExtraTreeClassifier(),
             n_estimators=n_estimators,
             estimator_params=("criterion", "max_depth", "min_samples_split",
@@ -1746,7 +1746,7 @@ def __init__(self,
                  random_state=None,
                  verbose=0,
                  warm_start=False):
-        super(ExtraTreesRegressor, self).__init__(
+        super().__init__(
             base_estimator=ExtraTreeRegressor(),
             n_estimators=n_estimators,
             estimator_params=("criterion", "max_depth", "min_samples_split",
@@ -1921,7 +1921,7 @@ def __init__(self,
                  random_state=None,
                  verbose=0,
                  warm_start=False):
-        super(RandomTreesEmbedding, self).__init__(
+        super().__init__(
             base_estimator=ExtraTreeRegressor(),
             n_estimators=n_estimators,
             estimator_params=("criterion", "max_depth", "min_samples_split",
@@ -2002,8 +2002,7 @@ def fit_transform(self, X, y=None, sample_weight=None):

         rnd = check_random_state(self.random_state)
         y = rnd.uniform(size=X.shape[0])
-        super(RandomTreesEmbedding, self).fit(X, y,
-                                              sample_weight=sample_weight)
+        super().fit(X, y, sample_weight=sample_weight)

         self.one_hot_encoder_ = OneHotEncoder(sparse=self.sparse_output,
                                               categories='auto')