From e8289e3f961bcae9b22f52b0939d9f2bb618cd10 Mon Sep 17 00:00:00 2001 From: Aditya Vyas Date: Mon, 15 Apr 2019 23:31:07 -0400 Subject: [PATCH 1/8] Replace absolute imports with relative ones --- sklearn/base.py | 2 +- sklearn/calibration.py | 2 +- sklearn/cluster/_hierarchical.pyx | 2 +- sklearn/cluster/_k_means.pyx | 2 +- sklearn/cluster/affinity_propagation_.py | 2 +- sklearn/datasets/openml.py | 2 +- sklearn/datasets/species_distributions.py | 4 +- sklearn/datasets/svmlight_format.py | 2 +- sklearn/ensemble/_gradient_boosting.pyx | 12 +-- sklearn/exceptions.py | 16 ++-- sklearn/externals/copy_joblib.sh | 2 +- .../externals/loky/backend/reduction.py | 4 +- .../joblib/externals/loky/backend/spawn.py | 2 +- .../externals/loky/cloudpickle_wrapper.py | 4 +- sklearn/feature_extraction/_hashing.pyx | 4 +- sklearn/gaussian_process/gpc.py | 14 ++-- sklearn/gaussian_process/gpr.py | 12 +-- sklearn/linear_model/cd_fast.pyx | 2 +- sklearn/linear_model/logistic.py | 4 +- sklearn/linear_model/ridge.py | 6 +- sklearn/linear_model/sgd_fast.pyx | 4 +- sklearn/manifold/_barnes_hut_tsne.pyx | 2 +- sklearn/metrics/cluster/bicluster.py | 4 +- .../cluster/expected_mutual_info_fast.pyx | 2 +- sklearn/metrics/cluster/unsupervised.py | 2 +- sklearn/model_selection/_split.py | 2 +- sklearn/neighbors/binary_tree.pxi | 2 +- sklearn/neighbors/quad_tree.pyx | 2 +- sklearn/setup.py | 2 +- sklearn/tree/_utils.pxd | 2 +- sklearn/tree/_utils.pyx | 2 +- sklearn/utils/_random.pyx | 2 +- sklearn/utils/estimator_checks.py | 84 +++++++++---------- sklearn/utils/random.py | 2 +- sklearn/utils/seq_dataset.pyx.tp | 2 +- sklearn/utils/stats.py | 2 +- 36 files changed, 109 insertions(+), 109 deletions(-) diff --git a/sklearn/base.py b/sklearn/base.py index 167baaf2b7ebd..d13619617bde4 100644 --- a/sklearn/base.py +++ b/sklearn/base.py @@ -12,7 +12,7 @@ import numpy as np from . 
import __version__ -from sklearn.utils import _IS_32BIT +from .utils import _IS_32BIT _DEFAULT_TAGS = { 'non_deterministic': False, diff --git a/sklearn/calibration.py b/sklearn/calibration.py index 4329eba81a883..434d30a557540 100644 --- a/sklearn/calibration.py +++ b/sklearn/calibration.py @@ -16,7 +16,7 @@ from scipy.special import expit from scipy.special import xlogy from scipy.optimize import fmin_bfgs -from sklearn.preprocessing import LabelEncoder +from .preprocessing import LabelEncoder from .base import BaseEstimator, ClassifierMixin, RegressorMixin, clone from .preprocessing import label_binarize, LabelBinarizer diff --git a/sklearn/cluster/_hierarchical.pyx b/sklearn/cluster/_hierarchical.pyx index 0a87b82229819..b5b22000b36ec 100644 --- a/sklearn/cluster/_hierarchical.pyx +++ b/sklearn/cluster/_hierarchical.pyx @@ -13,7 +13,7 @@ ctypedef np.int8_t INT8 np.import_array() -from sklearn.utils.fast_dict cimport IntFloatDict +from ..utils.fast_dict cimport IntFloatDict # C++ from cython.operator cimport dereference as deref, preincrement as inc diff --git a/sklearn/cluster/_k_means.pyx b/sklearn/cluster/_k_means.pyx index f81203d40bb4d..8a66f25065126 100644 --- a/sklearn/cluster/_k_means.pyx +++ b/sklearn/cluster/_k_means.pyx @@ -17,7 +17,7 @@ cimport numpy as np cimport cython from cython cimport floating -from sklearn.utils.sparsefuncs_fast import assign_rows_csr +from ..utils.sparsefuncs_fast import assign_rows_csr from ..utils._cython_blas cimport _dot ctypedef np.float64_t DOUBLE diff --git a/sklearn/cluster/affinity_propagation_.py b/sklearn/cluster/affinity_propagation_.py index 54065217660a0..1ee5213e0fefb 100644 --- a/sklearn/cluster/affinity_propagation_.py +++ b/sklearn/cluster/affinity_propagation_.py @@ -8,7 +8,7 @@ import numpy as np import warnings -from sklearn.exceptions import ConvergenceWarning +from ..exceptions import ConvergenceWarning from ..base import BaseEstimator, ClusterMixin from ..utils import as_float_array, check_array from ..utils.validation import check_is_fitted diff --git a/sklearn/datasets/openml.py b/sklearn/datasets/openml.py index d28c9f3120fac..2363a9a4689ca 100644 --- a/sklearn/datasets/openml.py +++ b/sklearn/datasets/openml.py @@ -14,7 +14,7 @@ import numpy as np import scipy.sparse -from sklearn.externals import _arff +from ..externals import _arff from .base import get_data_home from urllib.error import HTTPError from ..utils import Bunch diff --git a/sklearn/datasets/species_distributions.py b/sklearn/datasets/species_distributions.py index 1e1a52cd3c824..34e8251f9551f 100644 --- a/sklearn/datasets/species_distributions.py +++ b/sklearn/datasets/species_distributions.py @@ -49,8 +49,8 @@ from .base import _fetch_remote from .base import RemoteFileMetadata from ..utils import Bunch -from sklearn.datasets.base import _pkl_filepath -from sklearn.utils import _joblib +from .base import _pkl_filepath +from ..utils import _joblib # The original data can be found at: # https://biodiversityinformatics.amnh.org/open_source/maxent/samples.zip diff --git a/sklearn/datasets/svmlight_format.py b/sklearn/datasets/svmlight_format.py index 8cdc4b9760c71..90bd047e3cf80 100644 --- a/sklearn/datasets/svmlight_format.py +++ b/sklearn/datasets/svmlight_format.py @@ -140,7 +140,7 @@ def load_svmlight_file(f, n_features=None, dtype=np.float64, To use joblib.Memory to cache the svmlight file:: from joblib import Memory - from sklearn.datasets import load_svmlight_file + from .datasets import load_svmlight_file mem = Memory("./mycache") @mem.cache diff --git 
a/sklearn/ensemble/_gradient_boosting.pyx b/sklearn/ensemble/_gradient_boosting.pyx index 120de8b3abeb8..c46ed25a4c4dc 100644 --- a/sklearn/ensemble/_gradient_boosting.pyx +++ b/sklearn/ensemble/_gradient_boosting.pyx @@ -18,12 +18,12 @@ np.import_array() from scipy.sparse import issparse from scipy.sparse import csr_matrix -from sklearn.tree._tree cimport Node -from sklearn.tree._tree cimport Tree -from sklearn.tree._tree cimport DTYPE_t -from sklearn.tree._tree cimport SIZE_t -from sklearn.tree._tree cimport INT32_t -from sklearn.tree._utils cimport safe_realloc +from ..tree._tree cimport Node +from ..tree._tree cimport Tree +from ..tree._tree cimport DTYPE_t +from ..tree._tree cimport SIZE_t +from ..tree._tree cimport INT32_t +from ..tree._utils cimport safe_realloc ctypedef np.int32_t int32 ctypedef np.float64_t float64 diff --git a/sklearn/exceptions.py b/sklearn/exceptions.py index 9cf207e40fdd6..4468a464c727e 100644 --- a/sklearn/exceptions.py +++ b/sklearn/exceptions.py @@ -33,7 +33,7 @@ class NotFittedError(ValueError, AttributeError): NotFittedError('This LinearSVC instance is not fitted yet'...) .. versionchanged:: 0.18 - Moved from sklearn.utils.validation. + Moved from .utils.validation. """ @@ -41,7 +41,7 @@ class ChangedBehaviorWarning(UserWarning): """Warning class used to notify the user of any change in the behavior. .. versionchanged:: 0.18 - Moved from sklearn.base. + Moved from .base. """ @@ -49,7 +49,7 @@ class ConvergenceWarning(UserWarning): """Custom warning to capture convergence problems .. versionchanged:: 0.18 - Moved from sklearn.utils. + Moved from .utils. """ @@ -67,7 +67,7 @@ class DataConversionWarning(UserWarning): - passes an input whose shape can be interpreted ambiguously. .. versionchanged:: 0.18 - Moved from sklearn.utils.validation. + Moved from .utils.validation. """ @@ -81,7 +81,7 @@ class DataDimensionalityWarning(UserWarning): dimensionality of the problem will not be reduced. .. versionchanged:: 0.18 - Moved from sklearn.utils. + Moved from .utils. """ @@ -124,7 +124,7 @@ class FitFailedWarning(RuntimeWarning): Details: \\nValueError: Penalty term must be positive; got (C=-2)\\n'...) .. versionchanged:: 0.18 - Moved from sklearn.cross_validation. + Moved from .cross_validation. """ @@ -135,7 +135,7 @@ class NonBLASDotWarning(EfficiencyWarning): operation and hence the efficiency may be affected. .. versionchanged:: 0.18 - Moved from sklearn.utils.validation, extends EfficiencyWarning. + Moved from .utils.validation, extends EfficiencyWarning. """ @@ -152,5 +152,5 @@ class UndefinedMetricWarning(UserWarning): """Warning used when the metric is invalid .. versionchanged:: 0.18 - Moved from sklearn.base. + Moved from .base. """ diff --git a/sklearn/externals/copy_joblib.sh b/sklearn/externals/copy_joblib.sh index f2c4ab3ed359b..750ab459f942b 100755 --- a/sklearn/externals/copy_joblib.sh +++ b/sklearn/externals/copy_joblib.sh @@ -18,7 +18,7 @@ rm -rf $INSTALL_FOLDER # Needed to rewrite the doctests # Note: BSD sed -i needs an argument unders OSX # so first renaming to .bak and then deleting backup files -find joblib -name "*.py" | xargs sed -i.bak "s/from joblib/from sklearn.externals.joblib/" +find joblib -name "*.py" | xargs sed -i.bak "s/from joblib/from .joblib/" find joblib -name "*.bak" | xargs rm # Remove the tests folders to speed-up test time for scikit-learn. 
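Note (editorial illustration, not part of the patch series): the copy_joblib.sh hunk above edits the sed pass that rewrites doctest imports inside the vendored joblib copy. Below is a rough Python equivalent of that rewrite, a sketch only; it assumes it runs from the directory holding the vendored joblib/ tree (sklearn/externals in the real layout), and the prefix constant is taken from the script itself, not from any sklearn API.

import pathlib

# Patch 1/8 briefly switches this prefix to the relative ".joblib";
# patch 8/8 restores the absolute "sklearn.externals.joblib".
VENDORED_PREFIX = "sklearn.externals.joblib"

# Same substitution the script applies with sed -i.bak to every vendored .py file.
for path in pathlib.Path("joblib").rglob("*.py"):
    text = path.read_text()
    path.write_text(text.replace("from joblib", "from " + VENDORED_PREFIX))
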
diff --git a/sklearn/externals/joblib/externals/loky/backend/reduction.py b/sklearn/externals/joblib/externals/loky/backend/reduction.py index 2a8347590a67e..897d587221ed7 100644 --- a/sklearn/externals/joblib/externals/loky/backend/reduction.py +++ b/sklearn/externals/joblib/externals/loky/backend/reduction.py @@ -122,7 +122,7 @@ def _rebuild_partial(func, args, keywords): # global variable to change the pickler behavior try: - from sklearn.externals.joblib.externals import cloudpickle # noqa: F401 + from ....externals import cloudpickle # noqa: F401 DEFAULT_ENV = "cloudpickle" except ImportError: # If cloudpickle is not present, fallback to pickle @@ -149,7 +149,7 @@ def set_loky_pickler(loky_pickler=None): return if loky_pickler == "cloudpickle": - from sklearn.externals.joblib.externals.cloudpickle import CloudPickler as loky_pickler_cls + from ....externals.cloudpickle import CloudPickler as loky_pickler_cls else: try: from importlib import import_module diff --git a/sklearn/externals/joblib/externals/loky/backend/spawn.py b/sklearn/externals/joblib/externals/loky/backend/spawn.py index d92d189ddc193..12cae52f16040 100644 --- a/sklearn/externals/joblib/externals/loky/backend/spawn.py +++ b/sklearn/externals/joblib/externals/loky/backend/spawn.py @@ -12,7 +12,7 @@ import types from multiprocessing import process, util -from sklearn.externals.joblib.externals.loky.backend import context +from . import context if sys.platform != 'win32': diff --git a/sklearn/externals/joblib/externals/loky/cloudpickle_wrapper.py b/sklearn/externals/joblib/externals/loky/cloudpickle_wrapper.py index 9edf9240f21f4..31befe7bf052a 100644 --- a/sklearn/externals/joblib/externals/loky/cloudpickle_wrapper.py +++ b/sklearn/externals/joblib/externals/loky/cloudpickle_wrapper.py @@ -2,7 +2,7 @@ from functools import partial try: - from sklearn.externals.joblib.externals.cloudpickle import dumps, loads + from ..cloudpickle import dumps, loads cloudpickle = True except ImportError: cloudpickle = False @@ -93,7 +93,7 @@ def wrap_non_picklable_objects(obj, keep_wrapper=True): complex classes. """ if not cloudpickle: - raise ImportError("could not from sklearn.externals.joblib.externals import cloudpickle. Please install " + raise ImportError("could not from .. import cloudpickle. Please install " "cloudpickle to allow extended serialization. 
" "(`pip install cloudpickle`).") diff --git a/sklearn/feature_extraction/_hashing.pyx b/sklearn/feature_extraction/_hashing.pyx index 9fa5828a9e715..ad6fdfb852052 100644 --- a/sklearn/feature_extraction/_hashing.pyx +++ b/sklearn/feature_extraction/_hashing.pyx @@ -11,8 +11,8 @@ from libc.stdlib cimport abs cimport numpy as np import numpy as np -from sklearn.utils.murmurhash cimport murmurhash3_bytes_s32 -from sklearn.utils.fixes import sp_version +from ..utils.murmurhash cimport murmurhash3_bytes_s32 +from ..utils.fixes import sp_version np.import_array() diff --git a/sklearn/gaussian_process/gpc.py b/sklearn/gaussian_process/gpc.py index bca6bc506de32..b366c39d4522d 100644 --- a/sklearn/gaussian_process/gpc.py +++ b/sklearn/gaussian_process/gpc.py @@ -12,14 +12,14 @@ from scipy.optimize import fmin_l_bfgs_b from scipy.special import erf, expit -from sklearn.base import BaseEstimator, ClassifierMixin, clone -from sklearn.gaussian_process.kernels \ +from ..base import BaseEstimator, ClassifierMixin, clone +from .kernels \ import RBF, CompoundKernel, ConstantKernel as C -from sklearn.utils.validation import check_X_y, check_is_fitted, check_array -from sklearn.utils import check_random_state -from sklearn.preprocessing import LabelEncoder -from sklearn.multiclass import OneVsRestClassifier, OneVsOneClassifier -from sklearn.exceptions import ConvergenceWarning +from ..utils.validation import check_X_y, check_is_fitted, check_array +from ..utils import check_random_state +from ..preprocessing import LabelEncoder +from ..multiclass import OneVsRestClassifier, OneVsOneClassifier +from ..exceptions import ConvergenceWarning # Values required for approximating the logistic sigmoid by diff --git a/sklearn/gaussian_process/gpr.py b/sklearn/gaussian_process/gpr.py index c2c1884a50d7b..4762f8b1cc368 100644 --- a/sklearn/gaussian_process/gpr.py +++ b/sklearn/gaussian_process/gpr.py @@ -11,12 +11,12 @@ from scipy.linalg import cholesky, cho_solve, solve_triangular from scipy.optimize import fmin_l_bfgs_b -from sklearn.base import BaseEstimator, RegressorMixin, clone -from sklearn.base import MultiOutputMixin -from sklearn.gaussian_process.kernels import RBF, ConstantKernel as C -from sklearn.utils import check_random_state -from sklearn.utils.validation import check_X_y, check_array -from sklearn.exceptions import ConvergenceWarning +from ..base import BaseEstimator, RegressorMixin, clone +from ..base import MultiOutputMixin +from .kernels import RBF, ConstantKernel as C +from ..utils import check_random_state +from ..utils.validation import check_X_y, check_array +from ..exceptions import ConvergenceWarning class GaussianProcessRegressor(BaseEstimator, RegressorMixin, diff --git a/sklearn/linear_model/cd_fast.pyx b/sklearn/linear_model/cd_fast.pyx index 6c58c94eb5d43..ad0fa4277f3be 100644 --- a/sklearn/linear_model/cd_fast.pyx +++ b/sklearn/linear_model/cd_fast.pyx @@ -24,7 +24,7 @@ from ..utils._cython_blas cimport (_axpy, _dot, _asum, _ger, _gemv, _nrm2, from ..utils._cython_blas cimport RowMajor, ColMajor, Trans, NoTrans -from sklearn.utils cimport _random +from ..utils cimport _random ctypedef np.float64_t DOUBLE ctypedef np.uint32_t UINT32_t diff --git a/sklearn/linear_model/logistic.py b/sklearn/linear_model/logistic.py index be664d5b5c087..b419e09c135cd 100644 --- a/sklearn/linear_model/logistic.py +++ b/sklearn/linear_model/logistic.py @@ -1304,7 +1304,7 @@ class LogisticRegression(BaseEstimator, LinearClassifierMixin, Note that 'sag' and 'saga' fast convergence is only guaranteed on 
features with approximately the same scale. You can - preprocess the data with a scaler from sklearn.preprocessing. + preprocess the data with a scaler from ..preprocessing. .. versionadded:: 0.17 Stochastic Average Gradient descent solver. @@ -1758,7 +1758,7 @@ class LogisticRegressionCV(LogisticRegression, BaseEstimator, Note that 'sag' and 'saga' fast convergence is only guaranteed on features with approximately the same scale. You can preprocess the data - with a scaler from sklearn.preprocessing. + with a scaler from ..preprocessing. .. versionadded:: 0.17 Stochastic Average Gradient descent solver. diff --git a/sklearn/linear_model/ridge.py b/sklearn/linear_model/ridge.py index 9d96f48e93b9b..6373c9fd21f39 100644 --- a/sklearn/linear_model/ridge.py +++ b/sklearn/linear_model/ridge.py @@ -286,7 +286,7 @@ def ridge_regression(X, y, alpha, sample_weight=None, solver='auto', both n_samples and n_features are large. Note that 'sag' and 'saga' fast convergence is only guaranteed on features with approximately the same scale. You can preprocess the data with a - scaler from sklearn.preprocessing. + scaler from ..preprocessing. All last five solvers support both dense and sparse data. However, only @@ -650,7 +650,7 @@ class Ridge(_BaseRidge, RegressorMixin): both n_samples and n_features are large. Note that 'sag' and 'saga' fast convergence is only guaranteed on features with approximately the same scale. You can preprocess the data with a - scaler from sklearn.preprocessing. + scaler from ..preprocessing. All last five solvers support both dense and sparse data. However, only 'sag' and 'sparse_cg' supports sparse input when `fit_intercept` is @@ -809,7 +809,7 @@ class RidgeClassifier(LinearClassifierMixin, _BaseRidge): when both n_samples and n_features are large. Note that 'sag' and 'saga' fast convergence is only guaranteed on features with approximately the same scale. You can preprocess the data with a - scaler from sklearn.preprocessing. + scaler from ..preprocessing. .. versionadded:: 0.17 Stochastic Average Gradient descent solver. 
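Note (editorial illustration, not part of the patch series): the logistic.py and ridge.py docstrings touched above all carry the same advice, namely that the 'sag' and 'saga' solvers only converge quickly when features share roughly the same scale, so the data should first be preprocessed with a scaler from sklearn.preprocessing. A minimal sketch of that advice using the public scikit-learn API, with made-up data; any scaler would do in place of StandardScaler.

import numpy as np
from sklearn.preprocessing import StandardScaler
from sklearn.linear_model import Ridge

rng = np.random.RandomState(0)
# Three features on wildly different scales.
X = rng.uniform(size=(200, 3)) * np.array([1.0, 1e3, 1e6])
y = X @ np.array([2.0, 1e-3, 1e-6]) + rng.normal(size=200)

# Standardize first so 'saga' converges quickly, as the docstrings recommend.
X_scaled = StandardScaler().fit_transform(X)
model = Ridge(alpha=1.0, solver='saga').fit(X_scaled, y)
print(model.coef_)
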
diff --git a/sklearn/linear_model/sgd_fast.pyx b/sklearn/linear_model/sgd_fast.pyx index f5f3173a36d67..0156d0d41a199 100644 --- a/sklearn/linear_model/sgd_fast.pyx +++ b/sklearn/linear_model/sgd_fast.pyx @@ -21,8 +21,8 @@ from numpy.math cimport INFINITY cdef extern from "sgd_fast_helpers.h": bint skl_isfinite(double) nogil -from sklearn.utils.weight_vector cimport WeightVector -from sklearn.utils.seq_dataset cimport SequentialDataset64 as SequentialDataset +from ..utils.weight_vector cimport WeightVector +from ..utils.seq_dataset cimport SequentialDataset64 as SequentialDataset np.import_array() diff --git a/sklearn/manifold/_barnes_hut_tsne.pyx b/sklearn/manifold/_barnes_hut_tsne.pyx index a9885b1d3ed59..bc51da01f26ed 100644 --- a/sklearn/manifold/_barnes_hut_tsne.pyx +++ b/sklearn/manifold/_barnes_hut_tsne.pyx @@ -15,7 +15,7 @@ from libc.math cimport sqrt, log import numpy as np cimport numpy as np -from sklearn.neighbors.quad_tree cimport _QuadTree +from ..neighbors.quad_tree cimport _QuadTree cdef char* EMPTY_STRING = "" diff --git a/sklearn/metrics/cluster/bicluster.py b/sklearn/metrics/cluster/bicluster.py index be8379cb34bbb..515785f3e68f2 100644 --- a/sklearn/metrics/cluster/bicluster.py +++ b/sklearn/metrics/cluster/bicluster.py @@ -1,8 +1,8 @@ import numpy as np -from sklearn.utils.linear_assignment_ import linear_assignment -from sklearn.utils.validation import check_consistent_length, check_array +from ...utils.linear_assignment_ import linear_assignment +from ...utils.validation import check_consistent_length, check_array __all__ = ["consensus_score"] diff --git a/sklearn/metrics/cluster/expected_mutual_info_fast.pyx b/sklearn/metrics/cluster/expected_mutual_info_fast.pyx index a314abb10620d..9ed2c9a24b6c3 100644 --- a/sklearn/metrics/cluster/expected_mutual_info_fast.pyx +++ b/sklearn/metrics/cluster/expected_mutual_info_fast.pyx @@ -11,7 +11,7 @@ from scipy.special import gammaln import numpy as np cimport numpy as np cimport cython -from sklearn.utils.lgamma cimport lgamma +from ...utils.lgamma cimport lgamma np.import_array() ctypedef np.float64_t DOUBLE diff --git a/sklearn/metrics/cluster/unsupervised.py b/sklearn/metrics/cluster/unsupervised.py index 6f02fef0be57a..05206ab42a291 100644 --- a/sklearn/metrics/cluster/unsupervised.py +++ b/sklearn/metrics/cluster/unsupervised.py @@ -16,7 +16,7 @@ from ..pairwise import pairwise_distances_chunked from ..pairwise import pairwise_distances from ...preprocessing import LabelEncoder -from sklearn.utils import deprecated +from ...utils import deprecated def check_number_of_labels(n_labels, n_samples): diff --git a/sklearn/model_selection/_split.py b/sklearn/model_selection/_split.py index de8511f9922c1..e99c85fbf09ca 100644 --- a/sklearn/model_selection/_split.py +++ b/sklearn/model_selection/_split.py @@ -2056,7 +2056,7 @@ def check_cv(cv='warn', y=None, classifier=False): if not hasattr(cv, 'split') or isinstance(cv, str): if not isinstance(cv, Iterable) or isinstance(cv, str): raise ValueError("Expected cv as an integer, cross-validation " - "object (from sklearn.model_selection) " + "object (from .) " "or an iterable. Got %s." 
% cv) return _CVIterableWrapper(cv) diff --git a/sklearn/neighbors/binary_tree.pxi b/sklearn/neighbors/binary_tree.pxi index 9b18b1538dcd4..057f869fdefba 100755 --- a/sklearn/neighbors/binary_tree.pxi +++ b/sklearn/neighbors/binary_tree.pxi @@ -147,7 +147,7 @@ cimport numpy as np from libc.math cimport fabs, sqrt, exp, cos, pow, log from libc.stdlib cimport calloc, malloc, free from libc.string cimport memcpy -from sklearn.utils.lgamma cimport lgamma +from ..utils.lgamma cimport lgamma import numpy as np import warnings diff --git a/sklearn/neighbors/quad_tree.pyx b/sklearn/neighbors/quad_tree.pyx index b70d8f968a3b6..c2a8df44a1d80 100644 --- a/sklearn/neighbors/quad_tree.pyx +++ b/sklearn/neighbors/quad_tree.pyx @@ -12,7 +12,7 @@ from libc.stdlib cimport malloc, free from libc.string cimport memcpy from libc.stdio cimport printf -from sklearn.tree._utils cimport safe_realloc, sizet_ptr_to_ndarray +from ..tree._utils cimport safe_realloc, sizet_ptr_to_ndarray from ..utils import check_array import numpy as np diff --git a/sklearn/setup.py b/sklearn/setup.py index 80eea57fb6fbe..f714dbee68a29 100644 --- a/sklearn/setup.py +++ b/sklearn/setup.py @@ -1,6 +1,6 @@ import os -from sklearn._build_utils import maybe_cythonize_extensions +from ._build_utils import maybe_cythonize_extensions def configuration(parent_package='', top_path=None): diff --git a/sklearn/tree/_utils.pxd b/sklearn/tree/_utils.pxd index a861c233c7dbf..044b5d91d2b45 100644 --- a/sklearn/tree/_utils.pxd +++ b/sklearn/tree/_utils.pxd @@ -11,7 +11,7 @@ import numpy as np cimport numpy as np from ._tree cimport Node -from sklearn.neighbors.quad_tree cimport Cell +from ..neighbors.quad_tree cimport Cell ctypedef np.npy_float32 DTYPE_t # Type of X ctypedef np.npy_float64 DOUBLE_t # Type of y, sample_weight diff --git a/sklearn/tree/_utils.pyx b/sklearn/tree/_utils.pyx index 2771821a9f273..fdbd48e75f3a9 100644 --- a/sklearn/tree/_utils.pyx +++ b/sklearn/tree/_utils.pyx @@ -20,7 +20,7 @@ import numpy as np cimport numpy as np np.import_array() -from sklearn.utils cimport _random +from ..utils cimport _random # ============================================================================= # Helper functions diff --git a/sklearn/utils/_random.pyx b/sklearn/utils/_random.pyx index 1dcf924e3a90b..ad01a7b46a409 100644 --- a/sklearn/utils/_random.pyx +++ b/sklearn/utils/_random.pyx @@ -19,7 +19,7 @@ import numpy as np cimport numpy as np np.import_array() -from sklearn.utils import check_random_state +from . 
import check_random_state cdef UINT32_t DEFAULT_SEED = 1 diff --git a/sklearn/utils/estimator_checks.py b/sklearn/utils/estimator_checks.py index 73c98ea4685be..b0d1b20f57f83 100644 --- a/sklearn/utils/estimator_checks.py +++ b/sklearn/utils/estimator_checks.py @@ -11,51 +11,51 @@ from scipy import sparse from scipy.stats import rankdata -from sklearn.utils import IS_PYPY -from sklearn.utils import _joblib -from sklearn.utils.testing import assert_raises, _get_args -from sklearn.utils.testing import assert_raises_regex -from sklearn.utils.testing import assert_raise_message -from sklearn.utils.testing import assert_equal -from sklearn.utils.testing import assert_not_equal -from sklearn.utils.testing import assert_in -from sklearn.utils.testing import assert_array_equal -from sklearn.utils.testing import assert_array_almost_equal -from sklearn.utils.testing import assert_allclose -from sklearn.utils.testing import assert_allclose_dense_sparse -from sklearn.utils.testing import assert_warns_message -from sklearn.utils.testing import set_random_state -from sklearn.utils.testing import assert_greater -from sklearn.utils.testing import assert_greater_equal -from sklearn.utils.testing import SkipTest -from sklearn.utils.testing import ignore_warnings -from sklearn.utils.testing import assert_dict_equal -from sklearn.utils.testing import create_memmap_backed_data -from sklearn.utils import is_scalar_nan -from sklearn.discriminant_analysis import LinearDiscriminantAnalysis -from sklearn.linear_model import Ridge - - -from sklearn.base import (clone, ClusterMixin, is_classifier, is_regressor, +from . import IS_PYPY +from . import _joblib +from .testing import assert_raises, _get_args +from .testing import assert_raises_regex +from .testing import assert_raise_message +from .testing import assert_equal +from .testing import assert_not_equal +from .testing import assert_in +from .testing import assert_array_equal +from .testing import assert_array_almost_equal +from .testing import assert_allclose +from .testing import assert_allclose_dense_sparse +from .testing import assert_warns_message +from .testing import set_random_state +from .testing import assert_greater +from .testing import assert_greater_equal +from .testing import SkipTest +from .testing import ignore_warnings +from .testing import assert_dict_equal +from .testing import create_memmap_backed_data +from . 
import is_scalar_nan +from ..discriminant_analysis import LinearDiscriminantAnalysis +from ..linear_model import Ridge + + +from ..base import (clone, ClusterMixin, is_classifier, is_regressor, _DEFAULT_TAGS, RegressorMixin, is_outlier_detector) -from sklearn.metrics import accuracy_score, adjusted_rand_score, f1_score - -from sklearn.random_projection import BaseRandomProjection -from sklearn.feature_selection import SelectKBest -from sklearn.pipeline import make_pipeline -from sklearn.exceptions import DataConversionWarning -from sklearn.exceptions import SkipTestWarning -from sklearn.model_selection import train_test_split -from sklearn.model_selection import ShuffleSplit -from sklearn.model_selection._validation import _safe_split -from sklearn.metrics.pairwise import (rbf_kernel, linear_kernel, +from ..metrics import accuracy_score, adjusted_rand_score, f1_score + +from ..random_projection import BaseRandomProjection +from ..feature_selection import SelectKBest +from ..pipeline import make_pipeline +from ..exceptions import DataConversionWarning +from ..exceptions import SkipTestWarning +from ..model_selection import train_test_split +from ..model_selection import ShuffleSplit +from ..model_selection._validation import _safe_split +from ..metrics.pairwise import (rbf_kernel, linear_kernel, pairwise_distances) -from sklearn.utils import shuffle -from sklearn.utils.validation import has_fit_parameter, _num_samples -from sklearn.preprocessing import StandardScaler -from sklearn.datasets import load_iris, load_boston, make_blobs +from .import shuffle +from .validation import has_fit_parameter, _num_samples +from ..preprocessing import StandardScaler +from ..datasets import load_iris, load_boston, make_blobs BOSTON = None @@ -274,7 +274,7 @@ def check_estimator(Estimator): shapes, etc. Additional tests for classifiers, regressors, clustering or transformers will be run if the Estimator class inherits from the corresponding mixin - from sklearn.base. + from ..base. This test can be applied to classes or instances. Classes currently have some additional tests that related to construction, diff --git a/sklearn/utils/random.py b/sklearn/utils/random.py index 7efbba37d548e..7792602ab3746 100644 --- a/sklearn/utils/random.py +++ b/sklearn/utils/random.py @@ -5,7 +5,7 @@ import scipy.sparse as sp import array -from sklearn.utils import check_random_state +from . import check_random_state from ._random import sample_without_replacement __all__ = ['sample_without_replacement'] diff --git a/sklearn/utils/seq_dataset.pyx.tp b/sklearn/utils/seq_dataset.pyx.tp index c5540352bc050..f1b34c4c86bce 100644 --- a/sklearn/utils/seq_dataset.pyx.tp +++ b/sklearn/utils/seq_dataset.pyx.tp @@ -45,7 +45,7 @@ import numpy as np np.import_array() -from sklearn.utils cimport _random +from . cimport _random cdef class SequentialDataset{{name}}: """Base class for datasets with sequential data access. 
diff --git a/sklearn/utils/stats.py b/sklearn/utils/stats.py index ff770afa55ad6..5a8a136305179 100644 --- a/sklearn/utils/stats.py +++ b/sklearn/utils/stats.py @@ -1,6 +1,6 @@ import numpy as np -from sklearn.utils.extmath import stable_cumsum +from .extmath import stable_cumsum def _weighted_percentile(array, sample_weight, percentile=50): From 49e09ca824d3c5fe4d66c320aa6f3b96fe10ddfd Mon Sep 17 00:00:00 2001 From: Aditya Vyas Date: Tue, 16 Apr 2019 00:02:22 -0400 Subject: [PATCH 2/8] fix import error --- sklearn/setup.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sklearn/setup.py b/sklearn/setup.py index f714dbee68a29..012c8ecbd94a8 100644 --- a/sklearn/setup.py +++ b/sklearn/setup.py @@ -1,6 +1,6 @@ import os -from ._build_utils import maybe_cythonize_extensions +from _build_utils import maybe_cythonize_extensions def configuration(parent_package='', top_path=None): From 1be879d772b6aa38586433ffdd2e515bd904f8a7 Mon Sep 17 00:00:00 2001 From: Aditya Vyas Date: Tue, 16 Apr 2019 00:41:42 -0400 Subject: [PATCH 3/8] add a . before import --- sklearn/setup.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sklearn/setup.py b/sklearn/setup.py index 012c8ecbd94a8..f714dbee68a29 100644 --- a/sklearn/setup.py +++ b/sklearn/setup.py @@ -1,6 +1,6 @@ import os -from _build_utils import maybe_cythonize_extensions +from ._build_utils import maybe_cythonize_extensions def configuration(parent_package='', top_path=None): From fdb55571de8fd0b24e5fb3015edc1a0dd4193ba0 Mon Sep 17 00:00:00 2001 From: Aditya Vyas Date: Tue, 16 Apr 2019 11:53:59 -0400 Subject: [PATCH 4/8] revert changes in docstrings --- sklearn/exceptions.py | 16 ++++++++-------- sklearn/linear_model/logistic.py | 2 +- sklearn/linear_model/ridge.py | 6 +++--- 3 files changed, 12 insertions(+), 12 deletions(-) diff --git a/sklearn/exceptions.py b/sklearn/exceptions.py index 4468a464c727e..9cf207e40fdd6 100644 --- a/sklearn/exceptions.py +++ b/sklearn/exceptions.py @@ -33,7 +33,7 @@ class NotFittedError(ValueError, AttributeError): NotFittedError('This LinearSVC instance is not fitted yet'...) .. versionchanged:: 0.18 - Moved from .utils.validation. + Moved from sklearn.utils.validation. """ @@ -41,7 +41,7 @@ class ChangedBehaviorWarning(UserWarning): """Warning class used to notify the user of any change in the behavior. .. versionchanged:: 0.18 - Moved from .base. + Moved from sklearn.base. """ @@ -49,7 +49,7 @@ class ConvergenceWarning(UserWarning): """Custom warning to capture convergence problems .. versionchanged:: 0.18 - Moved from .utils. + Moved from sklearn.utils. """ @@ -67,7 +67,7 @@ class DataConversionWarning(UserWarning): - passes an input whose shape can be interpreted ambiguously. .. versionchanged:: 0.18 - Moved from .utils.validation. + Moved from sklearn.utils.validation. """ @@ -81,7 +81,7 @@ class DataDimensionalityWarning(UserWarning): dimensionality of the problem will not be reduced. .. versionchanged:: 0.18 - Moved from .utils. + Moved from sklearn.utils. """ @@ -124,7 +124,7 @@ class FitFailedWarning(RuntimeWarning): Details: \\nValueError: Penalty term must be positive; got (C=-2)\\n'...) .. versionchanged:: 0.18 - Moved from .cross_validation. + Moved from sklearn.cross_validation. """ @@ -135,7 +135,7 @@ class NonBLASDotWarning(EfficiencyWarning): operation and hence the efficiency may be affected. .. versionchanged:: 0.18 - Moved from .utils.validation, extends EfficiencyWarning. + Moved from sklearn.utils.validation, extends EfficiencyWarning. 
""" @@ -152,5 +152,5 @@ class UndefinedMetricWarning(UserWarning): """Warning used when the metric is invalid .. versionchanged:: 0.18 - Moved from .base. + Moved from sklearn.base. """ diff --git a/sklearn/linear_model/logistic.py b/sklearn/linear_model/logistic.py index b419e09c135cd..c282f88dbce3a 100644 --- a/sklearn/linear_model/logistic.py +++ b/sklearn/linear_model/logistic.py @@ -1304,7 +1304,7 @@ class LogisticRegression(BaseEstimator, LinearClassifierMixin, Note that 'sag' and 'saga' fast convergence is only guaranteed on features with approximately the same scale. You can - preprocess the data with a scaler from ..preprocessing. + .preprocess the data with a scaler from sklearn.preprocessing. .. versionadded:: 0.17 Stochastic Average Gradient descent solver. diff --git a/sklearn/linear_model/ridge.py b/sklearn/linear_model/ridge.py index 6373c9fd21f39..9d96f48e93b9b 100644 --- a/sklearn/linear_model/ridge.py +++ b/sklearn/linear_model/ridge.py @@ -286,7 +286,7 @@ def ridge_regression(X, y, alpha, sample_weight=None, solver='auto', both n_samples and n_features are large. Note that 'sag' and 'saga' fast convergence is only guaranteed on features with approximately the same scale. You can preprocess the data with a - scaler from ..preprocessing. + scaler from sklearn.preprocessing. All last five solvers support both dense and sparse data. However, only @@ -650,7 +650,7 @@ class Ridge(_BaseRidge, RegressorMixin): both n_samples and n_features are large. Note that 'sag' and 'saga' fast convergence is only guaranteed on features with approximately the same scale. You can preprocess the data with a - scaler from ..preprocessing. + scaler from sklearn.preprocessing. All last five solvers support both dense and sparse data. However, only 'sag' and 'sparse_cg' supports sparse input when `fit_intercept` is @@ -809,7 +809,7 @@ class RidgeClassifier(LinearClassifierMixin, _BaseRidge): when both n_samples and n_features are large. Note that 'sag' and 'saga' fast convergence is only guaranteed on features with approximately the same scale. You can preprocess the data with a - scaler from ..preprocessing. + scaler from sklearn.preprocessing. .. versionadded:: 0.17 Stochastic Average Gradient descent solver. From 2f6ec3d71ca975939482092f089b59caff44cbef Mon Sep 17 00:00:00 2001 From: Aditya Vyas Date: Tue, 16 Apr 2019 13:06:48 -0400 Subject: [PATCH 5/8] remove . 
before _build_utils --- sklearn/setup.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sklearn/setup.py b/sklearn/setup.py index f714dbee68a29..012c8ecbd94a8 100644 --- a/sklearn/setup.py +++ b/sklearn/setup.py @@ -1,6 +1,6 @@ import os -from ._build_utils import maybe_cythonize_extensions +from _build_utils import maybe_cythonize_extensions def configuration(parent_package='', top_path=None): From 2d36952b35bd5ce189dc982579e12d8a0c4c0747 Mon Sep 17 00:00:00 2001 From: Aditya Vyas Date: Tue, 16 Apr 2019 16:04:10 -0400 Subject: [PATCH 6/8] remove relative imports from docstrings - 2 --- sklearn/linear_model/logistic.py | 4 ++-- sklearn/model_selection/_split.py | 2 +- sklearn/setup.py | 2 +- sklearn/utils/estimator_checks.py | 2 +- 4 files changed, 5 insertions(+), 5 deletions(-) diff --git a/sklearn/linear_model/logistic.py b/sklearn/linear_model/logistic.py index c282f88dbce3a..be664d5b5c087 100644 --- a/sklearn/linear_model/logistic.py +++ b/sklearn/linear_model/logistic.py @@ -1304,7 +1304,7 @@ class LogisticRegression(BaseEstimator, LinearClassifierMixin, Note that 'sag' and 'saga' fast convergence is only guaranteed on features with approximately the same scale. You can - .preprocess the data with a scaler from sklearn.preprocessing. + preprocess the data with a scaler from sklearn.preprocessing. .. versionadded:: 0.17 Stochastic Average Gradient descent solver. @@ -1758,7 +1758,7 @@ class LogisticRegressionCV(LogisticRegression, BaseEstimator, Note that 'sag' and 'saga' fast convergence is only guaranteed on features with approximately the same scale. You can preprocess the data - with a scaler from ..preprocessing. + with a scaler from sklearn.preprocessing. .. versionadded:: 0.17 Stochastic Average Gradient descent solver. diff --git a/sklearn/model_selection/_split.py b/sklearn/model_selection/_split.py index e99c85fbf09ca..de8511f9922c1 100644 --- a/sklearn/model_selection/_split.py +++ b/sklearn/model_selection/_split.py @@ -2056,7 +2056,7 @@ def check_cv(cv='warn', y=None, classifier=False): if not hasattr(cv, 'split') or isinstance(cv, str): if not isinstance(cv, Iterable) or isinstance(cv, str): raise ValueError("Expected cv as an integer, cross-validation " - "object (from .) " + "object (from sklearn.model_selection) " "or an iterable. Got %s." % cv) return _CVIterableWrapper(cv) diff --git a/sklearn/setup.py b/sklearn/setup.py index 012c8ecbd94a8..80eea57fb6fbe 100644 --- a/sklearn/setup.py +++ b/sklearn/setup.py @@ -1,6 +1,6 @@ import os -from _build_utils import maybe_cythonize_extensions +from sklearn._build_utils import maybe_cythonize_extensions def configuration(parent_package='', top_path=None): diff --git a/sklearn/utils/estimator_checks.py b/sklearn/utils/estimator_checks.py index b0d1b20f57f83..26360c1ef07c1 100644 --- a/sklearn/utils/estimator_checks.py +++ b/sklearn/utils/estimator_checks.py @@ -274,7 +274,7 @@ def check_estimator(Estimator): shapes, etc. Additional tests for classifiers, regressors, clustering or transformers will be run if the Estimator class inherits from the corresponding mixin - from ..base. + from sklearn.base. This test can be applied to classes or instances. 
Classes currently have some additional tests that related to construction, From 864062c1149dee33d1ebaba57bd25dd9a56edcc4 Mon Sep 17 00:00:00 2001 From: Aditya Vyas Date: Wed, 17 Apr 2019 11:43:36 -0400 Subject: [PATCH 7/8] revert import changes in joblib folder --- sklearn/externals/joblib/externals/loky/backend/reduction.py | 4 ++-- sklearn/externals/joblib/externals/loky/backend/spawn.py | 2 +- .../externals/joblib/externals/loky/cloudpickle_wrapper.py | 4 ++-- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/sklearn/externals/joblib/externals/loky/backend/reduction.py b/sklearn/externals/joblib/externals/loky/backend/reduction.py index 897d587221ed7..2a8347590a67e 100644 --- a/sklearn/externals/joblib/externals/loky/backend/reduction.py +++ b/sklearn/externals/joblib/externals/loky/backend/reduction.py @@ -122,7 +122,7 @@ def _rebuild_partial(func, args, keywords): # global variable to change the pickler behavior try: - from ....externals import cloudpickle # noqa: F401 + from sklearn.externals.joblib.externals import cloudpickle # noqa: F401 DEFAULT_ENV = "cloudpickle" except ImportError: # If cloudpickle is not present, fallback to pickle @@ -149,7 +149,7 @@ def set_loky_pickler(loky_pickler=None): return if loky_pickler == "cloudpickle": - from ....externals.cloudpickle import CloudPickler as loky_pickler_cls + from sklearn.externals.joblib.externals.cloudpickle import CloudPickler as loky_pickler_cls else: try: from importlib import import_module diff --git a/sklearn/externals/joblib/externals/loky/backend/spawn.py b/sklearn/externals/joblib/externals/loky/backend/spawn.py index 12cae52f16040..d92d189ddc193 100644 --- a/sklearn/externals/joblib/externals/loky/backend/spawn.py +++ b/sklearn/externals/joblib/externals/loky/backend/spawn.py @@ -12,7 +12,7 @@ import types from multiprocessing import process, util -from . import context +from sklearn.externals.joblib.externals.loky.backend import context if sys.platform != 'win32': diff --git a/sklearn/externals/joblib/externals/loky/cloudpickle_wrapper.py b/sklearn/externals/joblib/externals/loky/cloudpickle_wrapper.py index 31befe7bf052a..9edf9240f21f4 100644 --- a/sklearn/externals/joblib/externals/loky/cloudpickle_wrapper.py +++ b/sklearn/externals/joblib/externals/loky/cloudpickle_wrapper.py @@ -2,7 +2,7 @@ from functools import partial try: - from ..cloudpickle import dumps, loads + from sklearn.externals.joblib.externals.cloudpickle import dumps, loads cloudpickle = True except ImportError: cloudpickle = False @@ -93,7 +93,7 @@ def wrap_non_picklable_objects(obj, keep_wrapper=True): complex classes. """ if not cloudpickle: - raise ImportError("could not from .. import cloudpickle. Please install " + raise ImportError("could not from sklearn.externals.joblib.externals import cloudpickle. Please install " "cloudpickle to allow extended serialization. 
" "(`pip install cloudpickle`).") From 456010583f0f115e912225f7e004baa7671b4f16 Mon Sep 17 00:00:00 2001 From: Aditya Vyas Date: Wed, 17 Apr 2019 11:44:49 -0400 Subject: [PATCH 8/8] revert changes in copy_joblib.sh --- sklearn/externals/copy_joblib.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sklearn/externals/copy_joblib.sh b/sklearn/externals/copy_joblib.sh index 750ab459f942b..f2c4ab3ed359b 100755 --- a/sklearn/externals/copy_joblib.sh +++ b/sklearn/externals/copy_joblib.sh @@ -18,7 +18,7 @@ rm -rf $INSTALL_FOLDER # Needed to rewrite the doctests # Note: BSD sed -i needs an argument unders OSX # so first renaming to .bak and then deleting backup files -find joblib -name "*.py" | xargs sed -i.bak "s/from joblib/from .joblib/" +find joblib -name "*.py" | xargs sed -i.bak "s/from joblib/from sklearn.externals.joblib/" find joblib -name "*.bak" | xargs rm # Remove the tests folders to speed-up test time for scikit-learn.