diff --git a/.circleci/config.yml b/.circleci/config.yml
index b5f679af6..8990d3f22 100644
--- a/.circleci/config.yml
+++ b/.circleci/config.yml
@@ -24,6 +24,7 @@ jobs:
       - NUMPYDOC_VERSION: 'latest'
       - SPHINXCONTRIB_BIBTEX_VERSION: 'latest'
       - PYDATA_SPHINX_THEME_VERSION: 'latest'
+      - SPHINX_DESIGN_VERSION: 'latest'
     steps:
       - add_ssh_keys:
           fingerprints:
diff --git a/azure-pipelines.yml b/azure-pipelines.yml
index 5c4218dec..98f2b4e11 100644
--- a/azure-pipelines.yml
+++ b/azure-pipelines.yml
@@ -255,7 +255,7 @@ jobs:
 - template: build_tools/azure/posix.yml
   parameters:
     name: macOS
-    vmImage: macOS-11
+    vmImage: macOS-12
    dependsOn: [linting, git_commit]
    condition: |
      and(
diff --git a/build_tools/circle/build_doc.sh b/build_tools/circle/build_doc.sh
index 32699e8a8..9601b44aa 100755
--- a/build_tools/circle/build_doc.sh
+++ b/build_tools/circle/build_doc.sh
@@ -114,6 +114,7 @@ mamba create -n $CONDA_ENV_NAME --yes --quiet \
     "$(get_dep sphinxcontrib-bibtex $SPHINXCONTRIB_BIBTEX_VERSION)" \
     "$(get_dep sphinx-copybutton $SPHINXCONTRIB_BIBTEX_VERSION)" \
     "$(get_dep pydata-sphinx-theme $PYDATA_SPHINX_THEME_VERSION)" \
+    "$(get_dep sphinx-design $SPHINX_DESIGN_VERSION)" \
     memory_profiler packaging seaborn pytest coverage compilers tensorflow

 source activate $CONDA_ENV_NAME
diff --git a/conftest.py b/conftest.py
index 45a5ce679..0dc6e5a23 100644
--- a/conftest.py
+++ b/conftest.py
@@ -7,7 +7,14 @@

 import os

+import numpy as np
 import pytest
+from sklearn.utils.fixes import parse_version
+
+# use legacy numpy print options to avoid failures due to NumPy 2.+ scalar
+# representation
+if parse_version(np.__version__) > parse_version("2.0.0"):
+    np.set_printoptions(legacy="1.25")


 def pytest_runtest_setup(item):
diff --git a/doc/_static/css/imbalanced-learn.css b/doc/_static/css/imbalanced-learn.css
index 6c778540b..3778ee94c 100644
--- a/doc/_static/css/imbalanced-learn.css
+++ b/doc/_static/css/imbalanced-learn.css
@@ -21,39 +21,44 @@

 /* Override some aspects of the pydata-sphinx-theme */

-/* Getting started index page */
+/* Main index page overview cards */

 .intro-card {
-    background: #fff;
-    border-radius: 0;
-    padding: 30px 10px 10px 10px;
-    margin: 10px 0px;
-}
-
-.intro-card .card-text {
-    margin: 20px 0px;
-    /*min-height: 150px; */
-}
-
-.custom-button {
-    background-color: #dcdcdc;
-    border: none;
-    color: #484848;
-    text-align: center;
-    text-decoration: none;
-    display: inline-block;
-    font-size: 0.9rem;
-    border-radius: 0.5rem;
+    padding: 30px 10px 20px 10px;
+}
+
+.intro-card .sd-card-img-top {
+    margin: 10px;
+    height: 52px;
+    background: none !important;
+}
+
+.intro-card .sd-card-title {
+    color: var(--pst-color-primary);
+    font-size: var(--pst-font-size-h5);
+    padding: 1rem 0rem 0.5rem 0rem;
+}
+
+.intro-card .sd-card-footer {
+    border: none !important;
+}
+
+.intro-card .sd-card-footer p.sd-card-text {
     max-width: 220px;
-    padding: 0.5rem 0rem;
+    margin-left: auto;
+    margin-right: auto;
+}
+
+.intro-card .sd-btn-secondary {
+    background-color: #6c757d !important;
+    border-color: #6c757d !important;
 }

-.custom-button a {
-    color: #484848;
+.intro-card .sd-btn-secondary:hover {
+    background-color: #5a6268 !important;
+    border-color: #545b62 !important;
 }

-.custom-button p {
-    margin-top: 0;
-    margin-bottom: 0rem;
-    color: #484848;
+.card, .card img {
+    background-color: var(--pst-color-background);
 }
diff --git a/doc/_static/img/logo_wide_dark.png b/doc/_static/img/logo_wide_dark.png
new file mode 100644
index 000000000..38f997886
Binary files /dev/null and b/doc/_static/img/logo_wide_dark.png differ
diff --git a/doc/_static/index_api.svg b/doc/_static/index_api.svg
new file mode 100644
index 000000000..69f7ba1d2
--- /dev/null
+++ b/doc/_static/index_api.svg
@@ -0,0 +1,97 @@
+   [97 lines of SVG markup (image/svg+xml) for the API reference card icon]
diff --git a/doc/_static/index_examples.svg b/doc/_static/index_examples.svg
new file mode 100644
index 000000000..de3d90237
--- /dev/null
+++ b/doc/_static/index_examples.svg
@@ -0,0 +1,76 @@
+   [76 lines of SVG markup (image/svg+xml) for the examples card icon]
diff --git a/doc/_static/index_getting_started.svg b/doc/_static/index_getting_started.svg
new file mode 100644
index 000000000..2d36622cb
--- /dev/null
+++ b/doc/_static/index_getting_started.svg
@@ -0,0 +1,66 @@
+   [66 lines of SVG markup (image/svg+xml) for the getting started card icon]
diff --git a/doc/_static/index_user_guide.svg b/doc/_static/index_user_guide.svg
new file mode 100644
index 000000000..bd1705351
--- /dev/null
+++ b/doc/_static/index_user_guide.svg
@@ -0,0 +1,67 @@
+   [67 lines of SVG markup (image/svg+xml) for the user guide card icon]
diff --git a/doc/conf.py b/doc/conf.py
index a6361eafd..5561808ab 100644
--- a/doc/conf.py
+++ b/doc/conf.py
@@ -43,6 +43,7 @@
     "sphinx_issues",
     "sphinx_gallery.gen_gallery",
     "sphinx_copybutton",
+    "sphinx_design",
 ]

 # Specify how to identify the prompt when copying code snippets
@@ -106,10 +107,12 @@
 html_theme_options = {
     "external_links": [],
     "github_url": "https://github.com/scikit-learn-contrib/imbalanced-learn",
-    # "twitter_url": "https://twitter.com/pandas_dev",
     "use_edit_page_button": True,
     "show_toc_level": 1,
     # "navbar_align": "right",  # For testing that the navbar items align properly
+    "logo": {
+        "image_dark": "https://imbalanced-learn.org/stable/_static/img/logo_wide_dark.png"
+    },
 }

 html_context = {
@@ -323,15 +326,7 @@ def generate_min_dependency_substitutions(app):

 # -- Additional temporary hacks -----------------------------------------------

-# Temporary work-around for spacing problem between parameter and parameter
-# type in the doc, see https://github.com/numpy/numpydoc/issues/215. The bug
-# has been fixed in sphinx (https://github.com/sphinx-doc/sphinx/pull/5976) but
-# through a change in sphinx basic.css except rtd_theme does not use basic.css.
-# In an ideal world, this would get fixed in this PR:
-# https://github.com/readthedocs/sphinx_rtd_theme/pull/747/files
-

 def setup(app):
     app.connect("builder-inited", generate_min_dependency_table)
     app.connect("builder-inited", generate_min_dependency_substitutions)
-    app.add_css_file("basic.css")
diff --git a/doc/index.rst b/doc/index.rst
index aa3d7a9b2..238786314 100644
--- a/doc/index.rst
+++ b/doc/index.rst
@@ -21,80 +21,82 @@
 Imbalanced-learn (imported as :mod:`imblearn`) is an open source, MIT-licensed
 library relying on scikit-learn (imported as :mod:`sklearn`) and provides tools
 when dealing with classification with imbalanced classes.

-.. raw:: html
-
-        Getting started
-
-        Check out the getting started guides to install imbalanced-learn.
-        Some extra information to get started with a new contribution is also provided.
-
-.. container:: custom-button
-
-    :ref:`To the installation guideline`
-
-.. raw:: html
-
-        User guide
-
-        The user guide provides in-depth information on the
-        key concepts of imbalanced-learn with useful background information and explanation.
-
-.. container:: custom-button
-
-    :ref:`To the user guide`
-
-.. raw:: html
-
-        API reference
-
-        The reference guide contains a detailed description of
-        the imbalanced-learn API. To known more about methods parameters.
-
-.. container:: custom-button
-
-    :ref:`To the reference guide`
-
-.. raw:: html
-
-        Examples
-
-        The gallery of examples is a good place to see imbalanced-learn in action.
-        Select an example and dive in.
-
-.. container:: custom-button
-
-    :ref:`To the gallery of examples`
-
-.. raw:: html
-
+.. grid:: 1 2 2 2
+    :gutter: 4
+    :padding: 2 2 0 0
+    :class-container: sd-text-center
+
+    .. grid-item-card:: Getting started
+        :img-top: _static/index_getting_started.svg
+        :class-card: intro-card
+        :shadow: md
+
+        Check out the getting started guides to install `imbalanced-learn`.
+        Some extra information to get started with a new contribution is also provided.
+
+        +++
+
+        .. button-ref:: getting_started
+            :ref-type: ref
+            :click-parent:
+            :color: secondary
+            :expand:
+
+            To the installation guideline
+
+    .. grid-item-card:: User guide
+        :img-top: _static/index_user_guide.svg
+        :class-card: intro-card
+        :shadow: md
+
+        The user guide provides in-depth information on the key concepts of
+        `imbalanced-learn` with useful background information and explanation.
+
+        +++
+
+        .. button-ref:: user_guide
+            :ref-type: ref
+            :click-parent:
+            :color: secondary
+            :expand:
+
+            To the user guide
+
+    .. grid-item-card:: API reference
+        :img-top: _static/index_api.svg
+        :class-card: intro-card
+        :shadow: md
+
+        The reference guide contains a detailed description of
+        the `imbalanced-learn` API. To known more about methods parameters.
+
+        +++
+
+        .. button-ref:: api
+            :ref-type: ref
+            :click-parent:
+            :color: secondary
+            :expand:
+
+            To the reference guide
+
+    .. grid-item-card:: Examples
+        :img-top: _static/index_examples.svg
+        :class-card: intro-card
+        :shadow: md
+
+        The gallery of examples is a good place to see `imbalanced-learn` in action.
+        Select an example and dive in.
+
+        +++
+
+        .. button-ref:: general_examples
+            :ref-type: ref
+            :click-parent:
+            :color: secondary
+            :expand:
+
+            To the gallery of examples

 .. toctree::
diff --git a/doc/under_sampling.rst b/doc/under_sampling.rst
index 499b5a3d9..8f8e7fbb8 100644
--- a/doc/under_sampling.rst
+++ b/doc/under_sampling.rst
@@ -497,8 +497,7 @@ The class can be used as::
     >>> from sklearn.linear_model import LogisticRegression
     >>> from imblearn.under_sampling import InstanceHardnessThreshold
     >>> iht = InstanceHardnessThreshold(random_state=0,
-    ...                                 estimator=LogisticRegression(
-    ...                                     solver='lbfgs', multi_class='auto'))
+    ...                                 estimator=LogisticRegression())
     >>> X_resampled, y_resampled = iht.fit_resample(X, y)
     >>> print(sorted(Counter(y_resampled).items()))
     [(0, 64), (1, 64), (2, 64)]
diff --git a/doc/whats_new/v0.12.rst b/doc/whats_new/v0.12.rst
index 1213f1f56..fb79497d8 100644
--- a/doc/whats_new/v0.12.rst
+++ b/doc/whats_new/v0.12.rst
@@ -1,5 +1,33 @@
 .. _changes_0_12:

+Version 0.12.4
+==============
+
+**October 4, 2024**
+
+Changelog
+---------
+
+Compatibility
+.............
+
+- Compatibility with NumPy 2.0+
+  :pr:`1097` by :user:`Guillaume Lemaitre `.
+
+Version 0.12.3
+==============
+
+**May 28, 2024**
+
+Changelog
+---------
+
+Compatibility
+.............
+
+- Compatibility with scikit-learn 1.5
+  :pr:`1074` and :pr:`1084` by :user:`Guillaume Lemaitre `.
+
 Version 0.12.2
 ==============
diff --git a/examples/api/plot_sampling_strategy_usage.py b/examples/api/plot_sampling_strategy_usage.py
index dbb52fcdf..1c76a06b2 100644
--- a/examples/api/plot_sampling_strategy_usage.py
+++ b/examples/api/plot_sampling_strategy_usage.py
@@ -129,7 +129,7 @@

 # %% [markdown]
 # `sampling_strategy` as a `dict`
-# ------------------------------
+# -------------------------------
 #
 # When `sampling_strategy` is a `dict`, the keys correspond to the targeted
 # classes. The values correspond to the desired number of samples for each
diff --git a/examples/applications/plot_outlier_rejections.py b/examples/applications/plot_outlier_rejections.py
index 55f03e273..985b9211a 100644
--- a/examples/applications/plot_outlier_rejections.py
+++ b/examples/applications/plot_outlier_rejections.py
@@ -109,12 +109,12 @@ def outlier_rejection(X, y):

 pipe = make_pipeline(
     FunctionSampler(func=outlier_rejection),
-    LogisticRegression(solver="lbfgs", multi_class="auto", random_state=rng),
+    LogisticRegression(random_state=rng),
 )
 y_pred = pipe.fit(X_train, y_train).predict(X_test)
 print(classification_report(y_test, y_pred))

-clf = LogisticRegression(solver="lbfgs", multi_class="auto", random_state=rng)
+clf = LogisticRegression(random_state=rng)
 y_pred = clf.fit(X_train, y_train).predict(X_test)
 print(classification_report(y_test, y_pred))
diff --git a/examples/ensemble/plot_comparison_ensemble_classifier.py b/examples/ensemble/plot_comparison_ensemble_classifier.py
index 602e477e5..8c318e5bc 100644
--- a/examples/ensemble/plot_comparison_ensemble_classifier.py
+++ b/examples/ensemble/plot_comparison_ensemble_classifier.py
@@ -197,7 +197,7 @@

 from imblearn.ensemble import EasyEnsembleClassifier, RUSBoostClassifier

-estimator = AdaBoostClassifier(n_estimators=10)
+estimator = AdaBoostClassifier(n_estimators=10, algorithm="SAMME")
 eec = EasyEnsembleClassifier(n_estimators=10, estimator=estimator)
 eec.fit(X_train, y_train)
 y_pred_eec = eec.predict(X_test)
diff --git a/imblearn/_min_dependencies.py b/imblearn/_min_dependencies.py
index 497688765..ec1f5dedb 100644
--- a/imblearn/_min_dependencies.py
+++ b/imblearn/_min_dependencies.py
@@ -37,6 +37,7 @@
     "numpydoc": ("1.5.0", "docs"),
     "sphinxcontrib-bibtex": ("2.4.1", "docs"),
     "pydata-sphinx-theme": ("0.13.3", "docs"),
+    "sphinx-design": ("0.5.0", "docs"),
 }
diff --git a/imblearn/_version.py b/imblearn/_version.py
index 2c4231c9c..ff7e11ace 100644
--- a/imblearn/_version.py
+++ b/imblearn/_version.py
@@ -22,4 +22,4 @@
 # 'X.Y.dev0' is the canonical version of 'X.Y.dev'
 #

-__version__ = "0.12.2"
+__version__ = "0.12.4"
diff --git a/imblearn/ensemble/_bagging.py b/imblearn/ensemble/_bagging.py
index 2808239a7..acb0c70fa 100644
--- a/imblearn/ensemble/_bagging.py
+++ b/imblearn/ensemble/_bagging.py
@@ -386,7 +386,7 @@ def _fit(self, X, y, max_samples=None, max_depth=None, sample_weight=None):
         self.sampler_ = clone(self.sampler)
         # RandomUnderSampler is not supporting sample_weight. We need to pass
         # None.
-        return super()._fit(X, y, self.max_samples, sample_weight=None)
+        return super()._fit(X, y, self.max_samples)

     # TODO: remove when minimum supported version of scikit-learn is 1.1
     @available_if(_estimator_has("decision_function"))
diff --git a/imblearn/ensemble/_easy_ensemble.py b/imblearn/ensemble/_easy_ensemble.py
index 1da81d93c..e3c85741c 100644
--- a/imblearn/ensemble/_easy_ensemble.py
+++ b/imblearn/ensemble/_easy_ensemble.py
@@ -300,7 +300,7 @@ def _fit(self, X, y, max_samples=None, max_depth=None, sample_weight=None):
         check_target_type(y)
         # RandomUnderSampler is not supporting sample_weight. We need to pass
         # None.
-        return super()._fit(X, y, self.max_samples, sample_weight=None)
+        return super()._fit(X, y, self.max_samples)

     # TODO: remove when minimum supported version of scikit-learn is 1.1
     @available_if(_estimator_has("decision_function"))
@@ -365,9 +365,11 @@ def base_estimator_(self):
             raise error
         raise error

-    def _more_tags(self):
+    def _get_estimator(self):
         if self.estimator is None:
-            estimator = AdaBoostClassifier(algorithm="SAMME")
-        else:
-            estimator = self.estimator
-        return {"allow_nan": _safe_tags(estimator, "allow_nan")}
+            return AdaBoostClassifier(algorithm="SAMME")
+        return self.estimator
+
+    # TODO: remove when minimum supported version of scikit-learn is 1.5
+    def _more_tags(self):
+        return {"allow_nan": _safe_tags(self._get_estimator(), "allow_nan")}
diff --git a/imblearn/ensemble/tests/test_bagging.py b/imblearn/ensemble/tests/test_bagging.py
index 5705de553..382597183 100644
--- a/imblearn/ensemble/tests/test_bagging.py
+++ b/imblearn/ensemble/tests/test_bagging.py
@@ -174,7 +174,7 @@ def test_probability():

     # Degenerate case, where some classes are missing
     ensemble = BalancedBaggingClassifier(
-        estimator=LogisticRegression(solver="lbfgs", multi_class="auto"),
+        estimator=LogisticRegression(solver="lbfgs"),
         random_state=0,
         max_samples=5,
     )
@@ -435,7 +435,7 @@ def test_estimators_samples():
     # remap the y outside of the BalancedBaggingclassifier
     # _, y = np.unique(y, return_inverse=True)
     bagging = BalancedBaggingClassifier(
-        LogisticRegression(solver="lbfgs", multi_class="auto"),
+        LogisticRegression(),
         max_samples=0.5,
         max_features=0.5,
         random_state=1,
diff --git a/imblearn/metrics/pairwise.py b/imblearn/metrics/pairwise.py
index 11f654f02..40f099258 100644
--- a/imblearn/metrics/pairwise.py
+++ b/imblearn/metrics/pairwise.py
@@ -161,7 +161,7 @@ def fit(self, X, y):
                 f"elements in n_categories and {self.n_features_in_} in "
                 f"X."
             )
-        self.n_categories_ = np.array(self.n_categories, copy=False)
+        self.n_categories_ = np.asarray(self.n_categories)
         classes = unique_labels(y)

         # list of length n_features of ndarray (n_categories, n_classes)
diff --git a/imblearn/over_sampling/_smote/base.py b/imblearn/over_sampling/_smote/base.py
index 93b7e8a7b..8ef902920 100644
--- a/imblearn/over_sampling/_smote/base.py
+++ b/imblearn/over_sampling/_smote/base.py
@@ -11,16 +11,17 @@
 import warnings

 import numpy as np
+import sklearn
 from scipy import sparse
 from sklearn.base import clone
 from sklearn.exceptions import DataConversionWarning
 from sklearn.preprocessing import OneHotEncoder, OrdinalEncoder
 from sklearn.utils import (
-    _get_column_indices,
     _safe_indexing,
     check_array,
     check_random_state,
 )
+from sklearn.utils.fixes import parse_version
 from sklearn.utils.sparsefuncs_fast import (
     csr_mean_variance_axis0,
 )
@@ -34,6 +35,12 @@
 from ...utils.fixes import _is_pandas_df, _mode
 from ..base import BaseOverSampler

+sklearn_version = parse_version(sklearn.__version__).base_version
+if parse_version(sklearn_version) < parse_version("1.5"):
+    from sklearn.utils import _get_column_indices
+else:
+    from sklearn.utils._indexing import _get_column_indices
+

 class BaseSMOTE(BaseOverSampler):
     """Base class for the different SMOTE algorithms."""
diff --git a/imblearn/pipeline.py b/imblearn/pipeline.py
index 01eead7ea..7453446ad 100644
--- a/imblearn/pipeline.py
+++ b/imblearn/pipeline.py
@@ -12,9 +12,11 @@
 #          Christos Aridas
 #          Guillaume Lemaitre
 # License: BSD
+import sklearn
 from sklearn import pipeline
 from sklearn.base import clone
-from sklearn.utils import Bunch, _print_elapsed_time
+from sklearn.utils import Bunch
+from sklearn.utils.fixes import parse_version
 from sklearn.utils.metaestimators import available_if
 from sklearn.utils.validation import check_memory

@@ -34,6 +36,12 @@

 __all__ = ["Pipeline", "make_pipeline"]

+sklearn_version = parse_version(sklearn.__version__).base_version
+if parse_version(sklearn_version) < parse_version("1.5"):
+    from sklearn.utils import _print_elapsed_time
+else:
+    from sklearn.utils._user_interface import _print_elapsed_time
+

 class Pipeline(_ParamsValidationMixin, pipeline.Pipeline):
     """Pipeline of transforms and resamples with a final estimator.
@@ -163,11 +171,12 @@ def _validate_steps(self):
         for t in transformers:
             if t is None or t == "passthrough":
                 continue
-            if not (
-                hasattr(t, "fit")
-                or hasattr(t, "fit_transform")
-                or hasattr(t, "fit_resample")
-            ) or not (hasattr(t, "transform") or hasattr(t, "fit_resample")):
+
+            is_transfomer = hasattr(t, "fit") and hasattr(t, "transform")
+            is_sampler = hasattr(t, "fit_resample")
+            is_not_transfomer_or_sampler = not (is_transfomer or is_sampler)
+
+            if is_not_transfomer_or_sampler:
                 raise TypeError(
                     "All intermediate steps of the chain should "
                     "be estimators that implement fit and transform or "
@@ -175,9 +184,7 @@ def _validate_steps(self):
                     "'%s' (type %s) doesn't)" % (t, type(t))
                 )

-            if hasattr(t, "fit_resample") and (
-                hasattr(t, "fit_transform") or hasattr(t, "transform")
-            ):
+            if is_transfomer and is_sampler:
                 raise TypeError(
                     "All intermediate steps of the chain should "
                     "be estimators that implement fit and transform or "
diff --git a/imblearn/tests/test_docstring_parameters.py b/imblearn/tests/test_docstring_parameters.py
index b595d77d7..1bd6ecf51 100644
--- a/imblearn/tests/test_docstring_parameters.py
+++ b/imblearn/tests/test_docstring_parameters.py
@@ -11,7 +11,6 @@
 import pytest
 from sklearn.datasets import make_classification
 from sklearn.linear_model import LogisticRegression
-from sklearn.utils import IS_PYPY
 from sklearn.utils._testing import (
     _get_func_name,
     check_docstring_parameters,
@@ -70,7 +69,6 @@
 # Python 3.7
 @pytest.mark.filterwarnings("ignore::FutureWarning")
 @pytest.mark.filterwarnings("ignore::DeprecationWarning")
-@pytest.mark.skipif(IS_PYPY, reason="test segfaults on PyPy")
 def test_docstring_parameters():
     # Test module docstring formatting

@@ -154,9 +152,6 @@ def test_tabs():
     for importer, modname, ispkg in walk_packages(
         imblearn.__path__, prefix="imblearn."
     ):
-        if IS_PYPY:
-            continue
-
         # because we don't import
         mod = importlib.import_module(modname)
diff --git a/imblearn/tests/test_pipeline.py b/imblearn/tests/test_pipeline.py
index 409dbce41..d89e03a11 100644
--- a/imblearn/tests/test_pipeline.py
+++ b/imblearn/tests/test_pipeline.py
@@ -272,7 +272,7 @@ def test_pipeline_methods_anova():
     X = iris.data
     y = iris.target
     # Test with Anova + LogisticRegression
-    clf = LogisticRegression(solver="lbfgs", multi_class="auto")
+    clf = LogisticRegression()
     filter1 = SelectKBest(f_classif, k=2)
     pipe = Pipeline([("anova", filter1), ("logistic", clf)])
     pipe.fit(X, y)
@@ -639,7 +639,7 @@ def test_classes_property():

     clf = make_pipeline(
         SelectKBest(k=1),
-        LogisticRegression(solver="lbfgs", multi_class="auto", random_state=0),
+        LogisticRegression(),
     )
     with raises(AttributeError):
         getattr(clf, "classes_")
diff --git a/imblearn/utils/_metadata_requests.py b/imblearn/utils/_metadata_requests.py
index 1150c7d75..c81aa4ff0 100644
--- a/imblearn/utils/_metadata_requests.py
+++ b/imblearn/utils/_metadata_requests.py
@@ -1086,9 +1086,12 @@ def _serialize(self):

     def __iter__(self):
         if self._self_request:
-            yield "$self_request", RouterMappingPair(
-                mapping=MethodMapping.from_str("one-to-one"),
-                router=self._self_request,
+            yield (
+                "$self_request",
+                RouterMappingPair(
+                    mapping=MethodMapping.from_str("one-to-one"),
+                    router=self._self_request,
+                ),
             )
         for name, route_mapping in self._route_mappings.items():
             yield (name, route_mapping)
@@ -1234,7 +1237,7 @@ def __init__(self, name, keys, validate_keys=True):

     def __get__(self, instance, owner):
         # we would want to have a method which accepts only the expected args
-        def func(**kw):
+        def func(*args, **kw):
            """Updates the request for provided parameters

             This docstring is overwritten below.
@@ -1253,15 +1256,32 @@ def func(**kw):
                     f"arguments are: {set(self.keys)}"
                 )

-            requests = instance._get_metadata_request()
+            # This makes it possible to use the decorated method as an unbound
+            # method, for instance when monkeypatching.
+            # https://github.com/scikit-learn/scikit-learn/issues/28632
+            if instance is None:
+                _instance = args[0]
+                args = args[1:]
+            else:
+                _instance = instance
+
+            # Replicating python's behavior when positional args are given other
+            # than `self`, and `self` is only allowed if this method is unbound.
+            if args:
+                raise TypeError(
+                    f"set_{self.name}_request() takes 0 positional argument but"
+                    f" {len(args)} were given"
+                )
+
+            requests = _instance._get_metadata_request()
             method_metadata_request = getattr(requests, self.name)

             for prop, alias in kw.items():
                 if alias is not UNCHANGED:
                     method_metadata_request.add_request(param=prop, alias=alias)
-            instance._metadata_request = requests
+            _instance._metadata_request = requests

-            return instance
+            return _instance

         # Now we set the relevant attributes of the function so that it seems
         # like a normal method to the end user, with known expected arguments.
@@ -1525,13 +1545,13 @@ def process_routing(_obj, _method, /, **kwargs):
     metadata to corresponding methods or corresponding child objects. The object
     names are those defined in `obj.get_metadata_routing()`.
     """
-    if not _routing_enabled() and not kwargs:
+    if not kwargs:
         # If routing is not enabled and kwargs are empty, then we don't have to
         # try doing any routing, we can simply return a structure which returns
         # an empty dict on routed_params.ANYTHING.ANY_METHOD.
         class EmptyRequest:
             def get(self, name, default=None):
-                return default if default else {}
+                return Bunch(**{method: dict() for method in METHODS})

             def __getitem__(self, name):
                 return Bunch(**{method: dict() for method in METHODS})
diff --git a/imblearn/utils/estimator_checks.py b/imblearn/utils/estimator_checks.py
index 570427759..2fc893391 100644
--- a/imblearn/utils/estimator_checks.py
+++ b/imblearn/utils/estimator_checks.py
@@ -309,7 +309,7 @@ def check_samplers_sparse(name, sampler_orig):
     sampler = clone(sampler)
     X_res, y_res = sampler.fit_resample(X, y)
     assert sparse.issparse(X_res_sparse)
-    assert_allclose(X_res_sparse.A, X_res, rtol=1e-5)
+    assert_allclose(X_res_sparse.toarray(), X_res, rtol=1e-5)
     assert_allclose(y_res_sparse, y_res)
diff --git a/setup.cfg b/setup.cfg
index 862620367..5cd5d6139 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -1,5 +1,5 @@
 [bumpversion]
-current_version = 0.12.2
+current_version = 0.12.4
 tag = False
 parse = (?P\d+)\.(?P\d+)\.(?P\d+)(\.(?P[a-z]+)(?P\d+))?
 serialize =