From 253592ff84ddfa660a9a04ac82e9e0864d085667 Mon Sep 17 00:00:00 2001
From: Yi-Xuan Xu
Date: Wed, 3 Feb 2021 11:08:53 +0800
Subject: [PATCH 01/94] [MNT] Update package in build_tools

---
 build_tools/requirements.txt | 6 +++++-
 1 file changed, 5 insertions(+), 1 deletion(-)

diff --git a/build_tools/requirements.txt b/build_tools/requirements.txt
index 0d6c909..e4c5b19 100644
--- a/build_tools/requirements.txt
+++ b/build_tools/requirements.txt
@@ -2,4 +2,8 @@ flake8
 pytest-cov
 lightgbm
 xgboost
-cython>=0.28.5
\ No newline at end of file
+cython>=0.28.5
+numpy>=1.13.3,<1.20.0
+scipy>=0.19.1
+joblib>=0.11
+scikit-learn>=0.22
\ No newline at end of file

From 10fca9b890699924e09a265b028ee764c7fd0851 Mon Sep 17 00:00:00 2001
From: Yi-Xuan Xu
Date: Wed, 3 Feb 2021 11:09:17 +0800
Subject: [PATCH 02/94] [DOC] Update config for ReadtheDocs

---
 .readthedocs.yml | 14 ++++++++++++++
 1 file changed, 14 insertions(+)
 create mode 100644 .readthedocs.yml

diff --git a/.readthedocs.yml b/.readthedocs.yml
new file mode 100644
index 0000000..566f168
--- /dev/null
+++ b/.readthedocs.yml
@@ -0,0 +1,14 @@
+version: 2
+
+formats: all
+
+sphinx:
+  configuration: docs/conf.py
+
+python:
+  version: 3.7
+  install:
+    - requirements: build_tools/requirements.txt
+    - method: pip
+      path: .
+    - requirements: docs/requirements.txt

From d5f3e0ea4555e6998dfec79a656d159d2aeb0b93 Mon Sep 17 00:00:00 2001
From: Yi-Xuan Xu
Date: Wed, 3 Feb 2021 11:23:42 +0800
Subject: [PATCH 03/94] [DOC] Update sphinx conf.py to support RTD

---
 docs/conf.py | 8 +++++++-
 1 file changed, 7 insertions(+), 1 deletion(-)

diff --git a/docs/conf.py b/docs/conf.py
index 93dec78..7454185 100644
--- a/docs/conf.py
+++ b/docs/conf.py
@@ -13,7 +13,13 @@
 import os
 import sys
-sys.path.insert(0, os.path.abspath('..'))
+import deepforest
+
+
+# -- Path setup --------------------------------------------------------------
+ON_READTHEDOCS = os.environ.get("READTHEDOCS") == "True"
+if not ON_READTHEDOCS:
+    sys.path.insert(0, os.path.abspath(".."))
 
 
 # -- Project information -----------------------------------------------------

From 9220885ac324914ecdfd220d6b846b45cc13c2de Mon Sep 17 00:00:00 2001
From: Yi-Xuan Xu
Date: Wed, 3 Feb 2021 11:32:22 +0800
Subject: [PATCH 04/94] [DOC] Publish the docs on RTD

---
 CHANGELOG.rst | 2 +-
 README.rst    | 9 ++++++---
 2 files changed, 7 insertions(+), 4 deletions(-)

diff --git a/CHANGELOG.rst b/CHANGELOG.rst
index d8fab9f..46630ad 100644
--- a/CHANGELOG.rst
+++ b/CHANGELOG.rst
@@ -31,4 +31,4 @@ Version 0.1.*
 .. |Fix| replace:: :raw-html:`Fix` :raw-latex:`{\small\sc [Fix]}`
 .. |API| replace:: :raw-html:`API Change` :raw-latex:`{\small\sc [API Change]}`
 
-- |Feature| configurable predictor parameter `#9 `__
\ No newline at end of file
+- |Feature| configurable predictor parameter `#9 `__ @tczhao
\ No newline at end of file
diff --git a/README.rst b/README.rst
index 7ff6912..d6ed875 100644
--- a/README.rst
+++ b/README.rst
@@ -1,11 +1,14 @@
 Deep Forest (DF) 21
 ===================
 
-|github|_ |codecov|_ |python|_ |pypi|_
+|github|_ |readthedocs|_ |codecov|_ |python|_ |pypi|_
 
 .. |github| image:: https://github.com/LAMDA-NJU/Deep-Forest/workflows/DeepForest-CI/badge.svg
 .. _github: https://github.com/LAMDA-NJU/Deep-Forest/actions
 
+.. |readthedocs| image:: https://readthedocs.org/projects/deep-forest/badge/?version=latest
+.. _readthedocs: https://deep-forest.readthedocs.io/en/latest/
+
 .. |codecov| image:: https://codecov.io/gh/LAMDA-NJU/Deep-Forest/branch/master/graph/badge.svg?token=5BVXOT8RPO
 ..
_codecov: https://codecov.io/gh/LAMDA-NJU/Deep-Forest @@ -24,7 +27,7 @@ Deep Forest (DF) 21 Whenever one used tree-based machine learning approaches such as Random Forest or GBDT, DF21 may offer a new powerful option. -For a quick start, please refer to `How to Get Started `__. For a detailed guidance on parameter tunning, please refer to `Parameters Tunning `__. +For a quick start, please refer to `How to Get Started `__. For a detailed guidance on parameter tunning, please refer to `Parameters Tunning `__. Installation ------------ @@ -58,7 +61,7 @@ Quickstart Resources --------- -* `Documentation `__ +* `Documentation `__ * Deep Forest: `[Paper] `__ * Keynote at AISTATS 2019: `[Slides] `__ From 08debc0101c258d505a250a1068d6effdb88ab31 Mon Sep 17 00:00:00 2001 From: Joey Gao <1783198484@qq.com> Date: Wed, 3 Feb 2021 12:56:02 +0800 Subject: [PATCH 05/94] [ENH] Add base class `BaseEstimator` and `ClassifierMixin` (#8) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * 添加`CascadeForestClassifier`类对`cross_val_score`交叉验证的支持。 给`BaseCascadeForest`添加基类`BaseEstimator`以包含`get_params`方法; 对`CascadeForestClassifier`添加`ClassifierMixin`基类以包含`score`方法。 * formated by Autopep8 * update __init__ of CascadeForestClassifier * sync with the master branch Co-authored-by: Yi-Xuan Xu --- deepforest/cascade.py | 45 ++++++++++++++++++++++++++++++++++++++----- 1 file changed, 40 insertions(+), 5 deletions(-) diff --git a/deepforest/cascade.py b/deepforest/cascade.py index 04484d4..52d9b12 100644 --- a/deepforest/cascade.py +++ b/deepforest/cascade.py @@ -7,6 +7,7 @@ import numbers import numpy as np from abc import ABCMeta, abstractmethod +from sklearn.base import BaseEstimator, ClassifierMixin from . import _utils from . import _io @@ -191,7 +192,7 @@ def adddoc(cls): return adddoc -class BaseCascadeForest(metaclass=ABCMeta): +class BaseCascadeForest(BaseEstimator, metaclass=ABCMeta): def __init__( self, @@ -772,10 +773,44 @@ def clean(self): @deepforest_model_doc( """Implementation of the deep forest for classification.""" ) -class CascadeForestClassifier(BaseCascadeForest): - - def __init__(self, **kwargs): - super().__init__(**kwargs) +class CascadeForestClassifier(BaseCascadeForest, ClassifierMixin): + + def __init__(self, + n_bins=255, + bin_subsample=2e5, + bin_type="percentile", + max_layers=20, + n_estimators=2, + n_trees=100, + max_depth=None, + min_samples_leaf=1, + use_predictor=False, + predictor="forest", + predictor_kwargs={}, + n_tolerant_rounds=2, + delta=1e-5, + partial_mode=False, + n_jobs=None, + random_state=None, + verbose=1): + super().__init__( + n_bins=n_bins, + bin_subsample=bin_subsample, + bin_type=bin_type, + max_layers=max_layers, + n_estimators=n_estimators, + n_trees=n_trees, + max_depth=max_depth, + min_samples_leaf=min_samples_leaf, + use_predictor=use_predictor, + predictor=predictor, + predictor_kwargs=predictor_kwargs, + n_tolerant_rounds=n_tolerant_rounds, + delta=delta, + partial_mode=partial_mode, + n_jobs=n_jobs, + random_state=random_state, + verbose=verbose) def _repr_performance(self, pivot): msg = "Val Acc = {:.3f} %" From e19358d2b45adf002c814bc5e67334cd976566e8 Mon Sep 17 00:00:00 2001 From: Yi-Xuan Xu Date: Wed, 3 Feb 2021 13:13:02 +0800 Subject: [PATCH 06/94] [DOC] Update CHANGELOG.rst --- CHANGELOG.rst | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.rst b/CHANGELOG.rst index 46630ad..48a885d 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -31,4 +31,5 @@ Version 0.1.* .. 
|Fix| replace:: :raw-html:`Fix` :raw-latex:`{\small\sc [Fix]}` .. |API| replace:: :raw-html:`API Change` :raw-latex:`{\small\sc [API Change]}` -- |Feature| configurable predictor parameter `#9 `__ @tczhao \ No newline at end of file +- |Feature| configurable predictor parameter (`#9 `__) @tczhao +- |Enhancement| add base class ``BaseEstimator`` and ``ClassifierMixin`` (`#8 `__) @pjgao From 5e80183cf2ec7fd31d88c24bd7a6e800a90ffc2e Mon Sep 17 00:00:00 2001 From: tczhao Date: Wed, 3 Feb 2021 23:01:21 +1100 Subject: [PATCH 07/94] [ENH] Add sample weight for `fit` (#7) * feat(fit): add sample_weight * fix(pytest): remove unused operator * Polish up docstrings * Update CHANGELOG.rst Co-authored-by: Yi-Xuan Xu --- CHANGELOG.rst | 1 + deepforest/_estimator.py | 4 ++-- deepforest/_layer.py | 11 +++++++---- deepforest/cascade.py | 19 +++++++++++++++---- deepforest/forest.py | 7 ++++++- tests/test_model.py | 33 ++++++++++++++++++++++++++++++++- 6 files changed, 63 insertions(+), 12 deletions(-) diff --git a/CHANGELOG.rst b/CHANGELOG.rst index 48a885d..225dd7a 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -31,5 +31,6 @@ Version 0.1.* .. |Fix| replace:: :raw-html:`Fix` :raw-latex:`{\small\sc [Fix]}` .. |API| replace:: :raw-html:`API Change` :raw-latex:`{\small\sc [API Change]}` +- |Feature| support sample weight in :meth:`fit` (`#7 `__) @tczhao - |Feature| configurable predictor parameter (`#9 `__) @tczhao - |Enhancement| add base class ``BaseEstimator`` and ``ClassifierMixin`` (`#8 `__) @pjgao diff --git a/deepforest/_estimator.py b/deepforest/_estimator.py index 66d275d..428e6ff 100644 --- a/deepforest/_estimator.py +++ b/deepforest/_estimator.py @@ -61,8 +61,8 @@ def __init__( def oob_decision_function_(self): return self.estimator_.oob_decision_function_ - def fit_transform(self, X, y): - self.estimator_.fit(X, y) + def fit_transform(self, X, y, sample_weight=None): + self.estimator_.fit(X, y, sample_weight) X_aug = self.estimator_.oob_decision_function_ return X_aug diff --git a/deepforest/_layer.py b/deepforest/_layer.py index e94e00e..88d3676 100644 --- a/deepforest/_layer.py +++ b/deepforest/_layer.py @@ -20,7 +20,8 @@ def _build_estimator( oob_decision_function, partial_mode=True, buffer=None, - verbose=1 + verbose=1, + sample_weight=None ): """Private function used to fit a single estimator.""" if verbose > 1: @@ -28,7 +29,7 @@ def _build_estimator( key = estimator_name + "_" + str(estimator_idx) print(msg.format(_utils.ctime(), key, layer_idx)) - X_aug_train = estimator.fit_transform(X, y) + X_aug_train = estimator.fit_transform(X, y, sample_weight) oob_decision_function += estimator.oob_decision_function_ if partial_mode: @@ -107,7 +108,7 @@ def _validate_params(self): msg = "`n_trees` = {} should be strictly positive." 
raise ValueError(msg.format(self.n_trees)) - def fit_transform(self, X, y): + def fit_transform(self, X, y, sample_weight=None): self._validate_params() n_samples, _ = X.shape @@ -128,6 +129,7 @@ def fit_transform(self, X, y): self.partial_mode, self.buffer, self.verbose, + sample_weight, ) X_aug.append(X_aug_) key = "{}-{}-{}".format(self.layer_idx, estimator_idx, "rf") @@ -145,6 +147,7 @@ def fit_transform(self, X, y): self.partial_mode, self.buffer, self.verbose, + sample_weight, ) X_aug.append(X_aug_) key = "{}-{}-{}".format(self.layer_idx, estimator_idx, "erf") @@ -153,7 +156,7 @@ def fit_transform(self, X, y): # Set the OOB estimations and validation accuracy self.oob_decision_function_ = oob_decision_function / self.n_estimators y_pred = np.argmax(oob_decision_function, axis=1) - self.val_acc_ = accuracy_score(y, y_pred) + self.val_acc_ = accuracy_score(y, y_pred, sample_weight=sample_weight) X_aug = np.hstack(X_aug) return X_aug diff --git a/deepforest/cascade.py b/deepforest/cascade.py index 52d9b12..51113fa 100644 --- a/deepforest/cascade.py +++ b/deepforest/cascade.py @@ -422,7 +422,8 @@ def predict(self, X): def n_aug_features_(self): return 2 * self.n_estimators * self.n_outputs_ - def fit(self, X, y): + # flake8: noqa: E501 + def fit(self, X, y, sample_weight=None): """ Build a deep forest using the training data. @@ -449,6 +450,8 @@ def fit(self, X, y): ``np.uint8``. y : :obj:`numpy.ndarray` of shape (n_samples,) The class labels of input samples. + sample_weight : :obj:`numpy.ndarray` of shape (n_samples,), default=None + Sample weights. If ``None``, then samples are equally weighted. """ self._check_input(X, y) self._validate_params() @@ -491,7 +494,7 @@ def fit(self, X, y): print("{} Fitting cascade layer = {:<2}".format(_utils.ctime(), 0)) tic = time.time() - X_aug_train_ = layer_.fit_transform(X_train_, y) + X_aug_train_ = layer_.fit_transform(X_train_, y, sample_weight) toc = time.time() training_time = toc - tic @@ -567,7 +570,11 @@ def fit(self, X, y): print(msg.format(_utils.ctime(), layer_idx)) tic = time.time() - X_aug_train_ = layer_.fit_transform(X_middle_train_, y) + X_aug_train_ = layer_.fit_transform( + X_middle_train_, + y, + sample_weight + ) toc = time.time() training_time = toc - tic @@ -667,7 +674,11 @@ def fit(self, X, y): print(msg.format(_utils.ctime(), self.predictor_name)) tic = time.time() - self.predictor_.fit(X_middle_train_, y) + self.predictor_.fit( + X_middle_train_, + y, + sample_weight=sample_weight + ) toc = time.time() if self.verbose > 0: diff --git a/deepforest/forest.py b/deepforest/forest.py index a670ef9..c501706 100644 --- a/deepforest/forest.py +++ b/deepforest/forest.py @@ -96,8 +96,9 @@ def _parallel_build_trees( X, y, n_samples_bootstrap, + sample_weight, out, - lock + lock, ): """ Private function used to fit a single tree in parallel.""" @@ -107,8 +108,11 @@ def _parallel_build_trees( n_samples_bootstrap) # Fit the tree on the bootstrapped samples + if sample_weight is not None: + sample_weight = sample_weight[sample_mask] feature, threshold, children, value = tree.fit(X[sample_mask], y[sample_mask], + sample_weight=sample_weight, check_input=False) if not children.flags["C_CONTIGUOUS"]: @@ -422,6 +426,7 @@ def fit(self, X, y, sample_weight=None): X, y, n_samples_bootstrap, + sample_weight, oob_decision_function, lock) for i, t in enumerate(trees)) diff --git a/tests/test_model.py b/tests/test_model.py index 8a625bd..a1fcd3e 100644 --- a/tests/test_model.py +++ b/tests/test_model.py @@ -1,7 +1,8 @@ import copy import pytest 
import shutil -from numpy.testing import assert_array_equal +import numpy as np +from numpy.testing import assert_array_equal, assert_raises from sklearn.datasets import load_iris from sklearn.model_selection import train_test_split @@ -126,6 +127,36 @@ def test_model_workflow_partial_mode(): shutil.rmtree(save_dir) +def test_model_sample_weight(): + """Run the workflow of deep forest with a local buffer.""" + + case_kwargs = copy.deepcopy(kwargs) + + # Training without sample_weight + model = CascadeForestClassifier(**case_kwargs) + model.fit(X_train, y_train) + y_pred_no_sample_weight = model.predict(X_test) + + # Training with equal sample_weight + model = CascadeForestClassifier(**case_kwargs) + sample_weight = np.ones(y_train.size) + model.fit(X_train, y_train, sample_weight=sample_weight) + y_pred_equal_sample_weight = model.predict(X_test) + + # Make sure the same predictions with None and equal sample_weight + assert_array_equal(y_pred_no_sample_weight, y_pred_equal_sample_weight) + + model = CascadeForestClassifier(**case_kwargs) + sample_weight = np.where(y_train == 0, 0.1, y_train) + model.fit(X_train, y_train, sample_weight=y_train) + y_pred_skewed_sample_weight = model.predict(X_test) + + # Make sure the different predictions with None and equal sample_weight + assert_raises(AssertionError, assert_array_equal, y_pred_skewed_sample_weight, y_pred_equal_sample_weight) + + model.clean() # clear the buffer + + def test_model_workflow_in_memory(): """Run the workflow of deep forest with in-memory mode.""" From a089b10f4e629e745dbf9127656afa05ef93861c Mon Sep 17 00:00:00 2001 From: Yi-Xuan Xu Date: Thu, 4 Feb 2021 13:14:22 +0800 Subject: [PATCH 08/94] [DOC] Add the reference on paper --- README.rst | 2 +- docs/index.rst | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/README.rst b/README.rst index d6ed875..f32d68a 100644 --- a/README.rst +++ b/README.rst @@ -18,7 +18,7 @@ Deep Forest (DF) 21 .. |pypi| image:: https://img.shields.io/pypi/v/deep-forest?color=blue .. _pypi: https://pypi.org/project/deep-forest/ -**DF21** is an implementation of Deep Forest 2021.2.1. It is designed to have the following advantages: +**DF21** is an implementation of `Deep Forest `__ 2021.2.1. It is designed to have the following advantages: - **Powerful**: Better accuracy than existing tree-based ensemble methods. - **Easy to Use**: Less efforts on tunning parameters. diff --git a/docs/index.rst b/docs/index.rst index 23965be..7c6fe9a 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -1,7 +1,7 @@ DF21 Documentation ================== -**DF21** is an implementation of Deep Forest 2021.2.1. It is designed to have the following advantages: +**DF21** is an implementation of `Deep Forest `__ 2021.2.1. It is designed to have the following advantages: - **Powerful**: Better accuracy than existing tree-based ensemble methods. - **Easy to Use**: Less efforts on tunning parameters. 
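The two API changes introduced above — scikit-learn compatibility through the ``BaseEstimator``/``ClassifierMixin`` base classes (PATCH 05) and the ``sample_weight`` argument of ``fit`` (PATCH 07) — can be exercised together with a minimal sketch along the following lines. The dataset, hyper-parameters, and weighting scheme below are illustrative assumptions and are not taken from the patches themselves.

# Minimal sketch: cross-validation and sample weights with CascadeForestClassifier.
# The dataset and hyper-parameters are arbitrary illustrative choices.
import numpy as np
from sklearn.datasets import load_iris
from sklearn.model_selection import cross_val_score, train_test_split

from deepforest import CascadeForestClassifier

X, y = load_iris(return_X_y=True)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=42)

# ``get_params`` (from BaseEstimator) lets the model be cloned inside
# ``cross_val_score``; ``score`` comes from ClassifierMixin.
model = CascadeForestClassifier(n_trees=50, random_state=42, verbose=0)
scores = cross_val_score(model, X_train, y_train, cv=3)
print("CV accuracy: {:.3f} +/- {:.3f}".format(scores.mean(), scores.std()))

# ``fit`` now accepts per-sample weights; here samples of class 0 are down-weighted.
sample_weight = np.where(y_train == 0, 0.1, 1.0)
model = CascadeForestClassifier(n_trees=50, random_state=42, verbose=0)
model.fit(X_train, y_train, sample_weight=sample_weight)
print("Test accuracy: {:.3f}".format(model.score(X_test, y_test)))

Passing equal weights is expected to reproduce the unweighted fit, while skewed weights change the predictions, which mirrors the checks added in tests/test_model.py above.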
From 1506ce4fd3ee54110d670336b5ef4c35aaf9805f Mon Sep 17 00:00:00 2001 From: tczhao Date: Fri, 5 Feb 2021 16:14:59 +1100 Subject: [PATCH 09/94] [ENH] Set up pre-commit and black formatter (#15) * style(lint): add black formatter * feat(lint): add git action black * style(py): format using black * Update .pre-commit-config.yaml * update badge --- .github/workflows/build-and-test.yml | 1 - .github/workflows/code-quality.yml | 1 + .pre-commit-config.yaml | 10 + CONTRIBUTIONG.md | 7 + README.rst | 5 +- build_tools/requirements.txt | 10 +- deepforest/__init__.py | 12 +- deepforest/_binner.py | 26 +-- deepforest/_estimator.py | 16 +- deepforest/_io.py | 60 ++--- deepforest/_layer.py | 19 +- deepforest/_utils.py | 4 +- deepforest/cascade.py | 121 +++++----- deepforest/forest.py | 323 ++++++++++++++++----------- deepforest/setup.py | 35 +-- deepforest/tree/__init__.py | 4 +- deepforest/tree/setup.py | 53 +++-- deepforest/tree/tree.py | 295 +++++++++++++----------- pyproject.toml | 19 +- requirements-dev.txt | 3 + setup.py | 70 +++--- tests/test_binner.py | 45 ++-- tests/test_buffer.py | 26 ++- tests/test_forest.py | 23 +- tests/test_io.py | 5 +- tests/test_model.py | 100 +++++---- 26 files changed, 744 insertions(+), 549 deletions(-) create mode 100644 .pre-commit-config.yaml create mode 100644 CONTRIBUTIONG.md create mode 100644 requirements-dev.txt diff --git a/.github/workflows/build-and-test.yml b/.github/workflows/build-and-test.yml index 0af89b2..41b6329 100644 --- a/.github/workflows/build-and-test.yml +++ b/.github/workflows/build-and-test.yml @@ -24,7 +24,6 @@ jobs: - name: Install package dependencies run: | python -m pip install --upgrade pip - pip install -r requirements.txt pip install -r build_tools/requirements.txt - name: Install run: pip install --verbose --editable . diff --git a/.github/workflows/code-quality.yml b/.github/workflows/code-quality.yml index 1ce6e44..41e9d47 100644 --- a/.github/workflows/code-quality.yml +++ b/.github/workflows/code-quality.yml @@ -27,5 +27,6 @@ jobs: pip install -r build_tools/requirements.txt - name: Check code quality run: | + black --check --config pyproject.toml ./ chmod +x "${GITHUB_WORKSPACE}/build_tools/linting.sh" ./build_tools/linting.sh diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml new file mode 100644 index 0000000..ffcc612 --- /dev/null +++ b/.pre-commit-config.yaml @@ -0,0 +1,10 @@ +repos: +- repo: https://github.com/ambv/black + rev: 20.8b1 + hooks: + - id: black + language_version: python3 +- repo: https://gitlab.com/pycqa/flake8 + rev: 3.8.4 + hooks: + - id: flake8 diff --git a/CONTRIBUTIONG.md b/CONTRIBUTIONG.md new file mode 100644 index 0000000..56d33f2 --- /dev/null +++ b/CONTRIBUTIONG.md @@ -0,0 +1,7 @@ +## Install requirements + +`python -m pip install --upgrade pip` +`pip install -r build_tools/requirements.txt` +`pre-commit install` + +## Add your change to CHANGELOG.rst \ No newline at end of file diff --git a/README.rst b/README.rst index f32d68a..a69a143 100644 --- a/README.rst +++ b/README.rst @@ -1,7 +1,7 @@ Deep Forest (DF) 21 =================== -|github|_ |readthedocs|_ |codecov|_ |python|_ |pypi|_ +|github|_ |readthedocs|_ |codecov|_ |python|_ |pypi|_ |style|_ .. |github| image:: https://github.com/LAMDA-NJU/Deep-Forest/workflows/DeepForest-CI/badge.svg .. _github: https://github.com/LAMDA-NJU/Deep-Forest/actions @@ -18,6 +18,9 @@ Deep Forest (DF) 21 .. |pypi| image:: https://img.shields.io/pypi/v/deep-forest?color=blue .. _pypi: https://pypi.org/project/deep-forest/ +.. 
|style| image;: https://img.shields.io/badge/code%20style-black-000000.svg +.. _style: https://github.com/psf/black + **DF21** is an implementation of `Deep Forest `__ 2021.2.1. It is designed to have the following advantages: - **Powerful**: Better accuracy than existing tree-based ensemble methods. diff --git a/build_tools/requirements.txt b/build_tools/requirements.txt index e4c5b19..c0f047e 100644 --- a/build_tools/requirements.txt +++ b/build_tools/requirements.txt @@ -1,9 +1,9 @@ -flake8 +-r ../requirements.txt +pytest +pre-commit +black==20.8b1 +flake8==3.8.4 pytest-cov lightgbm xgboost cython>=0.28.5 -numpy>=1.13.3,<1.20.0 -scipy>=0.19.1 -joblib>=0.11 -scikit-learn>=0.22 \ No newline at end of file diff --git a/deepforest/__init__.py b/deepforest/__init__.py index ebbdabc..fd72c17 100644 --- a/deepforest/__init__.py +++ b/deepforest/__init__.py @@ -5,8 +5,10 @@ from .tree import ExtraTreeClassifier -__all__ = ["CascadeForestClassifier", - "RandomForestClassifier", - "ExtraTreesClassifier", - "DecisionTreeClassifier", - "ExtraTreeClassifier"] +__all__ = [ + "CascadeForestClassifier", + "RandomForestClassifier", + "ExtraTreesClassifier", + "DecisionTreeClassifier", + "ExtraTreeClassifier", +] diff --git a/deepforest/_binner.py b/deepforest/_binner.py index 220c448..00e59b2 100644 --- a/deepforest/_binner.py +++ b/deepforest/_binner.py @@ -21,13 +21,14 @@ def _find_binning_thresholds_per_feature( - col_data, n_bins, bin_type="percentile" + col_data, n_bins, bin_type="percentile" ): """ Private function used to find midpoints for samples along a specific feature. """ if len(col_data.shape) != 1: + msg = ( "Per-feature data should be of the shape (n_samples,), but" " got {}-dims instead." @@ -72,17 +73,13 @@ def _find_binning_thresholds( rng = check_random_state(random_state) if n_samples > bin_subsample: - subset = rng.choice( - np.arange(n_samples), bin_subsample, replace=False - ) + subset = rng.choice(np.arange(n_samples), bin_subsample, replace=False) X = X.take(subset, axis=0) binning_thresholds = [] for f_idx in range(n_features): threshold = _find_binning_thresholds_per_feature( - X[:, f_idx], - n_bins, - bin_type + X[:, f_idx], n_bins, bin_type ) binning_thresholds.append(threshold) @@ -90,13 +87,12 @@ def _find_binning_thresholds( class Binner(TransformerMixin, BaseEstimator): - def __init__( self, n_bins=255, bin_subsample=2e5, bin_type="percentile", - random_state=None + random_state=None, ): self.n_bins = n_bins + 1 # + 1 for missing values self.bin_subsample = int(bin_subsample) @@ -107,8 +103,10 @@ def __init__( def _validate_params(self): if not 2 <= self.n_bins - 1 <= 255: - msg = ("`n_bins` should be in the range [2, 255], bug got" - " {} instead.") + msg = ( + "`n_bins` should be in the range [2, 255], bug got" + " {} instead." + ) raise ValueError(msg.format(self.n_bins - 1)) if not self.bin_subsample > 0: @@ -119,8 +117,10 @@ def _validate_params(self): raise ValueError(msg.format(self.bin_subsample)) if self.bin_type not in ("percentile", "interval"): - msg = ("The type of binner should be one of {{percentile, interval" - "}}, bug got {} instead.") + msg = ( + "The type of binner should be one of {{percentile, interval" + "}}, bug got {} instead." 
+ ) raise ValueError(msg.format(self.bin_type)) def fit(self, X): diff --git a/deepforest/_estimator.py b/deepforest/_estimator.py index 428e6ff..35917e5 100644 --- a/deepforest/_estimator.py +++ b/deepforest/_estimator.py @@ -12,7 +12,7 @@ def make_estimator( max_depth=None, min_samples_leaf=1, n_jobs=None, - random_state=None + random_state=None, ): # RandomForestClassifier if name == "rf": @@ -30,7 +30,7 @@ def make_estimator( max_depth=max_depth, min_samples_leaf=min_samples_leaf, n_jobs=n_jobs, - random_state=random_state + random_state=random_state, ) else: msg = "Unknown type of estimator, which should be one of {{rf, erf}}." @@ -40,7 +40,6 @@ def make_estimator( class Estimator(object): - def __init__( self, name, @@ -48,14 +47,11 @@ def __init__( max_depth=None, min_samples_leaf=1, n_jobs=None, - random_state=None + random_state=None, ): - self.estimator_ = make_estimator(name, - n_trees, - max_depth, - min_samples_leaf, - n_jobs, - random_state) + self.estimator_ = make_estimator( + name, n_trees, max_depth, min_samples_leaf, n_jobs, random_state + ) @property def oob_decision_function_(self): diff --git a/deepforest/_io.py b/deepforest/_io.py index 041d71a..0db1dea 100644 --- a/deepforest/_io.py +++ b/deepforest/_io.py @@ -10,7 +10,7 @@ class is designed to support the partial mode in deep forest. import shutil import warnings import tempfile -from joblib import (load, dump) +from joblib import load, dump class Buffer(object): @@ -33,12 +33,15 @@ class Buffer(object): store_data : bool, default=False Whether to cache the intermediate data to the local buffer. """ - def __init__(self, - use_buffer, - buffer_dir=None, - store_est=True, - store_pred=True, - store_data=False): + + def __init__( + self, + use_buffer, + buffer_dir=None, + store_est=True, + store_pred=True, + store_data=False, + ): self.use_buffer = use_buffer self.store_est = store_est and use_buffer @@ -48,16 +51,19 @@ def __init__(self, # Create buffer if self.use_buffer: - self.buffer = tempfile.TemporaryDirectory(prefix="buffer_", - dir=self.buffer_dir) + self.buffer = tempfile.TemporaryDirectory( + prefix="buffer_", dir=self.buffer_dir + ) if store_data: - self.data_dir_ = tempfile.mkdtemp(prefix="data_", - dir=self.buffer.name) + self.data_dir_ = tempfile.mkdtemp( + prefix="data_", dir=self.buffer.name + ) if store_est or store_pred: - self.model_dir_ = tempfile.mkdtemp(prefix="model_", - dir=self.buffer.name) + self.model_dir_ = tempfile.mkdtemp( + prefix="model_", dir=self.buffer.name + ) self.pred_dir_ = os.path.join(self.model_dir_, "predictor.est") @property @@ -97,14 +103,16 @@ def cache_data(self, layer_idx, X, is_training_data=True): return X if is_training_data: - cache_dir = os.path.join(self.data_dir_, - "joblib_train_{}.mmap".format(layer_idx)) + cache_dir = os.path.join( + self.data_dir_, "joblib_train_{}.mmap".format(layer_idx) + ) # Delete if os.path.exists(cache_dir): os.unlink(cache_dir) else: - cache_dir = os.path.join(self.data_dir_, - "joblib_test_{}.mmap".format(layer_idx)) + cache_dir = os.path.join( + self.data_dir_, "joblib_test_{}.mmap".format(layer_idx) + ) # Delete if os.path.exists(cache_dir): os.unlink(cache_dir) @@ -209,8 +217,10 @@ def del_estimator(self, layer_idx): try: os.unlink(os.path.join(self.model_dir_, est_name)) except OSError: - msg = ("Permission denied when deleting the dumped" - " estimators during the early stopping stage.") + msg = ( + "Permission denied when deleting the dumped" + " estimators during the early stopping stage." 
+ ) warnings.warn(msg, RuntimeWarning) def close(self): @@ -225,7 +235,7 @@ def close(self): def model_mkdir(dirname): """Make the directory for saving the model.""" if os.path.isdir(dirname): - msg = ("The directory to be created already exists {}.") + msg = "The directory to be created already exists {}." raise RuntimeError(msg.format(dirname)) os.mkdir(dirname) @@ -312,17 +322,13 @@ def model_loadobj(dirname, obj_type, d=None): n_estimators=d["n_estimators"], partial_mode=d["partial_mode"], buffer=d["buffer"], - verbose=d["verbose"] + verbose=d["verbose"], ) for est_type in ("rf", "erf"): for est_idx in range(n_estimators): - est_key = "{}-{}-{}".format( - layer_idx, est_idx, est_type - ) - dest = os.path.join( - dirname, "estimator", est_key + ".est" - ) + est_key = "{}-{}-{}".format(layer_idx, est_idx, est_type) + dest = os.path.join(dirname, "estimator", est_key + ".est") if not os.path.isfile(dest): msg = "Missing estimator in the path: {}." diff --git a/deepforest/_layer.py b/deepforest/_layer.py index 88d3676..2c80093 100644 --- a/deepforest/_layer.py +++ b/deepforest/_layer.py @@ -21,7 +21,7 @@ def _build_estimator( partial_mode=True, buffer=None, verbose=1, - sample_weight=None + sample_weight=None, ): """Private function used to fit a single estimator.""" if verbose > 1: @@ -43,7 +43,6 @@ def _build_estimator( class Layer(object): - def __init__( self, layer_idx, @@ -81,9 +80,9 @@ def _make_estimator(self, estimator_idx, estimator_name): """Make and configure a copy of the estimator.""" # Set the non-overlapped random state if self.random_state is not None: - random_state = (self.random_state + - 10 * estimator_idx + - 100 * self.layer_idx) + random_state = ( + self.random_state + 10 * estimator_idx + 100 * self.layer_idx + ) else: random_state = None @@ -93,7 +92,7 @@ def _make_estimator(self, estimator_idx, estimator_name): max_depth=self.max_depth, min_samples_leaf=self.min_samples_leaf, n_jobs=self.n_jobs, - random_state=random_state + random_state=random_state, ) return estimator @@ -170,13 +169,13 @@ def transform(self, X): for idx, (key, estimator) in enumerate(self.estimators_.items()): if self.verbose > 1: msg = "{} - Evaluating estimator = {:<5} in layer = {}" - key = key.split('-')[-1] + "_" + str(key.split('-')[-2]) + key = key.split("-")[-1] + "_" + str(key.split("-")[-2]) print(msg.format(_utils.ctime(), key, self.layer_idx)) if self.partial_mode: # Load the estimator from the buffer estimator = self.buffer.load_estimator(estimator) - left, right = self.n_classes*idx, self.n_classes*(idx+1) + left, right = self.n_classes * idx, self.n_classes * (idx + 1) X_aug[:, left:right] += estimator.transform(X) return X_aug @@ -188,13 +187,13 @@ def predict_full(self, X): for idx, (key, estimator) in enumerate(self.estimators_.items()): if self.verbose > 1: msg = "{} - Evaluating estimator = {:<5} in layer = {}" - key = key.split('-')[-1] + "_" + str(key.split('-')[-2]) + key = key.split("-")[-1] + "_" + str(key.split("-")[-2]) print(msg.format(_utils.ctime(), key, self.layer_idx)) if self.partial_mode: # Load the estimator from the buffer estimator = self.buffer.load_estimator(estimator) - left, right = self.n_classes*idx, self.n_classes*(idx+1) + left, right = self.n_classes * idx, self.n_classes * (idx + 1) pred[:, left:right] += estimator.predict(X) return pred diff --git a/deepforest/_utils.py b/deepforest/_utils.py index 5fa7d21..c985edd 100644 --- a/deepforest/_utils.py +++ b/deepforest/_utils.py @@ -36,7 +36,7 @@ def init_array(X, n_aug_features): n_samples, 
n_features = X.shape n_dims = n_features + n_aug_features X_middle = np.zeros((n_samples, n_dims), dtype=np.uint8) - X_middle[:, : n_features] += X + X_middle[:, :n_features] += X return X_middle @@ -59,5 +59,5 @@ def merge_array(X_middle, X_aug, n_features): def ctime(): """A formatter on current time used for printing running status.""" - ctime = '[' + datetime.utcnow().strftime('%Y-%m-%d %H:%M:%S.%f')[:-3] + ']' + ctime = "[" + datetime.utcnow().strftime("%Y-%m-%d %H:%M:%S.%f")[:-3] + "]" return ctime diff --git a/deepforest/cascade.py b/deepforest/cascade.py index 51113fa..2ea19fd 100644 --- a/deepforest/cascade.py +++ b/deepforest/cascade.py @@ -39,6 +39,7 @@ def _build_predictor( # Random Forest if predictor_name == "forest": from .forest import RandomForestClassifier + predictor = RandomForestClassifier( **_get_predictor_kwargs( predictor_kwargs, @@ -182,6 +183,7 @@ def _build_predictor( def deepforest_model_doc(header): """Decorator on obtaining documentation for deep forest models.""" + def adddoc(cls): doc = [header + "\n\n"] doc.extend([__model_doc]) @@ -193,7 +195,6 @@ def adddoc(cls): class BaseCascadeForest(BaseEstimator, metaclass=ABCMeta): - def __init__( self, n_bins=255, @@ -212,7 +213,7 @@ def __init__( partial_mode=False, n_jobs=None, random_state=None, - verbose=1 + verbose=1, ): self.n_bins = n_bins self.bin_subsample = bin_subsample @@ -272,8 +273,10 @@ def _set_layer(self, layer_idx, layer): Register a layer into the internal container with the given index.""" layer_key = "layer_{}".format(layer_idx) if layer_key in self.layers_: - msg = ("Layer with the key {} already exists in the internal" - " container.") + msg = ( + "Layer with the key {} already exists in the internal" + " container." + ) raise RuntimeError(msg.format(layer_key)) self.layers_.update({layer_key: layer}) @@ -296,8 +299,10 @@ def _set_binner(self, binner_idx, binner): Register a binner into the internal container with the given index.""" binner_key = "binner_{}".format(binner_idx) if binner_key in self.binners_: - msg = ("Binner with the key {} already exists in the internal" - " container.") + msg = ( + "Binner with the key {} already exists in the internal" + " container." + ) raise RuntimeError(msg.format(binner_key)) self.binners_.update({binner_key: binner}) @@ -319,8 +324,10 @@ def _set_n_trees(self, layer_idx): n_trees = 100 * (layer_idx + 1) return n_trees if n_trees <= 500 else 500 else: - msg = ("Invalid value for n_trees. Allowed values are integers or" - " 'auto'.") + msg = ( + "Invalid value for n_trees. Allowed values are integers or" + " 'auto'." 
+ ) raise ValueError(msg) def _check_input(self, X, y=None): @@ -367,8 +374,10 @@ def _bin_data(self, binner, X, is_training_data=True): binning_time = toc - tic if self.verbose > 1: - msg = ("{} Binning {} data: {:.3f} MB => {:.3f} MB |" - " Elapsed = {:.3f} s") + msg = ( + "{} Binning {} data: {:.3f} MB => {:.3f} MB |" + " Elapsed = {:.3f} s" + ) print( msg.format( _utils.ctime(), @@ -461,7 +470,7 @@ def fit(self, X, y, sample_weight=None): n_bins=self.n_bins, bin_subsample=self.bin_subsample, bin_type=self.bin_type, - random_state=self.random_state + random_state=self.random_state, ) # Bin the training data @@ -487,7 +496,7 @@ def fit(self, X, y, sample_weight=None): self.buffer_, self.n_jobs, self.random_state, - self.verbose + self.verbose, ) if self.verbose > 0: @@ -508,7 +517,7 @@ def fit(self, X, y, sample_weight=None): _utils.ctime(), 0, self._repr_performance(pivot), - training_time + training_time, ) ) @@ -535,7 +544,7 @@ def fit(self, X, y, sample_weight=None): n_bins=self.n_bins, bin_subsample=self.bin_subsample, bin_type=self.bin_type, - random_state=self.random_state + random_state=self.random_state, ) X_binned_aug_train_ = self._bin_data( @@ -543,7 +552,8 @@ def fit(self, X, y, sample_weight=None): ) X_middle_train_ = _utils.merge_array( - X_middle_train_, X_binned_aug_train_, self.n_features_) + X_middle_train_, X_binned_aug_train_, self.n_features_ + ) # Build a cascade layer layer_idx = self.n_layers_ @@ -558,7 +568,7 @@ def fit(self, X, y, sample_weight=None): self.buffer_, self.n_jobs, self.random_state, - self.verbose + self.verbose, ) X_middle_train_ = self.buffer_.cache_data( @@ -571,9 +581,7 @@ def fit(self, X, y, sample_weight=None): tic = time.time() X_aug_train_ = layer_.fit_transform( - X_middle_train_, - y, - sample_weight + X_middle_train_, y, sample_weight ) toc = time.time() training_time = toc - tic @@ -587,7 +595,7 @@ def fit(self, X, y, sample_weight=None): _utils.ctime(), layer_idx, self._repr_performance(new_pivot), - training_time + training_time, ) ) @@ -617,9 +625,7 @@ def fit(self, X, y, sample_weight=None): msg = "{} Early stopping counter: {} out of {}" print( msg.format( - _utils.ctime(), - n_counter, - self.n_tolerant_rounds + _utils.ctime(), n_counter, self.n_tolerant_rounds ) ) @@ -659,7 +665,7 @@ def fit(self, X, y, sample_weight=None): n_bins=self.n_bins, bin_subsample=self.bin_subsample, bin_type=self.bin_type, - random_state=self.random_state + random_state=self.random_state, ) X_binned_aug_train_ = self._bin_data( @@ -667,7 +673,8 @@ def fit(self, X, y, sample_weight=None): ) X_middle_train_ = _utils.merge_array( - X_middle_train_, X_binned_aug_train_, self.n_features_) + X_middle_train_, X_binned_aug_train_, self.n_features_ + ) if self.verbose > 0: msg = "{} Fitting the concatenated predictor: {}" @@ -675,9 +682,7 @@ def fit(self, X, y, sample_weight=None): tic = time.time() self.predictor_.fit( - X_middle_train_, - y, - sample_weight=sample_weight + X_middle_train_, y, sample_weight=sample_weight ) toc = time.time() @@ -767,8 +772,10 @@ def load(self, dirname): # Some checks after loading if len(self.layers_) != self.n_layers_: - msg = ("The size of the loaded dictionary of layers {} does not" - " match n_layers_ {}.") + msg = ( + "The size of the loaded dictionary of layers {} does not" + " match n_layers_ {}." 
+ ) raise RuntimeError(msg.format(len(self.layers_), self.n_layers_)) self.is_fitted_ = True @@ -785,25 +792,26 @@ def clean(self): """Implementation of the deep forest for classification.""" ) class CascadeForestClassifier(BaseCascadeForest, ClassifierMixin): - - def __init__(self, - n_bins=255, - bin_subsample=2e5, - bin_type="percentile", - max_layers=20, - n_estimators=2, - n_trees=100, - max_depth=None, - min_samples_leaf=1, - use_predictor=False, - predictor="forest", - predictor_kwargs={}, - n_tolerant_rounds=2, - delta=1e-5, - partial_mode=False, - n_jobs=None, - random_state=None, - verbose=1): + def __init__( + self, + n_bins=255, + bin_subsample=2e5, + bin_type="percentile", + max_layers=20, + n_estimators=2, + n_trees=100, + max_depth=None, + min_samples_leaf=1, + use_predictor=False, + predictor="forest", + predictor_kwargs={}, + n_tolerant_rounds=2, + delta=1e-5, + partial_mode=False, + n_jobs=None, + random_state=None, + verbose=1, + ): super().__init__( n_bins=n_bins, bin_subsample=bin_subsample, @@ -821,7 +829,8 @@ def __init__(self, partial_mode=partial_mode, n_jobs=n_jobs, random_state=random_state, - verbose=verbose) + verbose=verbose, + ) def _repr_performance(self, pivot): msg = "Val Acc = {:.3f} %" @@ -868,7 +877,8 @@ def predict_proba(self, X): binner_, X_aug_test_, is_training_data=False ) X_middle_test_ = _utils.merge_array( - X_middle_test_, X_aug_test_, self.n_features_) + X_middle_test_, X_aug_test_, self.n_features_ + ) X_aug_test_ = layer.transform(X_middle_test_) else: binner_ = self._get_binner(layer_idx) @@ -876,7 +886,8 @@ def predict_proba(self, X): binner_, X_aug_test_, is_training_data=False ) X_middle_test_ = _utils.merge_array( - X_middle_test_, X_aug_test_, self.n_features_) + X_middle_test_, X_aug_test_, self.n_features_ + ) # Skip calling the `transform` if not using the predictor if self.use_predictor: @@ -889,9 +900,11 @@ def predict_proba(self, X): binner_ = self._get_binner(self.n_layers_) X_aug_test_ = self._bin_data( - binner_, X_aug_test_, is_training_data=False) + binner_, X_aug_test_, is_training_data=False + ) X_middle_test_ = _utils.merge_array( - X_middle_test_, X_aug_test_, self.n_features_) + X_middle_test_, X_aug_test_, self.n_features_ + ) predictor = self.buffer_.load_predictor(self.predictor_) proba = predictor.predict_proba(X_middle_test_) diff --git a/deepforest/forest.py b/deepforest/forest.py index c501706..030c5d7 100644 --- a/deepforest/forest.py +++ b/deepforest/forest.py @@ -6,8 +6,7 @@ """ -__all__ = ["RandomForestClassifier", - "ExtraTreesClassifier"] +__all__ = ["RandomForestClassifier", "ExtraTreesClassifier"] import numbers from warnings import warn @@ -24,8 +23,7 @@ from sklearn.base import BaseEstimator from sklearn.base import MetaEstimatorMixin from sklearn.base import ClassifierMixin, MultiOutputMixin -from sklearn.utils import (check_random_state, - compute_sample_weight) +from sklearn.utils import check_random_state, compute_sample_weight from sklearn.exceptions import DataConversionWarning from sklearn.utils.fixes import _joblib_parallel_args from sklearn.utils.validation import check_is_fitted, _check_sample_weight @@ -92,28 +90,31 @@ def _generate_sample_mask(random_state, n_samples, n_samples_bootstrap): def _parallel_build_trees( - tree, - X, - y, - n_samples_bootstrap, - sample_weight, - out, - lock, + tree, + X, + y, + n_samples_bootstrap, + sample_weight, + out, + lock, ): """ Private function used to fit a single tree in parallel.""" n_samples = X.shape[0] - sample_mask = 
_generate_sample_mask(tree.random_state, n_samples, - n_samples_bootstrap) + sample_mask = _generate_sample_mask( + tree.random_state, n_samples, n_samples_bootstrap + ) # Fit the tree on the bootstrapped samples if sample_weight is not None: sample_weight = sample_weight[sample_mask] - feature, threshold, children, value = tree.fit(X[sample_mask], - y[sample_mask], - sample_weight=sample_weight, - check_input=False) + feature, threshold, children, value = tree.fit( + X[sample_mask], + y[sample_mask], + sample_weight=sample_weight, + check_input=False, + ) if not children.flags["C_CONTIGUOUS"]: children = np.ascontiguousarray(children) @@ -125,11 +126,9 @@ def _parallel_build_trees( value /= value.sum(axis=1)[:, np.newaxis] # Set the OOB predictions - oob_prediction = _C_FOREST.predict(X[~sample_mask, :], - feature, - threshold, - children, - value) + oob_prediction = _C_FOREST.predict( + X[~sample_mask, :], feature, threshold, children, value + ) with lock: out[~sample_mask, :] += oob_prediction @@ -169,7 +168,7 @@ def _set_random_states(estimator, random_state=None): random_state = check_random_state(random_state) to_set = {} for key in sorted(estimator.get_params(deep=True)): - if key == 'random_state' or key.endswith('__random_state'): + if key == "random_state" or key.endswith("__random_state"): to_set[key] = random_state.randint(np.iinfo(np.int32).max) if to_set: @@ -183,9 +182,10 @@ def _partition_estimators(n_estimators, n_jobs): n_jobs = min(effective_n_jobs(n_jobs), n_estimators) # Partition estimators between jobs - n_estimators_per_job = np.full(n_jobs, n_estimators // n_jobs, - dtype=np.int) - n_estimators_per_job[:n_estimators % n_jobs] += 1 + n_estimators_per_job = np.full( + n_jobs, n_estimators // n_jobs, dtype=np.int + ) + n_estimators_per_job[: n_estimators % n_jobs] += 1 starts = np.cumsum(n_estimators_per_job) return n_jobs, n_estimators_per_job.tolist(), [0] + starts.tolist() @@ -235,8 +235,9 @@ class BaseEnsemble(MetaEstimatorMixin, BaseEstimator, metaclass=ABCMeta): _required_parameters: List[str] = [] @abstractmethod - def __init__(self, base_estimator, *, n_estimators=10, - estimator_params=tuple()): + def __init__( + self, base_estimator, *, n_estimators=10, estimator_params=tuple() + ): # Set parameters self.base_estimator = base_estimator self.n_estimators = n_estimators @@ -252,12 +253,16 @@ def _validate_estimator(self, default=None): Sets the base_estimator_` attributes. """ if not isinstance(self.n_estimators, numbers.Integral): - raise ValueError("n_estimators must be an integer, " - "got {0}.".format(type(self.n_estimators))) + raise ValueError( + "n_estimators must be an integer, " + "got {0}.".format(type(self.n_estimators)) + ) if self.n_estimators <= 0: - raise ValueError("n_estimators must be greater than zero, " - "got {0}.".format(self.n_estimators)) + raise ValueError( + "n_estimators must be greater than zero, " + "got {0}.".format(self.n_estimators) + ) if self.base_estimator is not None: self.base_estimator_ = self.base_estimator @@ -274,8 +279,9 @@ def _make_estimator(self, append=True, random_state=None): sub-estimators. """ estimator = clone(self.base_estimator_) - estimator.set_params(**{p: getattr(self, p) - for p in self.estimator_params}) + estimator.set_params( + **{p: getattr(self, p) for p in self.estimator_params} + ) # Pass the inferred class information to avoid redudant finding. 
estimator.classes_ = self.classes_ @@ -311,19 +317,23 @@ class BaseForest(MultiOutputMixin, BaseEnsemble, metaclass=ABCMeta): """ @abstractmethod - def __init__(self, - base_estimator, - n_estimators=100, *, - estimator_params=tuple(), - n_jobs=None, - random_state=None, - verbose=0, - class_weight=None, - max_samples=None): + def __init__( + self, + base_estimator, + n_estimators=100, + *, + estimator_params=tuple(), + n_jobs=None, + random_state=None, + verbose=0, + class_weight=None, + max_samples=None + ): super().__init__( base_estimator=base_estimator, n_estimators=n_estimators, - estimator_params=estimator_params) + estimator_params=estimator_params, + ) self.n_jobs = n_jobs self.random_state = random_state @@ -382,10 +392,13 @@ def fit(self, X, y, sample_weight=None): y = np.atleast_1d(y) if y.ndim == 2 and y.shape[1] == 1: - warn("A column-vector y was passed when a 1d array was" - " expected. Please change the shape of y to " - "(n_samples,), for example using ravel().", - DataConversionWarning, stacklevel=2) + warn( + "A column-vector y was passed when a 1d array was" + " expected. Please change the shape of y to " + "(n_samples,), for example using ravel().", + DataConversionWarning, + stacklevel=2, + ) if y.ndim == 1: # reshape is necessary to preserve the data contiguity against vs @@ -400,8 +413,7 @@ def fit(self, X, y, sample_weight=None): # Get bootstrap sample size n_samples_bootstrap = _get_n_samples_bootstrap( - n_samples=X.shape[0], - max_samples=self.max_samples + n_samples=X.shape[0], max_samples=self.max_samples ) # Check parameters @@ -409,18 +421,22 @@ def fit(self, X, y, sample_weight=None): random_state = check_random_state(self.random_state) n_jobs, _, _ = _partition_estimators(self.n_estimators, self.n_jobs) - trees = [self._make_estimator(append=False, - random_state=random_state) - for i in range(self.n_estimators)] + trees = [ + self._make_estimator(append=False, random_state=random_state) + for i in range(self.n_estimators) + ] # Pre-allocate OOB estimations - oob_decision_function = np.zeros((n_samples, - self.classes_[0].shape[0])) + oob_decision_function = np.zeros( + (n_samples, self.classes_[0].shape[0]) + ) lock = threading.Lock() - rets = Parallel(n_jobs=n_jobs, verbose=self.verbose, - **_joblib_parallel_args(prefer='threads', - require="sharedmem"))( + rets = Parallel( + n_jobs=n_jobs, + verbose=self.verbose, + **_joblib_parallel_args(prefer="threads", require="sharedmem") + )( delayed(_parallel_build_trees)( t, X, @@ -428,8 +444,10 @@ def fit(self, X, y, sample_weight=None): n_samples_bootstrap, sample_weight, oob_decision_function, - lock) - for i, t in enumerate(trees)) + lock, + ) + for i, t in enumerate(trees) + ) # Collect newly grown trees for feature, threshold, children, value in rets: @@ -443,12 +461,16 @@ def fit(self, X, y, sample_weight=None): # Check the OOB predictions if (oob_decision_function.sum(axis=1) == 0).any(): - warn("Some inputs do not have OOB predictions. " - "This probably means too few trees were used " - "to compute any reliable oob predictions.") + warn( + "Some inputs do not have OOB predictions. " + "This probably means too few trees were used " + "to compute any reliable oob predictions." 
+ ) - prediction = (oob_decision_function / - oob_decision_function.sum(axis=1)[:, np.newaxis]) + prediction = ( + oob_decision_function + / oob_decision_function.sum(axis=1)[:, np.newaxis] + ) self.oob_decision_function_ = prediction @@ -473,15 +495,18 @@ class ForestClassifier(ClassifierMixin, BaseForest, metaclass=ABCMeta): """ @abstractmethod - def __init__(self, - base_estimator, - n_estimators=100, *, - estimator_params=tuple(), - n_jobs=None, - random_state=None, - verbose=0, - class_weight=None, - max_samples=None): + def __init__( + self, + base_estimator, + n_estimators=100, + *, + estimator_params=tuple(), + n_jobs=None, + random_state=None, + verbose=0, + class_weight=None, + max_samples=None + ): super().__init__( base_estimator, n_estimators=n_estimators, @@ -490,7 +515,8 @@ def __init__(self, random_state=random_state, verbose=verbose, class_weight=class_weight, - max_samples=max_samples) + max_samples=max_samples, + ) def _validate_y_class_weight(self, y): @@ -505,29 +531,31 @@ def _validate_y_class_weight(self, y): y_store_unique_indices = np.zeros(y.shape, dtype=np.int) for k in range(self.n_outputs_): - classes_k, y_store_unique_indices[:, k] = \ - np.unique(y[:, k], return_inverse=True) + classes_k, y_store_unique_indices[:, k] = np.unique( + y[:, k], return_inverse=True + ) self.classes_.append(classes_k) self.n_classes_.append(classes_k.shape[0]) y = y_store_unique_indices if self.class_weight is not None: - valid_presets = ('balanced', 'balanced_subsample') + valid_presets = ("balanced", "balanced_subsample") if isinstance(self.class_weight, str): if self.class_weight not in valid_presets: - raise ValueError('Valid presets for class_weight include ' - '"balanced" and "balanced_subsample".' - 'Given "%s".' - % self.class_weight) + raise ValueError( + "Valid presets for class_weight include " + '"balanced" and "balanced_subsample".' + 'Given "%s".' 
% self.class_weight + ) - if (self.class_weight != 'balanced_subsample' or - not self.bootstrap): + if self.class_weight != "balanced_subsample" or not self.bootstrap: if self.class_weight == "balanced_subsample": class_weight = "balanced" else: class_weight = self.class_weight - expanded_class_weight = compute_sample_weight(class_weight, - y_original) + expanded_class_weight = compute_sample_weight( + class_weight, y_original + ) return y, expanded_class_weight @@ -542,11 +570,16 @@ def predict_proba(self, X): n_jobs, _, _ = _partition_estimators(self.n_estimators, self.n_jobs) # Avoid storing the output of every estimator by summing them here - all_proba = [np.zeros((X.shape[0], j), dtype=np.float64) - for j in np.atleast_1d(self.n_classes_)] + all_proba = [ + np.zeros((X.shape[0], j), dtype=np.float64) + for j in np.atleast_1d(self.n_classes_) + ] lock = threading.Lock() - Parallel(n_jobs=n_jobs, verbose=self.verbose, - **_joblib_parallel_args(require="sharedmem"))( + Parallel( + n_jobs=n_jobs, + verbose=self.verbose, + **_joblib_parallel_args(require="sharedmem") + )( delayed(_accumulate_prediction)( self.features[i], self.thresholds[i], @@ -554,8 +587,10 @@ def predict_proba(self, X): self.values[i], X, all_proba, - lock) - for i in range(self.n_estimators)) + lock, + ) + for i in range(self.n_estimators) + ) for proba in all_proba: proba /= len(self.features) @@ -567,35 +602,45 @@ def predict_proba(self, X): class RandomForestClassifier(ForestClassifier): - @_deprecate_positional_args - def __init__(self, - n_estimators=100, *, - criterion="gini", - max_depth=None, - min_samples_split=2, - min_samples_leaf=1, - min_weight_fraction_leaf=0., - max_features="sqrt", - min_impurity_decrease=0., - min_impurity_split=None, - n_jobs=None, - random_state=None, - verbose=0, - class_weight=None, - max_samples=None): + def __init__( + self, + n_estimators=100, + *, + criterion="gini", + max_depth=None, + min_samples_split=2, + min_samples_leaf=1, + min_weight_fraction_leaf=0.0, + max_features="sqrt", + min_impurity_decrease=0.0, + min_impurity_split=None, + n_jobs=None, + random_state=None, + verbose=0, + class_weight=None, + max_samples=None + ): super().__init__( base_estimator=DecisionTreeClassifier(), n_estimators=n_estimators, - estimator_params=("criterion", "max_depth", "min_samples_split", - "min_samples_leaf", "min_weight_fraction_leaf", - "max_features", "min_impurity_decrease", - "min_impurity_split", "random_state"), + estimator_params=( + "criterion", + "max_depth", + "min_samples_split", + "min_samples_leaf", + "min_weight_fraction_leaf", + "max_features", + "min_impurity_decrease", + "min_impurity_split", + "random_state", + ), n_jobs=n_jobs, random_state=random_state, verbose=verbose, class_weight=class_weight, - max_samples=max_samples) + max_samples=max_samples, + ) self.criterion = criterion self.max_depth = max_depth @@ -608,35 +653,45 @@ def __init__(self, class ExtraTreesClassifier(ForestClassifier): - @_deprecate_positional_args - def __init__(self, - n_estimators=100, *, - criterion="gini", - max_depth=None, - min_samples_split=2, - min_samples_leaf=1, - min_weight_fraction_leaf=0., - max_features="sqrt", - min_impurity_decrease=0., - min_impurity_split=None, - n_jobs=None, - random_state=None, - verbose=0, - class_weight=None, - max_samples=None): + def __init__( + self, + n_estimators=100, + *, + criterion="gini", + max_depth=None, + min_samples_split=2, + min_samples_leaf=1, + min_weight_fraction_leaf=0.0, + max_features="sqrt", + min_impurity_decrease=0.0, + 
min_impurity_split=None, + n_jobs=None, + random_state=None, + verbose=0, + class_weight=None, + max_samples=None + ): super().__init__( base_estimator=ExtraTreeClassifier(), n_estimators=n_estimators, - estimator_params=("criterion", "max_depth", "min_samples_split", - "min_samples_leaf", "min_weight_fraction_leaf", - "max_features", "min_impurity_decrease", - "min_impurity_split", "random_state"), + estimator_params=( + "criterion", + "max_depth", + "min_samples_split", + "min_samples_leaf", + "min_weight_fraction_leaf", + "max_features", + "min_impurity_decrease", + "min_impurity_split", + "random_state", + ), n_jobs=n_jobs, random_state=random_state, verbose=verbose, class_weight=class_weight, - max_samples=max_samples) + max_samples=max_samples, + ) self.criterion = criterion self.max_depth = max_depth diff --git a/deepforest/setup.py b/deepforest/setup.py index 6b516e3..86fc494 100644 --- a/deepforest/setup.py +++ b/deepforest/setup.py @@ -16,20 +16,26 @@ def configuration(parent_package="", top_path=None): config = Configuration("deepforest", parent_package, top_path) config.add_subpackage("tree") - config.add_extension("_forest", - sources=["_forest.pyx"], - include_dirs=[numpy.get_include()], - libraries=libraries, - extra_compile_args=["-O3"]) - - config.add_extension("_cutils", - sources=["_cutils.pyx"], - include_dirs=[numpy.get_include()], - libraries=libraries, - extra_compile_args=["-O3"]) - - msg = ("Please install cython with a version >= {} in order to build a" - " deepforest development version.") + config.add_extension( + "_forest", + sources=["_forest.pyx"], + include_dirs=[numpy.get_include()], + libraries=libraries, + extra_compile_args=["-O3"], + ) + + config.add_extension( + "_cutils", + sources=["_cutils.pyx"], + include_dirs=[numpy.get_include()], + libraries=libraries, + extra_compile_args=["-O3"], + ) + + msg = ( + "Please install cython with a version >= {} in order to build a" + " deepforest development version." 
+ ) msg = msg.format(CYTHON_MIN_VERSION) try: @@ -50,4 +56,5 @@ def configuration(parent_package="", top_path=None): if __name__ == "__main__": from numpy.distutils.core import setup + setup(**configuration().todict()) diff --git a/deepforest/tree/__init__.py b/deepforest/tree/__init__.py index 3240bf8..3fdfb3c 100644 --- a/deepforest/tree/__init__.py +++ b/deepforest/tree/__init__.py @@ -3,6 +3,4 @@ from .tree import ExtraTreeClassifier -__all__ = ["BaseDecisionTree", - "DecisionTreeClassifier", - "ExtraTreeClassifier"] +__all__ = ["BaseDecisionTree", "DecisionTreeClassifier", "ExtraTreeClassifier"] diff --git a/deepforest/tree/setup.py b/deepforest/tree/setup.py index 2465b0c..12f89ae 100644 --- a/deepforest/tree/setup.py +++ b/deepforest/tree/setup.py @@ -6,32 +6,41 @@ def configuration(parent_package="", top_path=None): config = Configuration("tree", parent_package, top_path) libraries = [] - if os.name == 'posix': - libraries.append('m') - config.add_extension("_tree", - sources=["_tree.pyx"], - include_dirs=[numpy.get_include()], - libraries=libraries, - extra_compile_args=["-O3"]) - config.add_extension("_splitter", - sources=["_splitter.pyx"], - include_dirs=[numpy.get_include()], - libraries=libraries, - extra_compile_args=["-O3"]) - config.add_extension("_criterion", - sources=["_criterion.pyx"], - include_dirs=[numpy.get_include()], - libraries=libraries, - extra_compile_args=["-O3"]) - config.add_extension("_utils", - sources=["_utils.pyx"], - include_dirs=[numpy.get_include()], - libraries=libraries, - extra_compile_args=["-O3"]) + if os.name == "posix": + libraries.append("m") + config.add_extension( + "_tree", + sources=["_tree.pyx"], + include_dirs=[numpy.get_include()], + libraries=libraries, + extra_compile_args=["-O3"], + ) + config.add_extension( + "_splitter", + sources=["_splitter.pyx"], + include_dirs=[numpy.get_include()], + libraries=libraries, + extra_compile_args=["-O3"], + ) + config.add_extension( + "_criterion", + sources=["_criterion.pyx"], + include_dirs=[numpy.get_include()], + libraries=libraries, + extra_compile_args=["-O3"], + ) + config.add_extension( + "_utils", + sources=["_utils.pyx"], + include_dirs=[numpy.get_include()], + libraries=libraries, + extra_compile_args=["-O3"], + ) return config if __name__ == "__main__": from numpy.distutils.core import setup + setup(**configuration().todict()) diff --git a/deepforest/tree/tree.py b/deepforest/tree/tree.py index c54ff3e..c73792b 100644 --- a/deepforest/tree/tree.py +++ b/deepforest/tree/tree.py @@ -6,8 +6,7 @@ """ -__all__ = ["DecisionTreeClassifier", - "ExtraTreeClassifier"] +__all__ = ["DecisionTreeClassifier", "ExtraTreeClassifier"] import numbers import warnings @@ -45,8 +44,10 @@ CRITERIA_CLF = {"gini": _criterion.Gini, "entropy": _criterion.Entropy} -DENSE_SPLITTERS = {"best": _splitter.BestSplitter, - "random": _splitter.RandomSplitter} +DENSE_SPLITTERS = { + "best": _splitter.BestSplitter, + "random": _splitter.RandomSplitter, +} # ============================================================================= # Base decision tree @@ -59,21 +60,25 @@ class BaseDecisionTree(MultiOutputMixin, BaseEstimator, metaclass=ABCMeta): Warning: This class should not be used directly. Use derived classes instead. 
""" + @abstractmethod @_deprecate_positional_args - def __init__(self, *, - criterion, - splitter, - max_depth, - min_samples_split, - min_samples_leaf, - min_weight_fraction_leaf, - max_features, - random_state, - min_impurity_decrease, - min_impurity_split, - class_weight=None, - presort='deprecated'): + def __init__( + self, + *, + criterion, + splitter, + max_depth, + min_samples_split, + min_samples_leaf, + min_weight_fraction_leaf, + max_features, + random_state, + min_impurity_decrease, + min_impurity_split, + class_weight=None, + presort="deprecated" + ): self.criterion = criterion self.splitter = splitter self.max_depth = max_depth @@ -125,13 +130,14 @@ def n_internals(self): check_is_fitted(self) return self.tree_.n_internals - def fit(self, X, y, sample_weight=None, check_input=True, - X_idx_sorted=None): + def fit( + self, X, y, sample_weight=None, check_input=True, X_idx_sorted=None + ): random_state = check_random_state(self.random_state) if X.dtype != np.uint8: - msg = 'The dtype of `X` should be `np.uint8`, but got {} instead.' + msg = "The dtype of `X` should be `np.uint8`, but got {} instead." raise RuntimeError(msg.format(X.dtype)) if check_input: @@ -140,9 +146,9 @@ def fit(self, X, y, sample_weight=None, check_input=True, # csr. check_X_params = dict(dtype=DTYPE, accept_sparse="csc") check_y_params = dict(ensure_2d=False, dtype=None) - X, y = self._validate_data(X, y, - validate_separately=(check_X_params, - check_y_params)) + X, y = self._validate_data( + X, y, validate_separately=(check_X_params, check_y_params) + ) # Determine output settings n_samples, self.n_features_ = X.shape @@ -170,15 +176,17 @@ def fit(self, X, y, sample_weight=None, check_input=True, y_encoded = np.zeros(y.shape, dtype=np.int) for k in range(self.n_outputs_): - classes_k, y_encoded[:, k] = np.unique(y[:, k], - return_inverse=True) + classes_k, y_encoded[:, k] = np.unique( + y[:, k], return_inverse=True + ) self.classes_.append(classes_k) self.n_classes_.append(classes_k.shape[0]) y = y_encoded if self.class_weight is not None: expanded_class_weight = compute_sample_weight( - self.class_weight, y_original) + self.class_weight, y_original + ) self.n_classes_ = np.array(self.n_classes_, dtype=np.int32) @@ -186,35 +194,42 @@ def fit(self, X, y, sample_weight=None, check_input=True, y = np.ascontiguousarray(y, dtype=DOUBLE) # Check parameters - max_depth = (np.iinfo(np.int32).max if self.max_depth is None - else self.max_depth) + max_depth = ( + np.iinfo(np.int32).max + if self.max_depth is None + else self.max_depth + ) if isinstance(self.min_samples_leaf, numbers.Integral): if not 1 <= self.min_samples_leaf: - raise ValueError("min_samples_leaf must be at least 1 " - "or in (0, 0.5], got %s" - % self.min_samples_leaf) + raise ValueError( + "min_samples_leaf must be at least 1 " + "or in (0, 0.5], got %s" % self.min_samples_leaf + ) min_samples_leaf = self.min_samples_leaf else: # float - if not 0. 
< self.min_samples_leaf <= 0.5: - raise ValueError("min_samples_leaf must be at least 1 " - "or in (0, 0.5], got %s" - % self.min_samples_leaf) + if not 0.0 < self.min_samples_leaf <= 0.5: + raise ValueError( + "min_samples_leaf must be at least 1 " + "or in (0, 0.5], got %s" % self.min_samples_leaf + ) min_samples_leaf = int(ceil(self.min_samples_leaf * n_samples)) if isinstance(self.min_samples_split, numbers.Integral): if not 2 <= self.min_samples_split: - raise ValueError("min_samples_split must be an integer " - "greater than 1 or a float in (0.0, 1.0]; " - "got the integer %s" - % self.min_samples_split) + raise ValueError( + "min_samples_split must be an integer " + "greater than 1 or a float in (0.0, 1.0]; " + "got the integer %s" % self.min_samples_split + ) min_samples_split = self.min_samples_split else: # float - if not 0. < self.min_samples_split <= 1.: - raise ValueError("min_samples_split must be an integer " - "greater than 1 or a float in (0.0, 1.0]; " - "got the float %s" - % self.min_samples_split) + if not 0.0 < self.min_samples_split <= 1.0: + raise ValueError( + "min_samples_split must be an integer " + "greater than 1 or a float in (0.0, 1.0]; " + "got the float %s" % self.min_samples_split + ) min_samples_split = int(ceil(self.min_samples_split * n_samples)) min_samples_split = max(2, min_samples_split) @@ -226,25 +241,30 @@ def fit(self, X, y, sample_weight=None, check_input=True, elif self.max_features == "log2": max_features = max(1, int(np.log2(self.n_features_))) else: - raise ValueError("Invalid value for max_features. " - "Allowed string values are 'auto', " - "'sqrt' or 'log2'.") + raise ValueError( + "Invalid value for max_features. " + "Allowed string values are 'auto', " + "'sqrt' or 'log2'." + ) elif self.max_features is None: max_features = self.n_features_ elif isinstance(self.max_features, numbers.Integral): max_features = self.max_features else: # float if self.max_features > 0.0: - max_features = max(1, - int(self.max_features * self.n_features_)) + max_features = max( + 1, int(self.max_features * self.n_features_) + ) else: max_features = 0 self.max_features_ = max_features if len(y) != n_samples: - raise ValueError("Number of labels=%d does not match " - "number of samples=%d" % (len(y), n_samples)) + raise ValueError( + "Number of labels=%d does not match " + "number of samples=%d" % (len(y), n_samples) + ) if not 0 <= self.min_weight_fraction_leaf <= 0.5: raise ValueError("min_weight_fraction_leaf must in [0, 0.5]") if max_depth <= 0: @@ -263,61 +283,73 @@ def fit(self, X, y, sample_weight=None, check_input=True, # Set min_weight_leaf from min_weight_fraction_leaf if sample_weight is None: - min_weight_leaf = (self.min_weight_fraction_leaf * - n_samples) + min_weight_leaf = self.min_weight_fraction_leaf * n_samples else: - min_weight_leaf = (self.min_weight_fraction_leaf * - np.sum(sample_weight)) + min_weight_leaf = self.min_weight_fraction_leaf * np.sum( + sample_weight + ) min_impurity_split = self.min_impurity_split if min_impurity_split is not None: - warnings.warn("The min_impurity_split parameter is deprecated. " - "Its default value has changed from 1e-7 to 0 in " - "version 0.23, and it will be removed in 0.25. " - "Use the min_impurity_decrease parameter instead.", - FutureWarning) - - if min_impurity_split < 0.: - raise ValueError("min_impurity_split must be greater than " - "or equal to 0") + warnings.warn( + "The min_impurity_split parameter is deprecated. 
" + "Its default value has changed from 1e-7 to 0 in " + "version 0.23, and it will be removed in 0.25. " + "Use the min_impurity_decrease parameter instead.", + FutureWarning, + ) + + if min_impurity_split < 0.0: + raise ValueError( + "min_impurity_split must be greater than " "or equal to 0" + ) else: min_impurity_split = 0 - if self.min_impurity_decrease < 0.: - raise ValueError("min_impurity_decrease must be greater than " - "or equal to 0") + if self.min_impurity_decrease < 0.0: + raise ValueError( + "min_impurity_decrease must be greater than " "or equal to 0" + ) - if self.presort != 'deprecated': - warnings.warn("The parameter 'presort' is deprecated and has no " - "effect. It will be removed in v0.24. You can " - "suppress this warning by not passing any value " - "to the 'presort' parameter.", - FutureWarning) + if self.presort != "deprecated": + warnings.warn( + "The parameter 'presort' is deprecated and has no " + "effect. It will be removed in v0.24. You can " + "suppress this warning by not passing any value " + "to the 'presort' parameter.", + FutureWarning, + ) # Build tree criterion = self.criterion if not isinstance(criterion, Criterion): - criterion = CRITERIA_CLF[self.criterion](self.n_outputs_, - self.n_classes_) + criterion = CRITERIA_CLF[self.criterion]( + self.n_outputs_, self.n_classes_ + ) SPLITTERS = DENSE_SPLITTERS splitter = self.splitter if not isinstance(self.splitter, Splitter): - splitter = SPLITTERS[self.splitter](criterion, - self.max_features_, - min_samples_leaf, - min_weight_leaf, - random_state) + splitter = SPLITTERS[self.splitter]( + criterion, + self.max_features_, + min_samples_leaf, + min_weight_leaf, + random_state, + ) self.tree_ = Tree(self.n_features_, self.n_classes_, self.n_outputs_) - builder = DepthFirstTreeBuilder(splitter, min_samples_split, - min_samples_leaf, - min_weight_leaf, - max_depth, - self.min_impurity_decrease, - min_impurity_split) + builder = DepthFirstTreeBuilder( + splitter, + min_samples_split, + min_samples_leaf, + min_weight_leaf, + max_depth, + self.min_impurity_decrease, + min_impurity_split, + ) builder.build(self.tree_, X, y, sample_weight, X_idx_sorted) @@ -328,8 +360,9 @@ def fit(self, X, y, sample_weight=None, check_input=True, # Only return the essential data for using a tree for prediction feature = self.tree_.feature threshold = self.tree_.threshold - children = np.vstack((self.tree_.children_left, - self.tree_.children_right)).T + children = np.vstack( + (self.tree_.children_left, self.tree_.children_right) + ).T value = self.tree_.value return feature, threshold, children, value @@ -341,10 +374,11 @@ def _validate_X_predict(self, X, check_input): n_features = X.shape[1] if self.n_features_ != n_features: - raise ValueError("Number of features of the model must " - "match the input. Model n_features is %s and " - "input n_features is %s " - % (self.n_features_, n_features)) + raise ValueError( + "Number of features of the model must " + "match the input. 
Model n_features is %s and " + "input n_features is %s " % (self.n_features_, n_features) + ) return X @@ -379,21 +413,23 @@ def predict(self, X, check_input=True): class DecisionTreeClassifier(ClassifierMixin, BaseDecisionTree): - @_deprecate_positional_args - def __init__(self, *, - criterion="gini", - splitter="best", - max_depth=None, - min_samples_split=2, - min_samples_leaf=1, - min_weight_fraction_leaf=0., - max_features=None, - random_state=None, - min_impurity_decrease=0., - min_impurity_split=None, - class_weight=None, - presort='deprecated'): + def __init__( + self, + *, + criterion="gini", + splitter="best", + max_depth=None, + min_samples_split=2, + min_samples_leaf=1, + min_weight_fraction_leaf=0.0, + max_features=None, + random_state=None, + min_impurity_decrease=0.0, + min_impurity_split=None, + class_weight=None, + presort="deprecated" + ): super().__init__( criterion=criterion, @@ -407,16 +443,20 @@ def __init__(self, *, random_state=random_state, min_impurity_decrease=min_impurity_decrease, min_impurity_split=min_impurity_split, - presort=presort) + presort=presort, + ) - def fit(self, X, y, sample_weight=None, check_input=True, - X_idx_sorted=None): + def fit( + self, X, y, sample_weight=None, check_input=True, X_idx_sorted=None + ): return super().fit( - X, y, + X, + y, sample_weight=sample_weight, check_input=check_input, - X_idx_sorted=X_idx_sorted) + X_idx_sorted=X_idx_sorted, + ) def predict_proba(self, X, check_input=True): @@ -424,7 +464,7 @@ def predict_proba(self, X, check_input=True): X = self._validate_X_predict(X, check_input) proba = self.tree_.predict(X) - proba = proba[:, :self.n_classes_] + proba = proba[:, : self.n_classes_] normalizer = proba.sum(axis=1)[:, np.newaxis] normalizer[normalizer == 0.0] = 1.0 proba /= normalizer @@ -433,20 +473,22 @@ def predict_proba(self, X, check_input=True): class ExtraTreeClassifier(DecisionTreeClassifier): - @_deprecate_positional_args - def __init__(self, *, - criterion="gini", - splitter="random", - max_depth=None, - min_samples_split=2, - min_samples_leaf=1, - min_weight_fraction_leaf=0., - max_features="auto", - random_state=None, - min_impurity_decrease=0., - min_impurity_split=None, - class_weight=None): + def __init__( + self, + *, + criterion="gini", + splitter="random", + max_depth=None, + min_samples_split=2, + min_samples_leaf=1, + min_weight_fraction_leaf=0.0, + max_features="auto", + random_state=None, + min_impurity_decrease=0.0, + min_impurity_split=None, + class_weight=None + ): super().__init__( criterion=criterion, @@ -459,4 +501,5 @@ def __init__(self, *, class_weight=class_weight, min_impurity_decrease=min_impurity_decrease, min_impurity_split=min_impurity_split, - random_state=random_state) + random_state=random_state, + ) diff --git a/pyproject.toml b/pyproject.toml index 1e8ebdd..c206313 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -5,4 +5,21 @@ requires = [ "Cython>=0.28.5", "numpy>=1.13.3,<1.20.0", "scipy>=0.19.1" -] \ No newline at end of file +] +[tool.black] +line-length = 79 +include = '\.pyi?$' +exclude = ''' +/( + \.git + | \.hg + | \.mypy_cache + | \.tox + | \.venv + | _build + | buck-out + | build + | dist + | docs +)/ +''' diff --git a/requirements-dev.txt b/requirements-dev.txt new file mode 100644 index 0000000..e58b921 --- /dev/null +++ b/requirements-dev.txt @@ -0,0 +1,3 @@ +-r requirements.txt +pytest +pre-commit diff --git a/setup.py b/setup.py index da7fa0e..01b64f2 100644 --- a/setup.py +++ b/setup.py @@ -1,7 +1,5 @@ import os import sys -import numpy -import setuptools 
from setuptools import find_packages from numpy.distutils.core import setup @@ -38,36 +36,38 @@ def configuration(parent_package="", top_path=None): os.chdir(local_path) sys.path.insert(0, local_path) - setup(configuration=configuration, - name=DISTNAME, - maintainer=MAINTAINER, - maintainer_email=MAINTAINER_EMAIL, - packages=find_packages(), - include_package_data=True, - description=DESCRIPTION, - url=URL, - version=VERSION, - long_description=LONG_DESCRIPTION, - zip_safe=False, - classifiers=[ - "Intended Audience :: Science/Research", - "Intended Audience :: Developers", - "Programming Language :: C", - "Programming Language :: Python", - "Topic :: Software Development", - "Topic :: Scientific/Engineering", - "Operating System :: Microsoft :: Windows", - "Operating System :: Unix", - "Programming Language :: Python :: 3", - "Programming Language :: Python :: 3.6", - "Programming Language :: Python :: 3.7", - "Programming Language :: Python :: 3.8", - ], - python_requires=">=3.6", - install_requires=[ - "numpy>=1.13.3,<1.20.0", - "scipy>=0.19.1", - "joblib>=0.11", - "scikit-learn>=0.22", - ], - setup_requires=["cython"]) + setup( + configuration=configuration, + name=DISTNAME, + maintainer=MAINTAINER, + maintainer_email=MAINTAINER_EMAIL, + packages=find_packages(), + include_package_data=True, + description=DESCRIPTION, + url=URL, + version=VERSION, + long_description=LONG_DESCRIPTION, + zip_safe=False, + classifiers=[ + "Intended Audience :: Science/Research", + "Intended Audience :: Developers", + "Programming Language :: C", + "Programming Language :: Python", + "Topic :: Software Development", + "Topic :: Scientific/Engineering", + "Operating System :: Microsoft :: Windows", + "Operating System :: Unix", + "Programming Language :: Python :: 3", + "Programming Language :: Python :: 3.6", + "Programming Language :: Python :: 3.7", + "Programming Language :: Python :: 3.8", + ], + python_requires=">=3.6", + install_requires=[ + "numpy>=1.13.3,<1.20.0", + "scipy>=0.19.1", + "joblib>=0.11", + "scikit-learn>=0.22", + ], + setup_requires=["cython"], + ) diff --git a/tests/test_binner.py b/tests/test_binner.py index cb1a0c9..398f965 100644 --- a/tests/test_binner.py +++ b/tests/test_binner.py @@ -3,16 +3,15 @@ from numpy.testing import assert_allclose import pytest -from deepforest._binner import ( - Binner, - _find_binning_thresholds_per_feature -) +from deepforest._binner import Binner, _find_binning_thresholds_per_feature -kwargs = {"n_bins": 255, - "bin_subsample": 2e5, - "bin_type": "percentile", - "random_state": 0} +kwargs = { + "n_bins": 255, + "bin_subsample": 2e5, + "bin_type": "percentile", + "random_state": 0, +} def test_find_binning_thresholds_regular_data(): @@ -26,14 +25,14 @@ def test_find_binning_thresholds_regular_data(): assert_allclose(bin_thresholds, [2, 4, 6, 8]) # Interval - bin_thresholds = _find_binning_thresholds_per_feature(data, - n_bins=10, - bin_type="interval") + bin_thresholds = _find_binning_thresholds_per_feature( + data, n_bins=10, bin_type="interval" + ) assert_allclose(bin_thresholds, [1, 2, 3, 4, 5, 6, 7, 8, 9]) - bin_thresholds = _find_binning_thresholds_per_feature(data, - n_bins=5, - bin_type="interval") + bin_thresholds = _find_binning_thresholds_per_feature( + data, n_bins=5, bin_type="interval" + ) assert_allclose(bin_thresholds, [2, 4, 6, 8]) @@ -42,9 +41,9 @@ def test_find_binning_thresholds_invalid_binner_type(): err_msg = "Unknown binning type: unknown." 
with pytest.raises(ValueError, match=err_msg): - _find_binning_thresholds_per_feature(data, - n_bins=10, - bin_type="unknown") + _find_binning_thresholds_per_feature( + data, n_bins=10, bin_type="unknown" + ) def test_find_binning_thresholds_invalid_data_shape(): @@ -55,10 +54,14 @@ def test_find_binning_thresholds_invalid_data_shape(): assert "Per-feature data should be of the shape" in str(execinfo.value) -@pytest.mark.parametrize('param', - [(0, {"n_bins": 1}), - (1, {"bin_subsample": 0}), - (2, {"bin_type": "unknown"})]) +@pytest.mark.parametrize( + "param", + [ + (0, {"n_bins": 1}), + (1, {"bin_subsample": 0}), + (2, {"bin_type": "unknown"}), + ], +) def test_binner_invalid_params(param): data = np.linspace(0, 10, 1001) case_kwargs = copy.deepcopy(kwargs) diff --git a/tests/test_buffer.py b/tests/test_buffer.py index f750f25..1d918f9 100644 --- a/tests/test_buffer.py +++ b/tests/test_buffer.py @@ -5,11 +5,13 @@ from deepforest import _io as io -open_buffer = io.Buffer(use_buffer=True, - buffer_dir="./", - store_est=True, - store_pred=True, - store_data=True) +open_buffer = io.Buffer( + use_buffer=True, + buffer_dir="./", + store_est=True, + store_pred=True, + store_data=True, +) close_buffer = io.Buffer(use_buffer=False) @@ -39,13 +41,19 @@ def test_store_data_open_buffer(): layer_idx = 0 ret = open_buffer.cache_data(layer_idx, X, is_training_data=True) assert isinstance(ret, np.memmap) - assert os.path.exists(os.path.join( - open_buffer.data_dir_, "joblib_train_{}.mmap".format(layer_idx))) + assert os.path.exists( + os.path.join( + open_buffer.data_dir_, "joblib_train_{}.mmap".format(layer_idx) + ) + ) ret = open_buffer.cache_data(layer_idx, X, is_training_data=False) assert isinstance(ret, np.memmap) - assert os.path.exists(os.path.join( - open_buffer.data_dir_, "joblib_test_{}.mmap".format(layer_idx))) + assert os.path.exists( + os.path.join( + open_buffer.data_dir_, "joblib_test_{}.mmap".format(layer_idx) + ) + ) def test_load_estimator_missing(): diff --git a/tests/test_forest.py b/tests/test_forest.py index 8c6eba4..ab09d71 100644 --- a/tests/test_forest.py +++ b/tests/test_forest.py @@ -7,12 +7,12 @@ # Load utils from sklearn.ensemble._hist_gradient_boosting.binning import _BinMapper from sklearn.datasets import load_iris, load_wine -from sklearn.ensemble._forest import (_get_n_samples_bootstrap as - sklearn_get_n_samples_bootstrap) +from sklearn.ensemble._forest import ( + _get_n_samples_bootstrap as sklearn_get_n_samples_bootstrap, +) -@pytest.mark.parametrize("max_samples", - [0.42, 42, None]) +@pytest.mark.parametrize("max_samples", [0.42, 42, None]) def test_n_samples_bootstrap(max_samples): n_samples = 420 actual = _get_n_samples_bootstrap(n_samples, max_samples) @@ -43,8 +43,9 @@ def test_n_samples_bootstrap_invalid_type(): n_samples = 42 max_samples = "42" - err_msg = ("`max_samples` should be int or float, but got type" - " ''") + err_msg = ( + "`max_samples` should be int or float, but got type" " ''" + ) with pytest.raises(TypeError, match=err_msg): _get_n_samples_bootstrap(n_samples, max_samples) @@ -62,15 +63,17 @@ def test_forest_workflow(load_func): X_binned = binner.fit_transform(X) # Random Forest - model = RandomForestClassifier(n_estimators=n_estimators, - random_state=random_state) + model = RandomForestClassifier( + n_estimators=n_estimators, random_state=random_state + ) model.fit(X_binned, y) model.predict(X_binned) # Extremely Random Forest - model = ExtraTreesClassifier(n_estimators=n_estimators, - random_state=random_state) + model = 
ExtraTreesClassifier( + n_estimators=n_estimators, random_state=random_state + ) model.fit(X_binned, y) model.predict(X_binned) diff --git a/tests/test_io.py b/tests/test_io.py index 74756a8..b9179b3 100644 --- a/tests/test_io.py +++ b/tests/test_io.py @@ -28,8 +28,9 @@ def test_mkdir_already_exist(): def test_model_saveobj_not_exist(): - err_msg = ("Cannot find the target directory: ./tmp." - " Please create it first.") + err_msg = ( + "Cannot find the target directory: ./tmp." " Please create it first." + ) with pytest.raises(RuntimeError, match=err_msg): io.model_saveobj(save_dir, "param", None) diff --git a/tests/test_model.py b/tests/test_model.py index a1fcd3e..d6d8de8 100644 --- a/tests/test_model.py +++ b/tests/test_model.py @@ -16,40 +16,45 @@ # Load data X, y = load_iris(return_X_y=True) X_train, X_test, y_train, y_test = train_test_split( - X, y, test_size=0.42, random_state=42) + X, y, test_size=0.42, random_state=42 +) # Parameters -toy_kwargs = {"n_bins": 10, - "bin_subsample": 2e5, - "max_layers": 10, - "n_estimators": 1, - "n_trees": 100, - "max_depth": 3, - "min_samples_leaf": 1, - "use_predictor": True, - "predictor": "forest", - "predictor_kwargs": {}, - "n_tolerant_rounds": 2, - "delta": 1e-5, - "n_jobs": -1, - "random_state": 0, - "verbose": 2} - -kwargs = {"n_bins": 255, - "bin_subsample": 2e5, - "max_layers": 10, - "n_estimators": 2, - "n_trees": 100, - "max_depth": None, - "min_samples_leaf": 1, - "use_predictor": True, - "predictor": "forest", - "predictor_kwargs": {}, - "n_tolerant_rounds": 2, - "delta": 1e-5, - "n_jobs": -1, - "random_state": 0, - "verbose": 2} +toy_kwargs = { + "n_bins": 10, + "bin_subsample": 2e5, + "max_layers": 10, + "n_estimators": 1, + "n_trees": 100, + "max_depth": 3, + "min_samples_leaf": 1, + "use_predictor": True, + "predictor": "forest", + "predictor_kwargs": {}, + "n_tolerant_rounds": 2, + "delta": 1e-5, + "n_jobs": -1, + "random_state": 0, + "verbose": 2, +} + +kwargs = { + "n_bins": 255, + "bin_subsample": 2e5, + "max_layers": 10, + "n_estimators": 2, + "n_trees": 100, + "max_depth": None, + "min_samples_leaf": 1, + "use_predictor": True, + "predictor": "forest", + "predictor_kwargs": {}, + "n_tolerant_rounds": 2, + "delta": 1e-5, + "n_jobs": -1, + "random_state": 0, + "verbose": 2, +} @pytest.mark.parametrize( @@ -152,7 +157,12 @@ def test_model_sample_weight(): y_pred_skewed_sample_weight = model.predict(X_test) # Make sure the different predictions with None and equal sample_weight - assert_raises(AssertionError, assert_array_equal, y_pred_skewed_sample_weight, y_pred_equal_sample_weight) + assert_raises( + AssertionError, + assert_array_equal, + y_pred_skewed_sample_weight, + y_pred_equal_sample_weight, + ) model.clean() # clear the buffer @@ -183,10 +193,14 @@ def test_model_workflow_in_memory(): shutil.rmtree(save_dir) -@pytest.mark.parametrize('param', - [(0, {"max_layers": 0}), - (1, {"n_tolerant_rounds": 0}), - (2, {"delta": -1})]) +@pytest.mark.parametrize( + "param", + [ + (0, {"max_layers": 0}), + (1, {"n_tolerant_rounds": 0}), + (2, {"delta": -1}), + ], +) def test_model_invalid_training_params(param): case_kwargs = copy.deepcopy(toy_kwargs) case_kwargs.update(param[1]) @@ -204,18 +218,16 @@ def test_model_invalid_training_params(param): assert "delta " in str(excinfo.value) -@pytest.mark.parametrize('predictor', ['forest', 'xgboost', 'lightgbm']) +@pytest.mark.parametrize("predictor", ["forest", "xgboost", "lightgbm"]) def test_predictor_normal(predictor): - deepforest.cascade._build_predictor(predictor, - 
n_estimators=1, - n_outputs=2) + deepforest.cascade._build_predictor(predictor, n_estimators=1, n_outputs=2) def test_predictor_unknown(): with pytest.raises(NotImplementedError) as excinfo: - deepforest.cascade._build_predictor("unknown", - n_estimators=1, - n_outputs=2) + deepforest.cascade._build_predictor( + "unknown", n_estimators=1, n_outputs=2 + ) assert "name of the predictor should be one of" in str(excinfo.value) From 5dc09067f9910185349ad4de3bd6dad2c94a7a1c Mon Sep 17 00:00:00 2001 From: Yi-Xuan Xu Date: Fri, 5 Feb 2021 17:13:15 +0800 Subject: [PATCH 10/94] [DOC] Fix up typos --- README.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.rst b/README.rst index a69a143..06e9122 100644 --- a/README.rst +++ b/README.rst @@ -18,7 +18,7 @@ Deep Forest (DF) 21 .. |pypi| image:: https://img.shields.io/pypi/v/deep-forest?color=blue .. _pypi: https://pypi.org/project/deep-forest/ -.. |style| image;: https://img.shields.io/badge/code%20style-black-000000.svg +.. |style| image:: https://img.shields.io/badge/code%20style-black-000000.svg .. _style: https://github.com/psf/black **DF21** is an implementation of `Deep Forest `__ 2021.2.1. It is designed to have the following advantages: From ad030f4bec66ba63b20a002682649ac8fc444aae Mon Sep 17 00:00:00 2001 From: NiMaZi Date: Sat, 6 Feb 2021 05:47:20 +0100 Subject: [PATCH 11/94] [ENH] Support class label encoding in `fit` and `predict` (#18) * Add label encoder The label encoder that convert original labels into integers (0, 1, 2, ...) * check dtype and add comments * Bug fix * bug fix * Disable partial mode Label encoder does not deal with partial mode yet. * Label encoder with scikit-learn * bug fix * Add utility vars in __init__() * Bug fix There is a typo. :) * black formatting * Update cascade.py * modify save and load * fix format * Add testing case for label encoder * fix format * fix format * black formatting * add CHANGELOG.rst --- CHANGELOG.rst | 1 + deepforest/cascade.py | 153 +++++++++++++++++++++++++------------- tests/test_model_input.py | 32 ++++++++ 3 files changed, 136 insertions(+), 50 deletions(-) create mode 100644 tests/test_model_input.py diff --git a/CHANGELOG.rst b/CHANGELOG.rst index 225dd7a..3d0b11f 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -31,6 +31,7 @@ Version 0.1.* .. |Fix| replace:: :raw-html:`Fix` :raw-latex:`{\small\sc [Fix]}` .. |API| replace:: :raw-html:`API Change` :raw-latex:`{\small\sc [API Change]}` +- |Feature| support class label encoding (`#18 `__) @NiMaZi - |Feature| support sample weight in :meth:`fit` (`#7 `__) @tczhao - |Feature| configurable predictor parameter (`#9 `__) @tczhao - |Enhancement| add base class ``BaseEstimator`` and ``ClassifierMixin`` (`#8 `__) @pjgao diff --git a/deepforest/cascade.py b/deepforest/cascade.py index 2ea19fd..0df2048 100644 --- a/deepforest/cascade.py +++ b/deepforest/cascade.py @@ -7,6 +7,8 @@ import numbers import numpy as np from abc import ABCMeta, abstractmethod +from sklearn.preprocessing import LabelEncoder +from sklearn.utils.multiclass import type_of_target from sklearn.base import BaseEstimator, ClassifierMixin from . import _utils @@ -155,7 +157,6 @@ def _build_predictor( partial_mode : :obj:`bool`, default=False Whether to train the deep forest in partial mode. For large datasets, it is recommended to use the partial mode. 
- - If ``True``, the partial mode is activated and all fitted estimators will be dumped in a local buffer; - If ``False``, all fitted estimators are directly stored in the @@ -172,7 +173,6 @@ def _build_predictor( instance used by :mod:`np.random`. verbose : :obj:`int`, default=1 Controls the verbosity when fitting and predicting. - - If ``<= 0``, silent mode, which means no logging information will be displayed; - If ``1``, logging information on the cascade layer level will be @@ -181,14 +181,51 @@ def _build_predictor( """ -def deepforest_model_doc(header): - """Decorator on obtaining documentation for deep forest models.""" +__fit_doc = """ + .. note:: + Deep forest supports two kinds of modes for training: + - **Full memory mode**, in which the training / testing data and + all fitted estimators are directly stored in the memory. + - **Partial mode**, in which after fitting each estimator using + the training data, it will be dumped in the buffer. During the + evaluating stage, the dumped estimators are reloaded into the + memory sequentially to evaluate the testing data. + By setting the ``partial_mode`` to ``True``, the partial mode is + activated, and a local buffer will be created at the current + directory. The partial mode is able to reduce the running memory + cost when training the deep forest. + Parameters + ---------- + X : :obj:`numpy.ndarray` of shape (n_samples, n_features) + The training data. Internally, it will be converted to + ``np.uint8``. + y : :obj:`numpy.ndarray` of shape (n_samples,) + The class labels of input samples. + sample_weight : :obj:`numpy.ndarray` of shape (n_samples,), default=None + Sample weights. If ``None``, then samples are equally weighted. +""" + + +def deepforest_model_doc(header, item): + """ + Decorator on obtaining documentation for deep forest models. + Parameters + ---------- + header: string + Introduction to the decorated class or method. + item : string + Type of the docstring item. + """ + + def get_doc(item): + """Return the selected item.""" + __doc = {"model": __model_doc, "fit": __fit_doc} + return __doc[item] def adddoc(cls): doc = [header + "\n\n"] - doc.extend([__model_doc]) + doc.extend(get_doc(item)) cls.__doc__ = "".join(doc) - return cls return adddoc @@ -422,7 +459,6 @@ def _repr_performance(self, pivot): def predict(self, X): """ Predict class labels or regression values for X. - For classification, the predicted class for each sample in X is returned. For regression, the predicted value based on X is returned. """ @@ -433,35 +469,7 @@ def n_aug_features_(self): # flake8: noqa: E501 def fit(self, X, y, sample_weight=None): - """ - Build a deep forest using the training data. - - .. note:: - Deep forest supports two kinds of modes for training: - - - **Full memory mode**, in which the training / testing data and - all fitted estimators are directly stored in the memory. - - **Partial mode**, in which after fitting each estimator using - the training data, it will be dumped in the buffer. During the - evaluating stage, the dumped estimators are reloaded into the - memory sequentially to evaluate the testing data. - - By setting the ``partial_mode`` to ``True``, the partial mode is - activated, and a local buffer will be created at the current - directory. The partial mode is able to reduce the running memory - cost when training the deep forest. - - Parameters - ---------- - X : :obj:`numpy.ndarray` of shape (n_samples, n_features) - The training data. Internally, it will be converted to - ``np.uint8``. 
- y : :obj:`numpy.ndarray` of shape (n_samples,) - The class labels of input samples. - sample_weight : :obj:`numpy.ndarray` of shape (n_samples,), default=None - Sample weights. If ``None``, then samples are equally weighted. - """ self._check_input(X, y) self._validate_params() n_counter = 0 # a counter controlling the early stopping @@ -700,15 +708,11 @@ def fit(self, X, y, sample_weight=None): def save(self, dirname="model"): """ Save the model to the specified directory. - Parameters ---------- dirname : :obj:`str`, default="model" The name of the output directory. - - .. warning:: - Other methods on model serialization such as :mod:`pickle` or :mod:`joblib` are not recommended, especially when ``partial_mode`` is set to ``True``. @@ -726,8 +730,15 @@ def save(self, dirname="model"): d["buffer"] = self.buffer_ d["verbose"] = self.verbose d["use_predictor"] = self.use_predictor + if self.use_predictor: d["predictor_name"] = self.predictor_name + + # Save label encoder if labels are encoded. + if hasattr(self, "labels_are_encoded"): + d["labels_are_encoded"] = self.labels_are_encoded + d["label_encoder"] = self.label_encoder_ + _io.model_saveobj(dirname, "param", d) _io.model_saveobj(dirname, "binner", self.binners_) _io.model_saveobj(dirname, "layer", self.layers_, self.partial_mode) @@ -740,15 +751,11 @@ def save(self, dirname="model"): def load(self, dirname): """ Load the model from the specified directory. - Parameters ---------- dirname : :obj:`str` The name of the input directory. - - .. note:: - The dumped model after calling :meth:`load_model` is not exactly the same as the model before saving, because many objects irrelevant to model inference will not be saved. @@ -764,6 +771,11 @@ def load(self, dirname): self.verbose = d["verbose"] self.use_predictor = d["use_predictor"] + # Load label encoder if labels are encoded. + if "labels_are_encoded" in d: + self.labels_are_encoded = d["labels_are_encoded"] + self.label_encoder_ = d["label_encoder"] + # Load internal containers self.binners_ = _io.model_loadobj(dirname, "binner") self.layers_ = _io.model_loadobj(dirname, "layer", d) @@ -789,7 +801,7 @@ def clean(self): @deepforest_model_doc( - """Implementation of the deep forest for classification.""" + """Implementation of the deep forest for classification.""", "model" ) class CascadeForestClassifier(BaseCascadeForest, ClassifierMixin): def __init__( @@ -832,20 +844,63 @@ def __init__( verbose=verbose, ) + # Used to deal with classification labels + self.labels_are_encoded = False + self.type_of_target_ = None + self.label_encoder_ = None + + def _encode_class_labels(self, y): + """ + Fit the internal label encoder and return encoded labels. + """ + self.type_of_target_ = type_of_target(y) + if self.type_of_target_ in ("binary", "multiclass"): + self.labels_are_encoded = True + self.label_encoder_ = LabelEncoder() + encoded_y = self.label_encoder_.fit_transform(y) + else: + msg = ( + "CascadeForestClassifier is used for binary and multiclass" + " classification, wheras the training labels seem not to" + " be any one of them." + ) + raise ValueError(msg) + + return encoded_y + + def _decode_class_labels(self, y): + """ + Transform the predicted labels back to original encoding. 
+ """ + if self.labels_are_encoded: + decoded_y = self.label_encoder_.inverse_transform(y) + else: + decoded_y = y + + return decoded_y + def _repr_performance(self, pivot): msg = "Val Acc = {:.3f} %" return msg.format(pivot * 100) + @deepforest_model_doc( + """Build a deep forest using the training data.""", "fit" + ) + def fit(self, X, y, sample_weight=None): + + # Check the input for classification + y = self._encode_class_labels(y) + + super().fit(X, y, sample_weight) + def predict_proba(self, X): """ Predict class probabilities for X. - Parameters ---------- X : :obj:`numpy.ndarray` of shape (n_samples, n_features) The input samples. Internally, its dtype will be converted to ``np.uint8``. - Returns ------- proba : :obj:`numpy.ndarray` of shape (n_samples, n_classes) @@ -917,18 +972,16 @@ def predict_proba(self, X): def predict(self, X): """ Predict class for X. - Parameters ---------- X : :obj:`numpy.ndarray` of shape (n_samples, n_features) The input samples. Internally, its dtype will be converted to ``np.uint8``. - Returns ------- y : :obj:`numpy.ndarray` of shape (n_samples,) The predicted classes. """ proba = self.predict_proba(X) - - return np.argmax(proba, axis=1) + y = self._decode_class_labels(np.argmax(proba, axis=1)) + return y diff --git a/tests/test_model_input.py b/tests/test_model_input.py new file mode 100644 index 0000000..0b516a2 --- /dev/null +++ b/tests/test_model_input.py @@ -0,0 +1,32 @@ +import numpy as np +from numpy.testing import assert_array_equal + +from sklearn.datasets import load_digits +from deepforest import CascadeForestClassifier + + +def test_model_input_label_encoder(): + """Test if the model behaves the same with and without label encoding.""" + + # Load data + X, y = load_digits(return_X_y=True) + y_as_str = np.char.add("label_", y.astype(str)) + + # Train model on integer labels. Labels should look like: 1, 2, 3, ... + model = CascadeForestClassifier(random_state=1) + model.fit(X, y) + y_pred_int_labels = model.predict(X) + + # Train model on string labels. Labels should look like: "label_1", "label_2", "label_3", ... + model = CascadeForestClassifier(random_state=1) + model.fit(X, y_as_str) + y_pred_str_labels = model.predict(X) + + # Check if the underlying data are the same + y_pred_int_labels_as_str = np.char.add( + "label_", y_pred_int_labels.astype(str) + ) + assert_array_equal(y_pred_str_labels, y_pred_int_labels_as_str) + + # Clean up buffer + model.clean() From f53abc0eacbb9ac7a6aef4443596b93d7038a884 Mon Sep 17 00:00:00 2001 From: Yi-Xuan Xu Date: Sat, 6 Feb 2021 14:40:03 +0800 Subject: [PATCH 12/94] [ENH] Add the hook on forest estimator (#22) * Create scanner.py * Delete scanner.py * Update cascade.py * black formatting * black formatting * formatting again... * Update _estimator.py * add unit tests * Update CHANGELOG.rst --- CHANGELOG.rst | 1 + deepforest/cascade.py | 58 +++++++++++++++++++++++++++++++++++++++++++ tests/test_model.py | 18 ++++++++++++++ 3 files changed, 77 insertions(+) diff --git a/CHANGELOG.rst b/CHANGELOG.rst index 3d0b11f..e0326d4 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -31,6 +31,7 @@ Version 0.1.* .. |Fix| replace:: :raw-html:`Fix` :raw-latex:`{\small\sc [Fix]}` .. 
|API| replace:: :raw-html:`API Change` :raw-latex:`{\small\sc [API Change]}` +- |Feature| implement the :meth:`get_forest` method for efficient indexing (`#22 `__) @xuyxu - |Feature| support class label encoding (`#18 `__) @NiMaZi - |Feature| support sample weight in :meth:`fit` (`#7 `__) @tczhao - |Feature| configurable predictor parameter (`#9 `__) @tczhao diff --git a/deepforest/cascade.py b/deepforest/cascade.py index 0df2048..a9e356a 100644 --- a/deepforest/cascade.py +++ b/deepforest/cascade.py @@ -705,6 +705,64 @@ def fit(self, X, y, sample_weight=None): return self + def get_forest(self, layer_idx, est_idx, forest_type): + """ + Get the `est_idx`-th forest estimator from the `layer_idx`-th + cascade layer in the model. + + Parameters + ---------- + layer_idx : :obj:`int` + The index of the cascade layer, should be in the range + ``[0, self.n_layers_-1]``. + est_idx : :obj:`int` + The index of the forest estimator, should be in the range + ``[0, self.n_estimators]``. + forest_type : :obj:`{"rf", "erf"}` + Specify the forest type. + + - If ``rf``, return the random forest. + - If ``erf``, return the extremely random forest. + + Returns + ------- + estimator : The forest estimator with the given index. + """ + if not self.is_fitted_: + raise AttributeError("Please fit the model first.") + + # Check the given index + if not 0 <= layer_idx < self.n_layers_: + msg = ( + "`layer_idx` should be in the range [0, {}), but got" + " {} instead." + ) + raise ValueError(msg.format(self.n_layers_, layer_idx)) + + if not 0 <= est_idx < self.n_estimators: + msg = ( + "`est_idx` should be in the range [0, {}), but got" + " {} instead." + ) + raise ValueError(msg.format(self.n_estimators, est_idx)) + + if forest_type not in ("rf", "erf"): + msg = ( + "`forest_type` should be one of {{rf, erf}}," + " but got {} instead." + ) + raise ValueError(msg.format(forest_type)) + + layer = self._get_layer(layer_idx) + est_key = "{}-{}-{}".format(layer_idx, est_idx, forest_type) + estimator = layer.estimators_[est_key] + + # Load the model if in partial mode + if self.partial_mode: + estimator = self.buffer_.load_estimator(estimator) + + return estimator.estimator_ + def save(self, dirname="model"): """ Save the model to the specified directory. 
diff --git a/tests/test_model.py b/tests/test_model.py index d6d8de8..582af0f 100644 --- a/tests/test_model.py +++ b/tests/test_model.py @@ -103,6 +103,24 @@ def test_model_properties_after_fitting(): model._set_binner(0, None) assert "already exists in the internal container" in str(excinfo.value) + # Test the hook on forest estimator + assert ( + model.get_forest(0, 0, "rf") + is model._get_layer(0).estimators_["0-0-rf"].estimator_ + ) + + with pytest.raises(ValueError) as excinfo: + model.get_forest(model.n_layers_, 0, "rf") + assert "`layer_idx` should be in the range" in str(excinfo.value) + + with pytest.raises(ValueError) as excinfo: + model.get_forest(0, model.n_estimators, "rf") + assert "`est_idx` should be in the range" in str(excinfo.value) + + with pytest.raises(ValueError) as excinfo: + model.get_forest(0, 0, "Unknown") + assert "`forest_type` should be one of" in str(excinfo.value) + def test_model_workflow_partial_mode(): """Run the workflow of deep forest with a local buffer.""" From 312700daa9292daa6fad995eebdd112b817e032c Mon Sep 17 00:00:00 2001 From: Yi-Xuan Xu Date: Sun, 7 Feb 2021 15:38:02 +0800 Subject: [PATCH 13/94] [FIX] Fix accepted data type on binning (#23) * fix bug on data type * Update CHANGELOG.rst --- CHANGELOG.rst | 1 + deepforest/_binner.py | 4 +++- deepforest/_cutils.pyx | 4 ++-- 3 files changed, 6 insertions(+), 3 deletions(-) diff --git a/CHANGELOG.rst b/CHANGELOG.rst index e0326d4..1dc5c83 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -31,6 +31,7 @@ Version 0.1.* .. |Fix| replace:: :raw-html:`Fix` :raw-latex:`{\small\sc [Fix]}` .. |API| replace:: :raw-html:`API Change` :raw-latex:`{\small\sc [API Change]}` +- |Fix| fix accepted data types on the :obj:`binner` (`#23 `__) @xuyxu - |Feature| implement the :meth:`get_forest` method for efficient indexing (`#22 `__) @xuyxu - |Feature| support class label encoding (`#18 `__) @NiMaZi - |Feature| support sample weight in :meth:`fit` (`#7 `__) @tczhao diff --git a/deepforest/_binner.py b/deepforest/_binner.py index 00e59b2..35fd797 100644 --- a/deepforest/_binner.py +++ b/deepforest/_binner.py @@ -9,8 +9,8 @@ __all__ = ["Binner"] import numpy as np -from sklearn.utils import check_random_state from sklearn.base import BaseEstimator, TransformerMixin +from sklearn.utils import check_random_state, check_array from . 
import _cutils as _LIB @@ -162,7 +162,9 @@ def transform(self, X): msg.format(self.n_bins_non_missing_.shape[0], X.shape[1]) ) + X = check_array(X, dtype=X_DTYPE, force_all_finite=False) X_binned = np.zeros_like(X, dtype=X_BINNED_DTYPE, order="F") + _LIB._map_to_bins( X, self.bin_thresholds_, self.missing_values_bin_idx_, X_binned ) diff --git a/deepforest/_cutils.pyx b/deepforest/_cutils.pyx index 7cc0b12..398e437 100644 --- a/deepforest/_cutils.pyx +++ b/deepforest/_cutils.pyx @@ -74,11 +74,11 @@ cpdef _map_to_bins(object X, """ cdef: const X_DTYPE_C[:, :] X_ndarray = X - SIZE_t n_features = X_ndarray.shape[1] + SIZE_t n_features = X.shape[1] SIZE_t feature_idx for feature_idx in range(n_features): - _map_num_col_to_bins(X[:, feature_idx], + _map_num_col_to_bins(X_ndarray[:, feature_idx], binning_thresholds[feature_idx], missing_values_bin_idx, binned[:, feature_idx]) From 61a4cd8f9d7edd5cb9fd5ca5dbf9b2da460464a0 Mon Sep 17 00:00:00 2001 From: Yi-Xuan Xu Date: Sun, 7 Feb 2021 15:45:51 +0800 Subject: [PATCH 14/94] [MNT] Update version number --- setup.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/setup.py b/setup.py index 01b64f2..79f3897 100644 --- a/setup.py +++ b/setup.py @@ -12,7 +12,7 @@ MAINTAINER = "Yi-Xuan Xu" MAINTAINER_EMAIL = "xuyx@lamda.nju.edu.cn" URL = "https://github.com/LAMDA-NJU/Deep-Forest" -VERSION = "0.1.0" +VERSION = "0.1.1" def configuration(parent_package="", top_path=None): From 17121e67c939b7ac7e84aa9cb2c5447d99412717 Mon Sep 17 00:00:00 2001 From: Yi-Xuan Xu Date: Tue, 9 Feb 2021 12:13:49 +0800 Subject: [PATCH 15/94] [FIX] Fix corrupted docstrings caused by black formatter (#24) * fix bug on data type * Update CHANGELOG.rst * fix docstrings --- .github/workflows/code-quality.yml | 2 +- deepforest/cascade.py | 25 ++++++++++++++++++++++--- 2 files changed, 23 insertions(+), 4 deletions(-) diff --git a/.github/workflows/code-quality.yml b/.github/workflows/code-quality.yml index 41e9d47..3676b41 100644 --- a/.github/workflows/code-quality.yml +++ b/.github/workflows/code-quality.yml @@ -27,6 +27,6 @@ jobs: pip install -r build_tools/requirements.txt - name: Check code quality run: | - black --check --config pyproject.toml ./ + black --skip-string-normalization --check --config pyproject.toml ./ chmod +x "${GITHUB_WORKSPACE}/build_tools/linting.sh" ./build_tools/linting.sh diff --git a/deepforest/cascade.py b/deepforest/cascade.py index a9e356a..c7480d2 100644 --- a/deepforest/cascade.py +++ b/deepforest/cascade.py @@ -142,8 +142,8 @@ def _build_predictor( predictor_kwargs : :obj:`dict`, default={} The configuration of the predictor concatenated to the deep forest. Specifying this will extend/overwrite the original parameters inherit - from deep forest. - If ``use_predictor`` is False, this parameter will have no effect. + from deep forest. If ``use_predictor`` is False, this parameter will + have no effect. n_tolerant_rounds : :obj:`int`, default=2 Specify when to conduct early stopping. The training process terminates when the validation performance on the training set does @@ -157,6 +157,7 @@ def _build_predictor( partial_mode : :obj:`bool`, default=False Whether to train the deep forest in partial mode. For large datasets, it is recommended to use the partial mode. + - If ``True``, the partial mode is activated and all fitted estimators will be dumped in a local buffer; - If ``False``, all fitted estimators are directly stored in the @@ -167,12 +168,14 @@ def _build_predictor( :obj:`joblib.parallel_backend` context. 
``-1`` means using all processors. random_state : :obj:`int` or ``None``, default=None - - If :obj:``int``, ``random_state`` is the seed used by the random + + - If :obj:`int`, ``random_state`` is the seed used by the random number generator; - If ``None``, the random number generator is the RandomState instance used by :mod:`np.random`. verbose : :obj:`int`, default=1 Controls the verbosity when fitting and predicting. + - If ``<= 0``, silent mode, which means no logging information will be displayed; - If ``1``, logging information on the cascade layer level will be @@ -182,18 +185,23 @@ def _build_predictor( __fit_doc = """ + .. note:: + Deep forest supports two kinds of modes for training: + - **Full memory mode**, in which the training / testing data and all fitted estimators are directly stored in the memory. - **Partial mode**, in which after fitting each estimator using the training data, it will be dumped in the buffer. During the evaluating stage, the dumped estimators are reloaded into the memory sequentially to evaluate the testing data. + By setting the ``partial_mode`` to ``True``, the partial mode is activated, and a local buffer will be created at the current directory. The partial mode is able to reduce the running memory cost when training the deep forest. + Parameters ---------- X : :obj:`numpy.ndarray` of shape (n_samples, n_features) @@ -209,6 +217,7 @@ def _build_predictor( def deepforest_model_doc(header, item): """ Decorator on obtaining documentation for deep forest models. + Parameters ---------- header: string @@ -766,10 +775,13 @@ def get_forest(self, layer_idx, est_idx, forest_type): def save(self, dirname="model"): """ Save the model to the specified directory. + Parameters ---------- dirname : :obj:`str`, default="model" The name of the output directory. + + .. warning:: Other methods on model serialization such as :mod:`pickle` or :mod:`joblib` are not recommended, especially when ``partial_mode`` @@ -809,10 +821,13 @@ def save(self, dirname="model"): def load(self, dirname): """ Load the model from the specified directory. + Parameters ---------- dirname : :obj:`str` The name of the input directory. + + .. note:: The dumped model after calling :meth:`load_model` is not exactly the same as the model before saving, because many objects @@ -954,11 +969,13 @@ def fit(self, X, y, sample_weight=None): def predict_proba(self, X): """ Predict class probabilities for X. + Parameters ---------- X : :obj:`numpy.ndarray` of shape (n_samples, n_features) The input samples. Internally, its dtype will be converted to ``np.uint8``. + Returns ------- proba : :obj:`numpy.ndarray` of shape (n_samples, n_classes) @@ -1030,11 +1047,13 @@ def predict_proba(self, X): def predict(self, X): """ Predict class for X. + Parameters ---------- X : :obj:`numpy.ndarray` of shape (n_samples, n_features) The input samples. Internally, its dtype will be converted to ``np.uint8``. + Returns ------- y : :obj:`numpy.ndarray` of shape (n_samples,) From 8075234b9cf8cd20b0ed9826d34f1798a564ce8e Mon Sep 17 00:00:00 2001 From: Yi-Xuan Xu Date: Tue, 9 Feb 2021 16:35:49 +0800 Subject: [PATCH 16/94] [DOC] Fix typos --- docs/api_reference.rst | 3 +++ 1 file changed, 3 insertions(+) diff --git a/docs/api_reference.rst b/docs/api_reference.rst index 6c9511d..db221bf 100644 --- a/docs/api_reference.rst +++ b/docs/api_reference.rst @@ -3,6 +3,9 @@ API Reference Below is the class and function reference for :mod:`deepforest`. 
Notice that the package is under active development, and some features may not be stable yet. +CascadeForestClassifier +----------------------- + .. autoclass:: deepforest.CascadeForestClassifier :members: :inherited-members: From 9e75acaf13a0be0a65374323966128c95845eca5 Mon Sep 17 00:00:00 2001 From: Yi-Xuan Xu Date: Tue, 9 Feb 2021 17:15:35 +0800 Subject: [PATCH 17/94] [MNT] Update Codecov config --- .codecov.yml | 2 +- .github/workflows/build-and-test.yml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.codecov.yml b/.codecov.yml index 6eeb835..55c238a 100644 --- a/.codecov.yml +++ b/.codecov.yml @@ -14,4 +14,4 @@ comment: false # enable codecov to report to GitHub status checks github_checks: - annotations: false \ No newline at end of file + annotations: true \ No newline at end of file diff --git a/.github/workflows/build-and-test.yml b/.github/workflows/build-and-test.yml index 41b6329..df8e963 100644 --- a/.github/workflows/build-and-test.yml +++ b/.github/workflows/build-and-test.yml @@ -7,7 +7,7 @@ on: branches: [ master ] jobs: - build: + build: runs-on: ${{ matrix.os }} strategy: matrix: From 28a32d222552f4fd062ed1fca9a035dae9f159da Mon Sep 17 00:00:00 2001 From: tczhao Date: Thu, 11 Feb 2021 14:10:37 +1100 Subject: [PATCH 18/94] [ENH] Add CascadeForestRegressor for regression (#25) * feat(regressor): add regressor interface * fix(doc): cascade, fix spacing and missing wrapper * doc(regressor): add regressor related example,api * fix(regressor): fix layer predict full handling * add experiment on regression * fix(regressor): fix variable name acc->mse in examples * fix(regressor): fix predict docstring * fix(regressor): n_output are not getting used downstream, set to None * fix(regressor): fix variable name acc->mse in examples print * fix typo in docstrings --- CHANGELOG.rst | 1 + README.rst | 17 + deepforest/__init__.py | 15 +- deepforest/_estimator.py | 75 +++- deepforest/_layer.py | 49 +- deepforest/cascade.py | 420 ++++++++++++++++-- deepforest/forest.py | 271 ++++++++++- deepforest/tree/__init__.py | 11 +- deepforest/tree/tree.py | 121 ++++- docs/api_reference.rst | 10 + docs/experiments.rst | 71 ++- docs/index.rst | 17 + tests/test_forest.py | 35 +- tests/test_layer_estimator.py | 32 +- ...test_model.py => test_model_classifier.py} | 14 +- tests/test_model_regressor.py | 231 ++++++++++ tests/test_tree regressor.py | 71 +++ .../{test_tree.py => test_tree_classifier.py} | 0 tests/test_tree_same.py | 60 ++- 19 files changed, 1422 insertions(+), 99 deletions(-) rename tests/{test_model.py => test_model_classifier.py} (95%) create mode 100644 tests/test_model_regressor.py create mode 100644 tests/test_tree regressor.py rename tests/{test_tree.py => test_tree_classifier.py} (100%) diff --git a/CHANGELOG.rst b/CHANGELOG.rst index 1dc5c83..9d5e7f4 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -31,6 +31,7 @@ Version 0.1.* .. |Fix| replace:: :raw-html:`Fix` :raw-latex:`{\small\sc [Fix]}` .. 
|API| replace:: :raw-html:`API Change` :raw-latex:`{\small\sc [API Change]}` +- |Feature| support regression prediction (`#25 `__) @tczhao - |Fix| fix accepted data types on the :obj:`binner` (`#23 `__) @xuyxu - |Feature| implement the :meth:`get_forest` method for efficient indexing (`#22 `__) @xuyxu - |Feature| support class label encoding (`#18 `__) @NiMaZi diff --git a/README.rst b/README.rst index 06e9122..9c065db 100644 --- a/README.rst +++ b/README.rst @@ -61,6 +61,23 @@ Quickstart print("\nTesting Accuracy: {:.3f} %".format(acc)) >>> Testing Accuracy: 98.667 % +.. code-block:: python + + from sklearn.datasets import load_boston + from sklearn.model_selection import train_test_split + from sklearn.metrics import mean_squared_error + + from deepforest import CascadeForestRegressor + + X, y = load_boston(return_X_y=True) + X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=1) + model = CascadeForestRegressor(random_state=1) + model.fit(X_train, y_train) + y_pred = model.predict(X_test) + mse = mean_squared_error(y_test, y_pred) + print("\nTesting MSE: {:.3f}".format(mse)) + >>> Testing MSE: 8.068 + Resources --------- diff --git a/deepforest/__init__.py b/deepforest/__init__.py index fd72c17..dcc150c 100644 --- a/deepforest/__init__.py +++ b/deepforest/__init__.py @@ -1,14 +1,19 @@ -from .cascade import CascadeForestClassifier -from .forest import RandomForestClassifier -from .forest import ExtraTreesClassifier -from .tree import DecisionTreeClassifier -from .tree import ExtraTreeClassifier +from .cascade import CascadeForestClassifier, CascadeForestRegressor +from .forest import RandomForestClassifier, RandomForestRegressor +from .forest import ExtraTreesClassifier, ExtraTreesRegressor +from .tree import DecisionTreeClassifier, DecisionTreeRegressor +from .tree import ExtraTreeClassifier, ExtraTreeRegressor __all__ = [ "CascadeForestClassifier", + "CascadeForestRegressor", "RandomForestClassifier", + "RandomForestRegressor", "ExtraTreesClassifier", + "ExtraTreesRegressor", "DecisionTreeClassifier", + "DecisionTreeRegressor", "ExtraTreeClassifier", + "ExtraTreeRegressor", ] diff --git a/deepforest/_estimator.py b/deepforest/_estimator.py index 35917e5..501c5c9 100644 --- a/deepforest/_estimator.py +++ b/deepforest/_estimator.py @@ -3,10 +3,15 @@ __all__ = ["Estimator"] -from .forest import RandomForestClassifier, ExtraTreesClassifier +from .forest import ( + RandomForestClassifier, + ExtraTreesClassifier, + RandomForestRegressor, + ExtraTreesRegressor, +) -def make_estimator( +def make_classifier_estimator( name, n_trees=100, max_depth=None, @@ -39,6 +44,39 @@ def make_estimator( return estimator +def make_regressor_estimator( + name, + n_trees=100, + max_depth=None, + min_samples_leaf=1, + n_jobs=None, + random_state=None, +): + # RandomForestRegressor + if name == "rf": + estimator = RandomForestRegressor( + n_estimators=n_trees, + max_depth=max_depth, + min_samples_leaf=min_samples_leaf, + n_jobs=n_jobs, + random_state=random_state, + ) + # ExtraTreesRegressor + elif name == "erf": + estimator = ExtraTreesRegressor( + n_estimators=n_trees, + max_depth=max_depth, + min_samples_leaf=min_samples_leaf, + n_jobs=n_jobs, + random_state=random_state, + ) + else: + msg = "Unknown type of estimator, which should be one of {{rf, erf}}." 
+ raise NotImplementedError(msg) + + return estimator + + class Estimator(object): def __init__( self, @@ -48,10 +86,28 @@ def __init__( min_samples_leaf=1, n_jobs=None, random_state=None, + is_classifier=True, ): - self.estimator_ = make_estimator( - name, n_trees, max_depth, min_samples_leaf, n_jobs, random_state - ) + + self.is_classifier = is_classifier + if self.is_classifier: + self.estimator_ = make_classifier_estimator( + name, + n_trees, + max_depth, + min_samples_leaf, + n_jobs, + random_state, + ) + else: + self.estimator_ = make_regressor_estimator( + name, + n_trees, + max_depth, + min_samples_leaf, + n_jobs, + random_state, + ) @property def oob_decision_function_(self): @@ -64,8 +120,11 @@ def fit_transform(self, X, y, sample_weight=None): return X_aug def transform(self, X): - - return self.estimator_.predict_proba(X) + if self.is_classifier: + return self.estimator_.predict_proba(X) + return self.estimator_.predict(X) def predict(self, X): - return self.estimator_.predict_proba(X) + if self.is_classifier: + return self.estimator_.predict_proba(X) + return self.estimator_.predict(X) diff --git a/deepforest/_layer.py b/deepforest/_layer.py index 2c80093..ee76956 100644 --- a/deepforest/_layer.py +++ b/deepforest/_layer.py @@ -4,7 +4,7 @@ __all__ = ["Layer"] import numpy as np -from sklearn.metrics import accuracy_score +from sklearn.metrics import accuracy_score, mean_squared_error from . import _utils from ._estimator import Estimator @@ -56,6 +56,7 @@ def __init__( n_jobs=None, random_state=None, verbose=1, + is_classifier=True, ): self.layer_idx = layer_idx self.n_classes = n_classes @@ -68,7 +69,7 @@ def __init__( self.n_jobs = n_jobs self.random_state = random_state self.verbose = verbose - + self.is_classifier = is_classifier # Internal container self.estimators_ = {} @@ -93,6 +94,7 @@ def _make_estimator(self, estimator_idx, estimator_name): min_samples_leaf=self.min_samples_leaf, n_jobs=self.n_jobs, random_state=random_state, + is_classifier=self.is_classifier, ) return estimator @@ -113,7 +115,10 @@ def fit_transform(self, X, y, sample_weight=None): n_samples, _ = X.shape X_aug = [] - oob_decision_function = np.zeros((n_samples, self.n_classes)) + if self.is_classifier: + oob_decision_function = np.zeros((n_samples, self.n_classes)) + else: + oob_decision_function = np.zeros((n_samples, 1)) # A random forest and an extremely random forest will be fitted for estimator_idx in range(self.n_estimators // 2): @@ -154,18 +159,29 @@ def fit_transform(self, X, y, sample_weight=None): # Set the OOB estimations and validation accuracy self.oob_decision_function_ = oob_decision_function / self.n_estimators - y_pred = np.argmax(oob_decision_function, axis=1) - self.val_acc_ = accuracy_score(y, y_pred, sample_weight=sample_weight) + if self.is_classifier: + y_pred = np.argmax(oob_decision_function, axis=1) + self.val_acc_ = accuracy_score( + y, y_pred, sample_weight=sample_weight + ) + else: + y_pred = self.oob_decision_function_ + self.val_acc_ = mean_squared_error( + y, y_pred, sample_weight=sample_weight + ) X_aug = np.hstack(X_aug) return X_aug - def transform(self, X): + def transform(self, X, is_classifier): """ Return the concatenated transformation results from all base estimators.""" n_samples, _ = X.shape - X_aug = np.zeros((n_samples, self.n_classes * self.n_estimators)) + if is_classifier: + X_aug = np.zeros((n_samples, self.n_classes * self.n_estimators)) + else: + X_aug = np.zeros((n_samples, self.n_estimators)) for idx, (key, estimator) in 
enumerate(self.estimators_.items()): if self.verbose > 1: msg = "{} - Evaluating estimator = {:<5} in layer = {}" @@ -175,15 +191,21 @@ def transform(self, X): # Load the estimator from the buffer estimator = self.buffer.load_estimator(estimator) - left, right = self.n_classes * idx, self.n_classes * (idx + 1) - X_aug[:, left:right] += estimator.transform(X) + if is_classifier: + left, right = self.n_classes * idx, self.n_classes * (idx + 1) + else: + left, right = idx, (idx + 1) + X_aug[:, left:right] += estimator.predict(X) return X_aug - def predict_full(self, X): + def predict_full(self, X, is_classifier): """Return the concatenated predictions from all base estimators.""" n_samples, _ = X.shape - pred = np.zeros((n_samples, self.n_classes * self.n_estimators)) + if is_classifier: + pred = np.zeros((n_samples, self.n_classes * self.n_estimators)) + else: + pred = np.zeros((n_samples, self.n_estimators)) for idx, (key, estimator) in enumerate(self.estimators_.items()): if self.verbose > 1: msg = "{} - Evaluating estimator = {:<5} in layer = {}" @@ -193,7 +215,10 @@ def predict_full(self, X): # Load the estimator from the buffer estimator = self.buffer.load_estimator(estimator) - left, right = self.n_classes * idx, self.n_classes * (idx + 1) + if is_classifier: + left, right = self.n_classes * idx, self.n_classes * (idx + 1) + else: + left, right = idx, (idx + 1) pred[:, left:right] += estimator.predict(X) return pred diff --git a/deepforest/cascade.py b/deepforest/cascade.py index c7480d2..8d25485 100644 --- a/deepforest/cascade.py +++ b/deepforest/cascade.py @@ -1,7 +1,7 @@ """Implementation of Deep Forest.""" -__all__ = ["CascadeForestClassifier"] +__all__ = ["CascadeForestClassifier", "CascadeForestRegressor"] import time import numbers @@ -9,7 +9,8 @@ from abc import ABCMeta, abstractmethod from sklearn.preprocessing import LabelEncoder from sklearn.utils.multiclass import type_of_target -from sklearn.base import BaseEstimator, ClassifierMixin +from sklearn.base import BaseEstimator, ClassifierMixin, RegressorMixin +from sklearn.base import is_classifier from . import _utils from . import _io @@ -25,7 +26,7 @@ def _get_predictor_kwargs(predictor_kwargs, **kwargs) -> dict: return predictor_kwargs -def _build_predictor( +def _build_classifier_predictor( predictor_name, n_estimators, n_outputs, @@ -107,7 +108,89 @@ def _build_predictor( return predictor -__model_doc = """ +def _build_regressor_predictor( + predictor_name, + n_estimators, + n_outputs, + max_depth=None, + min_samples_leaf=1, + n_jobs=None, + random_state=None, + predictor_kwargs={}, +): + """Build the predictor concatenated to the deep forest.""" + predictor_name = predictor_name.lower() + + # Random Forest + if predictor_name == "forest": + from .forest import RandomForestRegressor + + predictor = RandomForestRegressor( + **_get_predictor_kwargs( + predictor_kwargs, + n_estimators=n_estimators, + max_depth=max_depth, + min_samples_leaf=min_samples_leaf, + n_jobs=n_jobs, + random_state=random_state, + ) + ) + # XGBoost + elif predictor_name == "xgboost": + try: + xgb = __import__("xgboost.sklearn") + except ModuleNotFoundError: + msg = ( + "Cannot load the module XGBoost when building the predictor." + " Please make sure that XGBoost is installed." + ) + raise ModuleNotFoundError(msg) + + # The argument `tree_method` is always set as `hist` for XGBoost, + # because the exact mode of XGBoost is too slow. 
+ objective = "reg:squarederror" + predictor = xgb.sklearn.XGBRegressor( + **_get_predictor_kwargs( + predictor_kwargs, + objective=objective, + n_estimators=n_estimators, + tree_method="hist", + n_jobs=n_jobs, + random_state=random_state, + ) + ) + # LightGBM + elif predictor_name == "lightgbm": + try: + lgb = __import__("lightgbm.sklearn") + except ModuleNotFoundError: + msg = ( + "Cannot load the module LightGBM when building the predictor." + " Please make sure that LightGBM is installed." + ) + raise ModuleNotFoundError(msg) + + objective = "regression" + predictor = lgb.LGBMRegressor( + **_get_predictor_kwargs( + predictor_kwargs, + objective=objective, + n_estimators=n_estimators, + n_jobs=n_jobs, + random_state=random_state, + ) + ) + else: + msg = ( + "The name of the predictor should be one of {{forest, xgboost," + " lightgbm}}, but got {} instead." + ) + raise NotImplementedError(msg.format(predictor_name)) + + return predictor + + +__classifier_model_doc = """ Parameters ---------- n_bins : :obj:`int`, default=255 @@ -184,7 +267,7 @@ def _build_predictor( """ -__fit_doc = """ +__classifier_fit_doc = """ .. note:: @@ -213,6 +296,111 @@ def _build_predictor( Sample weights. If ``None``, then samples are equally weighted. """ +__regressor_model_doc = """ + Parameters + ---------- + n_bins : :obj:`int`, default=255 + The number of bins used for non-missing values. In addition to the + ``n_bins`` bins, one more bin is reserved for missing values. Its + value must be no smaller than 2 and no greater than 255. + bin_subsample : :obj:`int`, default=2e5 + The number of samples used to construct feature discrete bins. If + the size of training set is smaller than ``bin_subsample``, then all + training samples will be used. + max_layers : :obj:`int`, default=20 + The maximum number of cascade layers in the deep forest. Notice that + the actual number of layers can be smaller than ``max_layers`` because + of the internal early stopping stage. + n_estimators : :obj:`int`, default=2 + The number of estimator in each cascade layer. It will be multiplied + by 2 internally because each estimator contains a + :class:`RandomForestRegressor` and a :class:`ExtraTreesRegressor`, + respectively. + n_trees : :obj:`int`, default=100 + The number of trees in each estimator. + max_depth : :obj:`int`, default=None + The maximum depth of each tree. ``None`` indicates no constraint. + min_samples_leaf : :obj:`int`, default=1 + The minimum number of samples required to be at a leaf node. + use_predictor : :obj:`bool`, default=False + Whether to build the predictor concatenated to the deep forest. Using + the predictor may improve the performance of deep forest. + predictor : :obj:`{"forest", "xgboost", "lightgbm"}`, default="forest" + The type of the predictor concatenated to the deep forest. If + ``use_predictor`` is False, this parameter will have no effect. + predictor_kwargs : :obj:`dict`, default={} + The configuration of the predictor concatenated to the deep forest. + Specifying this will extend/overwrite the original parameters inherit + from deep forest. + If ``use_predictor`` is False, this parameter will have no effect. + n_tolerant_rounds : :obj:`int`, default=2 + Specify when to conduct early stopping. The training process + terminates when the validation performance on the training set does + not improve compared against the best validation performance achieved + so far for ``n_tolerant_rounds`` rounds. + delta : :obj:`float`, default=1e-5 + Specify the threshold on early stopping. 
The counting on + ``n_tolerant_rounds`` is triggered if the performance of a fitted + cascade layer does not improve by ``delta`` compared against the best + validation performance achieved so far. + partial_mode : :obj:`bool`, default=False + Whether to train the deep forest in partial mode. For large + datasets, it is recommended to use the partial mode. + + - If ``True``, the partial mode is activated and all fitted + estimators will be dumped in a local buffer; + - If ``False``, all fitted estimators are directly stored in the + memory. + n_jobs : :obj:`int` or ``None``, default=None + The number of jobs to run in parallel for both :meth:`fit` and + :meth:`predict`. None means 1 unless in a + :obj:`joblib.parallel_backend` context. ``-1`` means using all + processors. + random_state : :obj:`int` or ``None``, default=None + + - If :obj:`int`, ``random_state`` is the seed used by the random + number generator; + - If ``None``, the random number generator is the RandomState + instance used by :mod:`np.random`. + verbose : :obj:`int`, default=1 + Controls the verbosity when fitting and predicting. + + - If ``<= 0``, silent mode, which means no logging information will + be displayed; + - If ``1``, logging information on the cascade layer level will be + displayed; + - If ``> 1``, full logging information will be displayed. +""" + +__regressor_fit_doc = """ + + .. note:: + + Deep forest supports two kinds of modes for training: + + - **Full memory mode**, in which the training / testing data and + all fitted estimators are directly stored in the memory. + - **Partial mode**, in which after fitting each estimator using + the training data, it will be dumped in the buffer. During the + evaluating stage, the dumped estimators are reloaded into the + memory sequentially to evaluate the testing data. + + By setting the ``partial_mode`` to ``True``, the partial mode is + activated, and a local buffer will be created at the current + directory. The partial mode is able to reduce the running memory + cost when training the deep forest. + + Parameters + ---------- + X : :obj:`numpy.ndarray` of shape (n_samples, n_features) + The training data. Internally, it will be converted to + ``np.uint8``. + y : :obj:`numpy.ndarray` of shape (n_samples,) + The target of input samples. + sample_weight : :obj:`numpy.ndarray` of shape (n_samples,), default=None + Sample weights. If ``None``, then samples are equally weighted. 
+""" + def deepforest_model_doc(header, item): """ @@ -228,7 +416,13 @@ def deepforest_model_doc(header, item): def get_doc(item): """Return the selected item.""" - __doc = {"model": __model_doc, "fit": __fit_doc} + __doc = { + "regressor_model": __regressor_model_doc, + "regressor_fit": __regressor_fit_doc, + "classifier_model": __classifier_model_doc, + "classifier_fit": __classifier_fit_doc, + } + return __doc[item] def adddoc(cls): @@ -298,8 +492,10 @@ def __getitem__(self, index): def _get_n_output(self, y): """Return the number of output inferred from the training labels.""" - n_output = np.unique(y).shape[0] # classification - return n_output + if is_classifier(self): + n_output = np.unique(y).shape[0] # classification + return n_output + return 1 # this parameter are not used in regression def _get_layer(self, layer_idx): """Get the layer from the internal container according to the index.""" @@ -460,6 +656,13 @@ def _handle_early_stopping(self): msg = "{} The optimal number of layers: {}" print(msg.format(_utils.ctime(), self.n_layers_)) + def _if_improved(self, new_pivot, pivot, delta, is_classifier): + """ + Return true if new vlidation result is better than previous""" + if is_classifier: + return new_pivot >= pivot + delta + return new_pivot <= pivot - delta + @abstractmethod def _repr_performance(self, pivot): """Format the printting information on training performance.""" @@ -474,7 +677,9 @@ def predict(self, X): @property def n_aug_features_(self): - return 2 * self.n_estimators * self.n_outputs_ + if is_classifier(self): + return 2 * self.n_estimators * self.n_outputs_ + return 2 * self.n_estimators # flake8: noqa: E501 def fit(self, X, y, sample_weight=None): @@ -514,13 +719,16 @@ def fit(self, X, y, sample_weight=None): self.n_jobs, self.random_state, self.verbose, + is_classifier(self), ) if self.verbose > 0: print("{} Fitting cascade layer = {:<2}".format(_utils.ctime(), 0)) tic = time.time() - X_aug_train_ = layer_.fit_transform(X_train_, y, sample_weight) + X_aug_train_ = layer_.fit_transform( + X_train_, y, sample_weight=sample_weight + ) toc = time.time() training_time = toc - tic @@ -586,6 +794,7 @@ def fit(self, X, y, sample_weight=None): self.n_jobs, self.random_state, self.verbose, + is_classifier(self), ) X_middle_train_ = self.buffer_.cache_data( @@ -598,7 +807,7 @@ def fit(self, X, y, sample_weight=None): tic = time.time() X_aug_train_ = layer_.fit_transform( - X_middle_train_, y, sample_weight + X_middle_train_, y, sample_weight=sample_weight ) toc = time.time() training_time = toc - tic @@ -622,7 +831,9 @@ def fit(self, X, y, sample_weight=None): # training stage will terminate before reaching the maximum number # of layers. 
- if new_pivot >= pivot + self.delta: + if self._if_improved( + new_pivot, pivot, self.delta, is_classifier(self) + ): # Update the cascade layer self._set_layer(layer_idx, layer_) @@ -667,16 +878,28 @@ def fit(self, X, y, sample_weight=None): # Build the predictor if `self.use_predictor` is True if self.use_predictor: - self.predictor_ = _build_predictor( - self.predictor_name, - self.n_trees, - self.n_outputs_, - self.max_depth, - self.min_samples_leaf, - self.n_jobs, - self.random_state, - self.predictor_kwargs, - ) + if is_classifier(self): + self.predictor_ = _build_classifier_predictor( + self.predictor_name, + self.n_trees, + self.n_outputs_, + self.max_depth, + self.min_samples_leaf, + self.n_jobs, + self.random_state, + self.predictor_kwargs, + ) + else: + self.predictor_ = _build_regressor_predictor( + self.predictor_name, + self.n_trees, + self.n_outputs_, + self.max_depth, + self.min_samples_leaf, + self.n_jobs, + self.random_state, + self.predictor_kwargs, + ) binner_ = Binner( n_bins=self.n_bins, @@ -874,7 +1097,8 @@ def clean(self): @deepforest_model_doc( - """Implementation of the deep forest for classification.""", "model" + """Implementation of the deep forest for classification.""", + "classifier_model", ) class CascadeForestClassifier(BaseCascadeForest, ClassifierMixin): def __init__( @@ -957,7 +1181,7 @@ def _repr_performance(self, pivot): return msg.format(pivot * 100) @deepforest_model_doc( - """Build a deep forest using the training data.""", "fit" + """Build a deep forest using the training data.""", "classifier_fit" ) def fit(self, X, y, sample_weight=None): @@ -1000,7 +1224,7 @@ def predict_proba(self, X): print(msg.format(_utils.ctime(), layer_idx)) if layer_idx == 0: - X_aug_test_ = layer.transform(X_test) + X_aug_test_ = layer.transform(X_test, is_classifier(self)) elif layer_idx < self.n_layers_ - 1: binner_ = self._get_binner(layer_idx) X_aug_test_ = self._bin_data( @@ -1009,7 +1233,9 @@ def predict_proba(self, X): X_middle_test_ = _utils.merge_array( X_middle_test_, X_aug_test_, self.n_features_ ) - X_aug_test_ = layer.transform(X_middle_test_) + X_aug_test_ = layer.transform( + X_middle_test_, is_classifier(self) + ) else: binner_ = self._get_binner(layer_idx) X_aug_test_ = self._bin_data( @@ -1021,7 +1247,9 @@ def predict_proba(self, X): # Skip calling the `transform` if not using the predictor if self.use_predictor: - X_aug_test_ = layer.transform(X_middle_test_) + X_aug_test_ = layer.transform( + X_middle_test_, is_classifier(self) + ) if self.use_predictor: @@ -1039,7 +1267,7 @@ def predict_proba(self, X): predictor = self.buffer_.load_predictor(self.predictor_) proba = predictor.predict_proba(X_middle_test_) else: - proba = layer.predict_full(X_middle_test_) + proba = layer.predict_full(X_middle_test_, is_classifier(self)) proba = _utils.merge_proba(proba, self.n_outputs_) return proba @@ -1062,3 +1290,139 @@ def predict(self, X): proba = self.predict_proba(X) y = self._decode_class_labels(np.argmax(proba, axis=1)) return y + + +@deepforest_model_doc( + """Implementation of the deep forest for regression.""", "regressor_model" +) +class CascadeForestRegressor(BaseCascadeForest, RegressorMixin): + def __init__( + self, + n_bins=255, + bin_subsample=2e5, + bin_type="percentile", + max_layers=20, + n_estimators=2, + n_trees=100, + max_depth=None, + min_samples_leaf=1, + use_predictor=False, + predictor="forest", + predictor_kwargs={}, + n_tolerant_rounds=2, + delta=1e-5, + partial_mode=False, + n_jobs=None, + random_state=None, + verbose=1, + ): + 
super().__init__( + n_bins=n_bins, + bin_subsample=bin_subsample, + bin_type=bin_type, + max_layers=max_layers, + n_estimators=n_estimators, + n_trees=n_trees, + max_depth=max_depth, + min_samples_leaf=min_samples_leaf, + use_predictor=use_predictor, + predictor=predictor, + predictor_kwargs=predictor_kwargs, + n_tolerant_rounds=n_tolerant_rounds, + delta=delta, + partial_mode=partial_mode, + n_jobs=n_jobs, + random_state=random_state, + verbose=verbose, + ) + + def _repr_performance(self, pivot): + msg = "Val Acc = {:.3f}" + return msg.format(pivot) + + @deepforest_model_doc( + """Build a deep forest using the training data.""", "regressor_fit" + ) + def fit(self, X, y, sample_weight=None): + super().fit(X, y, sample_weight) + + def predict(self, X): + """ + Predict regression target for X. + + Parameters + ---------- + X : :obj:`numpy.ndarray` of shape (n_samples, n_features) + The input samples. Internally, its dtype will be converted to + ``np.uint8``. + + Returns + ------- + y : :obj:`numpy.ndarray` of shape (n_samples,) + The predicted values. + """ + if not self.is_fitted_: + raise AttributeError("Please fit the model first.") + self._check_input(X) + + if self.verbose > 0: + print("{} Start to evalute the model:".format(_utils.ctime())) + + binner_ = self._get_binner(0) + X_test = self._bin_data(binner_, X, is_training_data=False) + X_middle_test_ = _utils.init_array(X_test, self.n_aug_features_) + + for layer_idx in range(self.n_layers_): + layer = self._get_layer(layer_idx) + + if self.verbose > 0: + msg = "{} Evaluating cascade layer = {:<2}" + print(msg.format(_utils.ctime(), layer_idx)) + + if layer_idx == 0: + X_aug_test_ = layer.transform(X_test, is_classifier(self)) + elif layer_idx < self.n_layers_ - 1: + binner_ = self._get_binner(layer_idx) + X_aug_test_ = self._bin_data( + binner_, X_aug_test_, is_training_data=False + ) + X_middle_test_ = _utils.merge_array( + X_middle_test_, X_aug_test_, self.n_features_ + ) + X_aug_test_ = layer.transform( + X_middle_test_, is_classifier(self) + ) + else: + binner_ = self._get_binner(layer_idx) + X_aug_test_ = self._bin_data( + binner_, X_aug_test_, is_training_data=False + ) + X_middle_test_ = _utils.merge_array( + X_middle_test_, X_aug_test_, self.n_features_ + ) + + # Skip calling the `transform` if not using the predictor + if self.use_predictor: + X_aug_test_ = layer.transform( + X_middle_test_, is_classifier(self) + ) + + if self.use_predictor: + + if self.verbose > 0: + print("{} Evaluating the predictor".format(_utils.ctime())) + + binner_ = self._get_binner(self.n_layers_) + X_aug_test_ = self._bin_data( + binner_, X_aug_test_, is_training_data=False + ) + X_middle_test_ = _utils.merge_array( + X_middle_test_, X_aug_test_, self.n_features_ + ) + + predictor = self.buffer_.load_predictor(self.predictor_) + _y = predictor.predict(X_middle_test_) + else: + _y = layer.predict_full(X_middle_test_, is_classifier(self)) + _y = _y.sum(axis=1) / _y.shape[1] + return _y diff --git a/deepforest/forest.py b/deepforest/forest.py index 030c5d7..203bdb8 100644 --- a/deepforest/forest.py +++ b/deepforest/forest.py @@ -6,7 +6,12 @@ """ -__all__ = ["RandomForestClassifier", "ExtraTreesClassifier"] +__all__ = [ + "RandomForestClassifier", + "RandomForestRegressor", + "ExtraTreesClassifier", + "ExtraTreesRegressor", +] import numbers from warnings import warn @@ -22,7 +27,8 @@ from sklearn.base import clone from sklearn.base import BaseEstimator from sklearn.base import MetaEstimatorMixin -from sklearn.base import ClassifierMixin, 
MultiOutputMixin +from sklearn.base import is_classifier +from sklearn.base import ClassifierMixin, RegressorMixin, MultiOutputMixin from sklearn.utils import check_random_state, compute_sample_weight from sklearn.exceptions import DataConversionWarning from sklearn.utils.fixes import _joblib_parallel_args @@ -32,7 +38,12 @@ from . import _cutils as _LIB from . import _forest as _C_FOREST -from .tree import DecisionTreeClassifier, ExtraTreeClassifier +from .tree import ( + DecisionTreeClassifier, + DecisionTreeRegressor, + ExtraTreeClassifier, + ExtraTreeRegressor, +) from .tree._tree import DOUBLE @@ -96,6 +107,8 @@ def _parallel_build_trees( n_samples_bootstrap, sample_weight, out, + mask, + is_classifier, lock, ): """ @@ -123,14 +136,17 @@ def _parallel_build_trees( value = np.ascontiguousarray(value) value = np.squeeze(value, axis=1) - value /= value.sum(axis=1)[:, np.newaxis] + + if is_classifier: + value /= value.sum(axis=1)[:, np.newaxis] # Set the OOB predictions oob_prediction = _C_FOREST.predict( X[~sample_mask, :], feature, threshold, children, value ) - with lock: + + mask += ~sample_mask out[~sample_mask, :] += oob_prediction return feature, threshold, children, value @@ -194,7 +210,6 @@ def _partition_estimators(n_estimators, n_jobs): def _accumulate_prediction(feature, threshold, children, value, X, out, lock): """This is a utility function for joblib's Parallel.""" prediction = _C_FOREST.predict(X, feature, threshold, children, value) - with lock: if len(out) == 1: out[0] += prediction @@ -284,8 +299,9 @@ def _make_estimator(self, append=True, random_state=None): ) # Pass the inferred class information to avoid redudant finding. - estimator.classes_ = self.classes_ - estimator.n_classes_ = np.array(self.n_classes_, dtype=np.int32) + if is_classifier(estimator): + estimator.classes_ = self.classes_ + estimator.n_classes_ = np.array(self.n_classes_, dtype=np.int32) if random_state is not None: _set_random_states(estimator, random_state) @@ -411,6 +427,12 @@ def fit(self, X, y, sample_weight=None): if getattr(y, "dtype", None) != DOUBLE or not y.flags.contiguous: y = np.ascontiguousarray(y, dtype=DOUBLE) + if expanded_class_weight is not None: + if sample_weight is not None: + sample_weight = sample_weight * expanded_class_weight + else: + sample_weight = expanded_class_weight + # Get bootstrap sample size n_samples_bootstrap = _get_n_samples_bootstrap( n_samples=X.shape[0], max_samples=self.max_samples @@ -427,9 +449,13 @@ def fit(self, X, y, sample_weight=None): ] # Pre-allocate OOB estimations - oob_decision_function = np.zeros( - (n_samples, self.classes_[0].shape[0]) - ) + if is_classifier(self): + oob_decision_function = np.zeros( + (n_samples, self.classes_[0].shape[0]) + ) + else: + oob_decision_function = np.zeros((n_samples, 1)) + mask = np.zeros(n_samples) lock = threading.Lock() rets = Parallel( @@ -444,11 +470,12 @@ def fit(self, X, y, sample_weight=None): n_samples_bootstrap, sample_weight, oob_decision_function, + mask, + is_classifier(self), lock, ) for i, t in enumerate(trees) ) - # Collect newly grown trees for feature, threshold, children, value in rets: @@ -460,17 +487,22 @@ def fit(self, X, y, sample_weight=None): self.values.append(value) # Check the OOB predictions - if (oob_decision_function.sum(axis=1) == 0).any(): + if ( + is_classifier(self) + and (oob_decision_function.sum(axis=1) == 0).any() + ): warn( "Some inputs do not have OOB predictions. " "This probably means too few trees were used " "to compute any reliable oob predictions." 
) - - prediction = ( - oob_decision_function - / oob_decision_function.sum(axis=1)[:, np.newaxis] - ) + if is_classifier(self): + prediction = ( + oob_decision_function + / oob_decision_function.sum(axis=1)[:, np.newaxis] + ) + else: + prediction = oob_decision_function / mask.reshape(-1, 1) self.oob_decision_function_ = prediction @@ -701,3 +733,206 @@ def __init__( self.max_features = max_features self.min_impurity_decrease = min_impurity_decrease self.min_impurity_split = min_impurity_split + + +class ForestRegressor(RegressorMixin, BaseForest, metaclass=ABCMeta): + """ + Base class for forest of trees-based regressors. + + Warning: This class should not be used directly. Use derived classes + instead. + """ + + @abstractmethod + def __init__( + self, + base_estimator, + n_estimators=100, + *, + estimator_params=tuple(), + n_jobs=None, + random_state=None, + verbose=0, + max_samples=None + ): + super().__init__( + base_estimator, + n_estimators=n_estimators, + estimator_params=estimator_params, + n_jobs=n_jobs, + random_state=random_state, + verbose=verbose, + max_samples=max_samples, + ) + + def predict(self, X): + """ + Predict regression target for X. + + The predicted regression target of an input sample is computed as the + mean predicted regression targets of the trees in the forest. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + The input samples. Internally, its dtype will be converted to + ``dtype=np.float32``. If a sparse matrix is provided, it will be + converted into a sparse ``csr_matrix``. + + Returns + ------- + y : ndarray of shape (n_samples,) or (n_samples, n_outputs) + The predicted values. + """ + check_is_fitted(self) + + # Assign chunk of trees to jobs + n_jobs, _, _ = _partition_estimators(self.n_estimators, self.n_jobs) + + # avoid storing the output of every estimator by summing them here + y_hat = np.zeros((X.shape[0], 1), dtype=np.float64) + + # Parallel loop + lock = threading.Lock() + Parallel( + n_jobs=n_jobs, + verbose=self.verbose, + **_joblib_parallel_args(require="sharedmem") + )( + delayed(_accumulate_prediction)( + self.features[i], + self.thresholds[i], + self.childrens[i], + self.values[i], + X, + [y_hat], + lock, + ) + for i in range(self.n_estimators) + ) + + y_hat /= self.n_estimators + return y_hat + + @staticmethod + def _get_oob_predictions(tree, X): + """Compute the OOB predictions for an individual tree. + + Parameters + ---------- + tree : DecisionTreeRegressor object + A single decision tree regressor. + X : ndarray of shape (n_samples, n_features) + The OOB samples. + + Returns + ------- + y_pred : ndarray of shape (n_samples, 1, n_outputs) + The OOB associated predictions. 
+ """ + y_pred = tree.predict(X, check_input=False) + if y_pred.ndim == 1: + # single output regression + y_pred = y_pred[:, np.newaxis, np.newaxis] + else: + # multioutput regression + y_pred = y_pred[:, np.newaxis, :] + return y_pred + + +class RandomForestRegressor(ForestRegressor): + @_deprecate_positional_args + def __init__( + self, + n_estimators=100, + *, + criterion="mse", + max_depth=None, + min_samples_split=2, + min_samples_leaf=1, + min_weight_fraction_leaf=0.0, + max_features="auto", + min_impurity_decrease=0.0, + min_impurity_split=None, + n_jobs=None, + random_state=None, + verbose=0, + max_samples=None + ): + super().__init__( + base_estimator=DecisionTreeRegressor(), + n_estimators=n_estimators, + estimator_params=( + "criterion", + "max_depth", + "min_samples_split", + "min_samples_leaf", + "min_weight_fraction_leaf", + "max_features", + "min_impurity_decrease", + "min_impurity_split", + "random_state", + ), + n_jobs=n_jobs, + random_state=random_state, + verbose=verbose, + max_samples=max_samples, + ) + + self.criterion = criterion + self.max_depth = max_depth + self.min_samples_split = min_samples_split + self.min_samples_leaf = min_samples_leaf + self.min_weight_fraction_leaf = min_weight_fraction_leaf + self.max_features = max_features + self.min_impurity_decrease = min_impurity_decrease + self.min_impurity_split = min_impurity_split + + +class ExtraTreesRegressor(ForestRegressor): + @_deprecate_positional_args + def __init__( + self, + n_estimators=100, + *, + criterion="mse", + max_depth=None, + min_samples_split=2, + min_samples_leaf=1, + min_weight_fraction_leaf=0.0, + max_features="auto", + min_impurity_decrease=0.0, + min_impurity_split=None, + n_jobs=None, + random_state=None, + verbose=0, + max_samples=None + ): + super().__init__( + base_estimator=ExtraTreeRegressor(), + n_estimators=n_estimators, + estimator_params=( + "criterion", + "max_depth", + "min_samples_split", + "min_samples_leaf", + "min_weight_fraction_leaf", + "max_features", + "min_impurity_decrease", + "min_impurity_split", + "random_state", + ), + n_jobs=n_jobs, + random_state=random_state, + verbose=verbose, + max_samples=max_samples, + ) + + self.criterion = criterion + self.max_depth = max_depth + self.min_samples_split = min_samples_split + self.min_samples_leaf = min_samples_leaf + self.min_weight_fraction_leaf = min_weight_fraction_leaf + self.max_features = max_features + self.min_impurity_decrease = min_impurity_decrease + self.min_impurity_split = min_impurity_split diff --git a/deepforest/tree/__init__.py b/deepforest/tree/__init__.py index 3fdfb3c..e1b0767 100644 --- a/deepforest/tree/__init__.py +++ b/deepforest/tree/__init__.py @@ -1,6 +1,13 @@ from .tree import BaseDecisionTree from .tree import DecisionTreeClassifier +from .tree import DecisionTreeRegressor from .tree import ExtraTreeClassifier +from .tree import ExtraTreeRegressor - -__all__ = ["BaseDecisionTree", "DecisionTreeClassifier", "ExtraTreeClassifier"] +__all__ = [ + "BaseDecisionTree", + "DecisionTreeClassifier", + "DecisionTreeRegressor", + "ExtraTreeClassifier", + "ExtraTreeRegressor", +] diff --git a/deepforest/tree/tree.py b/deepforest/tree/tree.py index c73792b..ead6dc2 100644 --- a/deepforest/tree/tree.py +++ b/deepforest/tree/tree.py @@ -6,7 +6,12 @@ """ -__all__ = ["DecisionTreeClassifier", "ExtraTreeClassifier"] +__all__ = [ + "DecisionTreeClassifier", + "DecisionTreeRegressor", + "ExtraTreeClassifier", + "ExtraTreeRegressor", +] import numbers import warnings @@ -18,6 +23,7 @@ from sklearn.base import 
BaseEstimator from sklearn.base import ClassifierMixin +from sklearn.base import RegressorMixin from sklearn.base import is_classifier from sklearn.base import MultiOutputMixin from sklearn.utils import check_array @@ -43,6 +49,7 @@ DOUBLE = _tree.DOUBLE CRITERIA_CLF = {"gini": _criterion.Gini, "entropy": _criterion.Entropy} +CRITERIA_REG = {"mse": _criterion.MSE} DENSE_SPLITTERS = { "best": _splitter.BestSplitter, @@ -77,7 +84,7 @@ def __init__( min_impurity_decrease, min_impurity_split, class_weight=None, - presort="deprecated" + presort="deprecated", ): self.criterion = criterion self.splitter = splitter @@ -164,7 +171,7 @@ def fit( self.n_outputs_ = y.shape[1] # `classes_` and `n_classes_` were set by the forest. - if not hasattr(self, "classes_"): + if not hasattr(self, "classes_") and is_classifier(self): check_classification_targets(y) y = np.copy(y) @@ -323,9 +330,14 @@ def fit( # Build tree criterion = self.criterion if not isinstance(criterion, Criterion): - criterion = CRITERIA_CLF[self.criterion]( - self.n_outputs_, self.n_classes_ - ) + if is_classifier(self): + criterion = CRITERIA_CLF[self.criterion]( + self.n_outputs_, self.n_classes_ + ) + else: + criterion = CRITERIA_REG[self.criterion]( + self.n_outputs_, n_samples + ) SPLITTERS = DENSE_SPLITTERS @@ -339,7 +351,17 @@ def fit( random_state, ) - self.tree_ = Tree(self.n_features_, self.n_classes_, self.n_outputs_) + if is_classifier(self): + self.tree_ = Tree( + self.n_features_, self.n_classes_, self.n_outputs_ + ) + else: + self.tree_ = Tree( + self.n_features_, + # TODO: tree should't need this in this case + np.array([1] * self.n_outputs_, dtype=np.int32), + self.n_outputs_, + ) builder = DepthFirstTreeBuilder( splitter, @@ -409,7 +431,12 @@ def predict(self, X, check_input=True): X = self._validate_X_predict(X, check_input) proba = self.tree_.predict(X) - return self.classes_.take(np.argmax(proba, axis=1), axis=0) + # Classification + if is_classifier(self): + return self.classes_.take(np.argmax(proba, axis=1), axis=0) + # Regression + else: + return proba[:, 0] class DecisionTreeClassifier(ClassifierMixin, BaseDecisionTree): @@ -428,7 +455,7 @@ def __init__( min_impurity_decrease=0.0, min_impurity_split=None, class_weight=None, - presort="deprecated" + presort="deprecated", ): super().__init__( @@ -472,6 +499,49 @@ def predict_proba(self, X, check_input=True): return proba +class DecisionTreeRegressor(RegressorMixin, BaseDecisionTree): + @_deprecate_positional_args + def __init__( + self, + *, + criterion="mse", + splitter="best", + max_depth=None, + min_samples_split=2, + min_samples_leaf=1, + min_weight_fraction_leaf=0.0, + max_features=None, + random_state=None, + min_impurity_decrease=0.0, + min_impurity_split=None, + presort="deprecated", + ): + super().__init__( + criterion=criterion, + splitter=splitter, + max_depth=max_depth, + min_samples_split=min_samples_split, + min_samples_leaf=min_samples_leaf, + min_weight_fraction_leaf=min_weight_fraction_leaf, + max_features=max_features, + min_impurity_decrease=min_impurity_decrease, + min_impurity_split=min_impurity_split, + random_state=random_state, + ) + + def fit( + self, X, y, sample_weight=None, check_input=True, X_idx_sorted=None + ): + + return super().fit( + X, + y, + sample_weight=sample_weight, + check_input=check_input, + X_idx_sorted=X_idx_sorted, + ) + + class ExtraTreeClassifier(DecisionTreeClassifier): @_deprecate_positional_args def __init__( @@ -487,7 +557,7 @@ def __init__( random_state=None, min_impurity_decrease=0.0, min_impurity_split=None, - 
class_weight=None + class_weight=None, ): super().__init__( @@ -503,3 +573,34 @@ def __init__( min_impurity_split=min_impurity_split, random_state=random_state, ) + + +class ExtraTreeRegressor(DecisionTreeRegressor): + @_deprecate_positional_args + def __init__( + self, + *, + criterion="mse", + splitter="random", + max_depth=None, + min_samples_split=2, + min_samples_leaf=1, + min_weight_fraction_leaf=0.0, + max_features=None, + random_state=None, + min_impurity_decrease=0.0, + min_impurity_split=None, + ): + + super().__init__( + criterion=criterion, + splitter=splitter, + max_depth=max_depth, + min_samples_split=min_samples_split, + min_samples_leaf=min_samples_leaf, + min_weight_fraction_leaf=min_weight_fraction_leaf, + max_features=max_features, + min_impurity_decrease=min_impurity_decrease, + min_impurity_split=min_impurity_split, + random_state=random_state, + ) diff --git a/docs/api_reference.rst b/docs/api_reference.rst index db221bf..be7476c 100644 --- a/docs/api_reference.rst +++ b/docs/api_reference.rst @@ -12,3 +12,13 @@ CascadeForestClassifier :show-inheritance: :no-undoc-members: :member-order: bysource + +CascadeForestRegressor +----------------------- + +.. autoclass:: deepforest.CascadeForestRegressor + :members: + :inherited-members: + :show-inheritance: + :no-undoc-members: + :member-order: bysource diff --git a/docs/experiments.rst b/docs/experiments.rst index 79ba98a..803c2be 100644 --- a/docs/experiments.rst +++ b/docs/experiments.rst @@ -2,7 +2,7 @@ Experiments =========== Baseline -******** +-------- For all experiments, we used 5 popular tree-based ensemble methods as baselines. Details on the baselines are listed in the following table: +------------------+---------------------------------------------------------------+ @@ -20,7 +20,7 @@ For all experiments, we used 5 popular tree-based ensemble methods as baselines. +------------------+---------------------------------------------------------------+ Environment -*********** +----------- For all experiments, we used a single linux server. Details on the specifications are listed in the table below. All processors were used for training and evaluating. +------------------+-----------------+--------+ @@ -30,9 +30,12 @@ For all experiments, we used a single linux server. Details on the specification +------------------+-----------------+--------+ Setting -******* +------- We kept the number of decision trees the same across all baselines, while remaining hyper-parameters were set to their default values. Running scripts on reproducing all experiment results are available, please refer to this `Repo`_. +Classification +-------------- + Dataset ******* @@ -137,6 +140,60 @@ Some observations are listed as follow: * Histogram-based GBDT (e.g., :class:`HGBDT`, :class:`XGB HIST`, :class:`LightGBM`) are typically faster mainly because decision tree in GBDT tends to have a much smaller tree depth; * With the number of input dimensions increasing (e.g., on mnist and fashion-mnist), random forest and deep forest can be faster. +Regression +---------- + +Dataset +******* + +We have also collected four datasets on univariate regression for a comparison on the regression problem. 
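A rough single-trial sketch of how such a comparison can be run is given below. It is illustrative only: the dataset, train/test split, random seed, and number of trees here are assumptions made for the sketch, not the settings behind the reported numbers, and the actual scripts are in the repository referenced in the Setting section.

.. code-block:: python

    from sklearn.datasets import load_boston
    from sklearn.ensemble import RandomForestRegressor
    from sklearn.metrics import mean_squared_error
    from sklearn.model_selection import train_test_split

    from deepforest import CascadeForestRegressor

    # One illustrative trial: every method is evaluated on the same split.
    X, y = load_boston(return_X_y=True)
    X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)

    # Keep the number of trees comparable across methods (assumed value).
    models = {
        "RF": RandomForestRegressor(n_estimators=100, random_state=0),
        "Deep Forest": CascadeForestRegressor(n_trees=100, random_state=0),
    }
    for name, model in models.items():
        model.fit(X_train, y_train)
        mse = mean_squared_error(y_test, model.predict(X_test))
        print("{}: testing MSE = {:.3f}".format(name, mse))

The four datasets used in the comparison are listed in the following table.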
+ ++------------------+------------+-----------+------------+ +| Name | # Training | # Testing | # Features | ++==================+============+===========+============+ +| `abalone`_ | 2,799 | 1,378 | 8 | ++------------------+------------+-----------+------------+ +| `cpusmall`_ | 5,489 | 2,703 | 12 | ++------------------+------------+-----------+------------+ +| `boston`_ | 379 | 127 | 13 | ++------------------+------------+-----------+------------+ +| `diabetes`_ | 303 | 139 | 10 | ++------------------+------------+-----------+------------+ + +Testing Mean Squared Error +************************** + +The table below shows the testing mean squared error of each method, with the best result on each dataset **bolded**. Each experiment was conducted over 5 independently trials, and the average result was reported. + ++----------+-----------+---------+-----------+----------+----------+-------------+ +| Name | RF | HGBDT | XGB EXACT | XGB HIST | LightGBM | Deep Forest | ++==========+===========+=========+===========+==========+==========+=============+ +| abalone | 4.79 | 5.40 | 5.73 | 5.75 | 5.60 | **4.66** | ++----------+-----------+---------+-----------+----------+----------+-------------+ +| cpusmall | 8.31 | 9.01 | 9.86 | 11.82 | 8.99 | **7.15** | ++----------+-----------+---------+-----------+----------+----------+-------------+ +| boston | **16.61** | 20.68 | 20.61 | 19.65 | 20.27 | 19.87 | ++----------+-----------+---------+-----------+----------+----------+-------------+ +| diabetes | 3796.62 | 4333.66 | 4337.15 | 4303.96 | 4435.95 | **3431.01** | ++----------+-----------+---------+-----------+----------+----------+-------------+ + +Runtime +******* + +Runtime in seconds reported in the table below covers both the training stage and evaluating stage. + ++----------+------+-------+-----------+----------+----------+-------------+ +| Name | RF | HGBDT | XGB EXACT | XGB HIST | LightGBM | Deep Forest | ++==========+======+=======+===========+==========+==========+=============+ +| abalone | 0.53 | 1.57 | 0.47 | 0.50 | 0.17 | 1.29 | ++----------+------+-------+-----------+----------+----------+-------------+ +| cpusmall | 1.87 | 3.59 | 1.71 | 1.25 | 0.36 | 2.06 | ++----------+------+-------+-----------+----------+----------+-------------+ +| boston | 0.70 | 1.75 | 0.19 | 0.22 | 0.20 | 1.45 | ++----------+------+-------+-----------+----------+----------+-------------+ +| diabetes | 0.37 | 0.66 | 0.14 | 0.18 | 0.06 | 1.09 | ++----------+------+-------+-----------+----------+----------+-------------+ + .. _`Random Forest`: https://scikit-learn.org/stable/modules/generated/sklearn.ensemble.RandomForestClassifier.html .. _`HGBDT`: https://scikit-learn.org/stable/modules/generated/sklearn.ensemble.HistGradientBoostingClassifier.html @@ -170,3 +227,11 @@ Some observations are listed as follow: .. _`mnist`: https://keras.io/api/datasets/mnist/ .. _`fashion mnist`: https://keras.io/api/datasets/fashion_mnist/ + +.. _`abalone`: https://www.csie.ntu.edu.tw/~cjlin/libsvmtools/datasets/regression.html#abalone + +.. _`cpusmall`: https://www.csie.ntu.edu.tw/~cjlin/libsvmtools/datasets/regression.html#cpusmall + +.. _`boston`: https://scikit-learn.org/stable/modules/generated/sklearn.datasets.load_boston.html + +.. 
_`diabetes`: https://scikit-learn.org/stable/modules/generated/sklearn.datasets.load_diabetes.html diff --git a/docs/index.rst b/docs/index.rst index 7c6fe9a..eceacaf 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -53,6 +53,23 @@ Quickstart print("Testing Accuracy: {:.3f} %".format(acc)) >>> Testing Accuracy: 98.667 % +.. code-block:: python + + from sklearn.datasets import load_boston + from sklearn.model_selection import train_test_split + from sklearn.metrics import mean_squared_error + + from deepforest import CascadeForestRegressor + + X, y = load_boston(return_X_y=True) + X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=1) + model = CascadeForestRegressor(random_state=1) + model.fit(X_train, y_train) + y_pred = model.predict(X_test) + mse = mean_squared_error(y_test, y_pred) + print("\nTesting MSE: {:.3f}".format(mse)) + >>> Testing MSE: 8.068 + Resources --------- diff --git a/tests/test_forest.py b/tests/test_forest.py index ab09d71..0274980 100644 --- a/tests/test_forest.py +++ b/tests/test_forest.py @@ -2,11 +2,13 @@ from deepforest import RandomForestClassifier from deepforest import ExtraTreesClassifier +from deepforest import RandomForestRegressor +from deepforest import ExtraTreesRegressor from deepforest.forest import _get_n_samples_bootstrap # Load utils from sklearn.ensemble._hist_gradient_boosting.binning import _BinMapper -from sklearn.datasets import load_iris, load_wine +from sklearn.datasets import load_iris, load_wine, load_boston from sklearn.ensemble._forest import ( _get_n_samples_bootstrap as sklearn_get_n_samples_bootstrap, ) @@ -51,7 +53,7 @@ def test_n_samples_bootstrap_invalid_type(): @pytest.mark.parametrize("load_func", [load_iris, load_wine]) -def test_forest_workflow(load_func): +def test_forest_classifier_workflow(load_func): n_estimators = 100 # to avoid oob warning random_state = 42 @@ -77,3 +79,32 @@ def test_forest_workflow(load_func): model.fit(X_binned, y) model.predict(X_binned) + + +@pytest.mark.parametrize("load_func", [load_boston]) +def test_forest_regressor_workflow(load_func): + + n_estimators = 100 # to avoid oob warning + random_state = 42 + + X, y = load_func(return_X_y=True) + + # Data binning + binner = _BinMapper(random_state=random_state) + X_binned = binner.fit_transform(X) + + # Random Forest + model = RandomForestRegressor( + n_estimators=n_estimators, random_state=random_state + ) + + model.fit(X_binned, y) + model.predict(X_binned) + + # Extremely Random Forest + model = ExtraTreesRegressor( + n_estimators=n_estimators, random_state=random_state + ) + + model.fit(X_binned, y) + model.predict(X_binned) diff --git a/tests/test_layer_estimator.py b/tests/test_layer_estimator.py index 9673955..425f93c 100644 --- a/tests/test_layer_estimator.py +++ b/tests/test_layer_estimator.py @@ -5,7 +5,7 @@ # Load utils from sklearn.ensemble._hist_gradient_boosting.binning import _BinMapper -from sklearn.datasets import load_digits +from sklearn.datasets import load_digits, load_boston from sklearn.model_selection import train_test_split @@ -43,11 +43,11 @@ } -def test_layer_properties_after_fitting(): +def test_classifier_layer_properties_after_fitting(): layer = Layer(**layer_kwargs) X_aug = layer.fit_transform(X_train, y_train) - y_pred_full = layer.predict_full(X_test) + y_pred_full = layer.predict_full(X_test, is_classifier=True) # n_trees assert ( @@ -61,6 +61,32 @@ def test_layer_properties_after_fitting(): assert y_pred_full.shape[1] == expect_dim +def test_regressor_layer_properties_after_fitting(): + # Load 
data and binning + X, y = load_boston(return_X_y=True) + binner = _BinMapper(random_state=142) + X_binned = binner.fit_transform(X) + + X_train, X_test, y_train, y_test = train_test_split( + X_binned, y, test_size=0.42, random_state=42 + ) + layer = Layer(**layer_kwargs) + layer.is_classifier = False + X_aug = layer.fit_transform(X_train, y_train) + y_pred_full = layer.predict_full(X_test, is_classifier=False) + + # n_trees + assert ( + layer.n_trees_ + == 2 * layer_kwargs["n_estimators"] * layer_kwargs["n_trees"] + ) + + # Output dim + expect_dim = 2 * layer_kwargs["n_estimators"] + assert X_aug.shape[1] == expect_dim + assert y_pred_full.shape[1] == expect_dim + + @pytest.mark.parametrize( "param", [(0, {"n_estimators": 0}), (1, {"n_trees": 0})] ) diff --git a/tests/test_model.py b/tests/test_model_classifier.py similarity index 95% rename from tests/test_model.py rename to tests/test_model_classifier.py index 582af0f..b304de4 100644 --- a/tests/test_model.py +++ b/tests/test_model_classifier.py @@ -170,8 +170,8 @@ def test_model_sample_weight(): assert_array_equal(y_pred_no_sample_weight, y_pred_equal_sample_weight) model = CascadeForestClassifier(**case_kwargs) - sample_weight = np.where(y_train == 0, 0.1, y_train) - model.fit(X_train, y_train, sample_weight=y_train) + sample_weight = np.where(y_train == 0, 1, 10) + model.fit(X_train, y_train, sample_weight=sample_weight) y_pred_skewed_sample_weight = model.predict(X_test) # Make sure the different predictions with None and equal sample_weight @@ -237,13 +237,15 @@ def test_model_invalid_training_params(param): @pytest.mark.parametrize("predictor", ["forest", "xgboost", "lightgbm"]) -def test_predictor_normal(predictor): - deepforest.cascade._build_predictor(predictor, n_estimators=1, n_outputs=2) +def test_classifier_predictor_normal(predictor): + deepforest.cascade._build_classifier_predictor( + predictor, n_estimators=1, n_outputs=2 + ) -def test_predictor_unknown(): +def test_classifier_predictor_unknown(): with pytest.raises(NotImplementedError) as excinfo: - deepforest.cascade._build_predictor( + deepforest.cascade._build_classifier_predictor( "unknown", n_estimators=1, n_outputs=2 ) assert "name of the predictor should be one of" in str(excinfo.value) diff --git a/tests/test_model_regressor.py b/tests/test_model_regressor.py new file mode 100644 index 0000000..1243e09 --- /dev/null +++ b/tests/test_model_regressor.py @@ -0,0 +1,231 @@ +import copy +import pytest +import shutil +import numpy as np +from numpy.testing import assert_array_equal +from sklearn.datasets import load_boston +from sklearn.model_selection import train_test_split + +import deepforest +from deepforest import CascadeForestRegressor +from deepforest.cascade import _get_predictor_kwargs + + +save_dir = "./tmp" + +# Load data +X, y = load_boston(return_X_y=True) +X_train, X_test, y_train, y_test = train_test_split( + X, y, test_size=0.42, random_state=42 +) + +# Parameters +toy_kwargs = { + "n_bins": 10, + "bin_subsample": 2e5, + "max_layers": 10, + "n_estimators": 1, + "n_trees": 100, + "max_depth": 3, + "min_samples_leaf": 1, + "use_predictor": True, + "predictor": "forest", + "predictor_kwargs": {}, + "n_tolerant_rounds": 2, + "delta": 1e-5, + "n_jobs": -1, + "random_state": 0, + "verbose": 2, +} + +kwargs = { + "n_bins": 255, + "bin_subsample": 2e5, + "max_layers": 10, + "n_estimators": 2, + "n_trees": 100, + "max_depth": None, + "min_samples_leaf": 1, + "use_predictor": True, + "predictor": "forest", + "predictor_kwargs": {}, + "n_tolerant_rounds": 2, + 
"delta": 1e-5, + "n_jobs": -1, + "random_state": 0, + "verbose": 2, +} + + +@pytest.mark.parametrize( + "test_input,expected", + [ + ( + {"predictor_kwargs": {}, "n_job": 2}, + {"n_job": 2}, + ), + ( + {"predictor_kwargs": {"n_job": 3}, "n_job": 2}, + {"n_job": 3}, + ), + ( + {"predictor_kwargs": {"iter": 4}, "n_job": 2}, + {"iter": 4, "n_job": 2}, + ), + ], +) +def test_predictor_kwargs_overwrite(test_input, expected): + assert _get_predictor_kwargs(**test_input) == expected + + +def test_model_properties_after_fitting(): + """Check the model properties after fitting a deep forest model.""" + model = CascadeForestRegressor(**toy_kwargs) + model.fit(X_train, y_train) + + assert len(model) == model.n_layers_ + + assert model[0] is model._get_layer(0) + + with pytest.raises(ValueError) as excinfo: + model._get_layer(model.n_layers_) + assert "The layer index should be in the range" in str(excinfo.value) + + with pytest.raises(RuntimeError) as excinfo: + model._set_layer(0, None) + assert "already exists in the internal container" in str(excinfo.value) + + with pytest.raises(ValueError) as excinfo: + model._get_binner(model.n_layers_ + 1) + assert "The binner index should be in the range" in str(excinfo.value) + + with pytest.raises(RuntimeError) as excinfo: + model._set_binner(0, None) + assert "already exists in the internal container" in str(excinfo.value) + + +def test_model_workflow_partial_mode(): + """Run the workflow of deep forest with a local buffer.""" + + case_kwargs = copy.deepcopy(kwargs) + case_kwargs.update({"partial_mode": True}) + + model = CascadeForestRegressor(**case_kwargs) + model.fit(X_train, y_train) + + # Predictions before saving + y_pred_before = model.predict(X_test).astype(np.float32) + + # Save and Reload + model.save(save_dir) + + model = CascadeForestRegressor(**case_kwargs) + model.load(save_dir) + + # Predictions after loading + y_pred_after = model.predict(X_test).astype(np.float32) + + # Make sure the same predictions before and after model serialization + assert_array_equal(y_pred_before, y_pred_after) + + model.clean() # clear the buffer + shutil.rmtree(save_dir) + + +def test_model_workflow_in_memory(): + """Run the workflow of deep forest with in-memory mode.""" + + case_kwargs = copy.deepcopy(kwargs) + case_kwargs.update({"partial_mode": False}) + + model = CascadeForestRegressor(**case_kwargs) + model.fit(X_train, y_train) + + # Predictions before saving + y_pred_before = model.predict(X_test).astype(np.float32) + + # Save and Reload + model.save(save_dir) + + model = CascadeForestRegressor(**case_kwargs) + model.load(save_dir) + + # Make sure the same predictions before and after model serialization + y_pred_after = model.predict(X_test).astype(np.float32) + + assert_array_equal(y_pred_before, y_pred_after) + + shutil.rmtree(save_dir) + + +@pytest.mark.parametrize( + "param", + [ + (0, {"max_layers": 0}), + (1, {"n_tolerant_rounds": 0}), + (2, {"delta": -1}), + ], +) +def test_model_invalid_training_params(param): + case_kwargs = copy.deepcopy(toy_kwargs) + case_kwargs.update(param[1]) + + model = CascadeForestRegressor(**case_kwargs) + + with pytest.raises(ValueError) as excinfo: + model.fit(X_train, y_train) + + if param[0] == 0: + assert "max_layers" in str(excinfo.value) + elif param[0] == 1: + assert "n_tolerant_rounds" in str(excinfo.value) + elif param[0] == 2: + assert "delta " in str(excinfo.value) + + +@pytest.mark.parametrize("predictor", ["forest", "xgboost", "lightgbm"]) +def test_regressor_predictor_normal(predictor): + 
deepforest.cascade._build_regressor_predictor( + predictor, n_estimators=1, n_outputs=2 + ) + + +def test_regressor_predictor_unknown(): + with pytest.raises(NotImplementedError) as excinfo: + deepforest.cascade._build_regressor_predictor( + "unknown", n_estimators=1, n_outputs=2 + ) + assert "name of the predictor should be one of" in str(excinfo.value) + + +def test_model_n_trees_non_positive(): + case_kwargs = copy.deepcopy(toy_kwargs) + case_kwargs.update({"n_trees": 0}) + model = CascadeForestRegressor(**case_kwargs) + with pytest.raises(ValueError) as excinfo: + model._set_n_trees(0) + assert "should be strictly positive." in str(excinfo.value) + + +def test_model_n_trees_auto(): + case_kwargs = copy.deepcopy(toy_kwargs) + case_kwargs.update({"n_trees": "auto"}) + model = CascadeForestRegressor(**case_kwargs) + + n_trees = model._set_n_trees(0) + assert n_trees == 100 + + n_trees = model._set_n_trees(2) + assert n_trees == 300 + + n_trees = model._set_n_trees(10) + assert n_trees == 500 + + +def test_model_n_trees_invalid(): + case_kwargs = copy.deepcopy(toy_kwargs) + case_kwargs.update({"n_trees": [42]}) + model = CascadeForestRegressor(**case_kwargs) + with pytest.raises(ValueError) as excinfo: + model._set_n_trees(0) + assert "Invalid value for n_trees." in str(excinfo.value) diff --git a/tests/test_tree regressor.py b/tests/test_tree regressor.py new file mode 100644 index 0000000..6baeb33 --- /dev/null +++ b/tests/test_tree regressor.py @@ -0,0 +1,71 @@ +import pytest +from sklearn.datasets import load_boston +from sklearn.ensemble._hist_gradient_boosting.binning import _BinMapper + +from deepforest import DecisionTreeRegressor + + +X, y = load_boston(return_X_y=True) + +# Data binning +binner = _BinMapper(random_state=42) +X_binned = binner.fit_transform(X) + + +def test_tree_properties_after_fitting(): + tree = DecisionTreeRegressor() + tree.fit(X_binned, y) + + assert tree.get_depth() == tree.tree_.max_depth + assert tree.n_leaves == tree.tree_.n_leaves + assert tree.n_internals == tree.tree_.n_internals + + +def test_tree_fit_invalid_dtype(): + tree = DecisionTreeRegressor() + + with pytest.raises(RuntimeError) as execinfo: + tree.fit(X, y) + assert "The dtype of `X` should be `np.uint8`" in str(execinfo.value) + + +def test_tree_fit_invalid_training_params(): + tree = DecisionTreeRegressor(min_samples_leaf=0) + with pytest.raises(ValueError) as execinfo: + tree.fit(X_binned, y) + assert "min_samples_leaf must be at least 1" in str(execinfo.value) + + tree = DecisionTreeRegressor(min_samples_leaf=0.6) + with pytest.raises(ValueError) as execinfo: + tree.fit(X_binned, y) + assert "or in (0, 0.5]" in str(execinfo.value) + + tree = DecisionTreeRegressor(min_samples_split=1) + with pytest.raises(ValueError) as execinfo: + tree.fit(X_binned, y) + assert "min_samples_split must be an integer" in str(execinfo.value) + + tree = DecisionTreeRegressor(max_features="unknown") + with pytest.raises(ValueError) as execinfo: + tree.fit(X_binned, y) + assert "Invalid value for max_features." 
in str(execinfo.value) + + tree = DecisionTreeRegressor() + with pytest.raises(ValueError) as execinfo: + tree.fit(X_binned, y[:1]) + assert "Number of labels=" in str(execinfo.value) + + tree = DecisionTreeRegressor(min_weight_fraction_leaf=0.6) + with pytest.raises(ValueError) as execinfo: + tree.fit(X_binned, y) + assert "min_weight_fraction_leaf must in [0, 0.5]" in str(execinfo.value) + + tree = DecisionTreeRegressor(max_depth=0) + with pytest.raises(ValueError) as execinfo: + tree.fit(X_binned, y) + assert "max_depth must be greater than zero." in str(execinfo.value) + + +if __name__ == "__main__": + + test_tree_properties_after_fitting() diff --git a/tests/test_tree.py b/tests/test_tree_classifier.py similarity index 100% rename from tests/test_tree.py rename to tests/test_tree_classifier.py diff --git a/tests/test_tree_same.py b/tests/test_tree_same.py index d380b05..a7909bf 100644 --- a/tests/test_tree_same.py +++ b/tests/test_tree_same.py @@ -9,18 +9,23 @@ from sklearn.tree import ( DecisionTreeClassifier as sklearn_DecisionTreeClassifier, ) +from sklearn.tree import ( + DecisionTreeRegressor as sklearn_DecisionTreeRegressor, +) from sklearn.tree import ExtraTreeClassifier as sklearn_ExtraTreeClassifier +from sklearn.tree import ExtraTreeRegressor as sklearn_ExtraTreeRegressor # Load utils from sklearn.model_selection import train_test_split from sklearn.ensemble._hist_gradient_boosting.binning import _BinMapper # Toy classification datasets -from sklearn.datasets import load_iris, load_wine +from sklearn.datasets import load_iris, load_wine, load_boston from deepforest import DecisionTreeClassifier from deepforest import ExtraTreeClassifier - +from deepforest import DecisionTreeRegressor +from deepforest import ExtraTreeRegressor test_size = 0.42 random_state = 42 @@ -81,3 +86,54 @@ def test_extra_tree_classifier_proba(load_func): assert_array_equal(actual_pred, expected_pred) assert_array_equal(actual_proba, expected_proba) + + +@pytest.mark.parametrize("load_func", [load_boston]) +def test_tree_regressor_pred(load_func): + + X, y = load_func(return_X_y=True) + X_train, X_test, y_train, y_test = train_test_split( + X, y, test_size=test_size, random_state=random_state + ) + + # Data binning + binner = _BinMapper(random_state=random_state) + X_train_binned = binner.fit_transform(X_train) + X_test_binned = binner.transform(X_test) + + # Ours + model = DecisionTreeRegressor(random_state=random_state) + model.fit(X_train_binned, y_train) + actual_pred = model.predict(X_test_binned) + + # Sklearn + model = sklearn_DecisionTreeRegressor(random_state=random_state) + model.fit(X_train_binned, y_train) + expected_pred = model.predict(X_test_binned) + + assert_array_equal(actual_pred, expected_pred) + + +@pytest.mark.parametrize("load_func", [load_boston]) +def test_extra_tree_regressor_pred(load_func): + X, y = load_func(return_X_y=True) + X_train, X_test, y_train, y_test = train_test_split( + X, y, test_size=test_size, random_state=random_state + ) + + # Data binning + binner = _BinMapper(random_state=random_state) + X_train_binned = binner.fit_transform(X_train) + X_test_binned = binner.transform(X_test) + + # Ours + model = ExtraTreeRegressor(random_state=random_state) + model.fit(X_train_binned, y_train) + actual_pred = model.predict(X_test_binned) + + # Sklearn + model = sklearn_ExtraTreeRegressor(random_state=random_state) + model.fit(X_train_binned, y_train) + expected_pred = model.predict(X_test_binned) + + assert_array_equal(actual_pred, expected_pred) From 
d20a2fa6a25cd7cbfdf36c3110f92b4d458404cc Mon Sep 17 00:00:00 2001 From: Yi-Xuan Xu Date: Thu, 11 Feb 2021 11:18:49 +0800 Subject: [PATCH 19/94] [MNT] Update version number --- setup.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/setup.py b/setup.py index 79f3897..5198c33 100644 --- a/setup.py +++ b/setup.py @@ -12,7 +12,7 @@ MAINTAINER = "Yi-Xuan Xu" MAINTAINER_EMAIL = "xuyx@lamda.nju.edu.cn" URL = "https://github.com/LAMDA-NJU/Deep-Forest" -VERSION = "0.1.1" +VERSION = "0.1.2" def configuration(parent_package="", top_path=None): From 784e4b4b2280f23d535c990a504173b01d69b748 Mon Sep 17 00:00:00 2001 From: Yi-Xuan Xu Date: Thu, 11 Feb 2021 18:37:11 +0800 Subject: [PATCH 20/94] [DOC] Update README.rst --- README.rst | 6 ++++++ docs/index.rst | 20 +++++++++----------- 2 files changed, 15 insertions(+), 11 deletions(-) diff --git a/README.rst b/README.rst index 9c065db..c689d3f 100644 --- a/README.rst +++ b/README.rst @@ -44,6 +44,9 @@ The package is available via PyPI using: Quickstart ---------- +Classification +************** + .. code-block:: python from sklearn.datasets import load_digits @@ -61,6 +64,9 @@ Quickstart print("\nTesting Accuracy: {:.3f} %".format(acc)) >>> Testing Accuracy: 98.667 % +Regression +********** + .. code-block:: python from sklearn.datasets import load_boston diff --git a/docs/index.rst b/docs/index.rst index eceacaf..ab4cb86 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -28,31 +28,29 @@ The package is available via `PyPI `__ us Quickstart ---------- -.. code-block:: python +Classification +************** - from deepforest import CascadeForestClassifier +.. code-block:: python - # Load utils from sklearn.datasets import load_digits from sklearn.model_selection import train_test_split from sklearn.metrics import accuracy_score - # Load data + from deepforest import CascadeForestClassifier + X, y = load_digits(return_X_y=True) X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=1) - model = CascadeForestClassifier(random_state=1) - - # Train model.fit(X_train, y_train) - - # Evaluate y_pred = model.predict(X_test) acc = accuracy_score(y_test, y_pred) * 100 - - print("Testing Accuracy: {:.3f} %".format(acc)) + print("\nTesting Accuracy: {:.3f} %".format(acc)) >>> Testing Accuracy: 98.667 % +Regression +********** + .. code-block:: python from sklearn.datasets import load_boston From 8f5124ed95cf22c2225feccc86c6650c667337da Mon Sep 17 00:00:00 2001 From: Dwaipayan Munshi <53687927+dwaipayan05@users.noreply.github.com> Date: Fri, 12 Feb 2021 13:26:07 +0530 Subject: [PATCH 21/94] [DOC] Minor Changes to README.rst (#31) * Minor Changes to README.rest - Changed the Introduction Line a bit made it more comprehensible - And a few more changes . . . Will add more changes * Update index.rst * Update README.rst --- README.rst | 4 ++-- docs/index.rst | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/README.rst b/README.rst index c689d3f..c354b39 100644 --- a/README.rst +++ b/README.rst @@ -28,14 +28,14 @@ Deep Forest (DF) 21 - **Efficient**: Fast training speed and high efficiency. - **Scalable**: Capable of handling large-scale data. -Whenever one used tree-based machine learning approaches such as Random Forest or GBDT, DF21 may offer a new powerful option. +DF21 offers an effective & powerful option to the tree-based machine learning algorithms such as Random Forest or GBDT. For a quick start, please refer to `How to Get Started `__. 
For a detailed guidance on parameter tunning, please refer to `Parameters Tunning `__. Installation ------------ -The package is available via PyPI using: +DF21 can be installed using pip via `PyPI `__ which is the package installer for Python. You can use pip to install packages from the Python Package Index and other indexes. Refer `this `__ for the documentation of pip. Use this command to download DF21 : .. code-block:: bash diff --git a/docs/index.rst b/docs/index.rst index ab4cb86..e30e6d8 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -8,7 +8,7 @@ DF21 Documentation - **Efficient**: Fast training speed and high efficiency. - **Scalable**: Capable of handling large-scale data. -Whenever one used tree-based machine learning approaches such as Random Forest or GBDT, DF21 may offer a new powerful option. This package is actively being developed, and any help would be welcomed. Please check the homepage on `Gitee `__ or `Github `__ for details. +DF21 offers an effective & powerful option to the tree-based machine learning algorithms such as Random Forest or GBDT. This package is actively being developed, and any help would be welcomed. Please check the homepage on `Gitee `__ or `Github `__ for details. Guidepost --------- @@ -19,7 +19,7 @@ Guidepost Installation ------------ -The package is available via `PyPI `__ using: +DF21 can be installed using pip via `PyPI `__ which is the package installer for Python. You can use pip to install packages from the Python Package Index and other indexes. Refer `this `__ for the documentation of pip. Use this command to download DF21 : .. code-block:: bash From 11d4d2aec65a72b8de6d2b12b9a43a3cf806775d Mon Sep 17 00:00:00 2001 From: Yi-Xuan Xu Date: Fri, 12 Feb 2021 16:05:54 +0800 Subject: [PATCH 22/94] [MNT] Update .gitignore --- .all-contributorsrc | 65 +++++++++++++++++++++++++++++++++++++++++++++ .gitignore | 2 ++ CONTRIBUTORS.md | 24 +++++++++++++++++ 3 files changed, 91 insertions(+) create mode 100644 .all-contributorsrc create mode 100644 CONTRIBUTORS.md diff --git a/.all-contributorsrc b/.all-contributorsrc new file mode 100644 index 0000000..98f53fa --- /dev/null +++ b/.all-contributorsrc @@ -0,0 +1,65 @@ +{ + "projectName": "Deep-Forest", + "projectOwner": "LAMDA-NJU", + "repoType": "github", + "repoHost": "https://github.com", + "files": [ + "CONTRIBUTORS.md" + ], + "imageSize": 100, + "commit": false, + "commitConvention": "none", + "contributors": [ + { + "login": "xuyxu", + "name": "Yi-Xuan Xu", + "avatar_url": "https://avatars.githubusercontent.com/u/22359569?v=4", + "profile": "https://github.com/xuyxu", + "contributions": [ + "code", + "doc", + "test" + ] + }, + { + "login": "tczhao", + "name": "tczhao", + "avatar_url": "https://avatars.githubusercontent.com/u/20961507?v=4", + "profile": "https://www.linkedin.com/in/tczhao/", + "contributions": [ + "code", + "doc", + "test" + ] + }, + { + "login": "NiMaZi", + "name": "NiMaZi", + "avatar_url": "https://avatars.githubusercontent.com/u/19431549?v=4", + "profile": "https://github.com/NiMaZi", + "contributions": [ + "code", + "test" + ] + }, + { + "login": "pjgao", + "name": "Joey Gao", + "avatar_url": "https://avatars.githubusercontent.com/u/22350313?v=4", + "profile": "https://github.com/pjgao", + "contributions": [ + "code" + ] + }, + { + "login": "dwaipayan05", + "name": "Dwaipayan Munshi", + "avatar_url": "https://avatars.githubusercontent.com/u/53687927?v=4", + "profile": "https://github.com/dwaipayan05", + "contributions": [ + "doc" + ] + } + ], + "contributorsPerLine": 7 +} 
diff --git a/.gitignore b/.gitignore index 2f5060e..bbe467a 100644 --- a/.gitignore +++ b/.gitignore @@ -13,6 +13,7 @@ .coverage .DS_Store +package-lock.json .idea/ .vscode/ .pytest_cache/ @@ -22,3 +23,4 @@ dist/ deep_forest.egg-info/ cache/ tmp/ +node_modules/ diff --git a/CONTRIBUTORS.md b/CONTRIBUTORS.md new file mode 100644 index 0000000..ee1599a --- /dev/null +++ b/CONTRIBUTORS.md @@ -0,0 +1,24 @@ + +## Contributors ✨ + +Thanks goes to these wonderful people ([emoji key](https://allcontributors.org/docs/en/emoji-key)): + + + + + + + + + + + + +

[contributor table rows added to CONTRIBUTORS.md (HTML markup lost in extraction): Yi-Xuan Xu (💻 📖 ⚠️), tczhao (💻 📖 ⚠️), NiMaZi (💻 ⚠️), Joey Gao (💻), Dwaipayan Munshi (📖)]
+ + + + + + +This project follows the [all-contributors](https://github.com/all-contributors/all-contributors) specification. Contributions of any kind welcome! \ No newline at end of file From 0724efab80c9d878fa9f230567551f302b1333e8 Mon Sep 17 00:00:00 2001 From: Yi-Xuan Xu Date: Fri, 12 Feb 2021 16:06:06 +0800 Subject: [PATCH 23/94] Revert "[MNT] Update .gitignore" This reverts commit 11d4d2aec65a72b8de6d2b12b9a43a3cf806775d. --- .all-contributorsrc | 65 --------------------------------------------- .gitignore | 2 -- CONTRIBUTORS.md | 24 ----------------- 3 files changed, 91 deletions(-) delete mode 100644 .all-contributorsrc delete mode 100644 CONTRIBUTORS.md diff --git a/.all-contributorsrc b/.all-contributorsrc deleted file mode 100644 index 98f53fa..0000000 --- a/.all-contributorsrc +++ /dev/null @@ -1,65 +0,0 @@ -{ - "projectName": "Deep-Forest", - "projectOwner": "LAMDA-NJU", - "repoType": "github", - "repoHost": "https://github.com", - "files": [ - "CONTRIBUTORS.md" - ], - "imageSize": 100, - "commit": false, - "commitConvention": "none", - "contributors": [ - { - "login": "xuyxu", - "name": "Yi-Xuan Xu", - "avatar_url": "https://avatars.githubusercontent.com/u/22359569?v=4", - "profile": "https://github.com/xuyxu", - "contributions": [ - "code", - "doc", - "test" - ] - }, - { - "login": "tczhao", - "name": "tczhao", - "avatar_url": "https://avatars.githubusercontent.com/u/20961507?v=4", - "profile": "https://www.linkedin.com/in/tczhao/", - "contributions": [ - "code", - "doc", - "test" - ] - }, - { - "login": "NiMaZi", - "name": "NiMaZi", - "avatar_url": "https://avatars.githubusercontent.com/u/19431549?v=4", - "profile": "https://github.com/NiMaZi", - "contributions": [ - "code", - "test" - ] - }, - { - "login": "pjgao", - "name": "Joey Gao", - "avatar_url": "https://avatars.githubusercontent.com/u/22350313?v=4", - "profile": "https://github.com/pjgao", - "contributions": [ - "code" - ] - }, - { - "login": "dwaipayan05", - "name": "Dwaipayan Munshi", - "avatar_url": "https://avatars.githubusercontent.com/u/53687927?v=4", - "profile": "https://github.com/dwaipayan05", - "contributions": [ - "doc" - ] - } - ], - "contributorsPerLine": 7 -} diff --git a/.gitignore b/.gitignore index bbe467a..2f5060e 100644 --- a/.gitignore +++ b/.gitignore @@ -13,7 +13,6 @@ .coverage .DS_Store -package-lock.json .idea/ .vscode/ .pytest_cache/ @@ -23,4 +22,3 @@ dist/ deep_forest.egg-info/ cache/ tmp/ -node_modules/ diff --git a/CONTRIBUTORS.md b/CONTRIBUTORS.md deleted file mode 100644 index ee1599a..0000000 --- a/CONTRIBUTORS.md +++ /dev/null @@ -1,24 +0,0 @@ - -## Contributors ✨ - -Thanks goes to these wonderful people ([emoji key](https://allcontributors.org/docs/en/emoji-key)): - - - - - - - - - - - - -

[the same contributor table rows, removed again by the revert: Yi-Xuan Xu (💻 📖 ⚠️), tczhao (💻 📖 ⚠️), NiMaZi (💻 ⚠️), Joey Gao (💻), Dwaipayan Munshi (📖)]
- - - - - - -This project follows the [all-contributors](https://github.com/all-contributors/all-contributors) specification. Contributions of any kind welcome! \ No newline at end of file From fa53e5a9106af707ebbb54c6293876449f2f64de Mon Sep 17 00:00:00 2001 From: Yi-Xuan Xu Date: Fri, 12 Feb 2021 16:08:27 +0800 Subject: [PATCH 24/94] [MNT] Update .gitignore --- .gitignore | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/.gitignore b/.gitignore index 2f5060e..e41905c 100644 --- a/.gitignore +++ b/.gitignore @@ -11,8 +11,9 @@ *.html *.xml .coverage - .DS_Store +package-lock.json +node_modules/ .idea/ .vscode/ .pytest_cache/ From 6f8ceb51ce3a5fffa61c9403f9be8011e50b38d3 Mon Sep 17 00:00:00 2001 From: Yi-Xuan Xu Date: Fri, 12 Feb 2021 16:08:44 +0800 Subject: [PATCH 25/94] [DOC] Add all contributors --- .all-contributorsrc | 65 +++++++++++++++++++++++++++++++++++++++++++++ CONTRIBUTORS.md | 24 +++++++++++++++++ 2 files changed, 89 insertions(+) create mode 100644 .all-contributorsrc create mode 100644 CONTRIBUTORS.md diff --git a/.all-contributorsrc b/.all-contributorsrc new file mode 100644 index 0000000..98f53fa --- /dev/null +++ b/.all-contributorsrc @@ -0,0 +1,65 @@ +{ + "projectName": "Deep-Forest", + "projectOwner": "LAMDA-NJU", + "repoType": "github", + "repoHost": "https://github.com", + "files": [ + "CONTRIBUTORS.md" + ], + "imageSize": 100, + "commit": false, + "commitConvention": "none", + "contributors": [ + { + "login": "xuyxu", + "name": "Yi-Xuan Xu", + "avatar_url": "https://avatars.githubusercontent.com/u/22359569?v=4", + "profile": "https://github.com/xuyxu", + "contributions": [ + "code", + "doc", + "test" + ] + }, + { + "login": "tczhao", + "name": "tczhao", + "avatar_url": "https://avatars.githubusercontent.com/u/20961507?v=4", + "profile": "https://www.linkedin.com/in/tczhao/", + "contributions": [ + "code", + "doc", + "test" + ] + }, + { + "login": "NiMaZi", + "name": "NiMaZi", + "avatar_url": "https://avatars.githubusercontent.com/u/19431549?v=4", + "profile": "https://github.com/NiMaZi", + "contributions": [ + "code", + "test" + ] + }, + { + "login": "pjgao", + "name": "Joey Gao", + "avatar_url": "https://avatars.githubusercontent.com/u/22350313?v=4", + "profile": "https://github.com/pjgao", + "contributions": [ + "code" + ] + }, + { + "login": "dwaipayan05", + "name": "Dwaipayan Munshi", + "avatar_url": "https://avatars.githubusercontent.com/u/53687927?v=4", + "profile": "https://github.com/dwaipayan05", + "contributions": [ + "doc" + ] + } + ], + "contributorsPerLine": 7 +} diff --git a/CONTRIBUTORS.md b/CONTRIBUTORS.md new file mode 100644 index 0000000..ee1599a --- /dev/null +++ b/CONTRIBUTORS.md @@ -0,0 +1,24 @@ + +## Contributors ✨ + +Thanks goes to these wonderful people ([emoji key](https://allcontributors.org/docs/en/emoji-key)): + + + + + + + + + + + + +

[contributor table rows restored in the re-added CONTRIBUTORS.md: Yi-Xuan Xu (💻 📖 ⚠️), tczhao (💻 📖 ⚠️), NiMaZi (💻 ⚠️), Joey Gao (💻), Dwaipayan Munshi (📖)]
+ + + + + + +This project follows the [all-contributors](https://github.com/all-contributors/all-contributors) specification. Contributions of any kind welcome! \ No newline at end of file From be63e0ab4e79e21ced17984b48fe0ea228f5a3be Mon Sep 17 00:00:00 2001 From: Yi-Xuan Xu Date: Fri, 12 Feb 2021 16:20:35 +0800 Subject: [PATCH 26/94] [DOC] Add all-contributors in documentation --- docs/conf.py | 5 ++++- docs/contributors.rst | 1 + docs/index.rst | 1 + 3 files changed, 6 insertions(+), 1 deletion(-) create mode 100644 docs/contributors.rst diff --git a/docs/conf.py b/docs/conf.py index 7454185..9f754fc 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -45,9 +45,12 @@ 'sphinx.ext.todo', 'sphinx.ext.napoleon', 'sphinx_panels', - 'sphinx_copybutton' + 'sphinx_copybutton', + "m2r2" ] +source_suffix = ['.rst', '.md'] + autoapi_dirs = ['../deepforest'] autodoc_member_order = 'bysource' diff --git a/docs/contributors.rst b/docs/contributors.rst new file mode 100644 index 0000000..ed3bf36 --- /dev/null +++ b/docs/contributors.rst @@ -0,0 +1 @@ +.. mdinclude:: ../CONTRIBUTORS.md \ No newline at end of file diff --git a/docs/index.rst b/docs/index.rst index e30e6d8..a3fa4bb 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -110,6 +110,7 @@ Reference :maxdepth: 1 :caption: For Developers + Contributors Changelog .. toctree:: From d5c2dbee9e4cddfa0dee589a12621a53467255ef Mon Sep 17 00:00:00 2001 From: Yi-Xuan Xu Date: Fri, 12 Feb 2021 16:25:47 +0800 Subject: [PATCH 27/94] [MNT] Update package dependencies for documentation --- docs/requirements.txt | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/docs/requirements.txt b/docs/requirements.txt index b6aecca..2dbaa5f 100644 --- a/docs/requirements.txt +++ b/docs/requirements.txt @@ -1,4 +1,5 @@ sphinx==3.1.2 sphinx_rtd_theme==0.5.0 sphinx-panels==0.5.* -sphinx-copybutton \ No newline at end of file +sphinx-copybutton +m2r2==0.2.7 \ No newline at end of file From 73cb0174bd984f7473caff5de8d579a129809507 Mon Sep 17 00:00:00 2001 From: Yi-Xuan Xu Date: Fri, 12 Feb 2021 17:30:41 +0800 Subject: [PATCH 28/94] [MNT] Update all-contributors config --- .all-contributorsrc | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/.all-contributorsrc b/.all-contributorsrc index 98f53fa..e7ec097 100644 --- a/.all-contributorsrc +++ b/.all-contributorsrc @@ -61,5 +61,6 @@ ] } ], - "contributorsPerLine": 7 -} + "contributorsPerLine": 7, + "contributorsSortAlphabetically": true +} \ No newline at end of file From 39e61bf1e09be197f6bdf4c69e5e1b569f8a9c1f Mon Sep 17 00:00:00 2001 From: Yi-Xuan Xu Date: Fri, 12 Feb 2021 17:30:59 +0800 Subject: [PATCH 29/94] [DOC] Update CONTRIBUTORS.md --- CONTRIBUTORS.md | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/CONTRIBUTORS.md b/CONTRIBUTORS.md index ee1599a..1a94649 100644 --- a/CONTRIBUTORS.md +++ b/CONTRIBUTORS.md @@ -8,11 +8,11 @@ Thanks goes to these wonderful people ([emoji key](https://allcontributors.org/d + + + - - -

[contributor table rows reordered alphabetically: Dwaipayan Munshi (📖), Joey Gao (💻), and NiMaZi (💻 ⚠️) moved ahead of Yi-Xuan Xu (💻 📖 ⚠️) and tczhao (💻 📖 ⚠️); the old trailing rows for NiMaZi, Joey Gao, and Dwaipayan Munshi removed]
From 313a5fab7df6099d08a3734c315a1dd04fd163f9 Mon Sep 17 00:00:00 2001 From: tczhao Date: Fri, 12 Feb 2021 21:23:46 +1100 Subject: [PATCH 30/94] [END] Add criterion for cascade forest (#28) * feat(model): add criterion * doc(changelog): configurable criterion --- CHANGELOG.rst | 1 + deepforest/_estimator.py | 9 +++++ deepforest/_io.py | 1 + deepforest/_layer.py | 3 ++ deepforest/cascade.py | 23 +++++++++++++ deepforest/tree/tree.py | 2 +- tests/test_layer_estimator.py | 62 ++++++++++++++++++++++++++++------ tests/test_model_classifier.py | 6 ++-- tests/test_model_regressor.py | 6 ++-- 9 files changed, 98 insertions(+), 15 deletions(-) diff --git a/CHANGELOG.rst b/CHANGELOG.rst index 9d5e7f4..b80450c 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -31,6 +31,7 @@ Version 0.1.* .. |Fix| replace:: :raw-html:`Fix` :raw-latex:`{\small\sc [Fix]}` .. |API| replace:: :raw-html:`API Change` :raw-latex:`{\small\sc [API Change]}` +- |Feature| support configurable criterion (`#28 `__) @tczhao - |Feature| support regression prediction (`#25 `__) @tczhao - |Fix| fix accepted data types on the :obj:`binner` (`#23 `__) @xuyxu - |Feature| implement the :meth:`get_forest` method for efficient indexing (`#22 `__) @xuyxu diff --git a/deepforest/_estimator.py b/deepforest/_estimator.py index 501c5c9..5419af9 100644 --- a/deepforest/_estimator.py +++ b/deepforest/_estimator.py @@ -13,6 +13,7 @@ def make_classifier_estimator( name, + criterion, n_trees=100, max_depth=None, min_samples_leaf=1, @@ -22,6 +23,7 @@ def make_classifier_estimator( # RandomForestClassifier if name == "rf": estimator = RandomForestClassifier( + criterion=criterion, n_estimators=n_trees, max_depth=max_depth, min_samples_leaf=min_samples_leaf, @@ -31,6 +33,7 @@ def make_classifier_estimator( # ExtraTreesClassifier elif name == "erf": estimator = ExtraTreesClassifier( + criterion=criterion, n_estimators=n_trees, max_depth=max_depth, min_samples_leaf=min_samples_leaf, @@ -46,6 +49,7 @@ def make_classifier_estimator( def make_regressor_estimator( name, + criterion, n_trees=100, max_depth=None, min_samples_leaf=1, @@ -55,6 +59,7 @@ def make_regressor_estimator( # RandomForestRegressor if name == "rf": estimator = RandomForestRegressor( + criterion=criterion, n_estimators=n_trees, max_depth=max_depth, min_samples_leaf=min_samples_leaf, @@ -64,6 +69,7 @@ def make_regressor_estimator( # ExtraTreesRegressor elif name == "erf": estimator = ExtraTreesRegressor( + criterion=criterion, n_estimators=n_trees, max_depth=max_depth, min_samples_leaf=min_samples_leaf, @@ -81,6 +87,7 @@ class Estimator(object): def __init__( self, name, + criterion, n_trees=100, max_depth=None, min_samples_leaf=1, @@ -93,6 +100,7 @@ def __init__( if self.is_classifier: self.estimator_ = make_classifier_estimator( name, + criterion, n_trees, max_depth, min_samples_leaf, @@ -102,6 +110,7 @@ def __init__( else: self.estimator_ = make_regressor_estimator( name, + criterion, n_trees, max_depth, min_samples_leaf, diff --git a/deepforest/_io.py b/deepforest/_io.py index 0db1dea..419857d 100644 --- a/deepforest/_io.py +++ b/deepforest/_io.py @@ -319,6 +319,7 @@ def model_loadobj(dirname, obj_type, d=None): layer_ = Layer( layer_idx=layer_idx, n_classes=d["n_outputs"], + criterion=d["criterion"], n_estimators=d["n_estimators"], partial_mode=d["partial_mode"], buffer=d["buffer"], diff --git a/deepforest/_layer.py b/deepforest/_layer.py index ee76956..4af360c 100644 --- a/deepforest/_layer.py +++ b/deepforest/_layer.py @@ -47,6 +47,7 @@ def __init__( self, layer_idx, n_classes, + 
criterion, n_estimators=2, n_trees=100, max_depth=None, @@ -60,6 +61,7 @@ def __init__( ): self.layer_idx = layer_idx self.n_classes = n_classes + self.criterion = criterion self.n_estimators = n_estimators * 2 # internal conversion self.n_trees = n_trees self.max_depth = max_depth @@ -89,6 +91,7 @@ def _make_estimator(self, estimator_idx, estimator_name): estimator = Estimator( name=estimator_name, + criterion=self.criterion, n_trees=self.n_trees, max_depth=self.max_depth, min_samples_leaf=self.min_samples_leaf, diff --git a/deepforest/cascade.py b/deepforest/cascade.py index 8d25485..25ed539 100644 --- a/deepforest/cascade.py +++ b/deepforest/cascade.py @@ -28,6 +28,7 @@ def _get_predictor_kwargs(predictor_kwargs, **kwargs) -> dict: def _build_classifier_predictor( predictor_name, + criterion, n_estimators, n_outputs, max_depth=None, @@ -46,6 +47,7 @@ def _build_classifier_predictor( predictor = RandomForestClassifier( **_get_predictor_kwargs( predictor_kwargs, + criterion=criterion, n_estimators=n_estimators, max_depth=max_depth, min_samples_leaf=min_samples_leaf, @@ -110,6 +112,7 @@ def _build_classifier_predictor( def _build_regressor_predictor( predictor_name, + criterion, n_estimators, n_outputs, max_depth=None, @@ -128,6 +131,7 @@ def _build_regressor_predictor( predictor = RandomForestRegressor( **_get_predictor_kwargs( predictor_kwargs, + criterion=criterion, n_estimators=n_estimators, max_depth=max_depth, min_samples_leaf=min_samples_leaf, @@ -205,6 +209,10 @@ def _build_regressor_predictor( The maximum number of cascade layers in the deep forest. Notice that the actual number of layers can be smaller than ``max_layers`` because of the internal early stopping stage. + criterion : :obj:`{"gini", "entropy"}`, default="gini" + The function to measure the quality of a split. Supported criteria + are ``gini`` for the Gini impurity and ``entropy`` for the information + gain. Note: this parameter is tree-specific. n_estimators : :obj:`int`, default=2 The number of estimator in each cascade layer. It will be multiplied by 2 internally because each estimator contains a @@ -311,6 +319,10 @@ def _build_regressor_predictor( The maximum number of cascade layers in the deep forest. Notice that the actual number of layers can be smaller than ``max_layers`` because of the internal early stopping stage. + criterion : :obj:`{"mse", "mae"}`, default="mse" + The function to measure the quality of a split. Supported criteria are + ``mse`` for the mean squared error, which is equal to variance reduction + as feature selection criterion, and ``mae`` for the mean absolute error. n_estimators : :obj:`int`, default=2 The number of estimator in each cascade layer. 
It will be multiplied by 2 internally because each estimator contains a @@ -441,6 +453,7 @@ def __init__( bin_subsample=2e5, bin_type="percentile", max_layers=20, + criterion="", n_estimators=2, n_trees=100, max_depth=None, @@ -459,6 +472,7 @@ def __init__( self.bin_subsample = bin_subsample self.bin_type = bin_type self.max_layers = max_layers + self.criterion = criterion self.n_estimators = n_estimators self.n_trees = n_trees self.max_depth = max_depth @@ -710,6 +724,7 @@ def fit(self, X, y, sample_weight=None): layer_ = Layer( 0, self.n_outputs_, + self.criterion, self.n_estimators, self._set_n_trees(0), self.max_depth, @@ -785,6 +800,7 @@ def fit(self, X, y, sample_weight=None): layer_ = Layer( layer_idx, self.n_outputs_, + self.criterion, self.n_estimators, self._set_n_trees(layer_idx), self.max_depth, @@ -881,6 +897,7 @@ def fit(self, X, y, sample_weight=None): if is_classifier(self): self.predictor_ = _build_classifier_predictor( self.predictor_name, + self.criterion, self.n_trees, self.n_outputs_, self.max_depth, @@ -892,6 +909,7 @@ def fit(self, X, y, sample_weight=None): else: self.predictor_ = _build_regressor_predictor( self.predictor_name, + self.criterion, self.n_trees, self.n_outputs_, self.max_depth, @@ -1016,6 +1034,7 @@ def save(self, dirname="model"): # Save each object sequentially d = {} d["n_estimators"] = self.n_estimators + d["criterion"] = self.criterion d["n_layers"] = self.n_layers_ d["n_features"] = self.n_features_ d["n_outputs"] = self.n_outputs_ @@ -1107,6 +1126,7 @@ def __init__( bin_subsample=2e5, bin_type="percentile", max_layers=20, + criterion="gini", n_estimators=2, n_trees=100, max_depth=None, @@ -1126,6 +1146,7 @@ def __init__( bin_subsample=bin_subsample, bin_type=bin_type, max_layers=max_layers, + criterion=criterion, n_estimators=n_estimators, n_trees=n_trees, max_depth=max_depth, @@ -1302,6 +1323,7 @@ def __init__( bin_subsample=2e5, bin_type="percentile", max_layers=20, + criterion="mse", n_estimators=2, n_trees=100, max_depth=None, @@ -1321,6 +1343,7 @@ def __init__( bin_subsample=bin_subsample, bin_type=bin_type, max_layers=max_layers, + criterion=criterion, n_estimators=n_estimators, n_trees=n_trees, max_depth=max_depth, diff --git a/deepforest/tree/tree.py b/deepforest/tree/tree.py index ead6dc2..971b1b1 100644 --- a/deepforest/tree/tree.py +++ b/deepforest/tree/tree.py @@ -49,7 +49,7 @@ DOUBLE = _tree.DOUBLE CRITERIA_CLF = {"gini": _criterion.Gini, "entropy": _criterion.Entropy} -CRITERIA_REG = {"mse": _criterion.MSE} +CRITERIA_REG = {"mse": _criterion.MSE, "mae": _criterion.MAE} DENSE_SPLITTERS = { "best": _splitter.BestSplitter, diff --git a/tests/test_layer_estimator.py b/tests/test_layer_estimator.py index 425f93c..e596689 100644 --- a/tests/test_layer_estimator.py +++ b/tests/test_layer_estimator.py @@ -19,9 +19,10 @@ ) # Parameters -layer_kwargs = { +classifier_layer_kwargs = { "layer_idx": 0, "n_classes": 10, + "criterion": "gini", "n_estimators": 1, "n_trees": 10, "max_depth": 3, @@ -33,8 +34,34 @@ "verbose": 2, } -estimator_kwargs = { +classifier_estimator_kwargs = { "name": "rf", + "criterion": "gini", + "n_trees": 10, + "max_depth": 3, + "min_samples_leaf": 10, + "n_jobs": -1, + "random_state": 42, +} + +regressor_layer_kwargs = { + "layer_idx": 0, + "n_classes": 1, + "criterion": "mse", + "n_estimators": 1, + "n_trees": 10, + "max_depth": 3, + "min_samples_leaf": 10, + "partial_mode": False, + "buffer": None, + "n_jobs": -1, + "random_state": 42, + "verbose": 2, +} + +regressor_estimator_kwargs = { + "name": "rf", + "criterion": 
"mse", "n_trees": 10, "max_depth": 3, "min_samples_leaf": 10, @@ -45,18 +72,24 @@ def test_classifier_layer_properties_after_fitting(): - layer = Layer(**layer_kwargs) + layer = Layer(**classifier_layer_kwargs) X_aug = layer.fit_transform(X_train, y_train) y_pred_full = layer.predict_full(X_test, is_classifier=True) # n_trees assert ( layer.n_trees_ - == 2 * layer_kwargs["n_estimators"] * layer_kwargs["n_trees"] + == 2 + * classifier_layer_kwargs["n_estimators"] + * classifier_layer_kwargs["n_trees"] ) # Output dim - expect_dim = 2 * layer_kwargs["n_classes"] * layer_kwargs["n_estimators"] + expect_dim = ( + 2 + * classifier_layer_kwargs["n_classes"] + * classifier_layer_kwargs["n_estimators"] + ) assert X_aug.shape[1] == expect_dim assert y_pred_full.shape[1] == expect_dim @@ -70,7 +103,7 @@ def test_regressor_layer_properties_after_fitting(): X_train, X_test, y_train, y_test = train_test_split( X_binned, y, test_size=0.42, random_state=42 ) - layer = Layer(**layer_kwargs) + layer = Layer(**regressor_layer_kwargs) layer.is_classifier = False X_aug = layer.fit_transform(X_train, y_train) y_pred_full = layer.predict_full(X_test, is_classifier=False) @@ -78,11 +111,13 @@ def test_regressor_layer_properties_after_fitting(): # n_trees assert ( layer.n_trees_ - == 2 * layer_kwargs["n_estimators"] * layer_kwargs["n_trees"] + == 2 + * regressor_layer_kwargs["n_estimators"] + * regressor_layer_kwargs["n_trees"] ) # Output dim - expect_dim = 2 * layer_kwargs["n_estimators"] + expect_dim = 2 * regressor_layer_kwargs["n_estimators"] assert X_aug.shape[1] == expect_dim assert y_pred_full.shape[1] == expect_dim @@ -90,7 +125,10 @@ def test_regressor_layer_properties_after_fitting(): @pytest.mark.parametrize( "param", [(0, {"n_estimators": 0}), (1, {"n_trees": 0})] ) -def test_layer_invalid_training_params(param): +@pytest.mark.parametrize( + "layer_kwargs", [(classifier_layer_kwargs), (regressor_layer_kwargs)] +) +def test_layer_invalid_training_params(param, layer_kwargs): case_kwargs = copy.deepcopy(layer_kwargs) case_kwargs.update(param[1]) @@ -105,7 +143,11 @@ def test_layer_invalid_training_params(param): layer.fit_transform(X_train, y_train) -def test_estimator_unknown(): +@pytest.mark.parametrize( + "estimator_kwargs", + [(classifier_estimator_kwargs), (regressor_estimator_kwargs)], +) +def test_estimator_unknown(estimator_kwargs): case_kwargs = copy.deepcopy(estimator_kwargs) case_kwargs.update({"name": "unknown"}) diff --git a/tests/test_model_classifier.py b/tests/test_model_classifier.py index b304de4..6546858 100644 --- a/tests/test_model_classifier.py +++ b/tests/test_model_classifier.py @@ -25,6 +25,7 @@ "bin_subsample": 2e5, "max_layers": 10, "n_estimators": 1, + "criterion": "gini", "n_trees": 100, "max_depth": 3, "min_samples_leaf": 1, @@ -43,6 +44,7 @@ "bin_subsample": 2e5, "max_layers": 10, "n_estimators": 2, + "criterion": "gini", "n_trees": 100, "max_depth": None, "min_samples_leaf": 1, @@ -239,14 +241,14 @@ def test_model_invalid_training_params(param): @pytest.mark.parametrize("predictor", ["forest", "xgboost", "lightgbm"]) def test_classifier_predictor_normal(predictor): deepforest.cascade._build_classifier_predictor( - predictor, n_estimators=1, n_outputs=2 + predictor, criterion="gini", n_estimators=1, n_outputs=2 ) def test_classifier_predictor_unknown(): with pytest.raises(NotImplementedError) as excinfo: deepforest.cascade._build_classifier_predictor( - "unknown", n_estimators=1, n_outputs=2 + "unknown", criterion="gini", n_estimators=1, n_outputs=2 ) assert "name of the 
predictor should be one of" in str(excinfo.value) diff --git a/tests/test_model_regressor.py b/tests/test_model_regressor.py index 1243e09..6d13916 100644 --- a/tests/test_model_regressor.py +++ b/tests/test_model_regressor.py @@ -24,6 +24,7 @@ "n_bins": 10, "bin_subsample": 2e5, "max_layers": 10, + "criterion": "mse", "n_estimators": 1, "n_trees": 100, "max_depth": 3, @@ -42,6 +43,7 @@ "n_bins": 255, "bin_subsample": 2e5, "max_layers": 10, + "criterion": "mse", "n_estimators": 2, "n_trees": 100, "max_depth": None, @@ -186,14 +188,14 @@ def test_model_invalid_training_params(param): @pytest.mark.parametrize("predictor", ["forest", "xgboost", "lightgbm"]) def test_regressor_predictor_normal(predictor): deepforest.cascade._build_regressor_predictor( - predictor, n_estimators=1, n_outputs=2 + predictor, criterion="mse", n_estimators=1, n_outputs=2 ) def test_regressor_predictor_unknown(): with pytest.raises(NotImplementedError) as excinfo: deepforest.cascade._build_regressor_predictor( - "unknown", n_estimators=1, n_outputs=2 + "unknown", criterion="mse", n_estimators=1, n_outputs=2 ) assert "name of the predictor should be one of" in str(excinfo.value) From a5eece32afa0fe6fffc11709df880f63026838f8 Mon Sep 17 00:00:00 2001 From: "allcontributors[bot]" <46447321+allcontributors[bot]@users.noreply.github.com> Date: Fri, 12 Feb 2021 21:09:22 +0800 Subject: [PATCH 31/94] [DOC] Add contributor (#33) * docs: update CONTRIBUTORS.md [skip ci] * docs: update .all-contributorsrc [skip ci] Co-authored-by: allcontributors[bot] <46447321+allcontributors[bot]@users.noreply.github.com> --- .all-contributorsrc | 14 ++++++++++++-- CONTRIBUTORS.md | 1 + 2 files changed, 13 insertions(+), 2 deletions(-) diff --git a/.all-contributorsrc b/.all-contributorsrc index e7ec097..186e7a7 100644 --- a/.all-contributorsrc +++ b/.all-contributorsrc @@ -59,8 +59,18 @@ "contributions": [ "doc" ] + }, + { + "login": "Mr-memorandum", + "name": "Mr-memorandum", + "avatar_url": "https://avatars.githubusercontent.com/u/33889145?v=4", + "profile": "https://github.com/Mr-memorandum", + "contributions": [ + "bug" + ] } ], "contributorsPerLine": 7, - "contributorsSortAlphabetically": true -} \ No newline at end of file + "contributorsSortAlphabetically": true, + "skipCi": true +} diff --git a/CONTRIBUTORS.md b/CONTRIBUTORS.md index 1a94649..9f69fda 100644 --- a/CONTRIBUTORS.md +++ b/CONTRIBUTORS.md @@ -10,6 +10,7 @@ Thanks goes to these wonderful people ([emoji key](https://allcontributors.org/d
[contributor table rows, with Mr-memorandum (🐛) inserted after Joey Gao: Dwaipayan Munshi (📖), Joey Gao (💻), Mr-memorandum (🐛), NiMaZi (💻 ⚠️), Yi-Xuan Xu (💻 📖 ⚠️), tczhao:]
💻 📖 ⚠️ From ce9a6dd2db952b07662fae80e3ea9895cb3f359a Mon Sep 17 00:00:00 2001 From: T-Allen-sudo <65913092+T-Allen-sudo@users.noreply.github.com> Date: Sat, 13 Feb 2021 00:39:03 +0800 Subject: [PATCH 32/94] [MNT] Update CI for mac-os (#34) * Update build-wheels.yml * correct version * Update build-wheels.yml * Update build-wheels.yml * Update build-wheels.yml * Update build-wheels.yml * update ci * add mac-os ci * rename ci on mac-os * update all-contributors Co-authored-by: Yi-Xuan Xu --- .all-contributorsrc | 10 ++++++ .github/workflows/build-wheels.yml | 10 +++--- .github/workflows/mac-os-build-and-test.yml | 39 +++++++++++++++++++++ CONTRIBUTORS.md | 1 + 4 files changed, 55 insertions(+), 5 deletions(-) create mode 100644 .github/workflows/mac-os-build-and-test.yml diff --git a/.all-contributorsrc b/.all-contributorsrc index 186e7a7..e9f9412 100644 --- a/.all-contributorsrc +++ b/.all-contributorsrc @@ -68,6 +68,16 @@ "contributions": [ "bug" ] + }, + { + "login": "T-Allen-sudo", + "name": "T-Allen-sudo", + "avatar_url": "https://avatars.githubusercontent.com/u/65913092?v=4", + "profile": "https://github.com/T-Allen-sudo", + "contributions": [ + "maintenance", + "test" + ] } ], "contributorsPerLine": 7, diff --git a/.github/workflows/build-wheels.yml b/.github/workflows/build-wheels.yml index 3dc9674..f60f191 100644 --- a/.github/workflows/build-wheels.yml +++ b/.github/workflows/build-wheels.yml @@ -12,7 +12,7 @@ jobs: runs-on: ${{ matrix.os }} strategy: matrix: - os: [ubuntu-latest, windows-latest] + os: [ubuntu-latest, windows-latest, macos-latest] python-version: [3.6, 3.7, 3.8] steps: - uses: actions/checkout@v2 @@ -20,12 +20,12 @@ jobs: uses: actions/setup-python@v2 with: python-version: ${{ matrix.python-version }} - - name: Install cibuildwheel - run: python -m pip install cibuildwheel==1.8.0 - name: Build wheels - run: python -m cibuildwheel --output-dir wheelhouse + uses: joerick/cibuildwheel@v1.9.0 + with: + output-dir: wheelhouse env: - CIBW_BUILD: "cp36-manylinux_x86_64 cp37-manylinux_x86_64 cp38-manylinux_x86_64 cp36-win_amd64 cp37-win_amd64 cp38-win_amd64" + CIBW_BUILD: "cp36-manylinux_x86_64 cp37-manylinux_x86_64 cp38-manylinux_x86_64 cp36-win_amd64 cp37-win_amd64 cp38-win_amd64 cp36-macosx_x86_64 cp37-macosx_x86_64 cp38-macosx_x86_64" - name: Store artifacts uses: actions/upload-artifact@v2 with: diff --git a/.github/workflows/mac-os-build-and-test.yml b/.github/workflows/mac-os-build-and-test.yml new file mode 100644 index 0000000..3914850 --- /dev/null +++ b/.github/workflows/mac-os-build-and-test.yml @@ -0,0 +1,39 @@ +name: DeepForest-CI-MacOS + +on: + push: + branches: [ master ] + pull_request: + branches: [ master ] + +jobs: + build: + runs-on: ${{ matrix.os }} + strategy: + matrix: + os: [macos-latest] + python-version: [3.6, 3.7, 3.8] + steps: + - uses: actions/checkout@v2 + - name: Set up Python + uses: actions/setup-python@v2 + with: + python-version: ${{ matrix.python-version }} + - name: Display python version + run: python -c "import sys; print(sys.version)" + - name: Install OS packages + run: brew install libomp + - name: Install package dependencies + run: | + python -m pip install --upgrade pip + pip install -r build_tools/requirements.txt + - name: Install + run: pip install --verbose --editable . 
+ - name: Run tests + run: | + pytest ./tests --cov-config=.coveragerc --cov-report=xml --cov=deepforest deepforest + - name: Publish code coverage + uses: codecov/codecov-action@v1 + with: + token: ${{ secrets.CODECOV_TOKEN }} + file: ./coverage.xml diff --git a/CONTRIBUTORS.md b/CONTRIBUTORS.md index 9f69fda..fb52c53 100644 --- a/CONTRIBUTORS.md +++ b/CONTRIBUTORS.md @@ -12,6 +12,7 @@ Thanks goes to these wonderful people ([emoji key](https://allcontributors.org/d
[contributor table rows, with T-Allen-sudo (🚧 ⚠️) inserted after NiMaZi: Joey Gao (💻), Mr-memorandum (🐛), NiMaZi (💻 ⚠️), T-Allen-sudo (🚧 ⚠️), Yi-Xuan Xu (💻 📖 ⚠️), tczhao:]
💻 📖 ⚠️ From e622b74a5ec2f7025471ca8b9e85b94e1eca7e2e Mon Sep 17 00:00:00 2001 From: xuyxu Date: Sun, 14 Feb 2021 13:30:43 +0800 Subject: [PATCH 33/94] [DOC] Update RTD badge --- README.rst | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/README.rst b/README.rst index c354b39..59be120 100644 --- a/README.rst +++ b/README.rst @@ -6,8 +6,8 @@ Deep Forest (DF) 21 .. |github| image:: https://github.com/LAMDA-NJU/Deep-Forest/workflows/DeepForest-CI/badge.svg .. _github: https://github.com/LAMDA-NJU/Deep-Forest/actions -.. |readthedocs| image:: https://readthedocs.org/projects/deep-forest/badge/?version=latest -.. _readthedocs: https://deep-forest.readthedocs.io/en/latest/ +.. |readthedocs| image:: https://readthedocs.org/projects/deep-forest/badge/?version=master +.. _readthedocs: deep-forest.readthedocs.io .. |codecov| image:: https://codecov.io/gh/LAMDA-NJU/Deep-Forest/branch/master/graph/badge.svg?token=5BVXOT8RPO .. _codecov: https://codecov.io/gh/LAMDA-NJU/Deep-Forest From b5d9658b9c942203ea7d17ea5f612f8e35cd0670 Mon Sep 17 00:00:00 2001 From: xuyxu Date: Sun, 14 Feb 2021 14:21:18 +0800 Subject: [PATCH 34/94] [FIX] Fix RTD link --- README.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.rst b/README.rst index 59be120..27b7c36 100644 --- a/README.rst +++ b/README.rst @@ -7,7 +7,7 @@ Deep Forest (DF) 21 .. _github: https://github.com/LAMDA-NJU/Deep-Forest/actions .. |readthedocs| image:: https://readthedocs.org/projects/deep-forest/badge/?version=master -.. _readthedocs: deep-forest.readthedocs.io +.. _readthedocs: https://deep-forest.readthedocs.io .. |codecov| image:: https://codecov.io/gh/LAMDA-NJU/Deep-Forest/branch/master/graph/badge.svg?token=5BVXOT8RPO .. _codecov: https://codecov.io/gh/LAMDA-NJU/Deep-Forest From fd2546574bd3da80cce98691ba8135d35a006158 Mon Sep 17 00:00:00 2001 From: xuyxu Date: Sun, 14 Feb 2021 22:56:47 +0800 Subject: [PATCH 35/94] [DOC] Update CI for mac-os (#34) --- CHANGELOG.rst | 2 +- docs/installation_guide.rst | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/CHANGELOG.rst b/CHANGELOG.rst index b80450c..9ba8fb4 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -24,13 +24,13 @@ Version 0.1.* .. role:: raw-latex(raw) :format: latex -.. |MajorFeature| replace:: :raw-html:`Major Feature` :raw-latex:`{\small\sc [Major Feature]}` .. |Feature| replace:: :raw-html:`Feature` :raw-latex:`{\small\sc [Feature]}` .. |Efficiency| replace:: :raw-html:`Efficiency` :raw-latex:`{\small\sc [Efficiency]}` .. |Enhancement| replace:: :raw-html:`Enhancement` :raw-latex:`{\small\sc [Enhancement]}` .. |Fix| replace:: :raw-html:`Fix` :raw-latex:`{\small\sc [Fix]}` .. |API| replace:: :raw-html:`API Change` :raw-latex:`{\small\sc [API Change]}` +- |Feature| add official support for Mac-OS (`#34 `__) @T-Allen-sudo - |Feature| support configurable criterion (`#28 `__) @tczhao - |Feature| support regression prediction (`#25 `__) @tczhao - |Fix| fix accepted data types on the :obj:`binner` (`#23 `__) @xuyxu diff --git a/docs/installation_guide.rst b/docs/installation_guide.rst index 02d1769..850c0ae 100644 --- a/docs/installation_guide.rst +++ b/docs/installation_guide.rst @@ -12,7 +12,7 @@ The stable version is available via `PyPI `__ using: The package is portable and with very few package dependencies. It is recommended to use the package environment from `Anaconda `__ since it already installs all required packages. -Notice that only the 64-bit Linux and Windows platform are officially supported. 
To use deep forest on Mac-OS or other platforms, you will need to build the entire package from source. +Notice that only the 64-bit Linux, Windows, and Mac-OS platform are officially supported. To use deep forest on other platforms, you will need to build the entire package from source. Building from Source -------------------- From 37c33dfce9a51e29e4e4dc89562c8156bbd8ba40 Mon Sep 17 00:00:00 2001 From: Yi-Xuan Xu Date: Tue, 16 Feb 2021 15:20:03 +0800 Subject: [PATCH 36/94] [ENH] Add sklearn backend for forest (#36) * add sklearn backend * add unit tests * Update CHANGELOG.rst --- CHANGELOG.rst | 1 + deepforest/_estimator.py | 145 ++++++++++++++++++++++++--------- deepforest/_layer.py | 3 + deepforest/cascade.py | 20 +++++ tests/test_model_classifier.py | 11 ++- tests/test_model_regressor.py | 11 ++- tests/test_tree regressor.py | 5 -- 7 files changed, 148 insertions(+), 48 deletions(-) diff --git a/CHANGELOG.rst b/CHANGELOG.rst index 9ba8fb4..7821d3d 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -30,6 +30,7 @@ Version 0.1.* .. |Fix| replace:: :raw-html:`Fix` :raw-latex:`{\small\sc [Fix]}` .. |API| replace:: :raw-html:`API Change` :raw-latex:`{\small\sc [API Change]}` +- |Feature| add scikit-learn backend (`#36 `__) @xuyxu - |Feature| add official support for Mac-OS (`#34 `__) @T-Allen-sudo - |Feature| support configurable criterion (`#28 `__) @tczhao - |Feature| support regression prediction (`#25 `__) @tczhao diff --git a/deepforest/_estimator.py b/deepforest/_estimator.py index 5419af9..39fe3a0 100644 --- a/deepforest/_estimator.py +++ b/deepforest/_estimator.py @@ -3,12 +3,19 @@ __all__ = ["Estimator"] +import numpy as np from .forest import ( RandomForestClassifier, ExtraTreesClassifier, RandomForestRegressor, ExtraTreesRegressor, ) +from sklearn.ensemble import ( + RandomForestClassifier as sklearn_RandomForestClassifier, + ExtraTreesClassifier as sklearn_ExtraTreesClassifier, + RandomForestRegressor as sklearn_RandomForestRegressor, + ExtraTreesRegressor as sklearn_ExtraTreesRegressor, +) def make_classifier_estimator( @@ -17,29 +24,54 @@ def make_classifier_estimator( n_trees=100, max_depth=None, min_samples_leaf=1, + backend="custom", n_jobs=None, random_state=None, ): # RandomForestClassifier if name == "rf": - estimator = RandomForestClassifier( - criterion=criterion, - n_estimators=n_trees, - max_depth=max_depth, - min_samples_leaf=min_samples_leaf, - n_jobs=n_jobs, - random_state=random_state, - ) + if backend == "custom": + estimator = RandomForestClassifier( + criterion=criterion, + n_estimators=n_trees, + max_depth=max_depth, + min_samples_leaf=min_samples_leaf, + n_jobs=n_jobs, + random_state=random_state, + ) + elif backend == "sklearn": + estimator = sklearn_RandomForestClassifier( + criterion=criterion, + n_estimators=n_trees, + max_depth=max_depth, + min_samples_leaf=min_samples_leaf, + bootstrap=True, + oob_score=True, + n_jobs=n_jobs, + random_state=random_state, + ) # ExtraTreesClassifier elif name == "erf": - estimator = ExtraTreesClassifier( - criterion=criterion, - n_estimators=n_trees, - max_depth=max_depth, - min_samples_leaf=min_samples_leaf, - n_jobs=n_jobs, - random_state=random_state, - ) + if backend == "custom": + estimator = ExtraTreesClassifier( + criterion=criterion, + n_estimators=n_trees, + max_depth=max_depth, + min_samples_leaf=min_samples_leaf, + n_jobs=n_jobs, + random_state=random_state, + ) + elif backend == "sklearn": + estimator = sklearn_ExtraTreesClassifier( + criterion=criterion, + n_estimators=n_trees, + max_depth=max_depth, + 
min_samples_leaf=min_samples_leaf, + bootstrap=True, + oob_score=True, + n_jobs=n_jobs, + random_state=random_state, + ) else: msg = "Unknown type of estimator, which should be one of {{rf, erf}}." raise NotImplementedError(msg) @@ -53,29 +85,54 @@ def make_regressor_estimator( n_trees=100, max_depth=None, min_samples_leaf=1, + backend="custom", n_jobs=None, random_state=None, ): # RandomForestRegressor if name == "rf": - estimator = RandomForestRegressor( - criterion=criterion, - n_estimators=n_trees, - max_depth=max_depth, - min_samples_leaf=min_samples_leaf, - n_jobs=n_jobs, - random_state=random_state, - ) + if backend == "custom": + estimator = RandomForestRegressor( + criterion=criterion, + n_estimators=n_trees, + max_depth=max_depth, + min_samples_leaf=min_samples_leaf, + n_jobs=n_jobs, + random_state=random_state, + ) + elif backend == "sklearn": + estimator = sklearn_RandomForestRegressor( + criterion=criterion, + n_estimators=n_trees, + max_depth=max_depth, + min_samples_leaf=min_samples_leaf, + bootstrap=True, + oob_score=True, + n_jobs=n_jobs, + random_state=random_state, + ) # ExtraTreesRegressor elif name == "erf": - estimator = ExtraTreesRegressor( - criterion=criterion, - n_estimators=n_trees, - max_depth=max_depth, - min_samples_leaf=min_samples_leaf, - n_jobs=n_jobs, - random_state=random_state, - ) + if backend == "custom": + estimator = ExtraTreesRegressor( + criterion=criterion, + n_estimators=n_trees, + max_depth=max_depth, + min_samples_leaf=min_samples_leaf, + n_jobs=n_jobs, + random_state=random_state, + ) + elif backend == "sklearn": + estimator = sklearn_ExtraTreesRegressor( + criterion=criterion, + n_estimators=n_trees, + max_depth=max_depth, + min_samples_leaf=min_samples_leaf, + bootstrap=True, + oob_score=True, + n_jobs=n_jobs, + random_state=random_state, + ) else: msg = "Unknown type of estimator, which should be one of {{rf, erf}}." 
raise NotImplementedError(msg) @@ -91,11 +148,13 @@ def __init__( n_trees=100, max_depth=None, min_samples_leaf=1, + backend="custom", n_jobs=None, random_state=None, is_classifier=True, ): + self.backend = backend self.is_classifier = is_classifier if self.is_classifier: self.estimator_ = make_classifier_estimator( @@ -104,6 +163,7 @@ def __init__( n_trees, max_depth, min_samples_leaf, + backend, n_jobs, random_state, ) @@ -114,26 +174,33 @@ def __init__( n_trees, max_depth, min_samples_leaf, + backend, n_jobs, random_state, ) @property def oob_decision_function_(self): + # Scikit-Learn uses `oob_prediction_` for ForestRegressor + if self.backend == "sklearn" and not self.is_classifier: + oob_prediction = self.estimator_.oob_prediction_ + if len(oob_prediction.shape) == 1: + oob_prediction = np.expand_dims(oob_prediction, 1) + return oob_prediction return self.estimator_.oob_decision_function_ def fit_transform(self, X, y, sample_weight=None): self.estimator_.fit(X, y, sample_weight) - X_aug = self.estimator_.oob_decision_function_ - - return X_aug + return self.oob_decision_function_ def transform(self, X): - if self.is_classifier: - return self.estimator_.predict_proba(X) - return self.estimator_.predict(X) + """Preserved for the naming consistency.""" + return self.predict(X) def predict(self, X): if self.is_classifier: return self.estimator_.predict_proba(X) - return self.estimator_.predict(X) + pred = self.estimator_.predict(X) + if len(pred.shape) == 1: + pred = np.expand_dims(pred, 1) + return pred diff --git a/deepforest/_layer.py b/deepforest/_layer.py index 4af360c..e9d0dd9 100644 --- a/deepforest/_layer.py +++ b/deepforest/_layer.py @@ -52,6 +52,7 @@ def __init__( n_trees=100, max_depth=None, min_samples_leaf=1, + backend="custom", partial_mode=False, buffer=None, n_jobs=None, @@ -66,6 +67,7 @@ def __init__( self.n_trees = n_trees self.max_depth = max_depth self.min_samples_leaf = min_samples_leaf + self.backend = backend self.partial_mode = partial_mode self.buffer = buffer self.n_jobs = n_jobs @@ -95,6 +97,7 @@ def _make_estimator(self, estimator_idx, estimator_name): n_trees=self.n_trees, max_depth=self.max_depth, min_samples_leaf=self.min_samples_leaf, + backend=self.backend, n_jobs=self.n_jobs, random_state=random_state, is_classifier=self.is_classifier, diff --git a/deepforest/cascade.py b/deepforest/cascade.py index 25ed539..aabc4ac 100644 --- a/deepforest/cascade.py +++ b/deepforest/cascade.py @@ -235,6 +235,10 @@ def _build_regressor_predictor( Specifying this will extend/overwrite the original parameters inherit from deep forest. If ``use_predictor`` is False, this parameter will have no effect. + backend : :obj:`{"custom", "sklearn"}`, default="custom" + The backend of the forest estimator. Supported backends are ``custom`` + for higher time and memory efficiency and ``sklearn`` for additional + functionality. n_tolerant_rounds : :obj:`int`, default=2 Specify when to conduct early stopping. The training process terminates when the validation performance on the training set does @@ -345,6 +349,10 @@ def _build_regressor_predictor( Specifying this will extend/overwrite the original parameters inherit from deep forest. If ``use_predictor`` is False, this parameter will have no effect. + backend : :obj:`{"custom", "sklearn"}`, default="custom" + The backend of the forest estimator. Supported backends are ``custom`` + for higher time and memory efficiency and ``sklearn`` for additional + functionality. 
n_tolerant_rounds : :obj:`int`, default=2 Specify when to conduct early stopping. The training process terminates when the validation performance on the training set does @@ -461,6 +469,7 @@ def __init__( use_predictor=False, predictor="forest", predictor_kwargs={}, + backend="custom", n_tolerant_rounds=2, delta=1e-5, partial_mode=False, @@ -478,6 +487,7 @@ def __init__( self.max_depth = max_depth self.min_samples_leaf = min_samples_leaf self.predictor_kwargs = predictor_kwargs + self.backend = backend self.n_tolerant_rounds = n_tolerant_rounds self.delta = delta self.partial_mode = partial_mode @@ -607,6 +617,10 @@ def _validate_params(self): msg = "max_layers = {} should be strictly positive." raise ValueError(msg.format(self.max_layers)) + if not self.backend in ("custom", "sklearn"): + msg = "backend = {} should be one of {{custom, sklearn}}." + raise ValueError(msg.format(self.backend)) + if not self.n_tolerant_rounds > 0: msg = "n_tolerant_rounds = {} should be strictly positive." raise ValueError(msg.format(self.n_tolerant_rounds)) @@ -729,6 +743,7 @@ def fit(self, X, y, sample_weight=None): self._set_n_trees(0), self.max_depth, self.min_samples_leaf, + self.backend, self.partial_mode, self.buffer_, self.n_jobs, @@ -805,6 +820,7 @@ def fit(self, X, y, sample_weight=None): self._set_n_trees(layer_idx), self.max_depth, self.min_samples_leaf, + self.backend, self.partial_mode, self.buffer_, self.n_jobs, @@ -1134,6 +1150,7 @@ def __init__( use_predictor=False, predictor="forest", predictor_kwargs={}, + backend="custom", n_tolerant_rounds=2, delta=1e-5, partial_mode=False, @@ -1154,6 +1171,7 @@ def __init__( use_predictor=use_predictor, predictor=predictor, predictor_kwargs=predictor_kwargs, + backend=backend, n_tolerant_rounds=n_tolerant_rounds, delta=delta, partial_mode=partial_mode, @@ -1331,6 +1349,7 @@ def __init__( use_predictor=False, predictor="forest", predictor_kwargs={}, + backend="custom", n_tolerant_rounds=2, delta=1e-5, partial_mode=False, @@ -1351,6 +1370,7 @@ def __init__( use_predictor=use_predictor, predictor=predictor, predictor_kwargs=predictor_kwargs, + backend=backend, n_tolerant_rounds=n_tolerant_rounds, delta=delta, partial_mode=partial_mode, diff --git a/tests/test_model_classifier.py b/tests/test_model_classifier.py index 6546858..0a32315 100644 --- a/tests/test_model_classifier.py +++ b/tests/test_model_classifier.py @@ -124,11 +124,13 @@ def test_model_properties_after_fitting(): assert "`forest_type` should be one of" in str(excinfo.value) -def test_model_workflow_partial_mode(): +@pytest.mark.parametrize("backend", ["custom", "sklearn"]) +def test_model_workflow_partial_mode(backend): """Run the workflow of deep forest with a local buffer.""" case_kwargs = copy.deepcopy(kwargs) case_kwargs.update({"partial_mode": True}) + case_kwargs.update({"backend": backend}) model = CascadeForestClassifier(**case_kwargs) model.fit(X_train, y_train) @@ -187,11 +189,13 @@ def test_model_sample_weight(): model.clean() # clear the buffer -def test_model_workflow_in_memory(): +@pytest.mark.parametrize("backend", ["custom", "sklearn"]) +def test_model_workflow_in_memory(backend): """Run the workflow of deep forest with in-memory mode.""" case_kwargs = copy.deepcopy(kwargs) case_kwargs.update({"partial_mode": False}) + case_kwargs.update({"backend": backend}) model = CascadeForestClassifier(**case_kwargs) model.fit(X_train, y_train) @@ -219,6 +223,7 @@ def test_model_workflow_in_memory(): (0, {"max_layers": 0}), (1, {"n_tolerant_rounds": 0}), (2, {"delta": -1}), + (3, 
{"backend": "unknown"}), ], ) def test_model_invalid_training_params(param): @@ -236,6 +241,8 @@ def test_model_invalid_training_params(param): assert "n_tolerant_rounds" in str(excinfo.value) elif param[0] == 2: assert "delta " in str(excinfo.value) + elif param[0] == 3: + assert "backend" in str(excinfo.value) @pytest.mark.parametrize("predictor", ["forest", "xgboost", "lightgbm"]) diff --git a/tests/test_model_regressor.py b/tests/test_model_regressor.py index 6d13916..c3b7887 100644 --- a/tests/test_model_regressor.py +++ b/tests/test_model_regressor.py @@ -106,11 +106,13 @@ def test_model_properties_after_fitting(): assert "already exists in the internal container" in str(excinfo.value) -def test_model_workflow_partial_mode(): +@pytest.mark.parametrize("backend", ["custom", "sklearn"]) +def test_model_workflow_partial_mode(backend): """Run the workflow of deep forest with a local buffer.""" case_kwargs = copy.deepcopy(kwargs) case_kwargs.update({"partial_mode": True}) + case_kwargs.update({"backend": backend}) model = CascadeForestRegressor(**case_kwargs) model.fit(X_train, y_train) @@ -134,11 +136,13 @@ def test_model_workflow_partial_mode(): shutil.rmtree(save_dir) -def test_model_workflow_in_memory(): +@pytest.mark.parametrize("backend", ["custom", "sklearn"]) +def test_model_workflow_in_memory(backend): """Run the workflow of deep forest with in-memory mode.""" case_kwargs = copy.deepcopy(kwargs) case_kwargs.update({"partial_mode": False}) + case_kwargs.update({"backend": backend}) model = CascadeForestRegressor(**case_kwargs) model.fit(X_train, y_train) @@ -166,6 +170,7 @@ def test_model_workflow_in_memory(): (0, {"max_layers": 0}), (1, {"n_tolerant_rounds": 0}), (2, {"delta": -1}), + (3, {"backend": "unknown"}), ], ) def test_model_invalid_training_params(param): @@ -183,6 +188,8 @@ def test_model_invalid_training_params(param): assert "n_tolerant_rounds" in str(excinfo.value) elif param[0] == 2: assert "delta " in str(excinfo.value) + elif param[0] == 3: + assert "backend" in str(excinfo.value) @pytest.mark.parametrize("predictor", ["forest", "xgboost", "lightgbm"]) diff --git a/tests/test_tree regressor.py b/tests/test_tree regressor.py index 6baeb33..2b43180 100644 --- a/tests/test_tree regressor.py +++ b/tests/test_tree regressor.py @@ -64,8 +64,3 @@ def test_tree_fit_invalid_training_params(): with pytest.raises(ValueError) as execinfo: tree.fit(X_binned, y) assert "max_depth must be greater than zero." in str(execinfo.value) - - -if __name__ == "__main__": - - test_tree_properties_after_fitting() From 2f4241bc9d9cb104baf19f2aa77ec141591f5093 Mon Sep 17 00:00:00 2001 From: Yi-Xuan Xu Date: Sun, 21 Feb 2021 13:36:38 +0800 Subject: [PATCH 37/94] [ENH] Add layer-wise feature importances (#39) * add feature importances * black formatting * update unit tests * Update CHANGELOG.rst --- CHANGELOG.rst | 1 + deepforest/_estimator.py | 6 ++++++ deepforest/_layer.py | 16 +++++++++++++++- deepforest/cascade.py | 28 ++++++++++++++++++++++++++++ tests/test_model_classifier.py | 16 ++++++++++++++++ 5 files changed, 66 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.rst b/CHANGELOG.rst index 7821d3d..5403019 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -30,6 +30,7 @@ Version 0.1.* .. |Fix| replace:: :raw-html:`Fix` :raw-latex:`{\small\sc [Fix]}` .. 
|API| replace:: :raw-html:`API Change` :raw-latex:`{\small\sc [API Change]}` +- |Feature| add layer-wise feature importances (`#39 `__) @xuyxu - |Feature| add scikit-learn backend (`#36 `__) @xuyxu - |Feature| add official support for Mac-OS (`#34 `__) @T-Allen-sudo - |Feature| support configurable criterion (`#28 `__) @tczhao diff --git a/deepforest/_estimator.py b/deepforest/_estimator.py index 39fe3a0..679c356 100644 --- a/deepforest/_estimator.py +++ b/deepforest/_estimator.py @@ -189,6 +189,12 @@ def oob_decision_function_(self): return oob_prediction return self.estimator_.oob_decision_function_ + @property + def feature_importances_(self): + """Return the impurity-based feature importances from the estimator.""" + + return self.estimator_.feature_importances_ + def fit_transform(self, X, y, sample_weight=None): self.estimator_.fit(X, y, sample_weight) return self.oob_decision_function_ diff --git a/deepforest/_layer.py b/deepforest/_layer.py index e9d0dd9..6e0888c 100644 --- a/deepforest/_layer.py +++ b/deepforest/_layer.py @@ -81,6 +81,20 @@ def __init__( def n_trees_(self): return self.n_estimators * self.n_trees + @property + def feature_importances_(self): + feature_importances_ = np.zeros((self.n_features,)) + for idx, (key, estimator) in enumerate(self.estimators_.items()): + # Partial mode + if isinstance(estimator, str): + estimator_ = self.buffer.load_estimator(estimator) + feature_importances_ += estimator_.feature_importances_ + # In-memory mode + else: + feature_importances_ += estimator.feature_importances_ + + return feature_importances_ / len(self.estimators_) + def _make_estimator(self, estimator_idx, estimator_name): """Make and configure a copy of the estimator.""" # Set the non-overlapped random state @@ -118,7 +132,7 @@ def _validate_params(self): def fit_transform(self, X, y, sample_weight=None): self._validate_params() - n_samples, _ = X.shape + n_samples, self.n_features = X.shape X_aug = [] if self.is_classifier: diff --git a/deepforest/cascade.py b/deepforest/cascade.py index aabc4ac..1ce2ea6 100644 --- a/deepforest/cascade.py +++ b/deepforest/cascade.py @@ -971,6 +971,34 @@ def fit(self, X, y, sample_weight=None): return self + def get_layer_feature_importances(self, layer_idx): + """ + Return the impurity-based feature importances of the ``layer_idx``-th + cascade layer, defined as the average over feature importances from + all base estimators in the cascade layer. + + Parameters + ---------- + layer_idx : :obj:`int` + The index of the cascade layer, should be in the range + ``[0, self.n_layers_-1]``. + + Returns + ------- + feature_importances_: :obj:`numpy.ndarray` of shape (n_features,) + The impurity-based feature importances of the cascade layer. + Notice that the number of input features are different between the + first cascade layer and remaining cascade layers. + """ + if self.backend == "custom": + msg = ( + "Please use the sklearn backend to get the feature" + " importances property for each cascade layer." 
+ ) + raise RuntimeError(msg) + layer = self._get_layer(layer_idx) + return layer.feature_importances_ + def get_forest(self, layer_idx, est_idx, forest_type): """ Get the `est_idx`-th forest estimator from the `layer_idx`-th diff --git a/tests/test_model_classifier.py b/tests/test_model_classifier.py index 0a32315..c1bd9ae 100644 --- a/tests/test_model_classifier.py +++ b/tests/test_model_classifier.py @@ -135,6 +135,14 @@ def test_model_workflow_partial_mode(backend): model = CascadeForestClassifier(**case_kwargs) model.fit(X_train, y_train) + # Test feature_importances_ + if backend == "sklearn": + model.get_layer_feature_importances(0) + else: + with pytest.raises(RuntimeError) as excinfo: + model.get_layer_feature_importances(0) + assert "Please use the sklearn backend" in str(excinfo.value) + # Predictions before saving y_pred_before = model.predict(X_test) @@ -200,6 +208,14 @@ def test_model_workflow_in_memory(backend): model = CascadeForestClassifier(**case_kwargs) model.fit(X_train, y_train) + # Test feature_importances_ + if backend == "sklearn": + model.get_layer_feature_importances(0) + else: + with pytest.raises(RuntimeError) as excinfo: + model.get_layer_feature_importances(0) + assert "Please use the sklearn backend" in str(excinfo.value) + # Predictions before saving y_pred_before = model.predict(X_test) From fb280b7751f1a05211844602de61c790b971f747 Mon Sep 17 00:00:00 2001 From: xuyxu Date: Sun, 21 Feb 2021 15:15:46 +0800 Subject: [PATCH 38/94] [DOC] Improve docstrings --- deepforest/cascade.py | 28 ++++++++++++++-------------- docs/api_reference.rst | 2 ++ 2 files changed, 16 insertions(+), 14 deletions(-) diff --git a/deepforest/cascade.py b/deepforest/cascade.py index 1ce2ea6..13f6cba 100644 --- a/deepforest/cascade.py +++ b/deepforest/cascade.py @@ -201,7 +201,7 @@ def _build_regressor_predictor( The number of bins used for non-missing values. In addition to the ``n_bins`` bins, one more bin is reserved for missing values. Its value must be no smaller than 2 and no greater than 255. - bin_subsample : :obj:`int`, default=2e5 + bin_subsample : :obj:`int`, default=200,000 The number of samples used to construct feature discrete bins. If the size of training set is smaller than ``bin_subsample``, then all training samples will be used. @@ -209,7 +209,7 @@ def _build_regressor_predictor( The maximum number of cascade layers in the deep forest. Notice that the actual number of layers can be smaller than ``max_layers`` because of the internal early stopping stage. - criterion : :obj:`{"gini", "entropy"}`, default="gini" + criterion : :obj:`{"gini", "entropy"}`, default= :obj:`"gini"` The function to measure the quality of a split. Supported criteria are ``gini`` for the Gini impurity and ``entropy`` for the information gain. Note: this parameter is tree-specific. @@ -227,7 +227,7 @@ def _build_regressor_predictor( use_predictor : :obj:`bool`, default=False Whether to build the predictor concatenated to the deep forest. Using the predictor may improve the performance of deep forest. - predictor : :obj:`{"forest", "xgboost", "lightgbm"}`, default="forest" + predictor : :obj:`{"forest", "xgboost", "lightgbm"}`, default= :obj:`"forest"` The type of the predictor concatenated to the deep forest. If ``use_predictor`` is False, this parameter will have no effect. predictor_kwargs : :obj:`dict`, default={} @@ -235,7 +235,7 @@ def _build_regressor_predictor( Specifying this will extend/overwrite the original parameters inherit from deep forest. 
If ``use_predictor`` is False, this parameter will have no effect. - backend : :obj:`{"custom", "sklearn"}`, default="custom" + backend : :obj:`{"custom", "sklearn"}`, default= :obj:`"custom"` The backend of the forest estimator. Supported backends are ``custom`` for higher time and memory efficiency and ``sklearn`` for additional functionality. @@ -315,7 +315,7 @@ def _build_regressor_predictor( The number of bins used for non-missing values. In addition to the ``n_bins`` bins, one more bin is reserved for missing values. Its value must be no smaller than 2 and no greater than 255. - bin_subsample : :obj:`int`, default=2e5 + bin_subsample : :obj:`int`, default=200,000 The number of samples used to construct feature discrete bins. If the size of training set is smaller than ``bin_subsample``, then all training samples will be used. @@ -323,7 +323,7 @@ def _build_regressor_predictor( The maximum number of cascade layers in the deep forest. Notice that the actual number of layers can be smaller than ``max_layers`` because of the internal early stopping stage. - criterion : :obj:`{"mse", "mae"}`, default="mse" + criterion : :obj:`{"mse", "mae"}`, default= :obj:`"mse"` The function to measure the quality of a split. Supported criteria are ``mse`` for the mean squared error, which is equal to variance reduction as feature selection criterion, and ``mae`` for the mean absolute error. @@ -341,7 +341,7 @@ def _build_regressor_predictor( use_predictor : :obj:`bool`, default=False Whether to build the predictor concatenated to the deep forest. Using the predictor may improve the performance of deep forest. - predictor : :obj:`{"forest", "xgboost", "lightgbm"}`, default="forest" + predictor : :obj:`{"forest", "xgboost", "lightgbm"}`, default= :obj:`"forest"` The type of the predictor concatenated to the deep forest. If ``use_predictor`` is False, this parameter will have no effect. predictor_kwargs : :obj:`dict`, default={} @@ -349,7 +349,7 @@ def _build_regressor_predictor( Specifying this will extend/overwrite the original parameters inherit from deep forest. If ``use_predictor`` is False, this parameter will have no effect. - backend : :obj:`{"custom", "sklearn"}`, default="custom" + backend : :obj:`{"custom", "sklearn"}`, default= :obj:`"custom"` The backend of the forest estimator. Supported backends are ``custom`` for higher time and memory efficiency and ``sklearn`` for additional functionality. @@ -458,7 +458,7 @@ class BaseCascadeForest(BaseEstimator, metaclass=ABCMeta): def __init__( self, n_bins=255, - bin_subsample=2e5, + bin_subsample=200000, bin_type="percentile", max_layers=20, criterion="", @@ -686,7 +686,7 @@ def _handle_early_stopping(self): def _if_improved(self, new_pivot, pivot, delta, is_classifier): """ - Return true if new vlidation result is better than previous""" + Return true if new validation result is better than previous""" if is_classifier: return new_pivot >= pivot + delta return new_pivot <= pivot - delta @@ -1070,7 +1070,7 @@ def save(self, dirname="model"): .. warning:: Other methods on model serialization such as :mod:`pickle` or :mod:`joblib` are not recommended, especially when ``partial_mode`` - is set to ``True``. + is set to True. """ # Create the output directory _io.model_mkdir(dirname) @@ -1153,7 +1153,7 @@ def load(self, dirname): def clean(self): """ - Clean the buffer created by the model if ``partial_mode`` is ``True``. + Clean the buffer created by the model if ``partial_mode`` is True. 
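
The persistence helpers documented above follow a directory-based save/load convention; a round-trip sketch with placeholder paths and toy data (illustrative, not from the patch)::

    from sklearn.datasets import load_digits
    from deepforest import CascadeForestClassifier

    X, y = load_digits(return_X_y=True)
    model = CascadeForestClassifier(n_estimators=2, n_trees=50, random_state=0)
    model.fit(X, y)
    model.save("df21_model")         # preferred over pickle/joblib (see the warning)

    new_model = CascadeForestClassifier()
    new_model.load("df21_model")     # restores the layers, binners, and any predictor
    y_pred = new_model.predict(X)
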
""" if self.partial_mode: self.buffer_.close() @@ -1167,7 +1167,7 @@ class CascadeForestClassifier(BaseCascadeForest, ClassifierMixin): def __init__( self, n_bins=255, - bin_subsample=2e5, + bin_subsample=200000, bin_type="percentile", max_layers=20, criterion="gini", @@ -1366,7 +1366,7 @@ class CascadeForestRegressor(BaseCascadeForest, RegressorMixin): def __init__( self, n_bins=255, - bin_subsample=2e5, + bin_subsample=200000, bin_type="percentile", max_layers=20, criterion="mse", diff --git a/docs/api_reference.rst b/docs/api_reference.rst index be7476c..aefe0ae 100644 --- a/docs/api_reference.rst +++ b/docs/api_reference.rst @@ -11,6 +11,7 @@ CascadeForestClassifier :inherited-members: :show-inheritance: :no-undoc-members: + :exclude-members: set_params, get_params, score :member-order: bysource CascadeForestRegressor @@ -21,4 +22,5 @@ CascadeForestRegressor :inherited-members: :show-inheritance: :no-undoc-members: + :exclude-members: set_params, get_params, score :member-order: bysource From 2283a67c0d215a834c7f004be8ba8e106a48aa64 Mon Sep 17 00:00:00 2001 From: "allcontributors[bot]" <46447321+allcontributors[bot]@users.noreply.github.com> Date: Mon, 22 Feb 2021 10:36:43 +0800 Subject: [PATCH 39/94] [DOC] Update contributors (#42) * docs: update CONTRIBUTORS.md [skip ci] * docs: update .all-contributorsrc [skip ci] Co-authored-by: allcontributors[bot] <46447321+allcontributors[bot]@users.noreply.github.com> --- .all-contributorsrc | 9 +++++++++ CONTRIBUTORS.md | 3 +++ 2 files changed, 12 insertions(+) diff --git a/.all-contributorsrc b/.all-contributorsrc index e9f9412..807b336 100644 --- a/.all-contributorsrc +++ b/.all-contributorsrc @@ -78,6 +78,15 @@ "maintenance", "test" ] + }, + { + "login": "zhenlingcn", + "name": "zhenlingcn", + "avatar_url": "https://avatars.githubusercontent.com/u/18747119?v=4", + "profile": "https://github.com/zhenlingcn", + "contributions": [ + "bug" + ] } ], "contributorsPerLine": 7, diff --git a/CONTRIBUTORS.md b/CONTRIBUTORS.md index fb52c53..fc92f73 100644 --- a/CONTRIBUTORS.md +++ b/CONTRIBUTORS.md @@ -16,6 +16,9 @@ Thanks goes to these wonderful people ([emoji key](https://allcontributors.org/d
Yi-Xuan Xu 💻 📖 ⚠️
tczhao 💻 📖 ⚠️
+
+
+ zhenlingcn
🐛 + From 043940f1cfa242b478912a5809e394b2dc83477a Mon Sep 17 00:00:00 2001 From: "allcontributors[bot]" <46447321+allcontributors[bot]@users.noreply.github.com> Date: Mon, 22 Feb 2021 22:14:13 +0800 Subject: [PATCH 40/94] [DOC] Update contributors (#43) * docs: update CONTRIBUTORS.md [skip ci] * docs: update .all-contributorsrc [skip ci] Co-authored-by: allcontributors[bot] <46447321+allcontributors[bot]@users.noreply.github.com> --- .all-contributorsrc | 10 ++++++++++ CONTRIBUTORS.md | 3 ++- 2 files changed, 12 insertions(+), 1 deletion(-) diff --git a/.all-contributorsrc b/.all-contributorsrc index 807b336..c980284 100644 --- a/.all-contributorsrc +++ b/.all-contributorsrc @@ -87,6 +87,16 @@ "contributions": [ "bug" ] + }, + { + "login": "Alex-Medium", + "name": "Alex-Medium", + "avatar_url": "https://avatars.githubusercontent.com/u/78067955?v=4", + "profile": "http://alex-medium.github.io", + "contributions": [ + "code", + "test" + ] } ], "contributorsPerLine": 7, diff --git a/CONTRIBUTORS.md b/CONTRIBUTORS.md index fc92f73..0f51e94 100644 --- a/CONTRIBUTORS.md +++ b/CONTRIBUTORS.md @@ -8,15 +8,16 @@ Thanks goes to these wonderful people ([emoji key](https://allcontributors.org/d + - +

Alex-Medium

💻 ⚠️

Dwaipayan Munshi

📖

Joey Gao

💻

Mr-memorandum

🐛

NiMaZi

💻 ⚠️

T-Allen-sudo

🚧 ⚠️

Yi-Xuan Xu

💻 📖 ⚠️

tczhao

💻 📖 ⚠️

tczhao

💻 📖 ⚠️

zhenlingcn

🐛
From 2189a9bc638d9cfab79631653e8dee1ed7d71294 Mon Sep 17 00:00:00 2001 From: Alex-Medium <78067955+Alex-Medium@users.noreply.github.com> Date: Mon, 22 Feb 2021 22:16:21 +0800 Subject: [PATCH 41/94] [ENH] Add multi-output support for CascadeForestRegressor (#40) * add multi output support * Update CHANGELOG.rst * refactor the layer class * Update _layer.py * update docstrings --- CHANGELOG.rst | 1 + deepforest/_io.py | 31 +++-- deepforest/_layer.py | 240 +++++++++++++++++++++++++--------- deepforest/cascade.py | 122 +++++++++-------- deepforest/forest.py | 10 +- deepforest/tree/tree.py | 6 +- tests/test_layer_estimator.py | 22 ++-- tests/test_tree_same.py | 70 +++++++++- 8 files changed, 343 insertions(+), 159 deletions(-) diff --git a/CHANGELOG.rst b/CHANGELOG.rst index 5403019..4c5610c 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -30,6 +30,7 @@ Version 0.1.* .. |Fix| replace:: :raw-html:`Fix` :raw-latex:`{\small\sc [Fix]}` .. |API| replace:: :raw-html:`API Change` :raw-latex:`{\small\sc [API Change]}` +- |Feature| add multi-output support for :obj:`CascadeForestRegressor` (`#40 `__) @Alex-Medium - |Feature| add layer-wise feature importances (`#39 `__) @xuyxu - |Feature| add scikit-learn backend (`#36 `__) @xuyxu - |Feature| add official support for Mac-OS (`#34 `__) @T-Allen-sudo diff --git a/deepforest/_io.py b/deepforest/_io.py index 419857d..6654776 100644 --- a/deepforest/_io.py +++ b/deepforest/_io.py @@ -303,7 +303,7 @@ def model_loadobj(dirname, obj_type, d=None): obj = load(os.path.join(dirname, "{}.pkl".format(obj_type))) return obj elif obj_type == "layer": - from ._layer import Layer # avoid circular import + from ._layer import ClassificationCascadeLayer, RegressionCascadeLayer if not isinstance(d, dict): msg = "Loading layers requires the dict from `param.pkl`." @@ -316,15 +316,26 @@ def model_loadobj(dirname, obj_type, d=None): for layer_idx in range(n_layers): # Build a temporary layer - layer_ = Layer( - layer_idx=layer_idx, - n_classes=d["n_outputs"], - criterion=d["criterion"], - n_estimators=d["n_estimators"], - partial_mode=d["partial_mode"], - buffer=d["buffer"], - verbose=d["verbose"], - ) + if d["is_classifier"]: + layer_ = ClassificationCascadeLayer( + layer_idx=layer_idx, + n_outputs=d["n_outputs"], + criterion=d["criterion"], + n_estimators=d["n_estimators"], + partial_mode=d["partial_mode"], + buffer=d["buffer"], + verbose=d["verbose"], + ) + else: + layer_ = RegressionCascadeLayer( + layer_idx=layer_idx, + n_outputs=d["n_outputs"], + criterion=d["criterion"], + n_estimators=d["n_estimators"], + partial_mode=d["partial_mode"], + buffer=d["buffer"], + verbose=d["verbose"], + ) for est_type in ("rf", "erf"): for est_idx in range(n_estimators): diff --git a/deepforest/_layer.py b/deepforest/_layer.py index 6e0888c..605a25e 100644 --- a/deepforest/_layer.py +++ b/deepforest/_layer.py @@ -1,10 +1,16 @@ """Implementation of the forest-based cascade layer.""" -__all__ = ["Layer"] +__all__ = [ + "BaseCascadeLayer", + "ClassificationCascadeLayer", + "RegressionCascadeLayer", +] import numpy as np +from sklearn.base import is_classifier from sklearn.metrics import accuracy_score, mean_squared_error +from sklearn.base import BaseEstimator, ClassifierMixin, RegressorMixin from . 
import _utils from ._estimator import Estimator @@ -42,11 +48,11 @@ def _build_estimator( return X_aug_train, estimator -class Layer(object): +class BaseCascadeLayer(BaseEstimator): def __init__( self, layer_idx, - n_classes, + n_outputs, criterion, n_estimators=2, n_trees=100, @@ -58,10 +64,9 @@ def __init__( n_jobs=None, random_state=None, verbose=1, - is_classifier=True, ): self.layer_idx = layer_idx - self.n_classes = n_classes + self.n_outputs = n_outputs self.criterion = criterion self.n_estimators = n_estimators * 2 # internal conversion self.n_trees = n_trees @@ -73,7 +78,6 @@ def __init__( self.n_jobs = n_jobs self.random_state = random_state self.verbose = verbose - self.is_classifier = is_classifier # Internal container self.estimators_ = {} @@ -114,7 +118,7 @@ def _make_estimator(self, estimator_idx, estimator_name): backend=self.backend, n_jobs=self.n_jobs, random_state=random_state, - is_classifier=self.is_classifier, + is_classifier=is_classifier(self), ) return estimator @@ -129,16 +133,87 @@ def _validate_params(self): msg = "`n_trees` = {} should be strictly positive." raise ValueError(msg.format(self.n_trees)) + def transform(self, X): + """ + Return the concatenated transformation results from all base + estimators.""" + n_samples, _ = X.shape + X_aug = np.zeros((n_samples, self.n_outputs * self.n_estimators)) + for idx, (key, estimator) in enumerate(self.estimators_.items()): + if self.verbose > 1: + msg = "{} - Evaluating estimator = {:<5} in layer = {}" + key = key.split("-")[-1] + "_" + str(key.split("-")[-2]) + print(msg.format(_utils.ctime(), key, self.layer_idx)) + if self.partial_mode: + # Load the estimator from the buffer + estimator = self.buffer.load_estimator(estimator) + + left, right = self.n_outputs * idx, self.n_outputs * (idx + 1) + X_aug[:, left:right] += estimator.predict(X) + + return X_aug + + def predict_full(self, X): + """Return the concatenated predictions from all base estimators.""" + n_samples, _ = X.shape + pred = np.zeros((n_samples, self.n_outputs * self.n_estimators)) + for idx, (key, estimator) in enumerate(self.estimators_.items()): + if self.verbose > 1: + msg = "{} - Evaluating estimator = {:<5} in layer = {}" + key = key.split("-")[-1] + "_" + str(key.split("-")[-2]) + print(msg.format(_utils.ctime(), key, self.layer_idx)) + if self.partial_mode: + # Load the estimator from the buffer + estimator = self.buffer.load_estimator(estimator) + + left, right = self.n_outputs * idx, self.n_outputs * (idx + 1) + pred[:, left:right] += estimator.predict(X) + + return pred + + +class ClassificationCascadeLayer(BaseCascadeLayer, ClassifierMixin): + """Implementation of the cascade forest layer for classification.""" + + def __init__( + self, + layer_idx, + n_outputs, + criterion, + n_estimators=2, + n_trees=100, + max_depth=None, + min_samples_leaf=1, + backend="custom", + partial_mode=False, + buffer=None, + n_jobs=None, + random_state=None, + verbose=1, + ): + super().__init__( + layer_idx=layer_idx, + n_outputs=n_outputs, + criterion=criterion, + n_estimators=n_estimators, + n_trees=n_trees, + max_depth=max_depth, + min_samples_leaf=min_samples_leaf, + backend=backend, + partial_mode=partial_mode, + buffer=buffer, + n_jobs=n_jobs, + random_state=random_state, + verbose=verbose, + ) + def fit_transform(self, X, y, sample_weight=None): self._validate_params() n_samples, self.n_features = X.shape X_aug = [] - if self.is_classifier: - oob_decision_function = np.zeros((n_samples, self.n_classes)) - else: - oob_decision_function = 
np.zeros((n_samples, 1)) + oob_decision_function = np.zeros((n_samples, self.n_outputs)) # A random forest and an extremely random forest will be fitted for estimator_idx in range(self.n_estimators // 2): @@ -179,66 +254,101 @@ def fit_transform(self, X, y, sample_weight=None): # Set the OOB estimations and validation accuracy self.oob_decision_function_ = oob_decision_function / self.n_estimators - if self.is_classifier: - y_pred = np.argmax(oob_decision_function, axis=1) - self.val_acc_ = accuracy_score( - y, y_pred, sample_weight=sample_weight - ) - else: - y_pred = self.oob_decision_function_ - self.val_acc_ = mean_squared_error( - y, y_pred, sample_weight=sample_weight - ) + y_pred = np.argmax(oob_decision_function, axis=1) + self.val_performance_ = accuracy_score( + y, y_pred, sample_weight=sample_weight + ) X_aug = np.hstack(X_aug) return X_aug - def transform(self, X, is_classifier): - """ - Return the concatenated transformation results from all base - estimators.""" - n_samples, _ = X.shape - if is_classifier: - X_aug = np.zeros((n_samples, self.n_classes * self.n_estimators)) - else: - X_aug = np.zeros((n_samples, self.n_estimators)) - for idx, (key, estimator) in enumerate(self.estimators_.items()): - if self.verbose > 1: - msg = "{} - Evaluating estimator = {:<5} in layer = {}" - key = key.split("-")[-1] + "_" + str(key.split("-")[-2]) - print(msg.format(_utils.ctime(), key, self.layer_idx)) - if self.partial_mode: - # Load the estimator from the buffer - estimator = self.buffer.load_estimator(estimator) - if is_classifier: - left, right = self.n_classes * idx, self.n_classes * (idx + 1) - else: - left, right = idx, (idx + 1) - X_aug[:, left:right] += estimator.predict(X) +class RegressionCascadeLayer(BaseCascadeLayer, RegressorMixin): + """Implementation of the cascade forest layer for regression.""" - return X_aug + def __init__( + self, + layer_idx, + n_outputs, + criterion, + n_estimators=2, + n_trees=100, + max_depth=None, + min_samples_leaf=1, + backend="custom", + partial_mode=False, + buffer=None, + n_jobs=None, + random_state=None, + verbose=1, + ): + super().__init__( + layer_idx=layer_idx, + n_outputs=n_outputs, + criterion=criterion, + n_estimators=n_estimators, + n_trees=n_trees, + max_depth=max_depth, + min_samples_leaf=min_samples_leaf, + backend=backend, + partial_mode=partial_mode, + buffer=buffer, + n_jobs=n_jobs, + random_state=random_state, + verbose=verbose, + ) - def predict_full(self, X, is_classifier): - """Return the concatenated predictions from all base estimators.""" - n_samples, _ = X.shape - if is_classifier: - pred = np.zeros((n_samples, self.n_classes * self.n_estimators)) - else: - pred = np.zeros((n_samples, self.n_estimators)) - for idx, (key, estimator) in enumerate(self.estimators_.items()): - if self.verbose > 1: - msg = "{} - Evaluating estimator = {:<5} in layer = {}" - key = key.split("-")[-1] + "_" + str(key.split("-")[-2]) - print(msg.format(_utils.ctime(), key, self.layer_idx)) - if self.partial_mode: - # Load the estimator from the buffer - estimator = self.buffer.load_estimator(estimator) + def fit_transform(self, X, y, sample_weight=None): - if is_classifier: - left, right = self.n_classes * idx, self.n_classes * (idx + 1) - else: - left, right = idx, (idx + 1) - pred[:, left:right] += estimator.predict(X) + self._validate_params() + n_samples, self.n_features = X.shape - return pred + X_aug = [] + oob_decision_function = np.zeros((n_samples, self.n_outputs)) + + # A random forest and an extremely random forest will be fitted + 
for estimator_idx in range(self.n_estimators // 2): + X_aug_, _estimator = _build_estimator( + X, + y, + self.layer_idx, + estimator_idx, + "rf", + self._make_estimator(estimator_idx, "rf"), + oob_decision_function, + self.partial_mode, + self.buffer, + self.verbose, + sample_weight, + ) + X_aug.append(X_aug_) + key = "{}-{}-{}".format(self.layer_idx, estimator_idx, "rf") + self.estimators_.update({key: _estimator}) + + for estimator_idx in range(self.n_estimators // 2): + X_aug_, _estimator = _build_estimator( + X, + y, + self.layer_idx, + estimator_idx, + "erf", + self._make_estimator(estimator_idx, "erf"), + oob_decision_function, + self.partial_mode, + self.buffer, + self.verbose, + sample_weight, + ) + X_aug.append(X_aug_) + key = "{}-{}-{}".format(self.layer_idx, estimator_idx, "erf") + self.estimators_.update({key: _estimator}) + + # Set the OOB estimations and validation mean squared error + self.oob_decision_function_ = oob_decision_function / self.n_estimators + y_pred = self.oob_decision_function_ + self.val_performance_ = mean_squared_error( + y, y_pred, sample_weight=sample_weight + ) + + X_aug = np.hstack(X_aug) + return X_aug diff --git a/deepforest/cascade.py b/deepforest/cascade.py index 13f6cba..a1969d7 100644 --- a/deepforest/cascade.py +++ b/deepforest/cascade.py @@ -14,7 +14,7 @@ from . import _utils from . import _io -from ._layer import Layer +from ._layer import ClassificationCascadeLayer, RegressionCascadeLayer from ._binner import Binner @@ -415,8 +415,8 @@ def _build_regressor_predictor( X : :obj:`numpy.ndarray` of shape (n_samples, n_features) The training data. Internally, it will be converted to ``np.uint8``. - y : :obj:`numpy.ndarray` of shape (n_samples,) - The target of input samples. + y : :obj:`numpy.ndarray` of shape (n_samples,) or (n_samples, n_outputs) + The target values of input samples. sample_weight : :obj:`numpy.ndarray` of shape (n_samples,), default=None Sample weights. If ``None``, then samples are equally weighted. 
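
A sketch of the multi-output regression workflow this patch enables, using synthetic data and arbitrary settings (illustrative only)::

    from sklearn.datasets import make_regression
    from deepforest import CascadeForestRegressor

    X, y = make_regression(
        n_samples=500, n_features=20, n_targets=2, random_state=0
    )
    model = CascadeForestRegressor(n_estimators=2, n_trees=50, random_state=0)
    model.fit(X, y)               # y has shape (n_samples, 2)
    y_pred = model.predict(X)     # predictions keep the (n_samples, 2) shape
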
""" @@ -519,7 +519,16 @@ def _get_n_output(self, y): if is_classifier(self): n_output = np.unique(y).shape[0] # classification return n_output - return 1 # this parameter are not used in regression + return y.shape[1] if len(y.shape) > 1 else 1 # regression + + def _make_layer(self, **layer_args): + """Make and configure a cascade layer.""" + if is_classifier(self): + layer = ClassificationCascadeLayer(**layer_args) + else: + layer = RegressionCascadeLayer(**layer_args) + + return layer def _get_layer(self, layer_idx): """Get the layer from the internal container according to the index.""" @@ -684,10 +693,10 @@ def _handle_early_stopping(self): msg = "{} The optimal number of layers: {}" print(msg.format(_utils.ctime(), self.n_layers_)) - def _if_improved(self, new_pivot, pivot, delta, is_classifier): + def _if_improved(self, new_pivot, pivot, delta): """ Return true if new validation result is better than previous""" - if is_classifier: + if is_classifier(self): return new_pivot >= pivot + delta return new_pivot <= pivot - delta @@ -705,9 +714,7 @@ def predict(self, X): @property def n_aug_features_(self): - if is_classifier(self): - return 2 * self.n_estimators * self.n_outputs_ - return 2 * self.n_estimators + return 2 * self.n_estimators * self.n_outputs_ # flake8: noqa: E501 def fit(self, X, y, sample_weight=None): @@ -735,21 +742,20 @@ def fit(self, X, y, sample_weight=None): print("{} Start to fit the model:".format(_utils.ctime())) # Build the first cascade layer - layer_ = Layer( - 0, - self.n_outputs_, - self.criterion, - self.n_estimators, - self._set_n_trees(0), - self.max_depth, - self.min_samples_leaf, - self.backend, - self.partial_mode, - self.buffer_, - self.n_jobs, - self.random_state, - self.verbose, - is_classifier(self), + layer_ = self._make_layer( + layer_idx=0, + n_outputs=self.n_outputs_, + criterion=self.criterion, + n_estimators=self.n_estimators, + n_trees=self._set_n_trees(0), + max_depth=self.max_depth, + min_samples_leaf=self.min_samples_leaf, + backend=self.backend, + partial_mode=self.partial_mode, + buffer=self.buffer_, + n_jobs=self.n_jobs, + random_state=self.random_state, + verbose=self.verbose, ) if self.verbose > 0: @@ -763,7 +769,7 @@ def fit(self, X, y, sample_weight=None): training_time = toc - tic # Set the reference performance - pivot = layer_.val_acc_ + pivot = layer_.val_performance_ if self.verbose > 0: msg = "{} layer = {:<2} | {} | Elapsed = {:.3f} s" @@ -812,21 +818,20 @@ def fit(self, X, y, sample_weight=None): # Build a cascade layer layer_idx = self.n_layers_ - layer_ = Layer( - layer_idx, - self.n_outputs_, - self.criterion, - self.n_estimators, - self._set_n_trees(layer_idx), - self.max_depth, - self.min_samples_leaf, - self.backend, - self.partial_mode, - self.buffer_, - self.n_jobs, - self.random_state, - self.verbose, - is_classifier(self), + layer_ = self._make_layer( + layer_idx=layer_idx, + n_outputs=self.n_outputs_, + criterion=self.criterion, + n_estimators=self.n_estimators, + n_trees=self._set_n_trees(0), + max_depth=self.max_depth, + min_samples_leaf=self.min_samples_leaf, + backend=self.backend, + partial_mode=self.partial_mode, + buffer=self.buffer_, + n_jobs=self.n_jobs, + random_state=self.random_state, + verbose=self.verbose, ) X_middle_train_ = self.buffer_.cache_data( @@ -844,7 +849,7 @@ def fit(self, X, y, sample_weight=None): toc = time.time() training_time = toc - tic - new_pivot = layer_.val_acc_ + new_pivot = layer_.val_performance_ if self.verbose > 0: msg = "{} layer = {:<2} | {} | Elapsed = {:.3f} s" @@ 
-863,9 +868,7 @@ def fit(self, X, y, sample_weight=None): # training stage will terminate before reaching the maximum number # of layers. - if self._if_improved( - new_pivot, pivot, self.delta, is_classifier(self) - ): + if self._if_improved(new_pivot, pivot, self.delta): # Update the cascade layer self._set_layer(layer_idx, layer_) @@ -1086,6 +1089,7 @@ def save(self, dirname="model"): d["buffer"] = self.buffer_ d["verbose"] = self.verbose d["use_predictor"] = self.use_predictor + d["is_classifier"] = is_classifier(self) if self.use_predictor: d["predictor_name"] = self.predictor_name @@ -1291,7 +1295,7 @@ def predict_proba(self, X): print(msg.format(_utils.ctime(), layer_idx)) if layer_idx == 0: - X_aug_test_ = layer.transform(X_test, is_classifier(self)) + X_aug_test_ = layer.transform(X_test) elif layer_idx < self.n_layers_ - 1: binner_ = self._get_binner(layer_idx) X_aug_test_ = self._bin_data( @@ -1300,9 +1304,7 @@ def predict_proba(self, X): X_middle_test_ = _utils.merge_array( X_middle_test_, X_aug_test_, self.n_features_ ) - X_aug_test_ = layer.transform( - X_middle_test_, is_classifier(self) - ) + X_aug_test_ = layer.transform(X_middle_test_) else: binner_ = self._get_binner(layer_idx) X_aug_test_ = self._bin_data( @@ -1314,9 +1316,7 @@ def predict_proba(self, X): # Skip calling the `transform` if not using the predictor if self.use_predictor: - X_aug_test_ = layer.transform( - X_middle_test_, is_classifier(self) - ) + X_aug_test_ = layer.transform(X_middle_test_) if self.use_predictor: @@ -1334,7 +1334,7 @@ def predict_proba(self, X): predictor = self.buffer_.load_predictor(self.predictor_) proba = predictor.predict_proba(X_middle_test_) else: - proba = layer.predict_full(X_middle_test_, is_classifier(self)) + proba = layer.predict_full(X_middle_test_) proba = _utils.merge_proba(proba, self.n_outputs_) return proba @@ -1408,7 +1408,7 @@ def __init__( ) def _repr_performance(self, pivot): - msg = "Val Acc = {:.3f}" + msg = "Val MSE = {:.5f}" return msg.format(pivot) @deepforest_model_doc( @@ -1429,7 +1429,7 @@ def predict(self, X): Returns ------- - y : :obj:`numpy.ndarray` of shape (n_samples,) + y : :obj:`numpy.ndarray` of shape (n_samples,) or (n_samples, n_outputs) The predicted values. 
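
The early-stopping check above adds a new cascade layer only when the validation metric improves by at least ``delta`` (higher accuracy for classification, lower MSE for regression); a configuration sketch with illustrative values::

    from deepforest import CascadeForestRegressor

    model = CascadeForestRegressor(
        max_layers=10,          # hard cap on the number of cascade layers
        n_tolerant_rounds=2,    # rounds without improvement tolerated before stopping
        delta=1e-5,             # minimum change in validation MSE counted as improvement
    )
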
""" if not self.is_fitted_: @@ -1451,7 +1451,7 @@ def predict(self, X): print(msg.format(_utils.ctime(), layer_idx)) if layer_idx == 0: - X_aug_test_ = layer.transform(X_test, is_classifier(self)) + X_aug_test_ = layer.transform(X_test) elif layer_idx < self.n_layers_ - 1: binner_ = self._get_binner(layer_idx) X_aug_test_ = self._bin_data( @@ -1460,9 +1460,7 @@ def predict(self, X): X_middle_test_ = _utils.merge_array( X_middle_test_, X_aug_test_, self.n_features_ ) - X_aug_test_ = layer.transform( - X_middle_test_, is_classifier(self) - ) + X_aug_test_ = layer.transform(X_middle_test_) else: binner_ = self._get_binner(layer_idx) X_aug_test_ = self._bin_data( @@ -1474,9 +1472,7 @@ def predict(self, X): # Skip calling the `transform` if not using the predictor if self.use_predictor: - X_aug_test_ = layer.transform( - X_middle_test_, is_classifier(self) - ) + X_aug_test_ = layer.transform(X_middle_test_) if self.use_predictor: @@ -1494,6 +1490,6 @@ def predict(self, X): predictor = self.buffer_.load_predictor(self.predictor_) _y = predictor.predict(X_middle_test_) else: - _y = layer.predict_full(X_middle_test_, is_classifier(self)) - _y = _y.sum(axis=1) / _y.shape[1] + _y = layer.predict_full(X_middle_test_) + _y = _utils.merge_proba(_y, self.n_outputs_) return _y diff --git a/deepforest/forest.py b/deepforest/forest.py index 203bdb8..2b039c6 100644 --- a/deepforest/forest.py +++ b/deepforest/forest.py @@ -135,10 +135,12 @@ def _parallel_build_trees( if not value.flags["C_CONTIGUOUS"]: value = np.ascontiguousarray(value) - value = np.squeeze(value, axis=1) - if is_classifier: + value = np.squeeze(value, axis=1) value /= value.sum(axis=1)[:, np.newaxis] + else: + if len(value.shape) == 3: + value = np.squeeze(value, axis=2) # Set the OOB predictions oob_prediction = _C_FOREST.predict( @@ -454,7 +456,7 @@ def fit(self, X, y, sample_weight=None): (n_samples, self.classes_[0].shape[0]) ) else: - oob_decision_function = np.zeros((n_samples, 1)) + oob_decision_function = np.zeros((n_samples, self.n_outputs_)) mask = np.zeros(n_samples) lock = threading.Lock() @@ -790,7 +792,7 @@ def predict(self, X): n_jobs, _, _ = _partition_estimators(self.n_estimators, self.n_jobs) # avoid storing the output of every estimator by summing them here - y_hat = np.zeros((X.shape[0], 1), dtype=np.float64) + y_hat = np.zeros((X.shape[0], self.n_outputs_), dtype=np.float64) # Parallel loop lock = threading.Lock() diff --git a/deepforest/tree/tree.py b/deepforest/tree/tree.py index 971b1b1..a73fa12 100644 --- a/deepforest/tree/tree.py +++ b/deepforest/tree/tree.py @@ -429,14 +429,14 @@ def predict(self, X, check_input=True): """ check_is_fitted(self) X = self._validate_X_predict(X, check_input) - proba = self.tree_.predict(X) + pred = self.tree_.predict(X) # Classification if is_classifier(self): - return self.classes_.take(np.argmax(proba, axis=1), axis=0) + return self.classes_.take(np.argmax(pred, axis=1), axis=0) # Regression else: - return proba[:, 0] + return np.squeeze(pred) class DecisionTreeClassifier(ClassifierMixin, BaseDecisionTree): diff --git a/tests/test_layer_estimator.py b/tests/test_layer_estimator.py index e596689..f494fe0 100644 --- a/tests/test_layer_estimator.py +++ b/tests/test_layer_estimator.py @@ -1,6 +1,9 @@ import copy import pytest -from deepforest._layer import Layer +from deepforest._layer import ( + ClassificationCascadeLayer, + RegressionCascadeLayer, +) from deepforest._estimator import Estimator # Load utils @@ -21,7 +24,7 @@ # Parameters classifier_layer_kwargs = { "layer_idx": 0, - 
"n_classes": 10, + "n_outputs": 10, "criterion": "gini", "n_estimators": 1, "n_trees": 10, @@ -46,7 +49,7 @@ regressor_layer_kwargs = { "layer_idx": 0, - "n_classes": 1, + "n_outputs": 1, "criterion": "mse", "n_estimators": 1, "n_trees": 10, @@ -72,9 +75,9 @@ def test_classifier_layer_properties_after_fitting(): - layer = Layer(**classifier_layer_kwargs) + layer = ClassificationCascadeLayer(**classifier_layer_kwargs) X_aug = layer.fit_transform(X_train, y_train) - y_pred_full = layer.predict_full(X_test, is_classifier=True) + y_pred_full = layer.predict_full(X_test) # n_trees assert ( @@ -87,7 +90,7 @@ def test_classifier_layer_properties_after_fitting(): # Output dim expect_dim = ( 2 - * classifier_layer_kwargs["n_classes"] + * classifier_layer_kwargs["n_outputs"] * classifier_layer_kwargs["n_estimators"] ) assert X_aug.shape[1] == expect_dim @@ -103,10 +106,9 @@ def test_regressor_layer_properties_after_fitting(): X_train, X_test, y_train, y_test = train_test_split( X_binned, y, test_size=0.42, random_state=42 ) - layer = Layer(**regressor_layer_kwargs) - layer.is_classifier = False + layer = RegressionCascadeLayer(**regressor_layer_kwargs) X_aug = layer.fit_transform(X_train, y_train) - y_pred_full = layer.predict_full(X_test, is_classifier=False) + y_pred_full = layer.predict_full(X_test) # n_trees assert ( @@ -132,7 +134,7 @@ def test_layer_invalid_training_params(param, layer_kwargs): case_kwargs = copy.deepcopy(layer_kwargs) case_kwargs.update(param[1]) - layer = Layer(**case_kwargs) + layer = ClassificationCascadeLayer(**case_kwargs) if param[0] == 0: err_msg = "`n_estimators` = 0 should be strictly positive." diff --git a/tests/test_tree_same.py b/tests/test_tree_same.py index a7909bf..d0e0335 100644 --- a/tests/test_tree_same.py +++ b/tests/test_tree_same.py @@ -1,10 +1,11 @@ """ -Testing cases here make sure that the outputs of the reduced implementation -on `DecisionTreeClassifier` and `ExtraTreeClassifier` are exactly the same as -the original version in Scikit-Learn after the data binning. +Testing cases here make sure that predictions of the reduced implementation +on decision tree is exactly the same as the original version in Scikit-Learn +after data binning. 
""" import pytest +import numpy as np from numpy.testing import assert_array_equal from sklearn.tree import ( DecisionTreeClassifier as sklearn_DecisionTreeClassifier, @@ -19,7 +20,7 @@ from sklearn.model_selection import train_test_split from sklearn.ensemble._hist_gradient_boosting.binning import _BinMapper -# Toy classification datasets +# Toy datasets from sklearn.datasets import load_iris, load_wine, load_boston from deepforest import DecisionTreeClassifier @@ -137,3 +138,64 @@ def test_extra_tree_regressor_pred(load_func): expected_pred = model.predict(X_test_binned) assert_array_equal(actual_pred, expected_pred) + + +@pytest.mark.parametrize("load_func", [load_boston]) +def test_tree_regressor_multi_output_pred(load_func): + + X, y = load_func(return_X_y=True) + + # Generate pseudo multi output targets + y = np.expand_dims(y, axis=1) + y = np.concatenate((y, -y), axis=1) + + X_train, X_test, y_train, y_test = train_test_split( + X, y, test_size=test_size, random_state=random_state + ) + + # Data binning + binner = _BinMapper(random_state=random_state) + X_train_binned = binner.fit_transform(X_train) + X_test_binned = binner.transform(X_test) + + # Ours + model = DecisionTreeRegressor(random_state=random_state) + model.fit(X_train_binned, y_train) + actual_pred = model.predict(X_test_binned) + + # Sklearn + model = sklearn_DecisionTreeRegressor(random_state=random_state) + model.fit(X_train_binned, y_train) + expected_pred = model.predict(X_test_binned) + + assert_array_equal(actual_pred, expected_pred) + + +@pytest.mark.parametrize("load_func", [load_boston]) +def test_extra_tree_regressor_multi_output_pred(load_func): + X, y = load_func(return_X_y=True) + + # Generate pseudo multi output targets + y = np.expand_dims(y, axis=1) + y = np.concatenate((y, -y), axis=1) + + X_train, X_test, y_train, y_test = train_test_split( + X, y, test_size=test_size, random_state=random_state + ) + + # Data binning + binner = _BinMapper(random_state=random_state) + X_train_binned = binner.fit_transform(X_train) + X_test_binned = binner.transform(X_test) + + # Ours + model = ExtraTreeRegressor(random_state=random_state) + model.fit(X_train_binned, y_train) + actual_pred = model.predict(X_test_binned) + + # Sklearn + model = sklearn_ExtraTreeRegressor(random_state=random_state) + model.fit(X_train_binned, y_train) + expected_pred = model.predict(X_test_binned) + + assert_array_equal(actual_pred, expected_pred) From 64e35c400d600c72c3bfc4f8063bbacecee12c0c Mon Sep 17 00:00:00 2001 From: xuyxu Date: Tue, 23 Feb 2021 00:06:10 +0800 Subject: [PATCH 42/94] [MNT] Update version number --- setup.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/setup.py b/setup.py index 5198c33..f0f9b3c 100644 --- a/setup.py +++ b/setup.py @@ -12,7 +12,7 @@ MAINTAINER = "Yi-Xuan Xu" MAINTAINER_EMAIL = "xuyx@lamda.nju.edu.cn" URL = "https://github.com/LAMDA-NJU/Deep-Forest" -VERSION = "0.1.2" +VERSION = "0.1.3" def configuration(parent_package="", top_path=None): From 0a72ef0bf7e43452aac0f16a4b668f59675a32bf Mon Sep 17 00:00:00 2001 From: Yi-Xuan Xu Date: Tue, 23 Feb 2021 16:09:27 +0800 Subject: [PATCH 43/94] [FIX] Add target check for CascadeForestRegressor (#44) * add regression check * Update CHANGELOG.rst --- CHANGELOG.rst | 1 + deepforest/cascade.py | 22 ++++++++++++++++++++++ 2 files changed, 23 insertions(+) diff --git a/CHANGELOG.rst b/CHANGELOG.rst index 4c5610c..87dfe11 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -30,6 +30,7 @@ Version 0.1.* .. 
|Fix| replace:: :raw-html:`Fix` :raw-latex:`{\small\sc [Fix]}` .. |API| replace:: :raw-html:`API Change` :raw-latex:`{\small\sc [API Change]}` +- |Fix| fix accepted types of target for :obj:`CascadeForestRegressor` (`#44 `__) @xuyxu - |Feature| add multi-output support for :obj:`CascadeForestRegressor` (`#40 `__) @Alex-Medium - |Feature| add layer-wise feature importances (`#39 `__) @xuyxu - |Feature| add scikit-learn backend (`#36 `__) @xuyxu diff --git a/deepforest/cascade.py b/deepforest/cascade.py index a1969d7..d5b5938 100644 --- a/deepforest/cascade.py +++ b/deepforest/cascade.py @@ -1407,6 +1407,24 @@ def __init__( verbose=verbose, ) + # Used to deal with target values + self.type_of_target_ = None + + def _check_target_values(self, y): + """ + Check the input target values for regressor. + """ + self.type_of_target_ = type_of_target(y) + if self.type_of_target_ not in ( + "continuous", + "continuous-multioutput", + ): + msg = ( + "CascadeForestRegressor is used for univariate or multi-variate regression," + " but the target values seem not to be one of them." + ) + raise ValueError(msg) + def _repr_performance(self, pivot): msg = "Val MSE = {:.5f}" return msg.format(pivot) @@ -1415,6 +1433,10 @@ def _repr_performance(self, pivot): """Build a deep forest using the training data.""", "regressor_fit" ) def fit(self, X, y, sample_weight=None): + + # Check the input for regression + self._check_target_values(y) + super().fit(X, y, sample_weight) def predict(self, X): From 182fdaacddaf7dc9a033d9b66953f19ec421a93b Mon Sep 17 00:00:00 2001 From: Yi-Xuan Xu Date: Wed, 3 Mar 2021 14:11:09 +0800 Subject: [PATCH 44/94] [MNT] Add support for manylinux aarch64 (#47) * [MNT] Add manylinux aarch64 * update ci * add emulation for cibuildwheels * merge build-and-test ci * fix syntax error * remove redundant steps --- .github/workflows/build-and-test.yml | 5 ++- .github/workflows/build-wheels.yml | 16 ++++++--- .github/workflows/mac-os-build-and-test.yml | 39 --------------------- CHANGELOG.rst | 1 + 4 files changed, 16 insertions(+), 45 deletions(-) delete mode 100644 .github/workflows/mac-os-build-and-test.yml diff --git a/.github/workflows/build-and-test.yml b/.github/workflows/build-and-test.yml index df8e963..1e11b29 100644 --- a/.github/workflows/build-and-test.yml +++ b/.github/workflows/build-and-test.yml @@ -11,7 +11,7 @@ jobs: runs-on: ${{ matrix.os }} strategy: matrix: - os: [ubuntu-latest, windows-latest] + os: [ubuntu-latest, windows-latest, macos-latest] python-version: [3.6, 3.7, 3.8] steps: - uses: actions/checkout@v2 @@ -21,6 +21,9 @@ jobs: python-version: ${{ matrix.python-version }} - name: Display python version run: python -c "import sys; print(sys.version)" + - name: Install libomp for Mac-OS + if: runner.os == 'macOS' + run: brew install libomp - name: Install package dependencies run: | python -m pip install --upgrade pip diff --git a/.github/workflows/build-wheels.yml b/.github/workflows/build-wheels.yml index f60f191..5b720c2 100644 --- a/.github/workflows/build-wheels.yml +++ b/.github/workflows/build-wheels.yml @@ -13,19 +13,25 @@ jobs: strategy: matrix: os: [ubuntu-latest, windows-latest, macos-latest] - python-version: [3.6, 3.7, 3.8] steps: - uses: actions/checkout@v2 - - name: Set up Python - uses: actions/setup-python@v2 + + - name: Set up QEMU + if: runner.os == 'Linux' + uses: docker/setup-qemu-action@v1 with: - python-version: ${{ matrix.python-version }} + platforms: all + - name: Build wheels uses: joerick/cibuildwheel@v1.9.0 with: output-dir: wheelhouse env: - 
CIBW_BUILD: "cp36-manylinux_x86_64 cp37-manylinux_x86_64 cp38-manylinux_x86_64 cp36-win_amd64 cp37-win_amd64 cp38-win_amd64 cp36-macosx_x86_64 cp37-macosx_x86_64 cp38-macosx_x86_64" + CIBW_ARCHS_LINUX: "x86_64 aarch64" + CIBW_ARCHS_WINDOWS: "AMD64" + CIBW_ARCHS_MACOS: "x86_64" + CIBW_BUILD: "cp36-manylinux_x86_64 cp37-manylinux_x86_64 cp38-manylinux_x86_64 cp36-win_amd64 cp37-win_amd64 cp38-win_amd64 cp36-macosx_x86_64 cp37-macosx_x86_64 cp38-macosx_x86_64 cp36-manylinux_aarch64 cp37-manylinux_aarch64 cp38-manylinux_aarch64" + - name: Store artifacts uses: actions/upload-artifact@v2 with: diff --git a/.github/workflows/mac-os-build-and-test.yml b/.github/workflows/mac-os-build-and-test.yml deleted file mode 100644 index 3914850..0000000 --- a/.github/workflows/mac-os-build-and-test.yml +++ /dev/null @@ -1,39 +0,0 @@ -name: DeepForest-CI-MacOS - -on: - push: - branches: [ master ] - pull_request: - branches: [ master ] - -jobs: - build: - runs-on: ${{ matrix.os }} - strategy: - matrix: - os: [macos-latest] - python-version: [3.6, 3.7, 3.8] - steps: - - uses: actions/checkout@v2 - - name: Set up Python - uses: actions/setup-python@v2 - with: - python-version: ${{ matrix.python-version }} - - name: Display python version - run: python -c "import sys; print(sys.version)" - - name: Install OS packages - run: brew install libomp - - name: Install package dependencies - run: | - python -m pip install --upgrade pip - pip install -r build_tools/requirements.txt - - name: Install - run: pip install --verbose --editable . - - name: Run tests - run: | - pytest ./tests --cov-config=.coveragerc --cov-report=xml --cov=deepforest deepforest - - name: Publish code coverage - uses: codecov/codecov-action@v1 - with: - token: ${{ secrets.CODECOV_TOKEN }} - file: ./coverage.xml diff --git a/CHANGELOG.rst b/CHANGELOG.rst index 87dfe11..387dfc6 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -30,6 +30,7 @@ Version 0.1.* .. |Fix| replace:: :raw-html:`Fix` :raw-latex:`{\small\sc [Fix]}` .. |API| replace:: :raw-html:`API Change` :raw-latex:`{\small\sc [API Change]}` +- |Feature| add official support for ManyLinux-aarch64 (`#47 `__) @xuyxu - |Fix| fix accepted types of target for :obj:`CascadeForestRegressor` (`#44 `__) @xuyxu - |Feature| add multi-output support for :obj:`CascadeForestRegressor` (`#40 `__) @Alex-Medium - |Feature| add layer-wise feature importances (`#39 `__) @xuyxu From f5cba16dccbf1693a3b7f9b5e7b9a41ab6a0c32f Mon Sep 17 00:00:00 2001 From: xuyxu Date: Thu, 4 Mar 2021 09:32:29 +0800 Subject: [PATCH 45/94] [FIX] Fix RTD badge --- README.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.rst b/README.rst index 27b7c36..e423679 100644 --- a/README.rst +++ b/README.rst @@ -6,7 +6,7 @@ Deep Forest (DF) 21 .. |github| image:: https://github.com/LAMDA-NJU/Deep-Forest/workflows/DeepForest-CI/badge.svg .. _github: https://github.com/LAMDA-NJU/Deep-Forest/actions -.. |readthedocs| image:: https://readthedocs.org/projects/deep-forest/badge/?version=master +.. |readthedocs| image:: https://readthedocs.org/projects/deep-forest/badge/?version=latest .. _readthedocs: https://deep-forest.readthedocs.io .. 
|codecov| image:: https://codecov.io/gh/LAMDA-NJU/Deep-Forest/branch/master/graph/badge.svg?token=5BVXOT8RPO From 9bd41e500f0b6e3cc4f7b7a96bed343824ffad8d Mon Sep 17 00:00:00 2001 From: xuyxu Date: Thu, 4 Mar 2021 11:00:32 +0800 Subject: [PATCH 46/94] [MNT] Update Numpy version (#49) --- pyproject.toml | 2 +- requirements.txt | 2 +- setup.py | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index c206313..80498c5 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -3,7 +3,7 @@ requires = [ "setuptools>=42", "wheel", "Cython>=0.28.5", - "numpy>=1.13.3,<1.20.0", + "numpy>=1.16.0,<1.20.0", "scipy>=0.19.1" ] [tool.black] diff --git a/requirements.txt b/requirements.txt index c292f16..74fb246 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,4 +1,4 @@ -numpy>=1.13.3,<1.20.0 +numpy>=1.16.0,<1.20.0 scipy>=0.19.1 joblib>=0.11 scikit-learn>=0.22 \ No newline at end of file diff --git a/setup.py b/setup.py index f0f9b3c..8575863 100644 --- a/setup.py +++ b/setup.py @@ -64,7 +64,7 @@ def configuration(parent_package="", top_path=None): ], python_requires=">=3.6", install_requires=[ - "numpy>=1.13.3,<1.20.0", + "numpy>=1.16.0,<1.20.0", "scipy>=0.19.1", "joblib>=0.11", "scikit-learn>=0.22", From 8c6e81bcbf1464d193b36c78038699cf0973d337 Mon Sep 17 00:00:00 2001 From: "allcontributors[bot]" <46447321+allcontributors[bot]@users.noreply.github.com> Date: Fri, 5 Mar 2021 12:52:09 +0800 Subject: [PATCH 47/94] [DOC] Update contributors * docs: update CONTRIBUTORS.md [skip ci] * docs: update .all-contributorsrc [skip ci] Co-authored-by: allcontributors[bot] <46447321+allcontributors[bot]@users.noreply.github.com> --- .all-contributorsrc | 9 +++++++++ CONTRIBUTORS.md | 1 + 2 files changed, 10 insertions(+) diff --git a/.all-contributorsrc b/.all-contributorsrc index c980284..5ccb93d 100644 --- a/.all-contributorsrc +++ b/.all-contributorsrc @@ -97,6 +97,15 @@ "code", "test" ] + }, + { + "login": "chendingyan", + "name": "陈鼎彦", + "avatar_url": "https://avatars.githubusercontent.com/u/16874978?v=4", + "profile": "https://github.com/chendingyan", + "contributions": [ + "bug" + ] } ], "contributorsPerLine": 7, diff --git a/CONTRIBUTORS.md b/CONTRIBUTORS.md index 0f51e94..1a108fd 100644 --- a/CONTRIBUTORS.md +++ b/CONTRIBUTORS.md @@ -19,6 +19,7 @@ Thanks goes to these wonderful people ([emoji key](https://allcontributors.org/d
tczhao 💻 📖 ⚠️
zhenlingcn 🐛
+ 陈鼎彦
🐛 From b9033569a879514fe3cb542361d51ce881fff894 Mon Sep 17 00:00:00 2001 From: Yi-Xuan Xu Date: Fri, 5 Mar 2021 13:50:49 +0800 Subject: [PATCH 48/94] [FIX] Fix inconsistency on predictor name (#52) * fix predictor name * Update CHANGELOG.rst --- CHANGELOG.rst | 1 + deepforest/cascade.py | 10 +++++----- 2 files changed, 6 insertions(+), 5 deletions(-) diff --git a/CHANGELOG.rst b/CHANGELOG.rst index 387dfc6..17dd86e 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -30,6 +30,7 @@ Version 0.1.* .. |Fix| replace:: :raw-html:`Fix` :raw-latex:`{\small\sc [Fix]}` .. |API| replace:: :raw-html:`API Change` :raw-latex:`{\small\sc [API Change]}` +- |Fix| fix inconsistency on predictor name (`#52 `__) @xuyxu - |Feature| add official support for ManyLinux-aarch64 (`#47 `__) @xuyxu - |Fix| fix accepted types of target for :obj:`CascadeForestRegressor` (`#44 `__) @xuyxu - |Feature| add multi-output support for :obj:`CascadeForestRegressor` (`#40 `__) @Alex-Medium diff --git a/deepforest/cascade.py b/deepforest/cascade.py index d5b5938..89aa29b 100644 --- a/deepforest/cascade.py +++ b/deepforest/cascade.py @@ -506,7 +506,7 @@ def __init__( # Predictor self.use_predictor = use_predictor - self.predictor_name = predictor + self.predictor = predictor def __len__(self): return self.n_layers_ @@ -915,7 +915,7 @@ def fit(self, X, y, sample_weight=None): if self.use_predictor: if is_classifier(self): self.predictor_ = _build_classifier_predictor( - self.predictor_name, + self.predictor, self.criterion, self.n_trees, self.n_outputs_, @@ -927,7 +927,7 @@ def fit(self, X, y, sample_weight=None): ) else: self.predictor_ = _build_regressor_predictor( - self.predictor_name, + self.predictor, self.criterion, self.n_trees, self.n_outputs_, @@ -955,7 +955,7 @@ def fit(self, X, y, sample_weight=None): if self.verbose > 0: msg = "{} Fitting the concatenated predictor: {}" - print(msg.format(_utils.ctime(), self.predictor_name)) + print(msg.format(_utils.ctime(), self.predictor)) tic = time.time() self.predictor_.fit( @@ -1092,7 +1092,7 @@ def save(self, dirname="model"): d["is_classifier"] = is_classifier(self) if self.use_predictor: - d["predictor_name"] = self.predictor_name + d["predictor"] = self.predictor # Save label encoder if labels are encoded. if hasattr(self, "labels_are_encoded"): From b45ac61660669a35652c1f8a0835d1a2f231d2d7 Mon Sep 17 00:00:00 2001 From: Yi-Xuan Xu Date: Sun, 7 Mar 2021 13:40:33 +0800 Subject: [PATCH 49/94] [FIX] Fix prediction workflow with one cascade layer (#56) * [FIX] Fix prediction workflow with one cascade layer * Update CHANGELOG.rst --- CHANGELOG.rst | 1 + deepforest/_layer.py | 20 ++------------------ deepforest/cascade.py | 17 +++++++++++++---- 3 files changed, 16 insertions(+), 22 deletions(-) diff --git a/CHANGELOG.rst b/CHANGELOG.rst index 17dd86e..8aae007 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -30,6 +30,7 @@ Version 0.1.* .. |Fix| replace:: :raw-html:`Fix` :raw-latex:`{\small\sc [Fix]}` .. 
|API| replace:: :raw-html:`API Change` :raw-latex:`{\small\sc [API Change]}` +- |Fix| fix the prediction workflow with only one cascade layer (`#56 `__) @xuyxu - |Fix| fix inconsistency on predictor name (`#52 `__) @xuyxu - |Feature| add official support for ManyLinux-aarch64 (`#47 `__) @xuyxu - |Fix| fix accepted types of target for :obj:`CascadeForestRegressor` (`#44 `__) @xuyxu diff --git a/deepforest/_layer.py b/deepforest/_layer.py index 605a25e..1b7ad67 100644 --- a/deepforest/_layer.py +++ b/deepforest/_layer.py @@ -134,24 +134,8 @@ def _validate_params(self): raise ValueError(msg.format(self.n_trees)) def transform(self, X): - """ - Return the concatenated transformation results from all base - estimators.""" - n_samples, _ = X.shape - X_aug = np.zeros((n_samples, self.n_outputs * self.n_estimators)) - for idx, (key, estimator) in enumerate(self.estimators_.items()): - if self.verbose > 1: - msg = "{} - Evaluating estimator = {:<5} in layer = {}" - key = key.split("-")[-1] + "_" + str(key.split("-")[-2]) - print(msg.format(_utils.ctime(), key, self.layer_idx)) - if self.partial_mode: - # Load the estimator from the buffer - estimator = self.buffer.load_estimator(estimator) - - left, right = self.n_outputs * idx, self.n_outputs * (idx + 1) - X_aug[:, left:right] += estimator.predict(X) - - return X_aug + """Preserved for the naming consistency.""" + return self.predict_full(X) def predict_full(self, X): """Return the concatenated predictions from all base estimators.""" diff --git a/deepforest/cascade.py b/deepforest/cascade.py index 89aa29b..e427786 100644 --- a/deepforest/cascade.py +++ b/deepforest/cascade.py @@ -1334,8 +1334,12 @@ def predict_proba(self, X): predictor = self.buffer_.load_predictor(self.predictor_) proba = predictor.predict_proba(X_middle_test_) else: - proba = layer.predict_full(X_middle_test_) - proba = _utils.merge_proba(proba, self.n_outputs_) + if self.n_layers_ > 1: + proba = layer.predict_full(X_middle_test_) + proba = _utils.merge_proba(proba, self.n_outputs_) + else: + # Directly merge results with one cascade layer only + proba = _utils.merge_proba(X_aug_test_, self.n_outputs_) return proba @@ -1512,6 +1516,11 @@ def predict(self, X): predictor = self.buffer_.load_predictor(self.predictor_) _y = predictor.predict(X_middle_test_) else: - _y = layer.predict_full(X_middle_test_) - _y = _utils.merge_proba(_y, self.n_outputs_) + if self.n_layers_ > 1: + _y = layer.predict_full(X_middle_test_) + _y = _utils.merge_proba(_y, self.n_outputs_) + else: + # Directly merge results with one cascade layer only + _y = _utils.merge_proba(X_aug_test_, self.n_outputs_) + return _y From 4fc1a9ff402a8ee7330900c7b0684e1a5de584ec Mon Sep 17 00:00:00 2001 From: "allcontributors[bot]" <46447321+allcontributors[bot]@users.noreply.github.com> Date: Mon, 8 Mar 2021 10:59:46 +0800 Subject: [PATCH 50/94] [DOC] Update contributors * docs: update CONTRIBUTORS.md [skip ci] * docs: update .all-contributorsrc [skip ci] Co-authored-by: allcontributors[bot] <46447321+allcontributors[bot]@users.noreply.github.com> --- .all-contributorsrc | 10 ++++++++++ CONTRIBUTORS.md | 3 ++- 2 files changed, 12 insertions(+), 1 deletion(-) diff --git a/.all-contributorsrc b/.all-contributorsrc index 5ccb93d..7483503 100644 --- a/.all-contributorsrc +++ b/.all-contributorsrc @@ -106,6 +106,16 @@ "contributions": [ "bug" ] + }, + { + "login": "zzzzwj", + "name": "Wenjie Zhang", + "avatar_url": "https://avatars.githubusercontent.com/u/23235538?v=4", + "profile": "https://github.com/zzzzwj", + 
"contributions": [ + "code", + "test" + ] } ], "contributorsPerLine": 7, diff --git a/CONTRIBUTORS.md b/CONTRIBUTORS.md index 1a108fd..ea13a43 100644 --- a/CONTRIBUTORS.md +++ b/CONTRIBUTORS.md @@ -14,9 +14,10 @@ Thanks goes to these wonderful people ([emoji key](https://allcontributors.org/d
Mr-memorandum

🐛
NiMaZi

💻 ⚠️
T-Allen-sudo

🚧 ⚠️ -
Yi-Xuan Xu

💻 📖 ⚠️ +
Wenjie Zhang

💻 ⚠️ +
Yi-Xuan Xu

💻 📖 ⚠️
tczhao

💻 📖 ⚠️
zhenlingcn

🐛
陈鼎彦

🐛 From 165e5d5e013e832f8cbe3a53e66d493cab4ffe7d Mon Sep 17 00:00:00 2001 From: "allcontributors[bot]" <46447321+allcontributors[bot]@users.noreply.github.com> Date: Mon, 8 Mar 2021 11:04:50 +0800 Subject: [PATCH 51/94] [DOC] Update contributors * docs: update CONTRIBUTORS.md [skip ci] * docs: update .all-contributorsrc [skip ci] Co-authored-by: allcontributors[bot] <46447321+allcontributors[bot]@users.noreply.github.com> --- .all-contributorsrc | 3 ++- CONTRIBUTORS.md | 2 +- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/.all-contributorsrc b/.all-contributorsrc index 7483503..73fcb0a 100644 --- a/.all-contributorsrc +++ b/.all-contributorsrc @@ -104,7 +104,8 @@ "avatar_url": "https://avatars.githubusercontent.com/u/16874978?v=4", "profile": "https://github.com/chendingyan", "contributions": [ - "bug" + "bug", + "code" ] }, { diff --git a/CONTRIBUTORS.md b/CONTRIBUTORS.md index ea13a43..9e5df2b 100644 --- a/CONTRIBUTORS.md +++ b/CONTRIBUTORS.md @@ -20,7 +20,7 @@ Thanks goes to these wonderful people ([emoji key](https://allcontributors.org/d
Yi-Xuan Xu

💻 📖 ⚠️
tczhao

💻 📖 ⚠️
zhenlingcn

🐛 -
陈鼎彦

🐛 +
陈鼎彦

🐛 💻 From 5d97efd075c57e9b61bff9222e37b57b4828c67e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E9=99=88=E9=BC=8E=E5=BD=A6?= Date: Mon, 8 Mar 2021 16:05:42 +0800 Subject: [PATCH 52/94] [FIX] Fix regressor y value check (#53) * [FIX] Fix regressor y value check * [FIX] Revert Code Quality * [FIX] Fix Logic * [FIX] Reformat to pass code quality check * refactor some code to pass ci * Update CHANGELOG.rst --- CHANGELOG.rst | 1 + deepforest/cascade.py | 27 ++++++++++++++++++++++----- 2 files changed, 23 insertions(+), 5 deletions(-) diff --git a/CHANGELOG.rst b/CHANGELOG.rst index 8aae007..7c82c20 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -30,6 +30,7 @@ Version 0.1.* .. |Fix| replace:: :raw-html:`Fix` :raw-latex:`{\small\sc [Fix]}` .. |API| replace:: :raw-html:`API Change` :raw-latex:`{\small\sc [API Change]}` +- |Enhancement| improve target checks for :obj:`CascadeForestRegressor` (`#53 `__) @chendingyan - |Fix| fix the prediction workflow with only one cascade layer (`#56 `__) @xuyxu - |Fix| fix inconsistency on predictor name (`#52 `__) @xuyxu - |Feature| add official support for ManyLinux-aarch64 (`#47 `__) @xuyxu diff --git a/deepforest/cascade.py b/deepforest/cascade.py index e427786..4da19b4 100644 --- a/deepforest/cascade.py +++ b/deepforest/cascade.py @@ -1415,20 +1415,37 @@ def __init__( self.type_of_target_ = None def _check_target_values(self, y): - """ - Check the input target values for regressor. - """ + """Check the input target values for regressor.""" self.type_of_target_ = type_of_target(y) + + if not self._check_array_numeric(y): + msg = ( + "CascadeForestRegressor only accepts numeric values as" + " valid target values." + ) + raise ValueError(msg) + if self.type_of_target_ not in ( "continuous", "continuous-multioutput", + "multiclass", + "multiclass-multioutput", ): msg = ( - "CascadeForestRegressor is used for univariate or multi-variate regression," - " but the target values seem not to be one of them." + "CascadeForestRegressor is used for univariate or" + " multi-variate regression, but the target values seem not" + " to be one of them." 
) raise ValueError(msg) + def _check_array_numeric(self, y): + """Check the input numpy array y is all numeric.""" + numeric_types = np.typecodes['AllInteger'] + np.typecodes["AllFloat"] + if y.dtype.kind in numeric_types: + return True + else: + return False + def _repr_performance(self, pivot): msg = "Val MSE = {:.5f}" return msg.format(pivot) From 4947d960863a52010df775c0def5cdec26662375 Mon Sep 17 00:00:00 2001 From: xuyxu Date: Mon, 8 Mar 2021 16:09:22 +0800 Subject: [PATCH 53/94] [ENH] Improve unit test efficiency --- tests/test_model_input.py | 24 ++++++++++++++++++++++-- 1 file changed, 22 insertions(+), 2 deletions(-) diff --git a/tests/test_model_input.py b/tests/test_model_input.py index 0b516a2..4d9fac0 100644 --- a/tests/test_model_input.py +++ b/tests/test_model_input.py @@ -5,6 +5,26 @@ from deepforest import CascadeForestClassifier +toy_kwargs = { + "n_bins": 10, + "bin_subsample": 2e5, + "max_layers": 10, + "n_estimators": 1, + "criterion": "gini", + "n_trees": 100, + "max_depth": 3, + "min_samples_leaf": 1, + "use_predictor": True, + "predictor": "forest", + "predictor_kwargs": {}, + "n_tolerant_rounds": 2, + "delta": 1e-5, + "n_jobs": -1, + "random_state": 0, + "verbose": 2, +} + + def test_model_input_label_encoder(): """Test if the model behaves the same with and without label encoding.""" @@ -13,12 +33,12 @@ def test_model_input_label_encoder(): y_as_str = np.char.add("label_", y.astype(str)) # Train model on integer labels. Labels should look like: 1, 2, 3, ... - model = CascadeForestClassifier(random_state=1) + model = CascadeForestClassifier(**toy_kwargs) model.fit(X, y) y_pred_int_labels = model.predict(X) # Train model on string labels. Labels should look like: "label_1", "label_2", "label_3", ... - model = CascadeForestClassifier(random_state=1) + model = CascadeForestClassifier(**toy_kwargs) model.fit(X, y_as_str) y_pred_str_labels = model.predict(X) From 7777f99f5ac2c6635f0558c56b92309aa5d9d89d Mon Sep 17 00:00:00 2001 From: Yi-Xuan Xu Date: Thu, 11 Mar 2021 15:34:52 +0800 Subject: [PATCH 54/94] [ENH] Support customized estimator and predictor (#48) * Update CHANGELOG.rst * add kfold wrapper * add customized cascade layer * add `set_estimators` * update unit tests * improve coverage * black formatting * add set_predictor * update unit tests * update documentation * Update kfoldwrapper.py * black formatting * fix unit tests * improve doc * improve backward compatibility * update unit tests * improve documentation * fix log --- CHANGELOG.rst | 1 + deepforest/_io.py | 81 +++++-- deepforest/_layer.py | 108 ++++++++- deepforest/cascade.py | 219 ++++++++++++++---- deepforest/utils/__init__.py | 0 deepforest/utils/kfoldwrapper.py | 99 ++++++++ .../use_customized_estimator.rst | 76 ++++++ docs/index.rst | 6 + tests/test_model_classifier.py | 10 +- tests/test_set_custom_estimator.py | 151 ++++++++++++ 10 files changed, 679 insertions(+), 72 deletions(-) create mode 100644 deepforest/utils/__init__.py create mode 100644 deepforest/utils/kfoldwrapper.py create mode 100644 docs/advanced_topics/use_customized_estimator.rst create mode 100644 tests/test_set_custom_estimator.py diff --git a/CHANGELOG.rst b/CHANGELOG.rst index 7c82c20..a95343c 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -30,6 +30,7 @@ Version 0.1.* .. |Fix| replace:: :raw-html:`Fix` :raw-latex:`{\small\sc [Fix]}` .. 
|API| replace:: :raw-html:`API Change` :raw-latex:`{\small\sc [API Change]}` +- |Feature| |API| add support on customized estimators (`#48 `__) @xuyxu - |Enhancement| improve target checks for :obj:`CascadeForestRegressor` (`#53 `__) @chendingyan - |Fix| fix the prediction workflow with only one cascade layer (`#56 `__) @xuyxu - |Fix| fix inconsistency on predictor name (`#52 `__) @xuyxu diff --git a/deepforest/_io.py b/deepforest/_io.py index 6654776..c840f43 100644 --- a/deepforest/_io.py +++ b/deepforest/_io.py @@ -140,7 +140,7 @@ def cache_estimator(self, layer_idx, est_idx, est_name, est): cached. est_idx : int The index of the estimator in the cascade layer to be cached. - est_name : {"rf", "erf"} + est_name : {"rf", "erf", "custom"} The name of the estimator to be cached. est : object The object of base estimator. @@ -303,7 +303,11 @@ def model_loadobj(dirname, obj_type, d=None): obj = load(os.path.join(dirname, "{}.pkl".format(obj_type))) return obj elif obj_type == "layer": - from ._layer import ClassificationCascadeLayer, RegressionCascadeLayer + from ._layer import ( + ClassificationCascadeLayer, + RegressionCascadeLayer, + CustomCascadeLayer, + ) if not isinstance(d, dict): msg = "Loading layers requires the dict from `param.pkl`." @@ -315,31 +319,62 @@ def model_loadobj(dirname, obj_type, d=None): for layer_idx in range(n_layers): - # Build a temporary layer - if d["is_classifier"]: - layer_ = ClassificationCascadeLayer( - layer_idx=layer_idx, - n_outputs=d["n_outputs"], - criterion=d["criterion"], - n_estimators=d["n_estimators"], - partial_mode=d["partial_mode"], - buffer=d["buffer"], - verbose=d["verbose"], - ) + if not d["use_custom_estimator"]: + if d["is_classifier"]: + layer_ = ClassificationCascadeLayer( + layer_idx=layer_idx, + n_outputs=d["n_outputs"], + criterion=d["criterion"], + n_estimators=d["n_estimators"], + partial_mode=d["partial_mode"], + buffer=d["buffer"], + verbose=d["verbose"], + ) + else: + layer_ = RegressionCascadeLayer( + layer_idx=layer_idx, + n_outputs=d["n_outputs"], + criterion=d["criterion"], + n_estimators=d["n_estimators"], + partial_mode=d["partial_mode"], + buffer=d["buffer"], + verbose=d["verbose"], + ) + + for est_type in ("rf", "erf"): + for est_idx in range(n_estimators): + est_key = "{}-{}-{}".format( + layer_idx, est_idx, est_type + ) + dest = os.path.join( + dirname, "estimator", est_key + ".est" + ) + + if not os.path.isfile(dest): + msg = "Missing estimator in the path: {}." 
+ raise RuntimeError(msg.format(dest)) + + if d["partial_mode"]: + layer_.estimators_.update( + {est_key: os.path.abspath(dest)} + ) + else: + est = load(dest) + layer_.estimators_.update({est_key: est}) else: - layer_ = RegressionCascadeLayer( + + layer_ = CustomCascadeLayer( layer_idx=layer_idx, + n_splits=1, # will not be used n_outputs=d["n_outputs"], - criterion=d["criterion"], - n_estimators=d["n_estimators"], + estimators=[None] * n_estimators, # will not be used partial_mode=d["partial_mode"], buffer=d["buffer"], verbose=d["verbose"], ) - for est_type in ("rf", "erf"): for est_idx in range(n_estimators): - est_key = "{}-{}-{}".format(layer_idx, est_idx, est_type) + est_key = "{}-{}-custom".format(layer_idx, est_idx) dest = os.path.join(dirname, "estimator", est_key + ".est") if not os.path.isfile(dest): @@ -347,9 +382,7 @@ def model_loadobj(dirname, obj_type, d=None): raise RuntimeError(msg.format(dest)) if d["partial_mode"]: - layer_.estimators_.update( - {est_key: os.path.abspath(dest)} - ) + layer_.estimators_.update({est_key: dest}) else: est = load(dest) layer_.estimators_.update({est_key: est}) @@ -366,13 +399,13 @@ def model_loadobj(dirname, obj_type, d=None): pred_path = os.path.join(dirname, "estimator", "predictor.est") if not os.path.isfile(pred_path): - msg = "Missing classifier in the path: {}." + msg = "Missing predictor in the path: {}." raise RuntimeError(msg.format(pred_path)) if d["partial_mode"]: return os.path.abspath(pred_path) else: - clf = load(pred_path) - return clf + predictor = load(pred_path) + return predictor else: raise ValueError("Unknown object type: {}.".format(obj_type)) diff --git a/deepforest/_layer.py b/deepforest/_layer.py index 1b7ad67..036c6fd 100644 --- a/deepforest/_layer.py +++ b/deepforest/_layer.py @@ -1,10 +1,11 @@ -"""Implementation of the forest-based cascade layer.""" +"""Implementation of the cascade layer in deep forest.""" __all__ = [ "BaseCascadeLayer", "ClassificationCascadeLayer", "RegressionCascadeLayer", + "CustomCascadeLayer", ] import numpy as np @@ -14,6 +15,7 @@ from . 
import _utils from ._estimator import Estimator +from .utils.kfoldwrapper import KFoldWrapper def _build_estimator( @@ -336,3 +338,107 @@ def fit_transform(self, X, y, sample_weight=None): X_aug = np.hstack(X_aug) return X_aug + + +class CustomCascadeLayer(object): + """Implementation of the cascade layer for customized base estimators.""" + + def __init__( + self, + layer_idx, + n_splits, + n_outputs, + estimators, + partial_mode=False, + buffer=None, + random_state=None, + verbose=1, + is_classifier=True, + ): + self.layer_idx = layer_idx + self.n_splits = n_splits + self.n_outputs = n_outputs + self.n_estimators = len(estimators) + self.dummy_estimators_ = estimators + self.partial_mode = partial_mode + self.buffer = buffer + self.random_state = random_state + self.verbose = verbose + self.is_classifier = is_classifier + # Internal container + self.estimators_ = {} + + def fit_transform(self, X, y, sample_weight=None): + n_samples, _ = X.shape + X_aug = [] + + # Parameters were already validated by upstream methods + for estimator_idx, estimator in enumerate(self.dummy_estimators_): + kfold_estimator = KFoldWrapper( + estimator, + self.n_splits, + self.n_outputs, + self.random_state, + self.verbose, + self.is_classifier, + ) + + if self.verbose > 1: + msg = "{} - Fitting estimator = custom_{} in layer = {}" + print( + msg.format(_utils.ctime(), estimator_idx, self.layer_idx) + ) + + kfold_estimator.fit_transform(X, y, sample_weight) + X_aug.append(kfold_estimator.oob_decision_function_) + key = "{}-{}-custom".format(self.layer_idx, estimator_idx) + + if self.partial_mode: + # Cache the fitted estimator in out-of-core mode + buffer_path = self.buffer.cache_estimator( + self.layer_idx, estimator_idx, "custom", kfold_estimator + ) + self.estimators_.update({key: buffer_path}) + else: + self.estimators_.update({key: kfold_estimator}) + + # Set the OOB estimations and validation performance + oob_decision_function = np.zeros_like(X_aug[0]) + for estimator_oob_decision_function in X_aug: + oob_decision_function += ( + estimator_oob_decision_function / self.n_estimators + ) + + if self.is_classifier: # classification + y_pred = np.argmax(oob_decision_function, axis=1) + self.val_performance_ = accuracy_score( + y, y_pred, sample_weight=sample_weight + ) + else: # regression + self.val_performance_ = mean_squared_error( + y, y_pred, sample_weight=sample_weight + ) + + X_aug = np.hstack(X_aug) + return X_aug + + def transform(self, X): + """Preserved for the naming consistency.""" + return self.predict_full(X) + + def predict_full(self, X): + """Return the concatenated predictions from all base estimators.""" + n_samples, _ = X.shape + pred = np.zeros((n_samples, self.n_outputs * self.n_estimators)) + for idx, (key, estimator) in enumerate(self.estimators_.items()): + if self.verbose > 1: + msg = "{} - Evaluating estimator = custom_{} in layer = {}" + print(msg.format(_utils.ctime(), idx, self.layer_idx)) + if self.partial_mode: + # Load the estimator from the buffer + estimator = self.buffer.load_estimator(estimator) + + left, right = self.n_outputs * idx, self.n_outputs * (idx + 1) + pred[:, left:right] += estimator.predict(X) + + return pred diff --git a/deepforest/cascade.py b/deepforest/cascade.py index 4da19b4..6e8a44d 100644 --- a/deepforest/cascade.py +++ b/deepforest/cascade.py @@ -14,7 +14,11 @@ from . import _utils from . 
import _io -from ._layer import ClassificationCascadeLayer, RegressionCascadeLayer +from ._layer import ( + ClassificationCascadeLayer, + RegressionCascadeLayer, + CustomCascadeLayer, +) from ._binner import Binner @@ -523,10 +527,25 @@ def _get_n_output(self, y): def _make_layer(self, **layer_args): """Make and configure a cascade layer.""" - if is_classifier(self): - layer = ClassificationCascadeLayer(**layer_args) + if not hasattr(self, "use_custom_estimator"): + # Use built-in cascade layers + if is_classifier(self): + layer = ClassificationCascadeLayer(**layer_args) + else: + layer = RegressionCascadeLayer(**layer_args) else: - layer = RegressionCascadeLayer(**layer_args) + # Use customized cascade layers + layer = CustomCascadeLayer( + layer_args["layer_idx"], + self.n_splits, + layer_args["n_outputs"], + self.dummy_estimators, + layer_args["partial_mode"], + layer_args["buffer"], + layer_args["random_state"], + layer_args["verbose"], + is_classifier(self), + ) return layer @@ -714,7 +733,10 @@ def predict(self, X): @property def n_aug_features_(self): - return 2 * self.n_estimators * self.n_outputs_ + if not hasattr(self, "use_custom_estimator"): + return 2 * self.n_estimators * self.n_outputs_ + else: + return self.n_estimators * self.n_outputs_ # flake8: noqa: E501 def fit(self, X, y, sample_weight=None): @@ -913,30 +935,36 @@ def fit(self, X, y, sample_weight=None): # Build the predictor if `self.use_predictor` is True if self.use_predictor: - if is_classifier(self): - self.predictor_ = _build_classifier_predictor( - self.predictor, - self.criterion, - self.n_trees, - self.n_outputs_, - self.max_depth, - self.min_samples_leaf, - self.n_jobs, - self.random_state, - self.predictor_kwargs, - ) - else: - self.predictor_ = _build_regressor_predictor( - self.predictor, - self.criterion, - self.n_trees, - self.n_outputs_, - self.max_depth, - self.min_samples_leaf, - self.n_jobs, - self.random_state, - self.predictor_kwargs, - ) + # Use built-in predictors + if self.predictor in ("forest", "xgboost", "lightgbm"): + if is_classifier(self): + self.predictor_ = _build_classifier_predictor( + self.predictor, + self.criterion, + self.n_trees, + self.n_outputs_, + self.max_depth, + self.min_samples_leaf, + self.n_jobs, + self.random_state, + self.predictor_kwargs, + ) + else: + self.predictor_ = _build_regressor_predictor( + self.predictor, + self.criterion, + self.n_trees, + self.n_outputs_, + self.max_depth, + self.min_samples_leaf, + self.n_jobs, + self.random_state, + self.predictor_kwargs, + ) + elif self.predictor == "custom": + if not hasattr(self, "predictor_"): + msg = "Missing predictor after calling `set_predictor`" + raise RuntimeError(msg) binner_ = Binner( n_bins=self.n_bins, @@ -958,9 +986,7 @@ def fit(self, X, y, sample_weight=None): print(msg.format(_utils.ctime(), self.predictor)) tic = time.time() - self.predictor_.fit( - X_middle_train_, y, sample_weight=sample_weight - ) + self.predictor_.fit(X_middle_train_, y, sample_weight) toc = time.time() if self.verbose > 0: @@ -974,6 +1000,92 @@ def fit(self, X, y, sample_weight=None): return self + def set_estimator(self, estimators, n_splits=5): + """ + Specify custom base estimators, which will override estimators used + by default. + + Parameters + ---------- + estimators : :obj:`list` + A list of your base estimators, will be used in all cascade layers. + n_splits : :obj:`int`, default=5 + The number of folds, must be at least 2. 
+ """ + # Validation check + if not isinstance(estimators, list): + msg = ( + "estimators should be a list that stores instantiated" + " objects of your base estimator." + ) + raise ValueError(msg) + + for idx, estimator in enumerate(estimators): + if not callable(getattr(estimator, "fit", None)): + msg = "The `fit` method of estimator = {} is not callable." + raise AttributeError(msg.format(idx)) + + if is_classifier(self) and not callable( + getattr(estimator, "predict_proba", None) + ): + msg = ( + "The `predict_proba` method of estimator = {} is not" + " callable." + ) + raise AttributeError(msg.format(idx)) + + if not is_classifier(self) and not callable( + getattr(estimator, "predict", None) + ): + msg = "The `predict` method of estimator = {} is not callable." + raise AttributeError(msg.format(idx)) + + if not n_splits >= 2: + msg = "n_splits = {} should be at least 2." + raise ValueError(msg.format(n_splits)) + + self.dummy_estimators = estimators + self.n_splits = n_splits + self.use_custom_estimator = True + + # Update attributes + self.n_estimators = len(estimators) + + def set_predictor(self, predictor): + """ + Specify the custom predictor, which will override the predictor + used by default. + + Parameters + ---------- + predictor : :obj:`object` + The instantiated object of your predictor. + """ + # Validation check + if not callable(getattr(predictor, "fit", None)): + msg = "The `fit` method of the predictor is not callable." + raise AttributeError(msg) + + if is_classifier(self) and not callable( + getattr(predictor, "predict_proba", None) + ): + msg = ( + "The `predict_proba` method of the predictor is not" + " callable." + ) + raise AttributeError(msg) + + if not is_classifier(self) and not callable( + getattr(predictor, "predict", None) + ): + msg = "The `predict` method of the predictor is not callable." + raise AttributeError(msg) + + # Set related attributes + self.predictor = "custom" + self.predictor_ = predictor + self.use_predictor = True + def get_layer_feature_importances(self, layer_idx): """ Return the impurity-based feature importances of the ``layer_idx``-th @@ -992,6 +1104,13 @@ def get_layer_feature_importances(self, layer_idx): The impurity-based feature importances of the cascade layer. Notice that the number of input features are different between the first cascade layer and remaining cascade layers. + + + .. note:: + - This method is only applicable when deep forest is built using + the ``sklearn`` backend + - The functionality of this method is not available when using + customized estimators in deep forest. """ if self.backend == "custom": msg = ( @@ -1002,10 +1121,10 @@ def get_layer_feature_importances(self, layer_idx): layer = self._get_layer(layer_idx) return layer.feature_importances_ - def get_forest(self, layer_idx, est_idx, forest_type): + def get_estimator(self, layer_idx, est_idx, estimator_type): """ - Get the `est_idx`-th forest estimator from the `layer_idx`-th - cascade layer in the model. + Get the `est_idx`-th estimator from the `layer_idx`-th cascade layer + in the deep forest. Parameters ---------- @@ -1013,17 +1132,20 @@ def get_forest(self, layer_idx, est_idx, forest_type): The index of the cascade layer, should be in the range ``[0, self.n_layers_-1]``. est_idx : :obj:`int` - The index of the forest estimator, should be in the range + The index of the estimator, should be in the range ``[0, self.n_estimators]``. - forest_type : :obj:`{"rf", "erf"}` + estimator_type : :obj:`{"rf", "erf", "custom"}` Specify the forest type. 
- If ``rf``, return the random forest. - If ``erf``, return the extremely random forest. + - If ``custom``, return the customized estimator, only applicable + when using customized estimators in deep forest via + :meth:`set_estimator`. Returns ------- - estimator : The forest estimator with the given index. + estimator : Estimator with the given index. """ if not self.is_fitted_: raise AttributeError("Please fit the model first.") @@ -1043,15 +1165,22 @@ def get_forest(self, layer_idx, est_idx, forest_type): ) raise ValueError(msg.format(self.n_estimators, est_idx)) - if forest_type not in ("rf", "erf"): + if estimator_type not in ("rf", "erf", "custom"): msg = ( - "`forest_type` should be one of {{rf, erf}}," + "`estimator_type` should be one of {{rf, erf, custom}}," " but got {} instead." ) - raise ValueError(msg.format(forest_type)) + raise ValueError(msg.format(estimator_type)) + + if estimator_type == "custom" and not self.use_custom_estimator: + msg = ( + "`estimator_type` = {} is only applicable when using" + "customized estimators in deep forest." + ) + raise ValueError(msg.format(estimator_type)) layer = self._get_layer(layer_idx) - est_key = "{}-{}-{}".format(layer_idx, est_idx, forest_type) + est_key = "{}-{}-{}".format(layer_idx, est_idx, estimator_type) estimator = layer.estimators_[est_key] # Load the model if in partial mode @@ -1090,6 +1219,9 @@ def save(self, dirname="model"): d["verbose"] = self.verbose d["use_predictor"] = self.use_predictor d["is_classifier"] = is_classifier(self) + d["use_custom_estimator"] = ( + True if hasattr(self, "use_custom_estimator") else False + ) if self.use_predictor: d["predictor"] = self.predictor @@ -1131,8 +1263,11 @@ def load(self, dirname): self.n_features_ = d["n_features"] self.n_outputs_ = d["n_outputs"] self.partial_mode = d["partial_mode"] + self.buffer_ = d["buffer"] self.verbose = d["verbose"] self.use_predictor = d["use_predictor"] + if d["use_custom_estimator"]: + self.use_custom_estimator = True # Load label encoder if labels are encoded. if "labels_are_encoded" in d: diff --git a/deepforest/utils/__init__.py b/deepforest/utils/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/deepforest/utils/kfoldwrapper.py b/deepforest/utils/kfoldwrapper.py new file mode 100644 index 0000000..a2df195 --- /dev/null +++ b/deepforest/utils/kfoldwrapper.py @@ -0,0 +1,99 @@ +""" +Implementation of the estimator wrapper to support customized base estimators. +""" + + +__all__ = ["KFoldWrapper"] + +import copy +import numpy as np +from sklearn.model_selection import KFold + +from .. import _utils + + +class KFoldWrapper(object): + """ + A general wrapper for base estimators without the characteristic of + out-of-bag (OOB) estimation. 
+ """ + + def __init__( + self, + estimator, + n_splits, + n_outputs, + random_state=None, + verbose=1, + is_classifier=True, + ): + + # Parameters were already validated by upstream methods + self.dummy_estimator_ = estimator + self.n_splits = n_splits + self.n_outputs = n_outputs + self.random_state = random_state + self.verbose = verbose + self.is_classifier = is_classifier + # Internal container + self.estimators_ = [] + + @property + def estimator_(self): + """Return the list of internal estimators.""" + return self.estimators_ + + def fit_transform(self, X, y, sample_weight=None): + n_samples, _ = X.shape + splitter = KFold( + n_splits=self.n_splits, + shuffle=True, + random_state=self.random_state, + ) + self.oob_decision_function_ = np.zeros((n_samples, self.n_outputs)) + + for k, (train_idx, val_idx) in enumerate(splitter.split(X, y)): + estimator = copy.deepcopy(self.dummy_estimator_) + + if self.verbose > 1: + msg = "{} - - Fitting the base estimator with fold = {}" + print(msg.format(_utils.ctime(), k)) + + # Fit on training samples + if sample_weight is None: + # Notice that a bunch of base estimators do not take + # `sample_weight` as a valid input. + estimator.fit(X[train_idx], y[train_idx]) + else: + estimator.fit(X[train_idx], y[train_idx], sample_weight) + + # Predict on hold-out samples + if self.is_classifier: + self.oob_decision_function_[ + val_idx + ] += estimator.predict_proba(X[val_idx]) + else: + self.oob_decision_function_[val_idx] += estimator.predict( + X[val_idx] + ) + + # Store the estimator + self.estimators_.append(estimator) + + return self.oob_decision_function_ + + def predict(self, X): + n_samples, _ = X.shape + out = np.zeros((n_samples, self.n_outputs)) # pre-allocate results + for estimator in self.estimators_: + if self.is_classifier: + out += estimator.predict_proba(X) # classification + else: + if self.n_outputs_ > 1: + out += estimator.predict(X) # multi-variate regression + else: + out += estimator.predict(X).reshape( + n_samples, -1 + ) # univariate regression + + return out / self.n_splits # return the average prediction diff --git a/docs/advanced_topics/use_customized_estimator.rst b/docs/advanced_topics/use_customized_estimator.rst new file mode 100644 index 0000000..851f988 --- /dev/null +++ b/docs/advanced_topics/use_customized_estimator.rst @@ -0,0 +1,76 @@ +Use Customized Estimators +========================= + +The version v0.1.4 of :mod:`deepforest` has added the support on: + +- using customized base estimators in cascade layers of deep forest +- using the customized predictor concatenated to the deep forest + +The page gives a detailed introduction on how to use this new feature. + +Instantiate the deep forest model +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +To begin with, you need to instantiate a deep forest model. Notice that some parameters specified here will be overridden by downstream steps. For example, if the parameter :obj:`use_predictor` is set to ``False`` here, whereas :meth:`set_predictor` is called latter, then the internal attribute :obj:`use_predictor` will be altered to ``True``. + +.. code-block:: python + + from deepforest import CascadeForestClassifier + model = CascadeForestClassifier() + +Instantiate your estimators +~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +In order to use customized estimators in the cascade layer of deep forest, the next step is to instantiate the estimators and encapsulate them into a Python list: + +.. 
code-block:: python + + n_estimators = 4 # the number of base estimators per cascade layer + estimators = [your_estimator(random_state=i) for i in range(n_estimators)] + +.. tip:: + + You need to make sure that instantiated estimators in the list are with different random seeds if seeds are manually specified. Otherwise, they will have the same behavior on the dataset and make cascade layers less effective. + +For the customized predictor, you only need to instantiate it, and there is no extra step: + +.. code-block:: python + + predictor = your_predictor() + +Deep forest will conduct internal checks to make sure that :obj:`estimators` and :obj:`predictor` are valid for training and evaluating. To pass the internal checks, the class of your customized estimators or predictor should at least implement methods listed below: + +* :meth:`fit` for training +* **[Classification]** :meth:`predict_proba` for evaluating +* **[Regression]** :meth:`predict` for evaluating + +The name of these methods follow the convention in scikit-learn, and they are already implemented in a lot of packages offering scikit-learn APIs (e.g., `XGBoost `__, `LightGBM `__, `CatBoost `__). Otherwise, you have to implement a wrapper on your customized estimators to make these methods callable. + +Call :meth:`set_estimator` and :meth:`set_predictor` +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +The core step is to call :meth:`set_estimator` and :meth:`set_predictor` to override estimators used by default: + +.. code-block:: python + + # Customized base estimators + model.set_estimator(estimators) + + # Customized predictor + model.set_predictor(predictor) + +:meth:`set_estimator` has another parameter :obj:`n_splits`, which determines the number of folds of the internal cross-validation strategy. Its value should be at least ``2``, and the default value is ``5``. Generally speaking, a larger :obj:`n_splits` leads to better generalization performance. If you are confused about the effect of cross-validation here, please refer to `the original paper `__ for details on how deep forest adopts the cross-validation strategy to build cascade layers. + +Train and Evaluate +~~~~~~~~~~~~~~~~~~ + +Remaining steps follow the original workflow of deep forest. + +.. code-block:: python + + model.train(X_train, y_train) + y_pred = model.predict(X_test) + +.. note:: + + When using customized estimators via :meth:`set_estimator`, deep forest adopts the cross-validation strategy to grow cascade layers. Suppose that :obj:`n_splits` is set to ``5`` when calling :meth:`set_estimator`, each estimator will be repeatedly trained over five times to get full augmented features from a cascade layer. As a result, you may experience a drastic increase in running time and memory. diff --git a/docs/index.rst b/docs/index.rst index a3fa4bb..063a07e 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -106,6 +106,12 @@ Reference Parameters Tunning Experiments +.. toctree:: + :maxdepth: 1 + :caption: Advanced Topics + + Use Customized Estimators <./advanced_topics/use_customized_estimator> + .. 
toctree:: :maxdepth: 1 :caption: For Developers diff --git a/tests/test_model_classifier.py b/tests/test_model_classifier.py index c1bd9ae..fdc8afe 100644 --- a/tests/test_model_classifier.py +++ b/tests/test_model_classifier.py @@ -107,21 +107,21 @@ def test_model_properties_after_fitting(): # Test the hook on forest estimator assert ( - model.get_forest(0, 0, "rf") + model.get_estimator(0, 0, "rf") is model._get_layer(0).estimators_["0-0-rf"].estimator_ ) with pytest.raises(ValueError) as excinfo: - model.get_forest(model.n_layers_, 0, "rf") + model.get_estimator(model.n_layers_, 0, "rf") assert "`layer_idx` should be in the range" in str(excinfo.value) with pytest.raises(ValueError) as excinfo: - model.get_forest(0, model.n_estimators, "rf") + model.get_estimator(0, model.n_estimators, "rf") assert "`est_idx` should be in the range" in str(excinfo.value) with pytest.raises(ValueError) as excinfo: - model.get_forest(0, 0, "Unknown") - assert "`forest_type` should be one of" in str(excinfo.value) + model.get_estimator(0, 0, "Unknown") + assert "`estimator_type` should be one of" in str(excinfo.value) @pytest.mark.parametrize("backend", ["custom", "sklearn"]) diff --git a/tests/test_set_custom_estimator.py b/tests/test_set_custom_estimator.py new file mode 100644 index 0000000..b1d2dfe --- /dev/null +++ b/tests/test_set_custom_estimator.py @@ -0,0 +1,151 @@ +import pytest +import shutil +from numpy.testing import assert_array_equal +from sklearn.datasets import load_iris +from sklearn.tree import DecisionTreeClassifier +from sklearn.model_selection import train_test_split + +from deepforest import CascadeForestClassifier, CascadeForestRegressor + + +save_dir = "./tmp" + +# Load data +X, y = load_iris(return_X_y=True) +X_train, X_test, y_train, y_test = train_test_split( + X, y, test_size=0.42, random_state=42 +) + + +def test_custom_cascade_layer_workflow_in_memory(): + + model = CascadeForestClassifier() + + n_estimators = 4 + estimators = [DecisionTreeClassifier() for _ in range(n_estimators)] + model.set_estimator(estimators) # set custom base estimators + + predictor = DecisionTreeClassifier() + model.set_predictor(predictor) + + model.fit(X_train, y_train) + y_pred_before = model.predict(X_test) + + # Save and Reload + model.save(save_dir) + + model = CascadeForestClassifier() + model.load(save_dir) + + # Predictions after loading + y_pred_after = model.predict(X_test) + + # Make sure the same predictions before and after model serialization + assert_array_equal(y_pred_before, y_pred_after) + + assert ( + model.get_estimator(0, 0, "custom") + is model._get_layer(0).estimators_["0-0-custom"].estimator_ + ) + + model.clean() # clear the buffer + shutil.rmtree(save_dir) + + +def test_custom_cascade_layer_workflow_partial_mode(): + + model = CascadeForestClassifier(partial_mode=True) + + n_estimators = 4 + estimators = [DecisionTreeClassifier() for _ in range(n_estimators)] + model.set_estimator(estimators) # set custom base estimators + + predictor = DecisionTreeClassifier() + model.set_predictor(predictor) + + model.fit(X_train, y_train) + y_pred_before = model.predict(X_test) + + # Save and Reload + model.save(save_dir) + + model = CascadeForestClassifier() + model.load(save_dir) + + # Predictions after loading + y_pred_after = model.predict(X_test) + + # Make sure the same predictions before and after model serialization + assert_array_equal(y_pred_before, y_pred_after) + + model.clean() # clear the buffer + shutil.rmtree(save_dir) + + +def 
test_custom_base_estimator_wrong_estimator_type(): + + model = CascadeForestClassifier() + with pytest.raises(ValueError) as excinfo: + model.set_estimator(42) + assert "estimators should be a list" in str(excinfo.value) + + +def test_custom_estimator_missing_fit(): + class tmp_estimator: + def __init__(self): + pass + + model = CascadeForestClassifier() + with pytest.raises(AttributeError) as excinfo: + model.set_estimator([tmp_estimator()]) + assert "The `fit` method of estimator" in str(excinfo.value) + + with pytest.raises(AttributeError) as excinfo: + model.set_predictor(tmp_estimator()) + assert "The `fit` method of the predictor" in str(excinfo.value) + + +def test_custom_base_estimator_missing_predict_proba(): + class tmp_estimator: + def __init__(self): + pass + + def fit(self, X, y): + pass + + model = CascadeForestClassifier() + with pytest.raises(AttributeError) as excinfo: + model.set_estimator([tmp_estimator()]) + assert "The `predict_proba` method" in str(excinfo.value) + + with pytest.raises(AttributeError) as excinfo: + model.set_predictor(tmp_estimator()) + assert "The `predict_proba` method of the predictor" in str(excinfo.value) + + +def test_custom_base_estimator_missing_predict(): + class tmp_estimator: + def __init__(self): + pass + + def fit(self, X, y): + pass + + model = CascadeForestRegressor() + with pytest.raises(AttributeError) as excinfo: + model.set_estimator([tmp_estimator()]) + assert "The `predict` method" in str(excinfo.value) + + with pytest.raises(AttributeError) as excinfo: + model.set_predictor(tmp_estimator()) + assert "The `predict` method of the predictor" in str(excinfo.value) + + +def test_custom_base_estimator_invalid_n_splits(): + + model = CascadeForestRegressor() + n_estimators = 4 + estimators = [DecisionTreeClassifier() for _ in range(n_estimators)] + with pytest.raises(ValueError) as excinfo: + model.set_estimator(estimators, n_splits=1) + assert "should be at least 2" in str(excinfo.value) From 97cf043eed261c4156ad7f97fd3f531aaa4fde38 Mon Sep 17 00:00:00 2001 From: xuyxu Date: Thu, 11 Mar 2021 16:08:27 +0800 Subject: [PATCH 55/94] [MNT] Update version number --- setup.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/setup.py b/setup.py index 8575863..bce24ba 100644 --- a/setup.py +++ b/setup.py @@ -12,7 +12,7 @@ MAINTAINER = "Yi-Xuan Xu" MAINTAINER_EMAIL = "xuyx@lamda.nju.edu.cn" URL = "https://github.com/LAMDA-NJU/Deep-Forest" -VERSION = "0.1.3" +VERSION = "0.1.4" def configuration(parent_package="", top_path=None): From 5ea3a00475cb283cc8f03bb71999b776b6ad8826 Mon Sep 17 00:00:00 2001 From: xuyxu Date: Thu, 11 Mar 2021 19:57:24 +0800 Subject: [PATCH 56/94] [DOC] Update CHANGELOG.rst --- CHANGELOG.rst | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/CHANGELOG.rst b/CHANGELOG.rst index a95343c..19f22dc 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -36,15 +36,15 @@ Version 0.1.* - |Fix| fix inconsistency on predictor name (`#52 `__) @xuyxu - |Feature| add official support for ManyLinux-aarch64 (`#47 `__) @xuyxu - |Fix| fix accepted types of target for :obj:`CascadeForestRegressor` (`#44 `__) @xuyxu -- |Feature| add multi-output support for :obj:`CascadeForestRegressor` (`#40 `__) @Alex-Medium -- |Feature| add layer-wise feature importances (`#39 `__) @xuyxu -- |Feature| add scikit-learn backend (`#36 `__) @xuyxu +- |Feature| |API| add multi-output support for :obj:`CascadeForestRegressor` (`#40 `__) @Alex-Medium +- |Feature| |API| add layer-wise feature importances (`#39 `__) 
@xuyxu +- |Feature| |API| add scikit-learn backend (`#36 `__) @xuyxu - |Feature| add official support for Mac-OS (`#34 `__) @T-Allen-sudo -- |Feature| support configurable criterion (`#28 `__) @tczhao -- |Feature| support regression prediction (`#25 `__) @tczhao +- |Feature| |API| support configurable criterion (`#28 `__) @tczhao +- |Feature| |API| support regression prediction (`#25 `__) @tczhao - |Fix| fix accepted data types on the :obj:`binner` (`#23 `__) @xuyxu -- |Feature| implement the :meth:`get_forest` method for efficient indexing (`#22 `__) @xuyxu +- |Feature| |API| implement the :meth:`get_estimator` method for efficient indexing (`#22 `__) @xuyxu - |Feature| support class label encoding (`#18 `__) @NiMaZi -- |Feature| support sample weight in :meth:`fit` (`#7 `__) @tczhao -- |Feature| configurable predictor parameter (`#9 `__) @tczhao +- |Feature| |API| support sample weight in :meth:`fit` (`#7 `__) @tczhao +- |Feature| |API| configurable predictor parameter (`#9 `__) @tczhao - |Enhancement| add base class ``BaseEstimator`` and ``ClassifierMixin`` (`#8 `__) @pjgao From 24e2da61d101ae1fd1d82f13cee13361b2026958 Mon Sep 17 00:00:00 2001 From: "allcontributors[bot]" <46447321+allcontributors[bot]@users.noreply.github.com> Date: Sun, 21 Mar 2021 13:15:41 +0800 Subject: [PATCH 57/94] [DOC] Update contributors * docs: update CONTRIBUTORS.md [skip ci] * docs: update .all-contributorsrc [skip ci] Co-authored-by: allcontributors[bot] <46447321+allcontributors[bot]@users.noreply.github.com> --- .all-contributorsrc | 9 +++++++++ CONTRIBUTORS.md | 1 + 2 files changed, 10 insertions(+) diff --git a/.all-contributorsrc b/.all-contributorsrc index 73fcb0a..9eb43ce 100644 --- a/.all-contributorsrc +++ b/.all-contributorsrc @@ -117,6 +117,15 @@ "code", "test" ] + }, + { + "login": "zshgostop", + "name": "zshgostop", + "avatar_url": "https://avatars.githubusercontent.com/u/48615178?v=4", + "profile": "https://github.com/zshgostop", + "contributions": [ + "bug" + ] } ], "contributorsPerLine": 7, diff --git a/CONTRIBUTORS.md b/CONTRIBUTORS.md index 9e5df2b..e546219 100644 --- a/CONTRIBUTORS.md +++ b/CONTRIBUTORS.md @@ -20,6 +20,7 @@ Thanks goes to these wonderful people ([emoji key](https://allcontributors.org/d
 Yi-Xuan Xu 💻 📖 ⚠️
 tczhao 💻 📖 ⚠️
 zhenlingcn 🐛
+zshgostop 🐛
 陈鼎彦
🐛 💻 From cd1aa34379d15083c383d218602f977f874409b2 Mon Sep 17 00:00:00 2001 From: Yi-Xuan Xu Date: Sun, 21 Mar 2021 14:20:34 +0800 Subject: [PATCH 58/94] [FIX] Add sample index for `sample_weight` (#64) * Update kfoldwrapper.py * Update CHANGELOG.rst --- CHANGELOG.rst | 1 + deepforest/utils/kfoldwrapper.py | 4 +++- 2 files changed, 4 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.rst b/CHANGELOG.rst index 19f22dc..defe29d 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -30,6 +30,7 @@ Version 0.1.* .. |Fix| replace:: :raw-html:`Fix` :raw-latex:`{\small\sc [Fix]}` .. |API| replace:: :raw-html:`API Change` :raw-latex:`{\small\sc [API Change]}` +- |Fix| fix missing sample indices for parameter ``sample_weight`` in :obj:`KFoldWrapper` (`#48 `__) @xuyxu - |Feature| |API| add support on customized estimators (`#48 `__) @xuyxu - |Enhancement| improve target checks for :obj:`CascadeForestRegressor` (`#53 `__) @chendingyan - |Fix| fix the prediction workflow with only one cascade layer (`#56 `__) @xuyxu diff --git a/deepforest/utils/kfoldwrapper.py b/deepforest/utils/kfoldwrapper.py index a2df195..380a3ac 100644 --- a/deepforest/utils/kfoldwrapper.py +++ b/deepforest/utils/kfoldwrapper.py @@ -65,7 +65,9 @@ def fit_transform(self, X, y, sample_weight=None): # `sample_weight` as a valid input. estimator.fit(X[train_idx], y[train_idx]) else: - estimator.fit(X[train_idx], y[train_idx], sample_weight) + estimator.fit( + X[train_idx], y[train_idx], sample_weight[train_idx] + ) # Predict on hold-out samples if self.is_classifier: From 942f31a09e4e1c0f0350e219b21a3f57b5343cab Mon Sep 17 00:00:00 2001 From: Yi-Xuan Xu Date: Wed, 24 Mar 2021 21:00:01 +0800 Subject: [PATCH 59/94] doc: add page on user reports (#66) --- docs/index.rst | 1 + docs/report_from_users.rst | 12 ++++++++++++ 2 files changed, 13 insertions(+) create mode 100644 docs/report_from_users.rst diff --git a/docs/index.rst b/docs/index.rst index 063a07e..40f780b 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -105,6 +105,7 @@ Reference API Reference Parameters Tunning Experiments + Report from Users .. toctree:: :maxdepth: 1 diff --git a/docs/report_from_users.rst b/docs/report_from_users.rst new file mode 100644 index 0000000..6afb697 --- /dev/null +++ b/docs/report_from_users.rst @@ -0,0 +1,12 @@ +Report from Users +================= + +The page collects user reports on using deep forest. Thanks all of them for their nice work! + +Competition +----------- + +* 1st winning solution of the competition `Insurance-Pricing-Game@AIcrowd `__: `[Solution] `__ | `[Presentation] `__ + +Application +----------- \ No newline at end of file From a4be16791581052a244138fd770cbe8cdd6f2d4f Mon Sep 17 00:00:00 2001 From: Yi-Xuan Xu Date: Tue, 13 Apr 2021 13:56:59 +0800 Subject: [PATCH 60/94] fix(Regressor): fix inconsistency on array shape in customized mode (#67) * fix: handling different layout * Update CHANGELOG.rst * add unit tests --- CHANGELOG.rst | 1 + deepforest/_layer.py | 1 + deepforest/utils/kfoldwrapper.py | 11 ++-- tests/test_set_custom_estimator.py | 98 ++++++++++++++++++++++++++---- 4 files changed, 96 insertions(+), 15 deletions(-) diff --git a/CHANGELOG.rst b/CHANGELOG.rst index defe29d..98af933 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -30,6 +30,7 @@ Version 0.1.* .. |Fix| replace:: :raw-html:`Fix` :raw-latex:`{\small\sc [Fix]}` .. 
|API| replace:: :raw-html:`API Change` :raw-latex:`{\small\sc [API Change]}` +- |Fix| fix inconsistency on array shape for :obj:`CascadeForestRegressor` in customized mode (`#67 `__) @xuyxu - |Fix| fix missing sample indices for parameter ``sample_weight`` in :obj:`KFoldWrapper` (`#48 `__) @xuyxu - |Feature| |API| add support on customized estimators (`#48 `__) @xuyxu - |Enhancement| improve target checks for :obj:`CascadeForestRegressor` (`#53 `__) @chendingyan diff --git a/deepforest/_layer.py b/deepforest/_layer.py index 036c6fd..722312d 100644 --- a/deepforest/_layer.py +++ b/deepforest/_layer.py @@ -415,6 +415,7 @@ def fit_transform(self, X, y, sample_weight=None): y, y_pred, sample_weight=sample_weight ) else: # regression + y_pred = oob_decision_function self.val_performance_ = mean_squared_error( y, y_pred, sample_weight=sample_weight ) diff --git a/deepforest/utils/kfoldwrapper.py b/deepforest/utils/kfoldwrapper.py index 380a3ac..61b47ff 100644 --- a/deepforest/utils/kfoldwrapper.py +++ b/deepforest/utils/kfoldwrapper.py @@ -75,9 +75,12 @@ def fit_transform(self, X, y, sample_weight=None): val_idx ] += estimator.predict_proba(X[val_idx]) else: - self.oob_decision_function_[val_idx] += estimator.predict( - X[val_idx] - ) + val_pred = estimator.predict(X[val_idx]) + + # Reshape for univariate regression + if self.n_outputs == 1 and len(val_pred.shape) == 1: + val_pred = np.expand_dims(val_pred, 1) + self.oob_decision_function_[val_idx] += val_pred # Store the estimator self.estimators_.append(estimator) @@ -91,7 +94,7 @@ def predict(self, X): if self.is_classifier: out += estimator.predict_proba(X) # classification else: - if self.n_outputs_ > 1: + if self.n_outputs > 1: out += estimator.predict(X) # multi-variate regression else: out += estimator.predict(X).reshape( diff --git a/tests/test_set_custom_estimator.py b/tests/test_set_custom_estimator.py index b1d2dfe..6ec5d55 100644 --- a/tests/test_set_custom_estimator.py +++ b/tests/test_set_custom_estimator.py @@ -1,8 +1,9 @@ import pytest import shutil +import numpy as np from numpy.testing import assert_array_equal -from sklearn.datasets import load_iris -from sklearn.tree import DecisionTreeClassifier +from sklearn.datasets import load_iris, load_boston +from sklearn.tree import DecisionTreeClassifier, DecisionTreeRegressor from sklearn.model_selection import train_test_split from deepforest import CascadeForestClassifier, CascadeForestRegressor @@ -12,12 +13,20 @@ # Load data X, y = load_iris(return_X_y=True) -X_train, X_test, y_train, y_test = train_test_split( +X_train_clf, X_test_clf, y_train_clf, y_test_clf = train_test_split( X, y, test_size=0.42, random_state=42 ) +X, y = load_boston(return_X_y=True) +X_train_reg, X_test_reg, y_train_reg, y_test_reg = train_test_split( + X, y, test_size=0.42, random_state=42 +) + +# multi-output target values +y_train_reg_multi = np.array([y_train_reg, y_train_reg]).reshape(-1, 2) + -def test_custom_cascade_layer_workflow_in_memory(): +def test_classifier_custom_cascade_layer_workflow_in_memory(): model = CascadeForestClassifier() @@ -28,8 +37,8 @@ def test_custom_cascade_layer_workflow_in_memory(): predictor = DecisionTreeClassifier() model.set_predictor(predictor) - model.fit(X_train, y_train) - y_pred_before = model.predict(X_test) + model.fit(X_train_clf, y_train_clf) + y_pred_before = model.predict(X_test_clf) # Save and Reload model.save(save_dir) @@ -38,7 +47,7 @@ def test_custom_cascade_layer_workflow_in_memory(): model.load(save_dir) # Predictions after loading - y_pred_after = 
model.predict(X_test) + y_pred_after = model.predict(X_test_clf) # Make sure the same predictions before and after model serialization assert_array_equal(y_pred_before, y_pred_after) @@ -52,7 +61,7 @@ def test_custom_cascade_layer_workflow_in_memory(): shutil.rmtree(save_dir) -def test_custom_cascade_layer_workflow_partial_mode(): +def test_classifier_custom_cascade_layer_workflow_partial_mode(): model = CascadeForestClassifier(partial_mode=True) @@ -63,8 +72,8 @@ def test_custom_cascade_layer_workflow_partial_mode(): predictor = DecisionTreeClassifier() model.set_predictor(predictor) - model.fit(X_train, y_train) - y_pred_before = model.predict(X_test) + model.fit(X_train_clf, y_train_clf) + y_pred_before = model.predict(X_test_clf) # Save and Reload model.save(save_dir) @@ -73,7 +82,74 @@ def test_custom_cascade_layer_workflow_partial_mode(): model.load(save_dir) # Predictions after loading - y_pred_after = model.predict(X_test) + y_pred_after = model.predict(X_test_clf) + + # Make sure the same predictions before and after model serialization + assert_array_equal(y_pred_before, y_pred_after) + + model.clean() # clear the buffer + shutil.rmtree(save_dir) + + +@pytest.mark.parametrize("y_train", [y_train_reg, y_train_reg_multi]) +def test_regressor_custom_cascade_layer_workflow_in_memory(y_train): + + model = CascadeForestRegressor() + + n_estimators = 4 + estimators = [DecisionTreeRegressor() for _ in range(n_estimators)] + model.set_estimator(estimators) # set custom base estimators + + predictor = DecisionTreeRegressor() + model.set_predictor(predictor) + + model.fit(X_train_reg, y_train) + y_pred_before = model.predict(X_test_reg) + + # Save and Reload + model.save(save_dir) + + model = CascadeForestRegressor() + model.load(save_dir) + + # Predictions after loading + y_pred_after = model.predict(X_test_reg) + + # Make sure the same predictions before and after model serialization + assert_array_equal(y_pred_before, y_pred_after) + + assert ( + model.get_estimator(0, 0, "custom") + is model._get_layer(0).estimators_["0-0-custom"].estimator_ + ) + + model.clean() # clear the buffer + shutil.rmtree(save_dir) + + +@pytest.mark.parametrize("y_train", [y_train_reg, y_train_reg_multi]) +def test_regressor_custom_cascade_layer_workflow_partial_mode(y_train): + + model = CascadeForestRegressor(partial_mode=True) + + n_estimators = 4 + estimators = [DecisionTreeRegressor() for _ in range(n_estimators)] + model.set_estimator(estimators) # set custom base estimators + + predictor = DecisionTreeRegressor() + model.set_predictor(predictor) + + model.fit(X_train_reg, y_train) + y_pred_before = model.predict(X_test_reg) + + # Save and Reload + model.save(save_dir) + + model = CascadeForestRegressor() + model.load(save_dir) + + # Predictions after loading + y_pred_after = model.predict(X_test_reg) # Make sure the same predictions before and after model serialization assert_array_equal(y_pred_before, y_pred_after) From a993f4b3e05999cbe0a33fa23adf0d0d3d0793b9 Mon Sep 17 00:00:00 2001 From: xuyxu Date: Thu, 15 Apr 2021 16:37:38 +0800 Subject: [PATCH 61/94] mnt: update version number --- setup.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/setup.py b/setup.py index bce24ba..ea2efbe 100644 --- a/setup.py +++ b/setup.py @@ -12,7 +12,7 @@ MAINTAINER = "Yi-Xuan Xu" MAINTAINER_EMAIL = "xuyx@lamda.nju.edu.cn" URL = "https://github.com/LAMDA-NJU/Deep-Forest" -VERSION = "0.1.4" +VERSION = "0.1.5" def configuration(parent_package="", top_path=None): From 
171abcb23ca467603c4969e1d2adcc3fcea0de12 Mon Sep 17 00:00:00 2001 From: xuyxu Date: Fri, 16 Apr 2021 14:23:55 +0800 Subject: [PATCH 62/94] Bump version: v0.1.5 --- setup.py | 1 + 1 file changed, 1 insertion(+) diff --git a/setup.py b/setup.py index ea2efbe..3df4a10 100644 --- a/setup.py +++ b/setup.py @@ -61,6 +61,7 @@ def configuration(parent_package="", top_path=None): "Programming Language :: Python :: 3.6", "Programming Language :: Python :: 3.7", "Programming Language :: Python :: 3.8", + "Programming Language :: Python :: 3.9", ], python_requires=">=3.6", install_requires=[ From 05cd74c14e6efe951f25b65633e5b4f3dee18176 Mon Sep 17 00:00:00 2001 From: Yi-Xuan Xu Date: Fri, 16 Apr 2021 15:58:06 +0800 Subject: [PATCH 63/94] mnt: support python3.9 (#69) * mnt: support python39 * Update CHANGELOG.rst * Update build-wheels.yml --- .github/workflows/build-and-test.yml | 2 +- .github/workflows/build-wheels.yml | 4 ++-- CHANGELOG.rst | 1 + 3 files changed, 4 insertions(+), 3 deletions(-) diff --git a/.github/workflows/build-and-test.yml b/.github/workflows/build-and-test.yml index 1e11b29..c27d57d 100644 --- a/.github/workflows/build-and-test.yml +++ b/.github/workflows/build-and-test.yml @@ -12,7 +12,7 @@ jobs: strategy: matrix: os: [ubuntu-latest, windows-latest, macos-latest] - python-version: [3.6, 3.7, 3.8] + python-version: [3.6, 3.7, 3.8, 3.9] steps: - uses: actions/checkout@v2 - name: Set up Python diff --git a/.github/workflows/build-wheels.yml b/.github/workflows/build-wheels.yml index 5b720c2..260862d 100644 --- a/.github/workflows/build-wheels.yml +++ b/.github/workflows/build-wheels.yml @@ -30,8 +30,8 @@ jobs: CIBW_ARCHS_LINUX: "x86_64 aarch64" CIBW_ARCHS_WINDOWS: "AMD64" CIBW_ARCHS_MACOS: "x86_64" - CIBW_BUILD: "cp36-manylinux_x86_64 cp37-manylinux_x86_64 cp38-manylinux_x86_64 cp36-win_amd64 cp37-win_amd64 cp38-win_amd64 cp36-macosx_x86_64 cp37-macosx_x86_64 cp38-macosx_x86_64 cp36-manylinux_aarch64 cp37-manylinux_aarch64 cp38-manylinux_aarch64" - + CIBW_BUILD: cp3*-macosx_x86_64 cp3*-win_amd64 cp3*-manylinux_x86_64 cp3*-manylinux_aarch64 + CIBW_SKIP: cp35-* - name: Store artifacts uses: actions/upload-artifact@v2 with: diff --git a/CHANGELOG.rst b/CHANGELOG.rst index 98af933..bf763c7 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -30,6 +30,7 @@ Version 0.1.* .. |Fix| replace:: :raw-html:`Fix` :raw-latex:`{\small\sc [Fix]}` .. 
|API| replace:: :raw-html:`API Change` :raw-latex:`{\small\sc [API Change]}` +- |Feature| support python 3.9 (`#69 `__) @xuyxu - |Fix| fix inconsistency on array shape for :obj:`CascadeForestRegressor` in customized mode (`#67 `__) @xuyxu - |Fix| fix missing sample indices for parameter ``sample_weight`` in :obj:`KFoldWrapper` (`#48 `__) @xuyxu - |Feature| |API| add support on customized estimators (`#48 `__) @xuyxu From ed6ffdf708615be5e766c88ca8beb6c6c8351e24 Mon Sep 17 00:00:00 2001 From: xuyxu Date: Sat, 17 Apr 2021 10:15:22 +0800 Subject: [PATCH 64/94] mnt: add MANIFEST.in for source distribution --- MANIFEST.in | 7 +++++++ 1 file changed, 7 insertions(+) create mode 100644 MANIFEST.in diff --git a/MANIFEST.in b/MANIFEST.in new file mode 100644 index 0000000..90b37fa --- /dev/null +++ b/MANIFEST.in @@ -0,0 +1,7 @@ +include *.rst +recursive-include docs * +recursive-exclude tests * +include *.md +recursive-include deepforest *.py +recursive-include deepforest *.c *.h *.pyx *.pxd *.pxi +include LICENSE \ No newline at end of file From f4554ccb16352f90d0e5e6c5273db6692c817197 Mon Sep 17 00:00:00 2001 From: xuyxu Date: Sat, 17 Apr 2021 10:19:31 +0800 Subject: [PATCH 65/94] docs: update config for all-contributor --- .all-contributorsrc | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/.all-contributorsrc b/.all-contributorsrc index 9eb43ce..f7ce66c 100644 --- a/.all-contributorsrc +++ b/.all-contributorsrc @@ -9,6 +9,9 @@ "imageSize": 100, "commit": false, "commitConvention": "none", + "contributorsPerLine": 7, + "contributorsSortAlphabetically": true, + "skipCi": true "contributors": [ { "login": "xuyxu", @@ -128,7 +131,4 @@ ] } ], - "contributorsPerLine": 7, - "contributorsSortAlphabetically": true, - "skipCi": true } From 9bd5a1be4cf0d33270b84812bf41d880dd6503ca Mon Sep 17 00:00:00 2001 From: xuyxu Date: Sat, 17 Apr 2021 10:31:54 +0800 Subject: [PATCH 66/94] doc: pin RTD link to stable version --- README.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.rst b/README.rst index e423679..a76b59b 100644 --- a/README.rst +++ b/README.rst @@ -87,7 +87,7 @@ Regression Resources --------- -* `Documentation `__ +* `Documentation `__ * Deep Forest: `[Paper] `__ * Keynote at AISTATS 2019: `[Slides] `__ From 882ef65de53616c356234ef642b0700ba8aa85b8 Mon Sep 17 00:00:00 2001 From: xuyxu Date: Sat, 17 Apr 2021 10:53:11 +0800 Subject: [PATCH 67/94] mnt: specify language type for Github Linguist --- .gitattributes | 3 +++ 1 file changed, 3 insertions(+) create mode 100644 .gitattributes diff --git a/.gitattributes b/.gitattributes new file mode 100644 index 0000000..8f8f553 --- /dev/null +++ b/.gitattributes @@ -0,0 +1,3 @@ +*.pyx linguist-language=Cython +*.pxd linguist-language=Cython +*.rst linguist-language=reStructuredText \ No newline at end of file From 3cfbbb17be868abac8cbcba868db340aba5eba55 Mon Sep 17 00:00:00 2001 From: xuyxu Date: Sat, 17 Apr 2021 10:53:54 +0800 Subject: [PATCH 68/94] chore: remove unnecessary keywords --- setup.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/setup.py b/setup.py index 3df4a10..9c9081a 100644 --- a/setup.py +++ b/setup.py @@ -51,13 +51,11 @@ def configuration(parent_package="", top_path=None): classifiers=[ "Intended Audience :: Science/Research", "Intended Audience :: Developers", - "Programming Language :: C", "Programming Language :: Python", "Topic :: Software Development", "Topic :: Scientific/Engineering", "Operating System :: Microsoft :: Windows", "Operating System :: Unix", - "Programming 
Language :: Python :: 3", "Programming Language :: Python :: 3.6", "Programming Language :: Python :: 3.7", "Programming Language :: Python :: 3.8", From 490b861f396fe748e249ff780d7c82d9a4261a4a Mon Sep 17 00:00:00 2001 From: Yi-Xuan Xu Date: Mon, 19 Apr 2021 15:52:40 +0800 Subject: [PATCH 69/94] fix(Regressor): handle the case with no internal node in tree (#70) * add code * Update CHANGELOG.rst --- CHANGELOG.rst | 1 + deepforest/_forest.pyx | 6 ++++++ deepforest/tree/_tree.pyx | 4 ++-- 3 files changed, 9 insertions(+), 2 deletions(-) diff --git a/CHANGELOG.rst b/CHANGELOG.rst index bf763c7..af83210 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -30,6 +30,7 @@ Version 0.1.* .. |Fix| replace:: :raw-html:`Fix` :raw-latex:`{\small\sc [Fix]}` .. |API| replace:: :raw-html:`API Change` :raw-latex:`{\small\sc [API Change]}` +- |Fix| fix the breakdown under the corner case where no internal node exists (`#70 `__) @xuyxu - |Feature| support python 3.9 (`#69 `__) @xuyxu - |Fix| fix inconsistency on array shape for :obj:`CascadeForestRegressor` in customized mode (`#67 `__) @xuyxu - |Fix| fix missing sample indices for parameter ``sample_weight`` in :obj:`KFoldWrapper` (`#48 `__) @xuyxu diff --git a/deepforest/_forest.pyx b/deepforest/_forest.pyx index 335f824..a21938b 100644 --- a/deepforest/_forest.pyx +++ b/deepforest/_forest.pyx @@ -77,6 +77,7 @@ cdef void _apply_region(const DTYPE_t [:, :] data, """ cdef: SIZE_t n_samples = data.shape[0] + SIZE_t n_internal_nodes = feature.shape[0] SIZE_t i SIZE_t node_id SIZE_t node_feature @@ -87,6 +88,11 @@ cdef void _apply_region(const DTYPE_t [:, :] data, with nogil: for i in range(n_samples): + # Skip the corner case where the root node is a leaf node + if n_internal_nodes == 0: + out[i] = 0 + continue + node_id = 0 node_feature = feature[node_id] node_threshold = threshold[node_id] diff --git a/deepforest/tree/_tree.pyx b/deepforest/tree/_tree.pyx index 6eed475..f43768a 100644 --- a/deepforest/tree/_tree.pyx +++ b/deepforest/tree/_tree.pyx @@ -252,7 +252,7 @@ cdef class DepthFirstTreeBuilder(TreeBuilder): if depth > max_depth_seen: max_depth_seen = depth - if rc >= 0: + if rc >= 0 and tree.internal_node_count > 0: rc = tree._resize_node_c(tree.internal_node_count) if rc >= 0: @@ -463,7 +463,7 @@ cdef class Tree: cdef int _resize_node_c(self, SIZE_t internal_capacity=SIZE_MAX) nogil except -1: """Resize `self.nodes` to `internal_capacity`. - + Returns -1 in case of failure to allocate memory (and raise MemoryError) or 0 otherwise. 
""" From 68bb6a3ef0d2e8db98ec05f5c448eb4548bdf61b Mon Sep 17 00:00:00 2001 From: xuyxu Date: Mon, 19 Apr 2021 16:03:34 +0800 Subject: [PATCH 70/94] style: flake8 formatting --- deepforest/tree/_criterion.pyx | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/deepforest/tree/_criterion.pyx b/deepforest/tree/_criterion.pyx index 93e9a9c..32bfe7d 100644 --- a/deepforest/tree/_criterion.pyx +++ b/deepforest/tree/_criterion.pyx @@ -190,9 +190,9 @@ cdef class Criterion: self.children_impurity(&impurity_left, &impurity_right) return ((self.weighted_n_node_samples / self.weighted_n_samples) * - (impurity - (self.weighted_n_right / + (impurity - (self.weighted_n_right / self.weighted_n_node_samples * impurity_right) - - (self.weighted_n_left / + - (self.weighted_n_left / self.weighted_n_node_samples * impurity_left))) @@ -719,7 +719,7 @@ cdef class RegressionCriterion(Criterion): self.sum_left = calloc(n_outputs, sizeof(double)) self.sum_right = calloc(n_outputs, sizeof(double)) - if (self.sum_total == NULL or + if (self.sum_total == NULL or self.sum_left == NULL or self.sum_right == NULL): raise MemoryError() @@ -1241,7 +1241,7 @@ cdef class MAE(RegressionCriterion): w = sample_weight[i] impurity_left += fabs(self.y[i, k] - median) * w - p_impurity_left[0] = impurity_left / (self.weighted_n_left * + p_impurity_left[0] = impurity_left / (self.weighted_n_left * self.n_outputs) for k in range(self.n_outputs): @@ -1253,7 +1253,7 @@ cdef class MAE(RegressionCriterion): w = sample_weight[i] impurity_right += fabs(self.y[i, k] - median) * w - p_impurity_right[0] = impurity_right / (self.weighted_n_right * + p_impurity_right[0] = impurity_right / (self.weighted_n_right * self.n_outputs) From 0faa3cb068d646b02f589d5dc4d8ee9cdee9ccca Mon Sep 17 00:00:00 2001 From: xuyxu Date: Mon, 19 Apr 2021 19:54:02 +0800 Subject: [PATCH 71/94] doc: update homepage --- README.rst | 5 ----- docs/index.rst | 8 ++------ 2 files changed, 2 insertions(+), 11 deletions(-) diff --git a/README.rst b/README.rst index a76b59b..d3b6589 100644 --- a/README.rst +++ b/README.rst @@ -111,8 +111,3 @@ Reference Pages = {3553-3559}, Title = {{Deep Forest:} Towards an alternative to deep neural networks}, Year = {2017}} - -Acknowledgement ---------------- - -The lead developer and maintainer of DF21 is Mr. `Yi-Xuan Xu `__. Before the release, it has been used internally in the LAMDA Group, Nanjing University, China. diff --git a/docs/index.rst b/docs/index.rst index 40f780b..0d91af5 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -73,7 +73,7 @@ Resources * Deep Forest: `[Paper] `__ * Keynote at AISTATS 2019: `[Slides] `__ -* Source Code: `[Gitee] `__ | `[GitHub] `__ +* Source Code: `[GitHub] `__ | `[Gitee] `__ Reference --------- @@ -111,6 +111,7 @@ Reference :maxdepth: 1 :caption: Advanced Topics + Model Architecture <./advanced_topics/architecture> Use Customized Estimators <./advanced_topics/use_customized_estimator> .. toctree:: @@ -126,8 +127,3 @@ Reference About Us Related Software - -Acknowledgement ---------------- - -The lead developer and maintainer of DF21 is Mr. `Yi-Xuan Xu `__. Before the release, it has been used internally in the LAMDA Group, Nanjing University, China. 
From 765388febfdef0e740c27ecda52a1285257ca9fb Mon Sep 17 00:00:00 2001
From: xuyxu
Date: Mon, 19 Apr 2021 19:54:19 +0800
Subject: [PATCH 72/94] doc: add the page on model architecture

---
 docs/advanced_topics/architecture.png | Bin 0 -> 214058 bytes
 docs/advanced_topics/architecture.rst | 53 ++++++++++++++++++++++++++
 2 files changed, 53 insertions(+)
 create mode 100644 docs/advanced_topics/architecture.png
 create mode 100644 docs/advanced_topics/architecture.rst

diff --git a/docs/advanced_topics/architecture.png b/docs/advanced_topics/architecture.png
new file mode 100644
index 0000000000000000000000000000000000000000..eb904bf64001580a4709e268a045a8496b680493
GIT binary patch
literal 214058
[base85-encoded payload of docs/advanced_topics/architecture.png (214058 bytes) omitted]
zkq}YPUddUfJ?f6ix)r)(F;^(ik(ZXY1Ip|!x?z;3cRfb`2#m0LnN2o^n}Oyr1I20v zyvD3Q&*0f&pR5Rh*@W5t&k7RdXQwL+N$kCI^NB}3kIKO9b@{$1__(d^OI79ul03Wq zIzJwkBwhxsDidx*HW})!`*xTYw*R5!qo$*InAnlQOo*C3TB@5zcY3q69AqA%IDhtN zsA6&(xbEgP%7hXzK5kU#RdLMy%pz#jcLgTxtEr)}9tXVob;bLI!J-DPndZ@sL zgZ)G+=wXq+k9?G=c&N)E{>@Wa{4&I1RQ=DfzD{qqNeiKmm1+x{K6bY=}?0R|7rQ?0X$Yq)}O~&{NHM7%JzZ zjQ&S>a)(D2bz>CZ6RGS=OOBp8r|n$x?Y)(jv`EYXAK?N;k;8>I4ox|0kuLaLfz*50 z{#HZLAh{P#Dv92|3T>n{m<5;7Z;KxOc^Kv68`g9cSh7Z`h!g=#8O)j!?Po<7!08l# z8P0TSIO{_VbA^Z#4x!Eip~orNr!Z6=Ojx#9=6$q(xk|;@QyL|N&{h~}v?C`|hV(=v z$q7bDWk|bbFA318kTeOc@v1W-L{T5kl&kdspmgI%m?PTMV`lqS;k{lZ%$_W) zP{7%tVobthVGFO=o%YkCv?c%$eb?t8IaPiy7O4wx-V|`x@(xoW#~Gaup#qt-Fphax zRwY9w|0CjP8C#n@HcL2yS2rqo)$W@$u-ea~l5k-e+QAGFhv=)bZIfZCZlhI)HS;hD z`d_mgX4;X1oB^VT!uL2)fsP&G8z1_#%wHNP8kCa3Un07{znte9Ob;HsuJF64SJG;Y zs~!b-_!27o+Xo~P1%bklMzZkgC?KqSCDKcpGs#PoJsGK^C?TX}u%qR$n$??|9nhEQ z`nK%#7RKyL^wC)tsojvMq2l#~RHZonK9+Q7m&PN>4S4w(i*(8ug7!4#dWkqp1)Mw3=A|3KdIIGb&Dw#+@TC=k#wpc>M@Y=+{0ftq}`|j%G zJE`~E_)k)mG{|$w1lhE&>v6zAGC??f!v~{ULI{JI>(3VgBSw%)j8G|3k2W)K+QON~ z*+ZwAVD4&O4do*pMzKD${h>)vEA#kEig0CI4*!~_l90eT6@JC5t%AeV2HBy^S#dK) zKQs3eW)CFoFxMEpWb+zx_uPr6>R_(;imN4P3nfGN1g?ChfJrzW>g>5NI~Sih%oa$r zqw(p?js{C_%Bal>oDjT>fZb29Kaez!gZWRSqQSveW@o)4VUG&)?DH~M)GfOEtOALK zjX#|kHfDLj^yTUT&X@xtLonH6H876P_^A%GZ(aw}KKaMdB)t7n^ywsL zAPPmEjo6H$I<^wIv@UpNy^UNx&u5jU@$WY)M8;AldS{rmaOD$=(bE(5H(T53uV!Lz zsxwQ9>#(Igu2<$Q$}7gW<#sDotj>5W?V&1FJYUArpoq zBFK~0^>wU7(fO)`8&7x%ns_?=*<(l0RFfkV!-g8J$GHP_z}LMJXdZX1la@wpW| zg%U>@$eiW}#&lJiMlTd&;*IXRU#zOd|E-YLNW1&q8#_jrkb_ChoM2j;(ehs8@Hb@I zr3*O14BL_OreSAUkDF8lF<`H6XNsRhg>e8GofX~s!6lb&xA>>jUdW9kIvRnyFoIE}`uO3ZIpy?j)AVmd41B|maD0GSnS zT^{=kH^>XwgejFlEhmhmHI}`62-SO5l;<1VrTAyK%l8&F{mXFLy1fz(7a|915!x^f zX7=<0!W5m)M=*JPO{5IK*X=oc7C9m!cL+tE1qSu3TbY;NTF>G3m5>qF$p~4hED4Fe zkiRAFcv?q{JSq%H)iwFc2VZfz0~y6HEGi;VHXPU7=D5iDv1RgJ88Z`#VPKWpmk34$ zqC_To79w1p>}CBrjYMh0`a|~qrFFo3A*2K53MOK%!T@x#1S!~6uAHLcy#iEi`e6@> zT(*x78n%yeGbZz7heUZ^!fkmHZtKoleYL&Orx{>t_Z|KIL4#mlG8n+rgCuC;GfMK3 z(r08LLR;qY7$OjPK=&wA>s(%v>1ewC&fw7|n_N3h`eYWN#EZ;X(b{mp0Hj(xoTlJ& zJ@JH%!te8evi-(Q6j5GIPEG*E%|%Lvl<|w2O-P9{dHy?z7vJuC2c_yxs>rqDvh_W6 zOE6;48ZksBcEG;So7xs}n0_Sq5OsoeE*6x3d7lPai`gdWT>d-pXxJH8xd&jo!XD~U zWrhd$J}O{F3dfDF&@JaP)Kb3&&2mm()QyP7yS~MkqPmVqQzW%8v%Q+$c>*x<5#;s- z&Po~P?i2sdMkGz|Ry9wu&h)c%T;IuoOn(C~ND>)yo1vXao&)9ceML7z5!T8S+nnSP z`-2|<<0m~nL;fxFzb-fp+@A)fe-&CN#jbGJhQJXK>Xj#Znqka@)?v^zrmRl`d?uL_ z02RF{FC=fp@Gfk}OP91DgG~nW9Cwc0cLyd8y+-NQ4+@Ni>|fJ}n6%PSC>F!wCS`I7 z)#y>&qT#yv)2`*NQ*AyZI6;~kLsyHqbt4{uo3eO;exaQNPR*Qw*9HB8Rczx03J?7w zO$ncb%!ex9Lwi+8JvnIxM?dtN&K+gi5iInc*xg_`ggT;F?8#a-pddUbJ5XYiU^RkR zv84**2t-IA5YaM}q-apnk2J8K(YZA`f}>iwIUY=qL=#wvt8n6R0*SO+eSCm@nB0ui z#`N6oak7$zE(vQayk6W!ugoids9Y62V&4!4!&bUfV&m=o$j$mN7}qNYg>gT&JkH31 zM$@9gT+pWV7S2b*8=JTmd>qsS96HTsJ5Hr(#IUy^_}B83e=j(2+V|;t?g|>EPjfSZ zMkN|xO!HWHn8v1dUp`cGABlYxNo3G&zN{~*WN#J0$X9d!H0co(Dlw-`Ll{c>|w{;ix z>!JVxgYPXGN+Ahdy5YztJmLU3`&c$Wm{(y~hB-hVi^vp0-28=NiBC-J?`s=dnKRL8 z(G<#(P+k3@fw+8wD|>EhW;H^swSq-GOV7-wN}iZ~U*2d9&UY^Hl1+HFcolnn>s6E6 z`^YVed4?jlod=k*T_!Duok#OG=8oX}77k1e$QI*r&ZOhsIk%3lJM@^Qm`*HGnhpHe z$erOlj!*v4)9$#io)ULWObka6(7w4I&zi82Q$n{>7W1;ia4oXqeQEPeJ$&BvD{OS^ zEt4ygdkb9)BHekP5BGJ#zMgJ+)!Yc8bv9excF6ktiN5P0tW`jzYH0Q;)8ikL=`czF=WfcB_8T zu10ejR(`N5+c;K}7V4b-%@w2HQexwrEUHP~fqRmue|`1`Gjnvs^pchiF4DXIz}nTg zZiA)ag5H;P;U~}goA}EjauYAF>0WAoDW1@zxGYU}1l1*XKZrq!t)~*k1@cUHIsD#9 zq&?fqYJ4a0W&aMx%1CmAFuf5PT3`vl`}Isob*P%l9c%qh5F|E`J5B0I7~Q!47ZVEC zST57Y2Q#Jb#M7TG6g0jQug7b11PWL4G?bVoP}96ug}+di)Cd$H#uue7_#d{tL&AV4VPLOV5erRqAw0#!4cdZW8v-( 
zO{UW;p$k1rQ_h7s{qCbDcSR>zg7GiUvaMX#Ik$uh)>lxaxpcsrXq*(DonzUuzgKV7 zyRx!x&=@E*<07I^jlrFF7@VT2sHO_?FmkW-WrKrTc_!SE*OG8Cgyu`fOK~`NR0#de zAO=(zTgr=_)rwaNOf3xasxnXaIGqQQn*p6%1N--*q}@H@TsRuykjVk0JPKHRWf1V% zbwP~qBKrwhE2AKoXqI3tC$1%gw*#!}2oy=G8mwJQQob1IJQz4w+^(L4LrCc$fZ>bi zVl#unq+ndAf*LAJpWptjo&-G%Am!z6*%!{2^a)jXg-Ykuv1(0qMRQtA=O@1257B03 z=S**<&A*OMoYGjU@{KyDqvBrqjNUFgwBr&wdu6#{Wld0V(!?lt$UL*fO(qYO?kPUh zR7hnwm&!|_XlvnRpr7~Z9Moa2UBgK(>!BCjENJ)b<%0HWTl(&pGF!W<`SWuPOSOQE z+3t1@p5oVNobw-vYu+En6y`vpLl8kzUI=-9fmO&PxYrrPzC?wp6uG-lNk*{1iB9oG2KK(G zBVs59*W$vLi5ymN26b-EEAir5`Ksm{ZkUOrVtd2pnYmBgWE6U>HqqlD#^FIPMT%kW z{I;352ECpwCjYra)=*$yZoOV<+2``3<-5+C8|FNFBkS7emX&3`qfKAc4l~iUbo5{B zRN+5+)sVfVUiF}Ejpgegri0)4n?tsxCvIVS_t;F9F^e7E)4+4ym*pjU%kFlSzWrTe zjP)~fdd&0Fs-q6=PCU%)f?76uXZc{%@`+QSk_wS%&2(#YRki{4Zmcx7AM%t2k3t3YAsTlAzG(^kOB^*VU4}HymmCm z7gi3|^SudAL_(%axcp!aQX++T6}tEdaMmOcLKVkC2vx%Yx?tjA<89%RhFytj9@8-P zmoByOZy1M-<>)<&trAUscBSFtDF__l`zzvzBha_iB;l0@n-39#CSidpL)%+079tR1 z=$17%2dT-XrOpLUAeozyU$k@1FIT$HZ}X`xprQIl;jwh{Qd^T&FTOd^ zIGK$Uhxb{9M?OR|@$OE3S)TtyhMnoRif#;y;y1mO-O{+U7A>Y1yI5r;*irSXILkTD zt2b-e0-$P7bRwtQz<5f!xx&w_tls;IQ<&RPF_Eo1J=qyw)=c7zYGMeGovXHXr=veV zDKkRf%k)ivz1Kc(Q_)nJvTB~+LKw~ z0KIWTs33Hnz*lErgn6WJjKHvQ8RWyN`DWSa+g|}rTpLP1w=mdMtw$qA%?u`3;8?~4 z(^bOue0)tDaj#P)MN#1hz{zu(iu@85_9p1ISNKOsUY*xn+@^l*_!FN9JHmAQ2hG8{ zx`R!3cH89O;B4$|lX=$Xyz0r_Gi%qM|1DcPwDzm0+aiiM}&1;)3BVhprs{h z!K!8TPrJ6J%;9+#-uQ6Q>wR^EwWcbI(Eec(A)qlG68tw!kVk#-3VM~-BQd!qcvmHw z)~jwTeJQLoXG4l9Ur$B!nS`twc`orPOta{+H>DEu0|W!-rRmpJ01td#kc*#81{@dq z6)xWZIGqi6b81H;T*7ZJ%XiOkAeqafIGM)qm@7P20WkuBMapMNM@tUSwVWC@hCf>< zK>m!5TOS%Mh{T+RY56P2X&*YGtZ|b=XZ z@mXH^41w!{io7;C$sfy>K2_+$@GLH)Bt%+F$7w=Y=tF%|?}tRK#8Vx!3&Fj^9TI!H z6SJGz6ae8zl_k9{}>{jWk7l5ZU zA+x<)UcwCo@szIzFA>-*`8d&o2pYKx~ccb=Z5WY-zdp?EWI6QVKXkxoFBTf zwRH!r%vx>T`#iUg9P+sr=tP`-8F+FgB23FbT<>v$%%}Nfl`{4Slf4+kgwWXOcYj9b`+c70`~3Rnyk6&3GoN{X-uHIh*L~f}8Ns{0g|ret zfdt6TL0vPv{g1UJ=;hXSN1Y|W}V5>Gdl>EEc}rp;9?<+}25)P6d<+oJv6O(+TG zmT%(awOHAsnO3FReOxRm=lHKY{xsn!8#vQiK9$P+e3GDP&HjXyNot7tl&R?U(%}@< z3}Ke|4mw`|uS(BY7qL~&pr8gn4R>z)N4Zl97P2E=^upR)dV4WsBd!#8P)+;KO{ zX@Z0{P0D9e4F?SK&8q5J`wEPQWwLU20z`(WX`}_8KJirJ3p#Ror;$iw6#Knf{dqeR z>7jL2^NN&ivZM|xfAzpo_I%y}EZR!(*~=LGncek+PptXdCw5$~^%k^a=hkHxZnKCl zq!3H*K7pQj#o$XP-{wnA!OV~*?0UU31as89(Z&H-P!-=r=|n6}LL(TeV*M8fC=P^K z|NC`;bP4->M=)}A1sc5?ReLV#$gnN>Ad>Ca$AOK5l){K&m}NT(DQQblWTKUZWt?)r zv}f){c82O9ie_CSqlCH=WZt|n6S$yhm)v(B>zR!VSy<-1jLD1#1{VZu9B}e1e;rj(*W-?RlFGPVTJ&+bJH+^_tKR(fIAw=A(UXovyb}iR)+;@wcl5 z$Y_Jg3$B0uEql|`$hB4p#a|+#gXfH2%kO?wKXbb(uzujwt0iLmK&1Fg5;4VU(oYT! zL|G)v8L&xckhD}5@l=Rj@yn*#Pci$Zn=3-&I!pLY!3epWQHBV)oxKwPEt*)77^G}; z2e53|_8=U@66!`f2Tao#;A0r3?~<>8J}a7@y|ww`e8^wnk&sNEvg^fa-*zv_f9Luu z4SsvhW~-s4qPqKsWU2n8d#zE6c=JQ!m6-+@pY*2~oJ*Uo?9O#h>6>;I>(s9l!p5$* z9G`+*pmgJ18Hkc~?)nBcUlJ+H5XK61+?u3G-C}DHngBcS$q?3RZhGHd>I?!*{OGBz zVi>1}2sYH!j0Z#dzDmV- zl7)nt6P0%dm(}py)aZfU?tU~)3SGCt3x!6)=gDYs1evN``)go2t7QQM1IGp|R=juY z)G8R67;3o^($*dCEN#p!f34jqF{cxm@l=@@$}?7%3sRoIJFV7D1>% z^Ilvqv+6A}mGpn|@a|X7L#oset*K53zI7t5_#9VOC6`@gNG*FCJb~7~dIF>rk`S`8|D#L~MvE<*2XR37($W?Hr|~ z?-QHSSzy=3vBg^hN$-w&XRZsu;Oy*RexYY*d+p=2h&xe<1n+WTxmO9=;?T#;8Y{8T zulsppV7>$Wz2?>?b);mM%qXd*thpO6hu~o~K&s%iNlP%%P4L}$oc9a&YtA`{2W*~! 
zYOrx;0Rwf{Xy}Di|FDK zC>m*eOqHjc_APQVzU!7LP9bgjkpn zo*^OT5&M-Kd-xPu`(58H(r`gRWm;q4?nsfC<5psQ7X8 zy2;I5H!Vp|X^0ad{>J9~n6yb6|K*p@Bf6|#>jPXs3BBc3kP28o!f`DsWF{E1vmgViG@F|7^aIpvFI zTVAF|bH+>#yI<4WGks`MURsff5xqiBM@OMF_d_NrH9j64jl+JEh#-7Oy?tk=NH0op z=@KyW5B~^VLXU$h{JjdC2ek zOgG$rnl0Cuor2HLe)~Uw%>b=-j+SnDC7+P`cGuj_U%0CI87&t<6yMbngJ`;JDv%GO zAY4M@OzQ!g`KJ`d%9J5}pK9D}Jp+r6uf<-Y+;AQ+!FSU{pXpfKUY4})jN&apR|i*j z(XtG7TMVsdU!D|ec)pvpxW<~D>5TKMG-#d-FJEZSoo+bXbuG~?<0)qNiy|am_Fq#> zk6C+vAgabREPPDzXaC6moE0EBNjd*J&66m?+#UDILfdG?52r5rsE%JAUjaa3m-It^ z?sISMQOU`?DXSx*aZt}y>#X0JybPJuEq-wx=JQ3a7Xn|kNm;e`K3O3nr;EpV;2@{h z7J22C7bhz|=^7?_=DjcO1my00_xP2%IOT9Wk3sR>2YoY!*>U9L3w0N)2g+K+dcL-6 zAh)SadZ5%nF;jvn#7;|%B^+@gZxdq>z$?k4h(ek52#K$gf%^U$TCZlvVi|)}KGj7Bplgx$}fMmzU7YFl+SuM*5z$10b zme>8Q_Shq1|HeNv08~!%8xowWA=kJBO=z$7FG1J3UFKS)uP&j#k#OP{*8M}ujFsQ! zN9q!Ca+Y@vPL~f*N}oR5mbhc@gMrEQ9e=mH$-}ID=yVnt$B?=J0p!Ypw9{1GTt7*O zqdf48u>OIVw>hiz1V+BhEdXHtj2lTw}{Q=fZCh#k5!0&g?st2B%GIDLG;$$nA+_&wQHpoo-6ae zB7B5fi;Hg?wS0)ZxYZc+&22lNGt0s)J}CbATcj@X{Z_Wszm$bTwN2{Ebs>NdF;L8? zD9P#ZHeE%Fs77|W@FU)6Z|9rxI$S|wUlAX=nGrtogPZOm(%}|mXKM;#2GzD>R^C{Q zXNea-UrvU@mJAS8CfBGa_A$(>@CC6qX~LBwyR#d?ZL;5W-;4coHb~b3@#UY{R-k9S zgpzCL;F6UcCiyT(iy|uNF;if>$TLy4 z?bn&rVReD@tBQJr|N65jz^sz(MRpf9>i~C_{>!fieZ*q&Fhgy5;_ByvsB7eg*VyIg zskO!TEYi1#78m5)-F3{nSdarSqC5tznv#~dJL-{r>SU0*f=h!d%9DB!0C%;o1c(>#f=L8!$oF@fGdb6ndjNUMa@+Sk$7#e{vroXgk^K11MD>CZnnK{ znPbbM>fmh`i;M5aqzFK}b2Gx|KnzMG5>q4*L4N;l-ytq3NPTOATmnk8b1+o3d}cd* z6mjbtEQ)O$?q{ycRtq31HPEz%KNwJTeW*mkKBS|FeFkR$7fZ|1b;kSUg+Vk%4+sV} zh}9!?3`s7nz-WhO zwb{qlU>G&i*PNKTB7cjg`eY)WoBwfI^w-B}=f00^8G6j!%6}fs8#5etvEiqRaO|COSnJiP5Kg5dytMHx`B)TEP>GBBwwG@B3mUHEx!^myE(08zsbx9pkw&x z$keAIMw^XvLM)>NfL^b3+*{*I*n%30Npk+HvyORPnO}TWgqk|c#YYjx-|&%SN~Ez^ zZg>#TI}SzJb=K1NrT8-$yr-vAst?`0*xk4u=ed7^RBbH|?)^on|Bi8cQ6^_oJtVj_ zjqD=sBvI*&zWL(B;~=&e&;}xr>#u>aTZ$r_^%??L z2@vUqqsuIruL53}zI4nRY`9q|(-pmL2#yYW&DQiNw;N<@pYecnpyHuqyCF&7d@UZ7Y>#d+^hTEvSWO)30MQwa!7a}vo+itQY4iCuo>QEJOm#s_X#AEF55 zmX$TP%+!h%zsB3&25hvqfP!y7dwr4o!uLK0puL_+kgf1xm5mR)gBTV?EIjkRDFWfS z>^cKN{n76RNtnL+&}JEGXyI_qVvl9gc?3q0-k+oD!)d_$rA|3&d+sA$|ad`0c-5yo=MZjDbsKa zwf;m#x zz)-w9*3<9`h0<=$kF4VQSLNm3k8M1oRnhRXn;H<)F3``l$h2*(Dd6r$ONc}YfR32( zOm_?pALC<{i-I3aFI-@D;8>h86?G82XOVyTsq*kscTuV=sz@G6^58ir4FM4TC^hI& zM#exr(@005Vd>7xHdANLr*Yhli;4XB2ry5S6h_!pwf7p+0k93nuX=iURmty<{9bVW7D@%L^Z*jC+2-YeO-nvthV7T~C(HrCKV~(ZWy!V*G=( zFaDs&xd1F+;Otd5ot8yDMA6AhSRC{ZyaVqSp7E$it`A75(1 z1d$~3!PE!gB0XGM(y8?+Fq;*HlWqG3-@?nIYK`N8WPs7|bya;WqxJR`p@vtPQ1Aj* zo2P&GKHU9qJIyqr^glnAK2{K=#?tI$g?eF`vu8Ms0$~E#S2ZRRGT@U5j>|(b4KKYt zv;XI_31@G0WeN#8M;@&Mk{$4?hY|AMr zqaEoX&;O4Bsz~sH$_SP8(b?@m2Em8q3yJ8X&;g0dt9MGZKJCsSikvENf}ZkLaj0v* zKRiJ>`|53x@9F6n@=bW~^n~&O@>4d~W?(@#4deVjf3ZQ-C~}+M+KBajCu7wr$888oTL<@j!bT~#E1<~{^ zkxtkBkRsFL?=23zs4-noE+`FTjnuw?@+3bo!;GH}sF0G~A-v#NOE4T}j`+Qs`{hs@ z$1i?8%1C#eoOk+Z~7=C=e3+e(jCQe|rHCg`qR1w>TmEPt2n~$^v~%#+feG zg^xOGzZt|FANi=;@e|O6Mh*$SkTdrve4XAz7uNb%c;Fcdswgns+LtK3z02?)&Y=?K z=IfrPpXuJ~im*wP!1?FM7&B9b{0Zd17XLsU^24FtOGLmD33quh6o&u(hr?vBWp*!$ zaVG!A{nRg`<$~==(|%SA(hKASMd=#)I*Tfw%B||JaX4yk3Z*+Zg0V~b7`zF8)opOo z$Th8`W;3|Rzr%P%QDRW-F!jI7bktS_EY6bE$Y(!!^jI3c=1>;%;)NEPLb+mO}Z zERYo|6cjW<5HibGzI(p6sO9B{G`WE$DClb3p6ASQNA6GpX|hDGpsSFIYw6iZYKk8Z z;?^oWYkAT|t^VsP2JPIpL>2GZieHFy8t~g611$3O@^C>yD3!4(I3_4miTL=hf@CjK zU(I`cju0iC6f#>jTjYu*b!L|>Mlzojm;^!wch`7jaShJr4$rEE_uP*BYI z|ExgMV38R~81^tfbcl{k&PTxgPJAd8MRVc>Y9l(#rnD)NNAegjghRaEp_*BV%h^i* zg{V*9MY14~dlPf({}Y&CBN(TLU0Sz6>}_xAy@EtdrJciF!raR_Rmv%EVT4lNot}iq z^TO!-Ib=JN=Q7`Q`j|h888kU5^n9$a!D#oQOuL<{hy_qnBTi+-O~E)a)BB+&pP;`( 
zsWCYia5FyQ2qiz{ghe+pRj6^nw3>XSRRCQ+X60dTr$9CWii0sPB_g!-UI40IcR4i* zS^tIbF+D#j(Y@4^R%NiF%VOG3agf`%*{#HN%JtDWW8UvK0l_f&JTag#7T`{QRX-?+ zQTXM<6gkhdMgF^!QikO?;uNK}2i&*G?>)q1!-_H{-J}#{mW%=AzW(n+mF+2K_^IPB z6PoFA=_w-8@s`RAZc=ymjOHWq>N++8c-ghDHBj}EaNq|or}iHgqkKc>28 zwEyl-s1Z+^zSR*vNuIF`SzoFIck=K-avZW&rpT!vZ zN8JJWy-Gy^LSY)mRArx^%owV4GG}bh@S77{d2@(83Fyq{7kpuJRc`%9xjqpPDg z;wzj7dy6b;d+Q9;WUkO~flhqcW(qGN^yGQ&22AVN!>)~KpborTU~vEP_dXb<{Gz;@ zhj5A`naLN4HxKO`&#;5HNe*Hj%Ex$M-vcCS|B(W5fZ7WF-d}{zf1NQW zKMuz!YhVf(PZxu-n`h`{A^jW!KbZMN2JadV9hLhLGFWj7J6P+H4{Q$Cq9yn22k_>; zA%;l0#Z`L$GWiE1kANG^PV2Vh!rlqvfpBManEw|A$4P-u)jdD?ulX;WVjoy_y@hOM zeGbB0AeTB6ciqIoa0Ck%{z{Od9*C-Vbt$F<;h*=dWx;kJdr+1^h7|_3IQjhuUsQZJ zRq5LFsrFGU3ccpv{F{F-E5(8FclT^;^DdJ($o1YkqKaU$3AyO{KjA9W0<5%!;Wua< zl30_dd`ilZFeDu>8w&44U*u|z2k^}&{KR+=eL+O%0nsTP1Y)^ox%vlWYtO(N6KR|h z4AD05lNLa-lP%`tUaI*Nl=y_Nz4r6OJ7IX1qxA17lW{#D^7fNzdH%2G_-%cVP%yW) z_Z`+EzZ)AyJY1C!8E!+?q#joxP`m@B8(Tyg$cbSTrYUcL zC)I#_Q4I?P$ZlMuAyjin?oRCo_BoH$K=BwJKHj4#%mnN=lYi>9HbBWBNB@sl9tu$h zm?kB7j+n`2^3ym>m{H!sm!v!8{{;G_(IbXrdg(h~0u)tzVWmTnO2i1K9rAv1X5d-P z7+({VE`Mu23MQ1SnX1Zb+=%)R4ucg)V3vtfR)w}Y(NhDG1^Z|X)@ARjI)q!@7vm2@ zHV7%+*nds#F2iqNwZ=@2k)P($`$}5r^+!h=RQu9U%1C0YWFm zh0Lx)6|IQFllk}q=7P)*p=)KSLbT(t*k>iOSWgUgFL@G0)At~$p;JswuiXd)*xNft zDc-0Y>2yGsGC!ArSin zNu!TGw-oxCw>fbjm%{$#3T{LvjK*!1^&n9xVl# zTLeLENn}#mc~Ao3+OG5)Tbmv_Z6|@;n>WbQoGGmjv#RT_@GXi&>8W zSJK+Dd$^XdjSoZ#-=@d2R7=pEQ-eXR8&G4VSB?*n*SM$S_ z_FLY>A_xASitazp@{YD~sOUVFw#KUJn8Yo-Qve2}o>_@&RUnT*^yZ1#dHk_e zQYU4f(pTL05tr9OZoBENcV{gqP3t`hW|tP6@~*U>E)G09kuv zpRAh+m5RrQ@|RQMo(Iwy2o4c*5NO42G`0(kfeA*_mWoS_S2f-k6t2!rw>4SbunOLq zikKW$9zCH|4L*?!vc-b^TCy#N7^1WA}b_;~iz8k0>MPRE7_=N>V7@g>k3a!G1TjhqDr2wS!#Uvki38+1Hkr z?SnDEL|{eLjAH(&9B&P#G9#%}kf|P674ofo&xG=@e>zu(^3$sSaCM?nFc- z|DEdW{_)&5c$p(H6jUOOO|b{C3_bfuc*^PG|0vz3l1Z@4!P z+5~H6zou*hEc5Dsh%#gjOD+p-Y8n#;Wi|(qHcrvxFw(ITxSwMq$Ya^gkq!R01fMXA ztWGQ%&m3XBGChD>n%5HyKTO8oZ~$&GF!_q5Ek@^n9EANyjTJx~yGl?I)Y*$WH7YxUT+u0CS=r^4mGE*ID8(Pu!yKO83 ztbTS}HZKxXDrR&LnCakaTJjwCOuDsWx^ER}Ew8m7Ul@#~YHhA)oOyG3*ygy<@$a`8 z+ym;3$8Xz{R^LB}a($4fqK`Hg27l=;S_yZi1V0o1amhv6sEGBl&J@%1dXT1SX(UUD}IEjnfD#Av<@j5oI%1I*-C|Mr3B#7 z?m%f_B_fN?7GtsOE&3ddFd1I*i)+_g<) zjm|XVoMxBa!qoOBt5-STmZsaqn$9B|1W5I96OYqMzl4%UdL<5m^WrsQA!PV3l(lg{ zEiNTwTyh@SOpL7L_0mW0PdIb;&lx7Ax>(2>Z)@jL8-k!8F0y>P#7c-U;gYhN#x_?b z?}ql2Hy#80Un$8GfhSC9%5+nB{r4NxB`QOkA1fP@^9dy(;@+$6KMyiF^8L<(pIi|mo|;kP9Cc8HfDjD9udz#yf|I8)rrRLs$`;M>_Ed$-W9Qp1 zbShq_p^pD<@U`U8kOz$Ua14f;P(mZkz7gEja$OuvWs_hyCY1j=ZEISnDd5n&G^=^mH&r}dzBDx{h_GOAXoOqLaR(HM^!iSB7oLM|6=IP3i; z;V@h4xZ=j^wsDm`SFd3VZXgbnYM(_b3~6V`4Z;$GmXtp^urDE&;F$C`@02j3vkhO2 z99PAS*K&Zn1Y6T7@(U%_-sd{BJQdRD^)i#5Art4a_ALE;AxuC>dbPFKFKzxB-W@Lg zyJ)bChnMeMu0IS>(N6~i#-^rP6=wXY+47iuHDi-a1N5CC+z}5v*Ve%9B>hmK0u2Z| zu7?~;$3SZ{$lD~Y{^T9wD=ORVhqx6bna|&052NmWC8mHzMw#Og-7~DOq2qk5K7x6d z-$u&vC|C7*S@UmP>OtiB+A}z}I^$K>*pJR5YvJfc&J(-=8T`!a6f|T-rzW)?vY&1Yb`2;Xj%uSFlT(C>e zFJ>e+#R`;nyAI~k#*VWp+x0Sd@8{$ZkLgYcn6Bu~CiH#M5y#HhrQUP0H#M$2lKFRB z5u-h%K1+3uLy9H!n(_s4U9&w?ETq>M+LP!02Gv%DZ2Gso9RzB-GL#Uw~$?`Bw6%wc%{#s@y*|2f?$Y`q^=1yU@DT zi_ppXrLj+8$Z}ZpcHhSiONDvRfjchh69;GDyU>N_i*XP4qP>*v(OvxO`uHOxfsWx; zH)EEIeMHxmc3LnTSNjMYT$BsaK+;f0F3fft2PppJno&}--u*M5NlC^LFaoDQ0c^3p zi+=oe`e98L0*EuzWY`K=h&;g%R2BS(ZtA=D?eM|hwH~gKI^?m^F`h;MoLGK*tuL85`EVpR z#6}(hC3vM`u7uTEVe71hSx}I0VwlMGd{ta8YG$0f>zC_kIA_NMeji2l`?<6>mfxPQ zvXsmS#JE3=J5bqlJe)rG(`Lq@{_fnIjn1o`MY)-&Vn#D(>b~^cw`|86QK4oeBC65H3e+wTpwhlz6 z)9(NvT-?fSA%8?k25T!m)c80I$Fm20f*6kW4b{$+;)0Q9v?dpv!*kRK3vxp< z*L(f{)RnaT%0moQ&r;D?XH{V^?7E7V&?q!oc=SfzeUWued|bP&X`x||)EMp#a@@%< 
zC1y4w=CwHH(mD8bfjnam`%U*w286V=;M>$rf~&7%i!05~=7>{n;1hKt2B_!@SsCle zZYDkJ2v2@itbzl^2-gCAEDrn(Kp1_`V~C!pV=A(L#i3#f^QFfVf8eNJAsR7IRj7T> z3=iu5{)|H7B9s~K`Si7)m}vOn_snPd;YcGqS4$eLM)SDCdH5w*Y6g;T7G~lT{q2$k zJzCh$hKIwgKe(lO0?bP7_elWMU*0&d@bSaFC@R$JyKQrGn?;(AcIkL6cB_2C?Kry!J3)FYJa^0c%I}v*(4o^40s1kd+wyt6|`88~rRb^69 zG1l|QIQMDbk0niz3Ad9q6AH6i7Ww(kV68~fV)3evmCx6MP;~v;soz@GTAG^Cq_N$*C}!QaP@SuF_o!od(p_|m1b6EM)7?;8x7<>acYq{ ztTlA+-dPYD(;{S_9U9-SqT&9j;l0b;&DHmKSvao_Yw35$urfr&X0V*+?zh2m(n?X; zti;ad>qe_H@3>c7K|st706}!;@Ye+nvT?xX@dF@Kf#~{|lp?z+krSvym@##_u?XW- zL%smwNlH!gac64Oyv4`AUn>Y2Hx{v6a12SlAq1%7QLDPmj(Sw9P3g zIqSgSx7{$xL>WR^uW(R5ujtir+f8PONzexk^rdJ>%e=&LK>}%-vp&l z8R@UhFG{-Sdkmu_Z1V%06;h=J2*Mf8jV9@~39eoB=BaTWSkC}iKr~Gs$XXjzGDaIm zlk9DlWg9IKDNFl~lZd7?QSQF8x5u7h$n1RyjZjeI$nc$J!Q**=oPd0}NUPsfJ_BjI ze?fpTJ!kL?wA&^;jp_MBkHrqfbU$Am+-VW%>_}>T$E}luC<7F2Ni2pZ!_+S^+!XFd z1s2UP-+Ya4I>vudZ#?q{qSv+=2&nWh$dVjI_8OtHPlZ${AL9{6K3Qr*Z-J2K^6r?5 zKEK(U-RIAuE22IH$k@htw!HTh%1g~XJ;Ph^=VE{knGFG@TY(v6&%;{xLjnsQ)ifMu z!eTOO73yQF<`*OfGo#M|zsqB^*ky(7S9>iK>ZXkWwL~e@J{8%4)X2yw8+BxN6iMNJ z_nRBTh~$fs42*9+h3pL!-IJb0$+VG3Kx8n0B)Vk9WUw zOA6jBf;jdnA~cYTkK;u*G;l+BAhZD0N?@9S;k(1s$cuhm3GNR^r6%*9R4fc!Krkk| zcNU0H#w)iqnsGP@cp%$SxB_=x^R;pKMnZN|U|5(i2)ovaHtKs24u%c}-2^}mUUsqJ z6ELQiMbY#WvuZ$wv{bfRHhh6?@+dH&L=pr1%8dyI&Nq=Gtf~sHy1L!!`Pt8NE3lq$ zlNv^+POFdTSnDKgxV;qy+(6&tP9tBW*MtD$vv}nwX`pE1d5)Yq|HWvNqW2-Nlwx5L zJz7OB$B~85jMcd#&(Fusz>s#ncGC}kb3In=NGH%eYmIZN4zE7KcbCzuMka-P3sCr< zs{qe42D1Sxh-rYl3=hZB*_qXEpZWtNmi~|gLO2pV3fJSGyNjYR9iWl|%=6LgD~CnB zkggMl2hngo6ut=^{VP$5)>E>1EKzrt~b(^%7Eq zIu6-Vni;>^0bakPQ)}GF@T{x(x2;iVYuZ_XN^#mrdC)kvv`!oeSm~UG+jbst3W^7n zo?a0OrDc=*%y!QQTI4t&sF%!eU$JyaeFC4&qKIxJdRSChm`d8&HGyK{thM&}72~dR z?H6Zsl;T2PXEM^KcdL_`9l3B;FPu_qGIaBcN=iuboIIY-Mp<*F(k~`Qf1o3~1}HJS zpQoip)Q15{)^79XYzbbKg+~lZI@q!+1v+LPftJglBJkS{+_XU`N8j z=GYZxOH~L+L%>S{woPkW8+4MZu0xThybP#TU50MvKK8;x1ZvI#mW>V>#TIoo5;MYC zg&Ow0N%f~pnc&cwQRNEKFA1u(tMzm_dY7H@DdyuX#h~-+0A4UWCrET;Fy#|wVEH_Xz%8z*3&r4c#=Q~9= z&aIvzzcrVoQe&wLVd_7n$kv-5%pktc4cMds>2S;Uej?)VjP`E12RQ(cTm=B_YM%<%&O3y2p{df@X2ixw~9m4_5b7-B8%0M-1TJ^kx z>bhEe8Tv`BTAxn*b3eEINx;WTW`-eAI4lQ{nn#2B=HzChs!o#2U^#DY&^QH#eZw3? zk$9Mtx8on8>49z|Y*}wUYQ-I%bGxEmB*RPXh~$@d-jvGI(96eZT7CYGL zIGy5D;`Ss4mfPcHo*$r4=0G2HvE(Bs5kg@O-ewI?-{j6!!IdFRZp&$H()qThgb1{_ z*zKR_xRRL+WpOf~MkzLsK_k31Iqa@FKE$0Yp6OBIeHsiqoFC^pWka z(Dk{(Qm(Ofz1=0)pu!!}2mVO8wG;@7#df^k&v5hK2WzA{TS{%Loa)RAIY$BQ+g1sc zUZLed&!7w#kqT-iO^Fmvgl9=Bp1WrdoMPkiwj%dmh(|M(S5AI1@l*=28-^b?i%3pH#6)*6)4 zF}1Dj4RRKq)$eaS8^8EWaSK<+t;e-c^(}zyf5Ng6rSw;xwHylhJb~qR=WpX@YT{eK=YemF?7ot+LLjAr2ri8|jYK3Y! zK%>J`Y7T@9OufyKHk$zsD%&=Wt{Kjrfu(RvpKiq?R_VpQ3&B&gL>-sN#?Mfg{JdI( z#(;bY(q*jqaG#-h1tY^2o)l7cUSyAQ{{qmvcR?Q(4&`2t;}6cD>7OUxkB;CG1w zosef-T%~d?f4q8%cWZ62TAkAkuWxMqNg4chxF5 z0pKLhWrEzA0@?|9d-ELN=}-13d;$rG+naQOvdc!Bpq&bd*V4Qu=t@Y4fu03yE`9U! 
z``{-jg}A$Qp)^2=rHVDvFQHw|*;=&5vjtiXJA`BY%<+9RY>_$pAk)EVlh*H3-6;~} z1!v$t%!M8SuTNF3_-cT?Bj7f|z#OP8U+`3j283kJtcg)2G-kWSa` z6TeE-=KsZtfQD5MqY6GM8s{dqwY6Ay?v_2{%={T$^SKA$YsR25U>VM)hQi{mxuNPY zzd|P#6u@FdhuQYSTnBec7l;)x2^n=Dnp@MMhegl9KGm!Oqv8~jl#8>#^3L^|%Sp~y z$L__d;oGm&eXx%@yg>mqEqHfhz^TOaRo=#o;;&nv5ak-Jq^$RkeF33T#{ne7V6KLy zfr9>k1od@@eHeaA5WO@2a_0K+dC46BR{MkoMJ*)T>tg3Wj0#Nz?b*}YIAx^a?g5M4 z>l3hd-qOOL_&&>FcJeGO+dWl;wy-1#St^XmN7tMpeHLU-MUbQE;-uT;&=i>LFXadU zSK_ZCbA<7+{dh$(5ay=N)P3&3#OxG#UR{45W#np|_VE!bxPx)nbjj6HO6)6`G}A%2 zyj!C%|5%_popvj@o)^+!NwL>IW2GX;QBn1ti({i;<|s=e_lZdU6ePG=aY@6j$Y-XS+lJsLCSR1kAAXP~xgD^&zp zjb>Z-#|KE5J3%-0g~ELTXwFot^BqEC`e3`c@)Zc-V2Q{&!;-St>ovT`DO9=d9d+4> zM<&6k@;=(xK$xX($_~uSr6pr|kmGeCIft9UN*f16p>xD0GyAIVPZr~y1)+SRF{g|v ze1=2myKqH@Gul%yJmJ!C)PkWVhu{~M4-Q#$!)9&HH&2XN2=n_!ua0NAre-~(OzO!D zw1WY-3$GHdN+D9n$m~aIqv;g{>%(|LAXkaVbPLKWme3f-7t2~7J;S!sg)g`Iv9*2y z@xGvHvAkskJECTm=0VtHxCt1b7-!J&H~wX14A4c9d36_CyrMMT64}68Mv&`;wVSu1 zAM*@HK+f|jD1k{+3P9tkDt3_NwIQ7R0-Q;7x^4HON#>cOa(BLjkVYzIJljR8O~#K# zuO=;ztp_W(5dF!{d=pgvGj*tu9yJ9wl#CiH?kU0MfURWx`y<}jG znt7bD`IlFv>ZMXzJADf8Q}XgKM}MndTkS$DwW)HZ&tArbwB;NQeWB)j6S11SK(ea7 zjK&;op*xS@Dw*9|eJ~)GE}`*Qh3Got6zjkrB_R%bA#wgfTVht&Qt2C%9M=Rwa}^$XUFR~+-P0O%Ycuh9*0eKJt!7(7G86^g=A+gje zEpt{69NmnQVA$i#-N@wK_o^%J=h7l1V7irD+%N%j(XzT@VSzrc%?Rw}z7>pztwt9?ET(B}MH&{Dj>6`& zHa6B-*kW@IxGH*yj}l;&=;vsR)-)i#*SKvS%Vi=<8Q8vFO^9FXAx7Q$ieM&&V;I|{ z9-O~dhj@#su+Y5>dcl5+KvI96Ntw+tTNN@@7Z&Wv%g78?`2){Q8(B-+LZb`Q;hmsX z<^bR9B3As2HuNb(vZo-`!^N1NHp%PWA4s8bMdI3ZbJv3i`fxru2`+ncsYB*15XJ&A zrcb}Z#-_u>|L6p-$bL3VcwC!>=R_8%dPNFV!Kj{`1e;I^@~cDMF_D9?mEjAJ;@%+n zn>zT(SffZYN|xmW7K!pqL(1ehtU0A=Lr@ez$>NaavRobj6${u2^I;>|D?#~pve+RM z=DhRP`EzBOgoXmbZ07NM2phtLbldy0$x;wkJG%E#aMW(D&KK^c!xp8A4ajuoua#k! z@2~nW)IJNx8qwCkBHiLe5|F7@bhu$NuUJ|`mGbhVhSfXwM8uFq3CO~+x$+*a_lF1X zuV%|^hzmn0kvel)5BCpbsup;Pil{NnQD*gTCyQ2DZpR9j5JlQNn#Z&}K zhkdaxC!bujoA%z(IkfB@g`8ivL*^_L{s2p*dRmKCLO!JPW6O>OF?=~-U9B3VMRUh6 z+G{sceqlCk0!qF4FTDe(z4L&CU+#0`6DvwniTA>07gHg9ddx*Uv3I|>wPo8mK%fcc zl=4K996(YG#loXjzMDtOAipFK76B+?J)8R5C0-bPY$v+@l)_usMhl$W7#8ywtY~R~ z)voi`d@F4JKFDnGXS{5hb+?#A3zr3Wfj&3bczx35M%!U%*_8=`R7#zNury#wo$!(K z>W^~dGbOv_J`JgP1j(DVC5RABJK|k@N@X84GYEzw#NeO`jeqh#2)G0S#v}P|vdIyS z*LF8=)(;pn)av`lV46J0uUu_Qd+qzDez+78-VMq10Yh05xM@)5%%(1GN(Wdy{Zdnzfbhi&IbK^5`Z9a3;EPOcadY#>1I$ZkJ`w&C{tnq|al-x2d zqCOdlM?Ka=>nNEyfvsci$156M0$oI9^xC^*$|I+q4|iL2=b!7Lkx_{%be!bXzg_@o zi&VGF(BfQP>g{rN5e78<0i^wtdk%u*l1yR@F2{*NgV)SSCcVw+Uv7xrzeQUEO2MZe zH-~?hmf9c^(8Fi1b?tzXjRv*NbVu@Epn1Ag?D_D7v^YELmf{dHDtbA7B!`B4F&IHA zZ|qBT2gvvpPhMoa86__Lh#$d|-RwpY_VvU~aB)8*a3EpbDH5igORKN<`_cfohM%g5 zU0LFB4}$N)m08ZOyS>QgfCf&pnu3`7p+obouSN#5UuO7BP?N-+fB^+NlC{c|+YZ4p z*>j7HTP3%~8(-HgvOY}Xr;c&?XjE)QUIK0ryhEB>hmC7y<41J?=a^T=8Df@08}gyC z7bVjeu(3QHZ){aol}tGK7S`i-KEL*i?6j5`k`2Bvwfple#xTjtcrQ4QPF*?89pfS? 
zZ9i85uQI~T*uw3e6th1Fx~@i8=r(Os3Ws=bgS`$=N`EeE^Q z>!*WbYWvsth#5ISYu;vx|V2;3Y0 z8Qc3%*>WflJOqbFhGv9ExtLoqq;90U+;oviITZR0F)wwmY|}ujh|nR&iea1P?n{cF z;*=-$l1TnBWVVx2mAWM(WF6CO3RqZww`tv#DA)G*xvBLkXY4Ns4gOJKp+U_E-FK2)4+iH&;9#npPDAk56zbpYNTgk$U!8cD?yLmSITb5Y(uFRbd>{|+F z;`{omQ6-C3|L*_B*^{6e@s=3Qg01{3J7)4gvi^Rz>Aw!Kor48%Rh#n3eF@|7!59np zNZIcO$C_X=ogy0xNhqe{$gmC1G3sAyyr6W$zGw2VUN;;seMh>n^Pc%%Z?P+vlYIir ztKOkUdAyHeR-E){VaSKzI;pl}l4=Jev{g8&wNdZ>7P;vp+waGG8y@a+Z`4$1u_Aa_ zH3s;44*IT44?bJw$p*R@i|>;zH+{XBUbCPs#qrmpyfO2|cdWYAMOXP+_!=}gubAKd zJ7gcSJr`Iao1e>He*?IGFQeCF$$`~@C1^B1BSkD~;2N}l%&Kp@L~F1wd+T^L3x?H5 zdN0{~tQ32j@?&XIuV>yR`IQFEZCRG-qhH-wN0Z3&_XVf^t*8E^Rx z$M7)<8e}{J!B*#H&SJUwdryd2b)a4`sk1c0!IkM=d58e3^yx^Q_w9!HV;6CT%uByu zq$lzyIiesdS5>-vW!cU3OReOAGIH-@Q*dy1WKVLeP)W_(&3bMkz2JRZokrECDW4Dv z!OY2Fr5tF~7G&~98aQfO2K@fVops^z*# zS8Aaw?{y};*)O0TU=@3iAD#w}Ed_WIQ0ro3X>oq2fgR`*y0@M=Fz>;Gj48;!_=1c| zk;G4*cla|JXAozj1a7z=4IIH+paT{m5n*vx=QJMHRr;&mwenuTNb`K9*~)0GO07y; z4Z4T*d@k${tK*Y5NyJoG*r*bC2GTB!4gzx`WyJ?8*z0G}-d?;y!NHRAr;h9P&CQNllN*Ucbn18`-SPrn}5fuU0Bb4&U>UJ zgfxSIq%ic*oack_-TVE$?>X=9I)CkJdx^|E&wB24uY1*sn|xf+NH%ATCCieB0&~DF z5YOH=ums$WOat8*#y^yXy3^=RvukJ9u5HSJs^-_;Po?l^P{)`e>TOs1UQmQa z>y@6ZY5i5dcow8=cnrMvvMZFL8<3?%FE-!muHgnVj7dS^i{xF10f0-%I!qVO5g2J; zZt=Z))$O+4P78%W`Q2GrFe_rt#bRc)dYSX|LWW!yUO)SJu?pNtIF0B%y>9SAPb6l= z3}3Qp4*W0zc^7C`7oMCd2#}rTH=@B>*kKSH%TTX;CVGNU_@=WcbKKpbBxnkEnFf>7 z6CWkzN759x7amun9rPrjRoX=R=Z`KfZRDnS=ZHvI*GbS#*;3#b#|?{P>NA!Rz}30S){O#cLoC%(2U z<0fX~i3IuEMKCMQUk!A#9n$p`<$W1+Jj;-%Am>mN#;_=spbif7!k+`JqR`i(71d7= zMtMC^eR8WTX>h$8j8+v9+sqKIS*@DeB~ju9GkOeDrjo=QKvTwlbHdjz0xM(cOqOJd zh1x+$ec{^kR%x$PA|y7}%B$2Kc)wuFC*!)O;6)2fg?}``fYV`mtx#by9i*iH?@pHq z`b{yTDUx@uF<#`H!oMBe6Yp&QbuC%7%)ONwwR#+iDLDo#Y1|rWumUUJtEXO+!LRw? z2-SxxxO^fK%u7oH70Dy?_)yE<(u}N0Qe!@I%bA3sHymtju#`)xFt7xmKm>{v%J1GD zs`GGh+B=QTU$6lY9ZW%&rC}2WtJu2~1t&~KEA8wcAqA`rm~|7R{EU!cg5+?px~!b0 z)3N@Nr0>80)W20N^PqpRmJ?t~X_+EbGYuFkjwRVH0LTD^dU;#8xzLLlAJ@pRt1CyD z{~`hLfRJD_b^0E5m?LZ8w_SPjaW8!y&qpWmW)nNqQEUiW0gHvwiDp0|G6Q`w&%nCR zCWVOHcK1*cvyNX*?jj!A{wuJEUjzXzZXI^|Co`U4)yi*_fM%^oaXw!-)V?(L^Nic9 zXY>#m=2pJ`w$L`^W0CpLJF8AFvur1-zX_ieKDq8P^2$jg4}NV74S@Q&Q{{z~O?sBj zJyYR3x#IVLW?Q&|m776|)h&t^a~c6-I7aACxx!fe=m)UdbL{HH6zZ9QxL3lx-@!ne zLqOQ(xP11)9*rA5qcIbwSlKP;^Z=teffRgNGXqURJSm*;j@D5=CqoK4!LknA0U)Tt zzRoz6S`P=yF728Gk_aC^!+coyItf_QKdGmz324omSV2sLW#426Azk+B=CfSSDqaX{ z2T9}>QybBXOyfU7CFEamifiD$z^f=tjc$;C()1i;^g$2Dq+wd947eE(jQ@iqK!0*| z-UE!d{;gPmVa8xmoM=>7&126i3EgkF$x@Bq95T*uIx@U4-XA@ngVvilHPTn<_>oG1 zO{7rz1aQ_s^9Vco{zC1tJ4ZidK+C~{^qyy)-C5k};@{{a#f&0DXEh0<8jd!owo6^m z`s$rCMVKdo2&Q6QnJ{vWpBE>MIJnx~v%xaMBKwUS2`K9D+8(I4?~!(hwQcfmBXm|T--(lZSH&ScLN?&EZzgg+zPFM-qC+qdgXCvHo~ z6{#~h-kzEP%iL)zY;90AsJmvX(_Kpt$FD5v6ks$@*auB{-_wfTeCXiVtZ0ia?$Q77 zRU!F(d$rebD3Qm2eKe7%H-G`Z*kp?S?LzB|&&GPj<)32XX=IZ?DyEeI;1&PJ?pI6m%(Vt)% zl#`US^}41aTay^yG^o8IP!>ZOo0}wy*@CoCR}76osrNiaax2hnu1ZUx#|8G|z*{&5 zg*NRB9QoG3;zldt;`V8l{XWIIS3_o<517No`5y-jO%)AxZX#^IU5ZXa*=*kG0M5ak zmqopA_Yy`kwy1iEb@`(muzj4)AAt$A#jl;nm^z~)PbtxpXI=$Ob#2}49~4G%L4Dhq zuN~DvyunxBw~PZH@fcmg$IGeyzJ;M16TvIv`Kww0jI+G<#eHJ1@^qjrqU_LTXHx8Q z!L;s)!3E~e@MI?J@Zf3e?oZn=bDn9j$nwjI8vgbSatte!JfT8}3}0X7^mYq+BBTE2 z6Az6A5%}jW+P1g2I#hWc1^n1IE!^MJaJk{(ijm0eDSzgkS|P#qV)9_Cs9&?<&17XN zDg-Y0UaUVSS zn#ItE70tDv$bl(%$cRWL9*7(ClP+tGI0v8At@+x)LgSWYu*-SeG01>FKt&+fEb5Xf zGaoMO>txT_`{~lEq?!ufw}^EPXlv)WJ94O+l}lnwWdij)KG|4dz7@8hjD^-``v>FK z`4Z6w77t!7SoAb&o2M=BAEg_1VG8;U8yxj;a85oQeeoK)_PU~{{-CC)E^p{Zmt>m2 zqY{xH2(f>UlDdhRa^@=ubM`Ihe;=}p|*5v>5Z&rl4FXZvfb-5T1eRXij636Vkc z>$6MknGqKyzjScc{LJ9|>6B0qug>OukFLR(X}A6b{B{)>2|@k!2lMuL*e$0%rG55W 
z!>IFkwQq{*Mz;Chot(wlm&lEG^^r`cV(>{g9@DvwV2rS4z~{pyF(a+GWnqw@gvH~y zE3L!kqUgYDH#b&~&IWY`BDgk}%y5T6>#*z*sBm;QTmhSaoOBtL&`ejP`DD10)vJns zB(w4*tM9$03d~?;)m_lBfW7#BztfAO%_!mfeTCM814|go^Io5R!b?i|T?qKMpV%#% zk9p5Yk1)KY>kz2-x}O`+9AC_VopPZRbe88=Im|C5Ke}}*VD(K@*r%FBP-ix~I7GT# z=1+cd1@3Vx=|$j908ist9BRZQ0bqPZId6{e`2u`__^n^5nQI0%L<4Wjs2}M>5e>f* zTZD2N(sN3K=fuTRz%EcT>jSr?QvsH*=DZn~`ndtV6b0G?+5uiP`r0T72?PlSeL<7< zs2sL&|EkruWwUPVT?mcD+k7%1%&}D1B0yO3Oly%=CzU2sUx>_7NA}W=AZq8qmu4|5 zB@8PWegBBdyB<5jLAPWsZD0TAF{54#p664H@Vi^D99LRi>Ao1O(5rUtm;4lA5TEDf zv-+b~=~@0#V)3@PIxBfzz=c(2{rMjw$l{vR76NCiF&oT8i^r#r3}m7&-6V^nP$l2% zLddL8N8_@E);BkZ@9xQ`;rSnP8Wf%wIPDa7Zyt5}rpc#i5oV#3`DBiHk18t_Zpu-Q z$dH=B^Xls*xRG$uyw`cdTGrq9KlD&m?2?(mVZus=!HfUmTd^|d0z+*^W zXaU1}>QK-*^hD{$QwLT_@JvXhg9y9`x)8k7VW#zQ#yo=5VI8hMf?bH9i~Dwo$fl4c z!gF->D@S!O_K4iPa)K4y<$){#cr)#4jqG#GXmi&P=2F9zx3q5y3e+kpfUHrVX$5x9 zcI6smHW=0Vmg>10V1ACMTT8;?8vYIUi6*arQU%j+M(eC{IUqNpN4C^c?A_n0+? z3Y*$>%5Mp5SRBj=O0uihmJD;GxnJvQ-_KMN#Jk$iAoj}lBGyxN&tqT#xnPdNgripG z10(&LMUoc!m4JI&mEpFwf4HzG@Rn!<%tUWl9;U26mJ7sCUmxZV*k!p_`m^jj+c_6}0xZmT zZ|WM(Be5&BZExmN*hM!iynfbUP@fjuR;8B$WM6XZUw9Wr6)}&IAh~%>wwg&NiaSZQ zs(Cvh=rE!&>UCzB+;vz9X7XUkc%v?jz(3lwa;vHhgI0PMmbB&8a&!NPKVX)t-^b$% zc2@I?@Y|*^zqP7Kluf*dlU2Va=}QSZ?c~MbLqMYTj=SdQw^C%<7U}aMKgc;bRjd}* zAMZc%O0E~C39QML=1N5Re^35=G@h((iZicaYXN)B9Ewk|Po9W05h8bx6l^mZX)KAS zMWb`5vbd#A6!~O?R1=V3D!S+}Hr~2qsN5TshV1a$DbSW7jGfT7b|_}_cJCtM{zrXn z?fio`pbBKUzqo{w3e7ZYK!wW5YdycuttVr{tOd5t&`!)Xk(v;;2Z8Ff(nWPsJy0_= z?WO-v#iby7E-1Yp`2fFlAo|G-b(2T1MoKDU&&5&ko#9)0_AO*fp#Fh;Xx9y(k6ere zjOAhwf56Q?&gyaGtBgD&T(1`c^Cp+Ga%5e5z7k$je``V&5%Wr-a^`i);Ef?vLFCJ} zi|Adu`JQ==q2+^nxm0}i)mHuJ>4n*A_;A)#yDi#J=PxZc9HpT*vm4|EQ0M}ulMSzXB~jjz zU5LFeTc}Civn=)|^s0$S#Pv?*u|PME-h81mlDFQekA4sCD82KU7Q^M-s3hac)s?_^ z7hMoIvt=(5%=fF7?azC$*>P{sW{9tfWlIFveR@L~>TiHu^?NiBp=b^cC zZAKhSit9FsNPPM&6n{Vqp#BZ1T8(=az^( z#Af^wf%c9TshQt2Z>bGLe82Ecdyy{kwq)ZPj{RK zD+HHkzgEqEX)By|bw3>Ma0r-u>?OhMSW4K|jC!!6vt3gkxiEvue#D@1T<^1M{P5bv za@)FR$H%ojn(HHKLr&(&gX(p=pU12^HnxkF`Ch{4 z28ELq4w9%#cLgq0K=&g@y?Nz&XI?q36P1}&e$ka~;dyHD*49G<-hhWg+$?S;SE8wA zM;{ey#xN;=D!N%sEhwIFQdRP3i^N4aOeNR`SotSmP3Iwz@5!MKtX%&E zvt0^FB3gvVW7Z@9i2uRtQNnZQTMi%da!Y4R2kX##a|}9Rjh-8&*ymiFS9?2j=lC&7 zaT?<=6D}x_cEMkQAZ$R;Zc=wGnOd4rk6UH}?AsMi`}|D(##PTXr1kz>h1Y zJ!U={?*MBB(zdS$gN@J^B|qUw^yKSy60|2A%?UPKL=#|Lj|U@L$`;j4UN#Z>^XP)D zjxEcKkQ&Hy7g+Yo>t6}guHgeTzBzW__7F6nD{283^rJYv9u_8Gb7|5cUbmlcnu%FE zj%YYrUhZdS(Xm?X@ygd2&mob$xf+}41{@kXpS_oY!9yM_CZ&WO8T)qc`3)M}KizYh z3~C@RzJaKE8koL`spUEJT7SU37qeIN=BdO1-6>&`!X{QLP|e{J`g>h@FPlAlF49<$FqGcLJP z+XD!TVc>C+S=8g{hp@(Zx`-983lDXaQsi>qUEt`q91=QzRVqme#i+;B;%;+0)Gh#Q ztygdRU|;&V)bz^$=wvAby+&Cm)$yOf8HY`jMFtJt1t2?Ql(f-TVtm);9N434+|Oi^ ztI&F8Hd9G#{r39h&>=MiVoUo2nJ{V14QYDEy<^E>e7__l~ga0&!a; zZ84N99THdKzVpcy-Zb31$440oX8#oU%xOrVn53my5|MJZUw0v*%0Hbs4$ojNP>FV4 zYV{>KiUBYZVFpj|j5dAvm15a5ErfnB`w^|jv^J_@1pdloqdN(_*CFkrZ7zH))MaeejInYXVmV9acd8}xMyDU!YJOV-?I}2eLLI&rZY9w z!o@@fH4iJx(-|JJyhsf(tgS%n2UL(EyN^d~4A zzDIIhp*N%hjKO&WX1cxsI^+WVr%LOQrpzs4#Hmkat$B2IUV}GT{VSoAD(Oqv-~5^# z{BfHR;9)nWy*U4a^*DUEfkd+YZwaA7k zN&VW|2)BJ5SQfg}!IWlwT-UDngLSqXE=qmFD)t!kYCpA9zuoIDE{3JR1ZF$+d*Q6w zg*-l*PxC37bepH4X=tv7AL>6(x^nsQgTtdA`(l(Q(;5up&(K%b`xKhcP514f(xAoM zw0mH5PAn(Rbdf&TI<8r0(4gOQ_T40%%$|1|DOnKY^ zy37u<6fHMJn-U+1ZRUOEN1^!u?GZNmFww^U^>E%6EPMlY2i9|#&vkM(Us*WJ{Kuo*2^1!+}P zAmdD%-6bU>F*SxbUTDDYh>N}&n&s7ZIH(ZM~-7QhUo0VFk7^} z8#HtG3J+c>sd4J=6mov*_i@ua{F&aYR)n(e^#ZXFNipV$JgxBkoi<7F`D)Gu^ppLB zh9`OyjD4G5|K%x}_;5*0?wsGHl4eA5bDgJ3I^#_qdq$vx7aSTMZ$LUq%F`6Y`32S0 z4>%0!bqn=s?x7KZDc*2{@7Kl_03w2T^@n#MNq7GEut09czXDcv%%c1@ z1Ew6|TY6??^v#6HNi-0kWdnysD$OVj(LbXE-D@EL 
z)uSG1N6iPNufb8J2ciJmXVphDT0E=N`Qp7?%^A|cd-GrMF?AP0MO!-oU3D|^syIMv zBbIU!OTra~Bz5SVp2xD^y7UB$!Rb1>r>UvgalpZ<%O8LZsMab(wLXX2II#ZvsX_ozl zD0G<^JLB08RJ}GQ1da&%wmG0P7^koiypFbqL{uU_5OyK{ zgd;}}n1BvcV)I|45$I^{cM{Ev@duE~Ei_od@R*9s;D;9|C2M`S$S=MdnY9WD#UAmq8U_Kw@Ld6{adsrdtgFMk-)n% zN;S~0SXduq$OFtT`7P0%5%sPUv3@JE^1Q4C^v^4{&FdJ`Ov!l8$!95RS<7`|$FrN$N5mK|7rj@_CRG0x z&AQE^((UmcHa!mw;GVxAw{%%WOZ8RR%MoGV^&dXD5zxFYZW^OfYzcI*WL5}Plzz0n z7e2Lg)$dX$DZXIBHlr$olb-GR4eU*1V)1U;}?s1w%UQaRzG#rEy6-zjpXqqO+jN%6k)vpUZqAjil- z;f@H^V(nD~%Us=6QW0$dh~pXzWk!+#ke=ai3B_dUb}vWAvtt46q4-^T8N_ddl91`| z0;!bvGV}5R_g?|cL09P;fFM3YvnVjgNF@m=adPO8h&1}`-Tp7m1p24b!}XAyCAUIZ zV}W)YzU$2S=ytE&`5!;B?iYv4tgnEkk-cN0+<+-@C>oZ21C>I69{Ih|1#sKFj^_aa zi=E1M@2Ri}RBO{B8{Ooc5!uY2ZC^J;k(V19zFgr&5`Y*pRz%xu7w|qOnY%tJ*q%Zw z4nq~lkYxQ=`ah~UgR_{vTvte?*LUfuBd#uTP|GvX&Ov_wVlN~FY)m}!(Rpta;~IHZ zw!Zrr&G#CL&IHs{wZLzj!6y)r523#jQUf4Tt@GeHY>eoQ#$pf;Uu&D17ypz-ov!i{ zP?h-j0~pl*i7Q@d{{yhB9{dYg1i|$`xawOgCU7ewPYr*YcKuPl(q;IgE~i4OKgM&B zrQYkSGzB=Tt(CD?X7JEZzxTEi1)EBgItobwiZ}TA-*Kpsl8K(e4G6j_`mWio#>uN{ zyk{jIKB5~6vbK!Id(aC4t^yM(*SVmfW|X+qxWv8cM_}UhNIbK)p2^qTLA2EeG@x}70X=LQlhFQQm4029(;_zg^ z`N;ZHVK~8m#hX2Jh2?(_I6lQI|Bydo zu=#K`?%kX7Q1ZQRToks-Qjp!$s?%4^i{<@PS3yFmfh_h_j8UDyB~nz}lXF7hXLxK+ zft)$?GXoS)lz@1G-|y$w%)9-{ETI|x=RZBGJUO$Vl}B@GGo1dYL#OU9UK?^U@IFFKJJtLw4O7|>Dt;;BUix$U`%LpPESC6*p85UyecGkw7-b5` zWBu5q!5n>}ZSla2mxb>`VP;PT*SGwLXU^p4WlZlG)b?+v*1F%T4XL?mod^|`CPIg+ zAdoa-YZtP)IsG2q3O+FJEj}6EU&{P1rgMlH<^S)gPQ%d7srUXR-O7eSGDP^<6D8Fa z(j_ojOYMjc(W4f;EAYpDUZn2|Oe#u&~^M0S)KM=Te0A4WQq8nyR+E$ZPHHG;rq!f{#8O0MM`O6h)# zWJ2-Iq=x|tJz*Rv?o`_4#<4m99JuGuIR@Q#7PBFe4e61dl9)sNA`fI=jOp%i*760R zHH3lGlhL;=1@c3-)7uE31N68&H@O^b)mK-jgMJ-j$&6KWYlBd<0BmTMnBE2ySW`Z( z*u_gi#;BAK5@DMByCJt1Z36R)idvK40+`Mk$9?refy+}*5UT-u>jZyI0XQbYc?}4$ z9awATHU2HCK*t=>LW$auxbjz1k&ULpZV+VdZYR2&Z-lqp!rM~$;)E^sFKS_f|9=&u zV7xL`Ipl`_+ura=exIexFysol!a^G_eyedyZOeoEk>cU>QS9!~^aC#xJs_o4!Ij1xSxI$2#Ia*`oG zxGnf{u_3@QJSN?QsvU}A#S60!w+CH3(}?~HsAnZ0x$idN5l))Jf)wtnkn+24Ej(|V zn@N=V|8*=<`V!2O_ZNidrHQC`Z@+y68<3v$!DrdhLAnXEe0xL>Tn5mD`Z0V^(c7Ps z0lt$lL&FTN0IzGn!E*H^msX8suyi@QbH(Ru>HTJ-Hzxsre=G~orzj@M)bsFpBE$}v%i%WaSUKhB&1UnXvLC4)w2odm zF$~wgoB^T@8~Xw~lq0h|WzPk}O+u6%6!{TU?=IfrQThtGx4&rp@&E&ze7yuNh%9=c zIQ=hAo;wz&4y!6A_BZB{oHWn;E1t{p1>6j-^hg^~SqR#TO_8E`_03lZDON$bjQndQ zfeFZ{S2a`q1fG&@i-?5=O2Kpx3^KFIKx|np0g13QB&9}ycc_2_P}d$4k{$4uUa(V6>1&!N$D)MallC-%Kdzd`dRRH!yr9Jg1m) zx%S;Tvx}jhHS^HO+#bqMH~*`EGQOr_NXVdKp!^d+IQQiLJEiaifH-v+PyLFQz+d$u z282%xHXHcwzCByFVWp*Wxm2D&{;?;tiX8jso{GLw==P}0bv~#N$x{*s47PYU?wbJ) zB=08VvE(dajZ)%hbdwP|IHTfsnLGz!oxlA0^OUs^iD`ZJRO>xG)LqXsor+yHC%|RQ z6cTA1AK%@6b~jZ6;zJlJ{rWxIB=@zietv0H%SAN>Z8*m*E(V_x(VDrRycesT=Tltn z5sUB4*A_WFL8ECkMOVsWmg|e|(!Oya1<3(22P@43gAW02LgYTp@g39KGVygAtu;$) z23HrW+hi+^BZfrNzwZk^Z9wmjKzUrCTCW`FGF5rEDP31g%k8e2x>PY{TN9FII1D_b z=7`-sY&CA4VW>f##a<%G4~R-+G*h!^4fJrP#AGvqtFd zyL>Wc7BJbIU)j6yVy7nI!3);E^`pOb`73@KJ3BKL-b<)P`HO&NB_h*HOLkC4CuI@r~X7r53HpZynbd ze6tRk?(eOfcB6vn)KsLMB%YFS49#$4Z3QG9JJO{3u% zEN>oG_u8ymZ?5LO?I3D!Jcw=E!89g{e|Ce$qtU!putZAo%JCd#+e5+Un}LDX-L5>{ zzUc<9BA-`kbQP8$So-6Eqd_X0#7u)OP}otUZO#L@+~21ja-loFsS!P$%qsfis3!K@ zDTbXYI8hc;_t~irIf0Ht53V$S?LGJhe`jf8Od#7I$A!Pi%CkNna92f-7tyAL{~Cq= z`HO6c{*{haLSFmn+o&5;KzcLzW%$ghr$7Lr21_~uayj6RsSM4H?IasNMzcc)9tc-s z&k_Ksy5ryUQ??rTc)2GCS0&Yi*shYjY<~HE!f2{QWHgXYap_tD0V&MLdko3R;Aa(T zZ4%9*nsARme)l`W1fjqAizhq2%N}{wq>ld8XV-0-Kgu)y#qE)J2&_P@vFDJXSOcgn z$+1gw`P z)NY34`eXl(pVJTM1P`tayh^9R5UO^i0G$9t#T|eWfzKoLpUfQeVJD$`_$|nTZ|L{< zNkAnv%yv%3?d4+mBVUqR8SuY6%eo8UdKb!p)<5FO;i~9(c8YSlKqa_zF^KB+=pHSB zK67ZP4l>t2)_rJ(q2s?lMQZ?38vI%kUwR9N7X 
zwIG9J?nK-lUO7rR!)YIy@l!*kLFL}PZ;s~~|A&tWxR*cqKE2_cnKu(nD9QekJ(&-; z>oUpzdJ)JJ|KU7l<{HLZQu@eD*vNO4HhJ7tk#8y_CjZ#d7IBVbV=2fmG_kVrV z*&js>$L^@@cDEAcQ&0N&G-CkRD=yX@G`gihmDDsh>&zvJyJm3^ogPm0JRLk0dy*>Tc8jJ`k_4 zzVg8u|M}lz%r=V}9ese>MsNxfK~S18O$UV@fbbCR)-_N({w^pzlAErMbPY8571aa# z`ZpR~?n1=h1s@HWM}V9|U@gfd&$a!pKO29*)ZPWrpWFsjXEyj_#ijIaj;j|Cz>=Sa zFHc|l`(QY31R%Iv`V|HDR*{lO=(FLP-fMV7#KkoVUIO30l9|N`bx4?|ekQsd@nSzHA)LKA&|KHup z2rz*w4d4=F$?#{z?9*^NXg^jEj;G}Mzbi;5FI*X zt|9inM#LoGXA6maEq&Klws1?96(9#V!}<4pR3$esvqPukIGazcmf{2WzZvd4;I}xl7UnK6!KjIP6rk`i6Z`>b4EJbApX#8Ov3+` z#?C2u1!yN_1w7jyt?l9!0C!+?C)u$#(#POda^ydw)c<#{*aqXnO%+@y$8B!TGa>LX zIHbvLC_a1T=sIJh-ybMqpyg`^C19Qy0O|7CvTELM8OuSW4rOaYT-zwrz!v~>h;P=C zTTfJ1EEY`T`6Pvg;va!&0BgSPpyy%$be!k4Sptn3Bqil;517Ga5)npU38hsRHG997 z#EYr@4_9Q(6A;MQDa0gK^l&Zed5==B3nQor`K|i=e}Jn1@Ks_Cz4;eq!+g|41IOiu zEfEO3Ts_gCe|#<6`Y|wtW_NXd5mfUr6qa1xi-h>G zRQhi`fl_@NHv*>We$>&vqfPJ2K=8GLVO0grC@+`OxI*%k?_wi)MKhuQdQUnsU;vtZ z0lHt`Ry(g06xHoKNKt1C^4~Afv8~Gf=l73F5Bg3GSJMz7E5FGZf(*z5k)}OwK(g~U zmU{DqN|Or8V5Y$4q{WwEUMqXS^u#U+$X3Ox**O|Yfb|FcCZ8o8gy#&YXE}=LLftOT zxe2J^8tM}`lx+#}BkcW`qnSOGL9r5~UXcm%wWp=7pglnG8Llj6LMOsl!c(Ws&a68r zqo5%#3rgE92i7yLn!yp#wY=oMx5sDj6r}BE#Q0Ob_whw)j@@y6s`~O zo}ENOU1Is`Hh&-N9so(2%m`P>0D!KRs!>!X-O0j=TFevVYf5SSmTDGhqhL~z|2E2A z3z&&2$cKfpf;1n?MW}|O&lk=m9h%XXot{$s6-R~R72bp94#Fq<+Q(DZ&#kHfl|__qC|4N zzRvq-KLfyhO*bj(4xv~_lMCrD8C4CJ5BD3Enhq?3#0DNb>&#P`8m z(f*fPcT&P9Eb^08D=OxW<^<5DfC`bGK+d+_n`^H&JX50C_D|1 zpL*0oakTH5!oHb^G~-HqO+MJ8Y2aOxHA1KsYC_e8SZo2Tg47Y)gAAc#aGhaZIHC+MTmQQg8WEs<_Ad`eP6cYJeJRJ~qtm+Rh-SKIRv0JlfQf>D+OYJ9k# zpC3}yW6kz?gZ`JCG!uXypanB~(shQ&M#F0^!Q8Qv#F(>R*)2$d&=iTfF1`#=C7)G> zg|b>Hx3wzD{wa?(%ob?;T0mPL?ygtAGYE=)09ruI0ADH)4%Tn{DffDsId5%9oAw~SFDQs`QAr8cU$iG>D z62Pm@Ga8hF)$)y53V76DC2fjGe!bVhl6&A3&AC`m?G{sX_qyNH{n?ZQGY4yl{oHcg zA8i;UNbp0F9N>4n0iIR1A!zN#rM~=NxLfm;pV!bfJsv9)fU$c%5SP{*>dN_pBOp6& zS(b?mcy_Q4MlT*VvV=|Uc(&QuZ6+XddX8ke3iDNNu+8T`C8_`piS8G|04`&S8=7EN zwu6J2i46twHN()T;s~wbkF} zMR_6|L^%%1*r%%JKqb&O$y+B8T_?$sSU9wv?~bb&X48RBx-gidcm?I9q$K=<^oGM+ zCwaIyw?0y=M6bqq+I+QQNPqOXA4UbhQ~N6?P#5hq0Xm@Grh++FIc)wx_vfI(XV4)b zICP|RL3kFPLfnM0)Bu&8&LgH2>-*fCAEx|sl%s-5#SVU!6@Xt;Jit*u?xqi=Zi~wUc?)y?J09IS zj?K_ms$K1hhz*QNWQpv?^pW7;f%=_%O9u%J2XdbbazW3cr6E8r!+-1V)&c>uiQJ!* z*oO6Ft0PTtBKGv^+$+Ep+k-1sOs1n_nXvmvNwLkRC{?vC-V4-tD9m1R0=9Mk@jy|X zq#vX!*}c7k8zPAH8E0>FI~I(Dz{=`P;d_NM^LuT+w5ac z-DOedw;)ofaP@+7UrOgN?*j|o26)=2>Aj!6lIN_tEq=7sKi$K?7raH|`>3cYXA!?) 
zP5%?i74C2uj9fApNYZ866}w4f1X}EeF!EBIYag+QNznu|A7&!@9efdFrQo7jfP@ti z47twhK?{y7p>prJS+?HWd{jqn0ICC=8*hW{d$jf$=jK>{;B{TEtr!u2wO>6&Dg6@h z8$|*E@y1&~0a5x*f&>6v{+Ub~%e~nhxh5`?qZ54vWIMD#{jU}%748e8O$p)ad<`0^ zStiYIK4AzqGyhG*fj0vL;Cx;8wm7kpbxf-@@bFJYsgCb=kqzlC@di9k;g|9{T*R^Z zc9?H5jr$ z#mqO4@_SswM`7;UE3XFLxfp1om0aZ&lO^tT?RrM{5E?zNYKZ`O`DlG$m^ZmEfoENp zi-oA~QjEZV7GjqvaRip|6z%SS?ry%+YJcfN%jb5Pf1)-Pz)ybPD?GWDZsOVfWOXFL z5s&`XaSh3Ad~#E~S~cet+uHTkULM?04|!$uD0rnUK3uU>CSYvAw0OeJq6-k3x1^Z| zkaDGf>6M^O;-#yXz|TsQ_r4cLzKT;|h8zb(ug8J?cg}lVtqx#e-HRq?F14BGA}NRN zklo5@)4H5_f#O6lEj7=29`#4TGR4ihI)=2bO&9oa|;U z;=nwl?*PRqebn3PBm(ov6f*t(bvd zq^9ytL(Uy#>Z0r2uF76&+a-yW)^P<#1rG{`-)R)?-M!VQY?(>bEMX0D}WE%$gw8d(&#j@#sIdoaVZuirPfnWIPY%Rj=R1XEyNe$}TcV z7!fAwC@siN3vFAFKavLWiV-b;06#0U;nS?Hs`-7F2#h~A)g|eEHj0DMs=|l%*A)u5 zTgo5NCLf;T4t!`hD~{QXK`6Z=P#XP58uYb(TNj~!%-Px=EZ5O%bRB1niR`XO>_!EY zS6wv&?JD}J^pig7b&{sBi6R~uiFFoFA81HwdF|QQD?PxToIeY(D6)#)N`NIs0;1JN zf`SLBI0VYs5@1J=GMr@d{fy{pm1bJs&ZsC?{&!=2b%^(SY49j@B@0Sx`l#l|dDY!4 zJRAe%ex>l!lC#%TS*pk+s_zqEadAR85ojTYH}39SfYb7>+boJY_=DX(mi+^auXL{+ zkNbv;m)v9P`kU}qv*5#3#(CYBquYsaMayG+fo6R}Z!ix*A4sva0ty~q9iM>a2cQ0l zaJO0#C80(a@y>I@Yu#*OE&#whhrmQhP3mWeG^k;3w+KfFZ$_|TFI@l4`oFK zVr{oM|>~1M5QF0ngGnExEONDG)wx-$uikN!VHnZyf^_@~VR4j{}w_ zbi6B_mj+}{#ydb9nu!){a&ZP@8$&TO9^S`$^V{*nJW=R|6Me-*epwoqaeGj@>Q$_@ z_w8b6sN5+N1Bt;!?6X2o0fgsd8S8m$EqvWlXQc&P4180wbv4bE-1_;m!;QD`A6%50 z4sr~X_{jNw(u8XNatympwYzsmj46LW7DXb`Igc^dhw@>Qi9fq<QNoK3*oM0ZlL8kDzJ^B1e#;i17{ zS^9ytH1Bk?uC#(VuC&bhU9;B@wkIgumF_L5k6OS=h`}etUFSi&yugd+u41`k#(i=- z?Uk~p$J9r^d(D-qt-ouhKzj+qz_Mk5lKGI`Wy%jH6hjTx3<9RDk`r_cC{UyTJiP%1 zW?rd)-fWv{9jF&E6k|vv9R(wfo3E$LbOp7Zb|FJh*Wtd4XwbX|w2@aybcJCO3&h>& z2GK>*o@eb9Wfy9?MV8r*I)gpjgBlXH62ROA`F=8xpNv{<>EK=vWHcOi*+1K$rw4c*K!qLOB7Yy3lO{W|1+JDJ3ctja z6(oO`+RfFWln`&d66Hb9uYHG4=EHA3WdXC7=k1h#n9HBYrK7|t%ZE+E#>TeY#Ml|G zIv>g-84;!;5F7KK(8mPU=$~m<`BtNc*!wd^!PiTSZk@?uoo!pR|%F~aSJ3YO-fmOSXPRlgqKVNXn0Lz z`pHUrwaynJyW0q%l{+CA3Baoqwb~z4J;`>Z{iypf-*XvMm@z`W7u1VJIY!@fb9000 zg5>;rwHz0E8;9ab;$N^nKMi8x*zF~7Y9^R#8s60K0~Q%++z=d#OtrB(j9*QXONn%; zb@PE;0Vemkn!35NMj1tyO4!o;ooIGYSx~3@b=ec;Vq8XrJyW0niY@{P^7UeVHrhIH z56Ga3W5AR=bsPsEXBQk$ULmCW?SJvNMU>3oWmj$}!Gw0<4UMS;aJ&#f!!Y7HQPs%3 z1)6PDX-f!+&CmTs;ugYly_m$Vh*Yn|o_h0hy6JKHDH)nM>U9QnB&nu<` zzfzYhPk(vO2c*ExSV(Hn^);h5l@rZr9>UuL>VuHNk)u5E`d!emML+^WVTCvtU6km3 z`K`h6#;f`RpbJ0ENfW?F!;(e5*IoG73BTTaHTz(LCt%8rp^WI5ynSM>Ko?}pQn4-w zeRQE!x&!7@$oFeDN~Nq#5^(D~vZ6$!R8>Qu$0C;gero8 zoLEikQI4*0TQ0183jodG{cQ@Y}qbM=wjN@6+nbM+ikVX*^2%%2oxNI{ilU~}ZVDeg;}_dmp1 zQjC#ZiHj2hOE36>2Yh;tT%X5_m4!?UhJc{D;%0#ixm%-lOCWj z=Q>4_J7Smwi$CQT7Y`9^_S?d$rbF49(OEV%JDpS%Au-D0U`h{YSnPfcO66p^5bqn| zM*AF#qAwtsiv5>N#m{WOCj*8DIi+36vIhHIxKpE6yI!g*31=_HTp`=q0|NjJfvaPE zN@cQcG=jY<{xvK6^B`gAc8a&5Pl7Z?~ie?4K67NDP-Cgj}7gYzSthJ?Q-EReJoPKCm_4pdh`9e&z%=^a&+sEg3 z<0~ooaOYUS+V=s!+#g?Fy{ULK1Tt^YWS0B^r(Nq_XIAkIF8g;rS@1}H3QvX`IjhJU zH+60See;jZF31<`aMtM4(+5tAFY7#eaNZ4DPDT$Jp`>wHXt>Q}Yg7W3?W$TUcj0sU z#5uKZy0#&&PS9}pc45vPAx;Ygx6+`j1jv(Twi-bn%vRS-ZaCg^JL1b?;~0HJU@SR1 z197Mwt!Fz@!gzHIT}OA>fd|8y!y{N|7DZv;`BkeZ)jXB8)zju2$`9kwD5cU|2^u3& z*04s-KER8HjX)s)bIexi{B??Kx=wb?T$0_=KF%#%C0wLL*W2^*#-3waI(;s5kOCEN z244V+Pu9I8Ko}IA1#(4}xhXlEbz5rCFz37>E-^4Aee}#F=S#s_d+b|nAEc@UwXT9J zUyicv09a1ZvTR$wHwT19O+vol^44=;{y^iMxIV9uL?jRp=_XIw=_C2|zA`5w^RiA~ zxj9Jv`=w#_KBOmSOmisU)~&n;+d!((>G*8FP`bwBvJ%{4?4DA*p&z4*7kKNvJO1Rt z#yDd*2N)*~-KKm56k&!ZiOE6g0qtu8JwY9g19!g%dzGeEU6Pj_OOrv#E^k=D#WNdm z1caWu-aH{DSj&&*gHauNE)!%T%8gPtjA63srR>Z%ThB9P(=!`>0}AU6^X_D`Tfrm9 zuFh9q(OE2pX8S;3@rSAyrRpMpo+3nnm@b!Qza+UHyIOCKtt*C(hQ>Mj~*<&;KI(S3>`KN-y4hv#qaO^*hPvJ;L#|;HU*$omp!o 
z3qX{v(A4d8N2ntij5+7@TlnX4z|Q&ljyTDptg+V{i$Rns})7{|FHJv;Z*P4AGg^SYNO1O&qejMso(djYx~gfSu-E02K`;f&mUx@sfuA_bd%n4 zyeD$FPdqr3Quk@wAb(X@F-qdTVu14+rJIfs2tYeMB744_))TR>LXwzE$}?5y=A5^l zeQkLBIlnv{2=177kgR0(&tZIdy%YlT6#22c4*6Y z2~4ZDb6pQX2yve+5oLZ9_f><`!fY~ONQ{ShPNdqOxGP78MaVo%HSr05){3UGuL|8+ z=0Cr1wV=k`vZx`lNs|DHGOH&G-JADnW<|R=G&JUUDi{5VWrHl^R{CW!grkm()9
0IK6Q;*`*E|0E!*!AhOmROGdIUS}R zl9XK3^U;TaiTq~)J*ecH?_WRc$O+Qd*S>K6j6Bei^3PbbYeVSL6tQZD5LgTC!X1Y_ zcEs9vo6ELRr-l!mZ4F1_{APiz)9XsHEJN-}(eRqe*=XK79=Yo6eqHq(C*ND4 zr_-{%9V_OaJURLGT-Pj%@5Tp+-wJ50&{dV$i&%+IAx}=;JonrRy%E*z3%||>4tS;L z3l%KaU#CNZ<>~x`g6XN?33tzoBVBY_x@-Df)8zK|PgFsU6bspk2MKiUcq~6Im@NSH zr(-LPh+vsG>Bv{Y929Hl?A8%Eiezs5_e^#WjGsHfmD@(x)BEGIvs>CBy(abz#gq<= z;g!#P409uEW{Z?oWtUgvO~{Oa3sM85k4Me?92$Bs)kQJmjhP$v%$XNtnr8F6nSsLe zMZ*PKj)6Uj@i;;xBD9H6_J2A%H0Rx&RHaIX#)oza#BAF$D!5k<9g^v7P0KGW6Mon9L;Vh|>v;y^7;rYvw{7 zn+jfFy8A6hTM^;TMSb9U}I3Jx@+y`y#R@X=ae_{s`Cq9zMCz2;9c1n@Mx z+IH&{(K%Kdg*X@_Yh=MOn%Ulu~&yAoK8OYwPq${ zm^t4y~gpAUa<}# zt_}FtZUoIXK4%>Jic70xC&5afS%1{BF$= z-i)2pit!;H3%O*u&1E0|=(au!upNSh%5wjqsGs0@ggSCRwqEBli z3EY&}Qg(R{kzO=jQ6neK1+<=rAaxyyydQ`e1FcZ0t zV)a0F?!JB_jWoX?KR?A%1u4h)UvCQUez7fQ-D8kkp8ZWbQU};QpFe;0(l#_PNq-&( z^M!i9Q@{KU8JuW4u`W@&{o@q{#E;Jdb|fTx$VVFWdUWJXs{FxKr)@Bu1AROfe)vRN z26KEiC4)>Z6Fhj419Fh^gQB8jiZGfdpY+cSWOp`cdfX?Nig6L>FA5>qRIgZ>2tM*H zh8SK;0yNd0R-tUiY;JDCwY%*O)<|}(uBzpv#8;DT1klM&gbUcJM7$doIM~B=BzUAX zX^)uqdRX*R#l3ePd?ZP*LZK#e9eK=DQEv~Iu$Da5;zV<_?Tq*qoYb8esdsBvU&)OS z89UcA>ow>m?@ zbH1EZCfedp@0uglqP^#)A<=lld*g zRYdE3L>zrH+usI@E&I0Drutw&oXPbYh`OOe|Ng3?Yg(wVckPi9O9xX5zV7(}BuFqd zxRp2vGzb@mj^4MTpN+M!?~VL0NNft$SEN{e-U*bIcGl6j%`3DzR_sqnTcj1bfm6xm z=Li1H^V-_lJ?CQ@yz!rq&3h?>*bb%hvsG?Lm2*X`aFw}HDgnk~E-ZHCc!o;kZmriL zW~kjNL2?+qP*nzfqPBnz)ix*(9bANgbo^sn_b+{Y26dM^$fX4iEd^Pxja6ALEE()i z=7y&BVKrRNoBC^4!2Ki^oE&mr5_@^YYB)*I+qoBA`V)8e`vW7qdPRcWVjQZ!5O$XHGP9Mm=EK*zK@?$cYY^S}(p%-$FNo z@=a5@0GusqzDwCuR0w+`kdo0TR`T?vgsX@(Z)JBP5Az|orhnHR*KjUQbzkCH3uBHz z`r(8t_vOG>ejqnN^EWQ7Q9U}Sn40X{NpW6#@xe}0QRQ^^Vt<-yOi?_Or3$a@DND7y z4EClAMVPlGEvp{b&%@66X;9$Ef@LIVL1u*5pC zQR7+)6_xO}MnpLJ%Ped*wU+4;B^^KKvkP${$OcVKx8iH-vUH~fFFZG%k7Q3dN1ISH z$6Ns+l+QTvSg77E*JFI&r&}v801}rEllvBWY=JW~ zJ5)LWch=K+d#KcI(;2!)F&^7-$uZk*e0x>^=$6jk-$TvtNk&EJ$lWbxa;UJ4wtzbE z7#SDqy7A#_gbfzT2WHN4^OuUCa+Js5Sf$%4#if;;>8x-q%_(e=KDx82f!6Nl-j=(` zwE!FZuvFj!t3pQOn(w|crtL0v$d^rvy}R0Dpn*4NpK73=g z*!p@%eP1JzPUVvI*h6LFJzmH~PnIap>!Yw*_T9b{#k+leyYEQw zl{D7s8ydY?URQd4dMMO?MlC)v|E(9zg6gL8c@Rfqa(Eq{3+(Vj5zT-9%RNhw2_e|B z%pSe3#Lmr)`4uefbFB3G(eam5N~iI$_~ok!>n3aV$u}>vtleCXz_g%*mtVHT#0uW) z<|^5fK^CAh-^&lWLammd9wZx5SiCS4j18Tl)e){44R?RU?tB78J!)WJuvjZJsf=8( z<$)#Xg1ptk<~s?K&$3H2)GA>r_iJpe+XKVk!K?IGVxh$oRac1-;S@UA5aE>an7arH zv0H^hHyG&U`Z?Hi{ekBueXO?_vkpQiu~tTBt)86kuJe!Y?L6A5Qw7l2MXpHY1i^8K3Drr_)EeH~T)lAWq5c%fjr4EZ^kFD>wj>Z@U_xrRtzkueB;QGwP|Gr#L*>a*^KdnP+3*vP zcxxJ2n$R7bc(2Nzh4%~ja5(8yX?AOww%mIow}VR(EbxehNb?*7NOJ|9z`kB{enFTM zPC&9K0L|2nYc-hna=Uggpl#n>hqWCe*wawOHkwE>?Q-0lm)}I-)AH8p-es2$s*|N3 zyKDV_KG9hRlk15*L{R5|$`FVQ-yeuA5vu6wuN}M6tK|LoV3AbPHKhs$vl5VqEM;1e zu_9SnV5jW!Sh>5JB1(}YTA26g*VCi!hQ)fRu9yy|ai_{%5DiLZvP#LRsMz=Ll zVEd#k?@@IHrp~DJx}vkRq%VhF*)N`SPeCZ>ePAJn6 zl^yDLQom~&|9sDPjFNwBWdqD&B)^!9ozl+`5nK?HjisqH+ zZQ02fW{3>F>o$7kUZ(0?-z^tV&-fx5 zlG(|!J;DMlqlY)|grfB%Vr4(kicQ$mk~G=0F^TQ-i8*}b9*q;tKz&^wA#vtze=L}D z;R$J>TC zgxSlz*Jb1vUT#$e;wmD`MZ~U&V0@gI)aG_@y_GS*p7hAc$@yRE(SH?qQv3BT(}CVo z`b+OY)(v`Q5YVS(+ZR9HSgGC;_f6n!B7f`}L@5}U8CRD)u=^4WE12keoigiMu$-R1 zo;)<_NSFund?tOd-^O9h6pRMx?jX(w4;@;BG38kE-Kr<&&7ZlyCIAeJ?`>Vg)nUm@ z;4Vq$unZx5tyih?doe+U(}T+DJyY)uB@L)RZCW@ly$>--8Dl=Bzc(``m*4u9Yr9Pq zWh1E(Kk6}Dy6~~~u$^hqv{r&74PVRy3F8&hU0v04S$6d$(Q5JBiC1j|gOf*l&?v>T zOjGu0+(j^2sM`9qL7}PF*A+hhlmHTfO5=%@TNr^Ec23_76Gz}!G$mYeYva|5#lx0ruc zUzfY8g^@7%P`=tlyI4F=C|;XvG<7N{=_ZYBus*az{}v1!Krkps&nfjJXPsN@2vheU zCfolm>(sM6AOiI~T%NEHFl~*{(~h@rs)swqdHmG2Z);9NIX|My(9uKm=RVh$ zn-Q2~<$0ev+5V`DGyk&b?7{Zx=)3OSt+|85L40F%vQsm-6#O;HaEk(oz}>+WDi z-5hc56f^lCY+l`j)I^bn1;8AXTpz%i_P`;ZIU2~vP 
zpM?(WbL*X8bJnS*{#amKi(i!?cvu=n%9(6QeFQXRU-|5V0%zCzV^_DUKt&Vp+UU+d ze^Sh2b>d0ERdek|kS*@6?Y2gE9g*A46hD7W?;%w9?;IFuIFJr|%o{3H?rRb?lvWV> zwR2FNdUsv1HH@+zz-PkWxe$QKE|YzsefI*VH3i((H#pm)SqY-r7W;X-quPrH8A+4T zX|4RWgNjt7+(q$|w{ef;E;`m+GVxLsaD7i$t7lTd{Gmx8RdP~83ANql`ST%v%?6(9 zXV-gqz#UiTYB7$w6ZW&zvpzvHp?YtOp##P^YB4=@HC4+vK>JpDvDatBw1l_B_hQe$u4(Qx_ z$MNO{B`V=ca+FrGpdw3poXUNnpox`8ro0=IcnN63D=Q;ne9!%-?s9Wq6bGahvM&&^ zs-~6#FFUtz5Z8Kbvbkc94X5hrJL*!%r&do1HVs zbHWQW4Uw3Vt)}`?Nj0X2pW&}`XVf>UVx!G;VZgxnX$52R_oJ+ej01bLvKe^p``_F) zr(ff3PkhJ6*@9t=(1UAYbhRbCe&G`r^V#acDwViqto&lqlj6bHwd4&fThoOX5ssP(oHY3w0XO{LrL7IlNH)-#~%Jq8H(D7CxsGXZK8IpVSLPlvG#W3Xb{N>AqmM{jM zZ6;#-8x&6NZf}3n_jyF4_NJsVy;l*NKk4aDv2>CVbP>3Atq948;oWh#13{s)O z$ed||@mAmMJb{+eS&h3}xZ#(xYAk$F3nykm47V(>g_=818Y~bjyVJ&xG~FLMaH}P7 zZiEUdnK3c7$b?ew82a!>m(B$X)wCISSWWY(gKgWH)zDz=UhNN6auzvb?FTX!9UX4% z%hIsAg*zSTw@JEpuK>bvTjhrv&+94)#u-{71ZjzQ##eC4FFqCV08C2BL;TMnA1M~o zjZXYR;cgK>n*s2}*eZ!jUR6^9k16}G z56jFJ5sqPSYZY9l^l{h3DoKP2Yp8b$U!)emN;k@XQ&M73Jh#Fq#iu_1$id<;pyhsd zB7lm3BeH*!Ua4ltFlS+aPxp*L=Rl<%Qm)LX2tTJ}_Ttuut?D@Sg`V*uLncl$kqiKv z0{gKgAq4$1?KFcz3N=%MzM~c1gQ?EO~fFDtX=PORyw*MyVx< z^bz&U>0}Y}FGpLq-MZmOu}}-|u+U|pCSbRU8sxKh4VB_Se(E0H`(cq>Gx8*%cf!@m{+9f@z zQPlH<`GzG?9kE%)>=(EnI%GZbd`FIm%qr$YflzNo@IsFddP`tcVdha!AYl^SOUgQZ zvV=G-Qk$cWTj>dzBQ6i3mMQEG5Je2RK+ zITAZf7gUTxl9L@U+x+qo8F!Rs0_&#$)gDqB{$A3Y+uXsDM4BobEJd%G{fDzx)IJ|; ztG{yb$88AWZ=KE1_5Xm?R%$ArV6m6Uv%D%J0`LVS{XGutbL|pjmM=Dc8b9v;@kkMp z?t!Y4rKTZrSx%0(W_b0w)5*6ompFC9Og^Wn#h1iLpQW}b_65&xk|IYdx`Q$oB-O|%_Lr|qd(8?GF&6d7+-} zTXwi(Fm=P7z?b|L$|itRZ^b(^x{C6ZAH(&anfSGmym}sGjKf;WH1@cMSkxrOU>)%% zH=6MWOOnrsCr_C@50*SKBWIP!-X0OnGe9>kZduITVhKIJT{%;d>0#)mo2LL&ZMjK% znXNx6nXW$rL$n0BH{UCAEjxx{&ucym#on-%-l=P`IAxMEh|d}Nr^H~miQA^0Ehcmz}GJ zvd@IP!a}y>K9G;+F?BE*B^4ic?p6XmNnvp6lgoWDNoGed$cLQsjeo6vIV3a*jNuHwDx+{HlUd32RiQqZzi>}cDmuwe z&DLNYCxmGUqNTF3H0~4h%Xtdq%&DUd>bRJ;W*^&(qOhiq zyZ>voNK6g?UMW~jQBHJwM7s^fN$_(}30S_b`3%!4q2H-$(G;5`5$A+VgpfUzRrDcP z6XB^@j$v=T16aF#zwl4&cjJ=f#G~q^7=Fl7te=A2)Lf=}7(Q$uG!glDLkjXRmFf7$ zx{z04<s{S1p>r)3tHuIwg`kLf?aW*8t2Y8RH|-OQO@_Kl=ZLq z^5E;uj@1kg1ImPl?CW}r$zg0@tY|f95RAT zxIP45ONFu>UddUpycHqXxvikw#X>3+uc1sCfc{$Sa0JI*nL*yq;V;*h#%Y1wo!R2C zvNI{#7(px6K&E>tbVU#rfAWo(RS{%b(XG;1Gi}~IR+-CcLu5&SeXEdpEYO3oIIdVK zcIkORL64##M|T3wgJYf1*VO6aG0MW|sO+^Z<%<+rY9Bjj@fAOp&?}y${;_OB6d@0T z2_m>2oSkZoGdTC;$H?r33k%Z%3!>O>$Y?|CMuRBRXBjD$PxsfB{&m~{zG{_UDdhIR zl9s&|EA66rJMp>zwWhfc8(F;s-QgLtrL1>wb@`>O1BSPRri6i4qUp)8!8NAhL&*!9 znwZ7wF1+|cu!j?}``r!O z78gII^pttZy6Fo=TCQ_licg)fL;F7DWJk}- zm)*W9Kb}f`mN|*^b7C9$m+CFk>XQTsP2x*KOf+DP1pJi3>6H74O6_c%i4u)UoCI6N z6L^7s99`&*)&@6@?sx)Bh-i>_;HyA!yeZ;@_lBr&0@0xaIl?QxF<5LW$NejK5mE<- zh=RxE$%_4NgPsm96-RicXpV(+P2kZ@U>T@xcPOC)$e2kL7kFE>?N zm~ht9OC+lRaTc4*szUx3u+&mr<=Do+0S><1W8;qXA6og})#eg+z&|2Q!?QWX`t zd=6PkN3RL?i>P_aX6$(Kh6}N_gwe7us3JGYWTRw;lC&b3QmQp&1xvhQzMAFV_dt9*=6y#jGCv9i_C6)Y<_=hw z!P95tc3?ohK(IXB2fQFXh>IEP#;yqf-C>6ZNEf_A4GrCPWEi5f%-iBr;WSACbJ_7g zpq-ymDqa@~az~+nmT{i=l{Jb7aWDX!Dww40OjI)ZP3z>L>UbOFXQnp-738T!StG*g zRG2j-ejYs8y7WtN!nU_nsRwnDR4iLAAI+)Bx&!^=m# zo??nBmLW^_n6-$GC7^esQ(hI*oPZ6-jo%g|{Lqe@y)CkqWrmqnCt2mViZp~QYObv5CDtnHPgm&zSbR02U zmme)Zl>d5{0Z}d^U{=YR3;Y!T-DDgTX~l!vfwyJCwf91kG$TG;{$qI z9iq2MqJx)DXs^cA>gS-$sTlU^*eu8Wdhnfv6pKG#&=jdszb$x^^F1`vtEZoB*k)WY z4ye1{UFZCWOoKC1i<_Y>{6z`}^xON;*k~syaswOmE#?hbP&R7j16WY?f$m@nz5ZTy z(b<9vk_)a0yX+;Ze+nnqD7q$0M^3a5huuL4@7FMMt_3IP+p%j@;q`wBs=BWb33;?y zdKic@DEs-{LIS971Tn2$52A6KqMH+F$FZQVCQJ@n^{_W73-+sM4K|fM9&{RUYI&v0 z46RXeRO2m%LlC;RE&OW}ggZ7umtWyEfCEtT8xQ(?73=rSd;smR4lqdsDpLmmk=#K* z1in*ej|?WQbg5F{g&d+;Ky#8q%^ba`C!XfNT$*LhwwwlC%#&Mzq1ZuIJoU*2v;`Jb 
zPk_diBdF+RoIX(s`@tUB4@1&a*Q{6*g>KQJw@=N5(6cu6@poaXNbht#Pa_e=lr~G4 zK|Kkzf|aS&OnE5NLK`$8UAN6Rk)F!Y4)46>N8ndasw?8rEN9h{?pwugQ&|z+7qv;q z&&>t=MMZ^_EI)LiXj1fUW`9%9&9J`rud7HLR)S>9`um(J=M%H9b?FT#Elw(~^Vrn0 z#t&H-|7}^PD3Xm88=mz^wG>ji0(mq95R6f{d3-InMjaTnXvbk-JY3Xua5N#-i@u|x z4tC1Lrs!Y)XnIPeEZaIR5wS_PNoC*j`(il!tAYb>RKEi0v)6b*M{wfg$#q$IqOhaK zIB69GZPxnJ@80a{q>9bY7!JO2S3{cHefThw&$woxY4rx4F#%E0sy!fVyjc0&loe~u zIRnKBs!p?lumZ0u;#6iFrdA|E4fQtK=G53Xj?(hi=1{)jsZ!RZ4!Dw_nfVy^9J9y; z!9lj9W8?T8m^F1!>nR?C={q}IC^$Jg+*u|~G_+3#q1yZ+D?pLgV=Wd}GKVeuDnUn1 zK<~_C*k1xSKypkr?S7dsjE=u+J`^p=oH%4WQZxz_5DTrjpMm6S_NgH^Ex%0sUTtq+ z{Vp~^4__4UBmPLw&LEyqIC_GJ0uLrh9Xr~`okoklewMhtE}morncTLszt<_RBhK+C zgfH{Q*vd3AbqcWj`A?pxWuSxn@DmjXPCD`OQ%Y-+qnmU&t<8h!I8csV8R!HiJ?}^W z@DRM?xYHdLqf{=01x)Ea+}`m=VS>a_DIUGkfBU?^^5MvKVq@hHfgh9}^E-?%^AKZ= zMHKwXd0Ax?iMs6>r(58mkhE;DslV{4I<`Fcj{~foY|@x(28i%i(+&9#6=WdgUBI6F zDQ^JS`rxBt$hNDl)@3O~HcTINRm^9979vv#X}9BlSiDfk!Qmf4KdOj^q7e)1~~P9Udf z^*aTwg?sr=pZ59^yD7J*+^fd4GKy|v|-L6Usy?6rl( z#Z);S!`vz88ea2lfuw>tL9#)7Ro0_672!c3!tWM~2J@6lJj_a+dyCm&;&nqBDm|n$ zaABb!Z?GFGSV`oEjDD>WcnJPVVEMzq8SuScM{uCxe_b92?;I}pHln{S@L((Sse?5Q zkf7h&_c)AGO$rBmWKr(8(c$aEp4$q4wYnf%?;cK1=ks5n>O=tYt2eR() zD@5}!ZTtb5MRCtax$}AG`@~|VHjeSOsZ!FBfF=7YfRzvuqoy>-`{LYo#nnkgtRECb z!Yh{$a`!KGj}7wu|Fr~w{6sM#Jurl9NzEJ{t2mrZdolu&xsih?0uVj5ox55AP&`MD z-kS$UFSPbIXkx}*)QKt_zbv?E_K=HlY^>IabK+>L3eKiD%|jOO>?MPbHH3t!WX7*hy|1++cqnSY&r>lN4rdf4V$a-tHhfZFmz3z8fSGGK0t ze!qgZHDT(bR-<(PF(FX5L}BJwtlW;Z0ZR~p@Iuc}>2+61PcV^H95H8;^#8QmxTOJ0 znPd52Q58L)OS7MbtLwm!|Qiyv$Hk7h0_+~d5rB*y$lNRdai4%HdXnyQ(ke}gBjV~qQ}9c|iup?>37Z;ifz*`5Q7f+POjWK-8bTK+nczbs z-Efr9_W;nQgOsiuN$Ii?x|vXBnT$EULKkI+|qEO{%{@ znt#0i2*D=+k=y@=D+(gFl}$N@2?*!zR2QlB(-y4OZaV_~R}Yzld|O-uWvUNM)xwK7 zSL37EIlPOueZZbo!H;8kSh4y|%h4bz8MEnltn*s}PR7Tyy;ESWBH@Dg*6cG(R^AMP zy*!%_Ow&@YO9Bhxz{2QOF1>S!P{suJK^M{ZK zwHpqmY+Q^MG5SGuRh2OrZ{mmqH2U{N9y{q`fsQha79t4tE#x7vV)FSphN~uyTv`kY ztEYJHtd0PVE)l3V_CunhM<62R_}UV~;5zXY>6rl8r}qEts|otQ*&3;etCiKA#K_jT zN0ft32Znae^Bn;an6ptSAw==S%!0y7Re^8Cp)V@G416Y99=hh3;t5k# zUDK8J>XiH~4=Kjmu1?hFdvAT2^fjMY5rgT*Z(=!>_u$Q^0c%)HObiG!=XV^xgU112 z;FtrXen@-?cwDXqH~juc$)`Mjs=+Pr-`x+unn6BIsqtXR@UR2U5Hc2v=iu0dvv{HF zVQ``GGo!L^d#U$G`M*2^cfs=+zKeX=NV_Y8;Uq(7XDB*Py3b7SEEXCVy|U9~5Vt!E z{ki9W&qrNIitjJYyYRHI&~fLw8donehZsV+XeQ|k+ECR_RfLIPK*_uPw=)^J_F{T8 zUH;iFO8Auz^$BaIOP#k9F-)bq<$52;(Fb1@ROA((tC6Jn1vbkd{Bka(E5v}*(NCk5 zQtYZh0A8Izb(lvW{Txx6(5y}H*j!yyG| zhkGV40R(kGz!Ez+WB-z)Ajm{k@q-BPuTdP_mkfv%1FDgT6(j2fRXcOH)dI$0Q3Tip zWk@ECxHs#|%Zw>S1E-0wI&WvP4W9fP+oh@f1cTCv+^HRsm8W(&QR zlTEj-=s=@v)VL!pSK5srFNAZKjro$OA_MJ}P${E)_P4N%mWmL0bCdu0%>jB7d2|TC{;D&Vsoz$JtFPX{Pze#xfbIBzw&|JNC(r-o8NdA>JR`!Vf;@TS1JbfkN*qvn6e8P$=K9;5 zfc-slh}KAB@~i*2Fi-u{g-HqWF7j>DwCjwC<4C#8W$rq_fs}h-pNAxY zwEeN!8qmYounrndbX%WtYHA5z1NOPEA(XTa3Am@trdndWZqGnZZZA?3+8F)bQ zfi(SG`rG9y$cbd>G3EdGBC}i!tz$mTy8d=B)Na3KI zAeA?SG+W>Hw86RQKgvl1GB-VR-=sGX;JQ+fnxQx15+!ju0i*e!-tLup+~D0k0fX!PPd`MNq1%HLF<9trrTbrYe*Zt-`5+j=&QH>A*g-k^ z5(|byLffo;u%{pcpX)RVb7*B0z}jlqAji{+V{pAVb$vTR7w+xNy>rqa9}42&Ay?r$ zh(G?v`b2_1%Vmj|Ux$C{ZjRgO7WD6(`Qh39@W0Sw#NKDi0xYZ5RAr&YQ;Fxz{2@g82EEs#e)g0)%O_kG~CkT(-5SXx>tRgOx;p|)eI z51){S_$}Z++*$8q*>&VTUp`ANci)8FG(-QtiTPsKUp1B45IM0Uq8$`BWirah5WT|& zy*er2(NSI1+$=QW5@0C8zz_zy2rPd$V`ntL@`=1%* zP(>S+IrMFwJe)|_EAYP@W-q7Sz)0YAS?}G;E@!Wc@5`_4WW&hI|KEDeb;jc3e=Zor zGz8(->4NME^0v0Nl;<$NBQocp0(2_4PGZ8>FHMD2+gd|ZgY@z9@)>%Q+7l;E=)1a> z=0h<%k#N=`S%Q{uwGxul{}d>|OOKP=k6!r*{2S<*?7&S8m7GtShp*m$wK-!4DPd6UFJ%T9 z{W4EL_=7?9xUQl5?6b~H*rE9~H8n7NArhPWNRZbY+w<~TXOr-a2?g>NCE697r`N?M zWN!(f2g+Xofhs8+O1-O<#qkZhKOibLO6!WZraibQbpctH5`zand?AWP-~vM0LsB^; 
zuG7O)DExncfQC}|xf&lp;a)%b^Rb~f0n?P5CI}lklIeKVCYrrDvSz^wC8r&e+U^}; zUl-{g{O7I&nj)}CONLK<`v2ln1IZGP%vjw2`8$VTY56~Xr{-GbIZQ?;{+IZI1{bmE zfi^7dpUztLaCz2eY;e~gdN#m}a5ym40z!=T=|A-=j5=}+*WAbKEd#)F}QZ@5`z<-5%+xH$2%42L%89r5B{!I;B~(q)qkc{pY7Kku1c;Y?P% zO(g6zKDf6Qyxqg7;>B|m<4kOqchuNPYb~=r@6+Z&w!V`JHz5q#{tz7eKMDX=d+T~K z{uTG0%I?-yR@=N{;nI=my}v*%Mkv-N_)cnMsmj5nC#8Qm<)arGKE1x zV&f8?`}>nXb=)13QDAKdBw_o>harQ5$V%B(08`T_g|3mK0tN5WftoTf(Aw%1D&b?C zt<_oMW5CaY83izW?h3#8Jm%Gf#PR3t%Ka5JSgmSo|z~{UJ}mVF9}eN7`epnt|ilhzcO06 zK<~lXD_&V^kfe{`{|1o?oL`GPRv=Wjb01!!@`J*p@)<4eaYSMbk-+50jD}F!w(+$= z!DO{p7UE(2AMu5uJ;^5kTR} z(qNkaDE-&8ztf3?Fe5nT1c z-!}Gl1v!8joH1`n=x`jVIPnl;Gg@0r+O?HNjB$D-Iu{Z50CHJDlL6Of{SugzNR0m{PcSz(nkzZtc_U0x&;KLs(0Ik)B;Q?%vatI*$*)t`Tu-)%8_VhRU0 z2O;I!ptt!+u&mKBmReMOcDwrwZM6qHdU6&E8pWT{-aVf8uC9T(TiDx0hs zW^Tg)>N0MqTir%q<{osfXg7be2BPAxEo5f*duELE#@ocbQLdbO@loI+MHAkLUa>Om zyUxbf{@Q!hUH7htEAQ&iBtK-vMW*p#+GpJT|4vBZM1^_Bdsq&@I^A_wvr}&dZG`2_ zbNx#b6)vOE?;mHijqJNR9evEkdXziVryS3bT~58$r`D6#pg6WBF z`@jPc^G~=5zM$)`1)V~7vYZ!Fg`AWPCI}1R|s%A-5BpEZksBYh5ysy`I zMo8al7>Yyk80hKMQIbGF32GhGmS0qF(EZU)x5&)zPMB!Q@Q_CIz+SR|((IKX&BrhC zB*UP1$f!n*5a~k)qwN3Zhw)-t6 z#P2CfKLdIxn7tMV%YE%8$&mcUvYZH$LkHOV*Tk=?Qy!|X2p6naTqbeG-VtLY;mHQPXu|-Pj#!elB5mwGecTnj*|SW$0>aQXZ3;`(oNT|bOX2t`u3H@4D$!|kGBT~ zj!l9#AcJWI+@{M1O%yGx(CFkZ*yNyFm+%^acFCuAo#clSm3woUM5z-c|6Bt7<2nUbgJX`0}#%<8WRNXgz7cQ;5`Zr#z-) zDj=ePPOn7#eRW1+s7q0#r{mr0ovgaWdQuT$9A|J!$ZRM^w6N4NVEe!S0OMY2je2C! znv%x*dn=pD$8~hG< zCXPrZ`758kz5FO4AJp^&1@;Pdr_=-ptqkBbek_7!MKfC#|9=T|_C9MJBRmXue;va6 zj_1zS&7Y_G-F6_y5Wu^G?|4fVma;-*77Jf;B?X#x;bO+23>Y+BMFB$P)SEeDJcp;o zePzIuMzzON&cK8xg%?%WT9`i}2kw#WCr7Z(fy755Sr%(WqS2O(#^Q>$kK=gjp;;Ll zmtdeYO7cAi%&*{Ks{vnnyz#D5zn_Jx$~W%u?XW?&eoow}x!~3!TmV?}8y^PkKh!pi zzy{xdTKVpMfbtr(hVYK|*}I`fGCNgC577WN{vLkRtjP^m=7xA1!0ow;Xa+&Ci}qOp zQs{P}hfsW}9x`02OlIiu4~ylLm6mZ|{@&=ddvCj$2D5(MKG!7 zVvPu9PV4|tJ|oG#!!lzbn?{!QdXUH09)x*fsATp)n_l{NceH+VSr&17xls8=+-y8X z5H~FT3K|7gIXUE5@h~oeZ2=#+2`sO`iPu7a%6}`Y{E>=l}~FmxFV=) zNx9nLI^G1x1iq5YY-^D)OcD>It!1N)IOzsd@cYkO#RDt2^92KpH+ks4J#!(cI?mB( z)IB89b2NYuH#&3cQjuAH6qIh&<$veD|09yg=r=EyZ(W#_zIw}Tv}b>5{vJUsaET#E zLUeuaE3>c%&47}yGbiz9G(0;s5M4S@&yHICaiMVbr5lcHK4#iA4yug#eMUd4)0S?0 zA!HRDoQ<7l<`Vw^{CIwavO|pTZv9Xx^jtum$fJXW#_dHts{7O;$+!&Ho2x7Fl+Rce zzx4ufczv9bp~=-M)cd5(Kat)qLAxoT9iFo3W)QCK1(L^6NAGtwg%j~t<@dH%rLfx$ z%Wy7NMHLNYU+o4xhcZ1|x@X+Mg^!4!yP)Tlb2tefh zg=`uom_k8n2bf-6x{e57!@v>r5+Hqjd*e(1_d>*omh5&`)enyRz#Dz7Qhclj&|13A z^s)F^@pvgRezdr3cMfRw<9z2Ujkek{XehN}c` zqWri&!JG@%fQWoPjgu0dyM=jQ|Mb^e0N=u6UhCw7T?~EVpNcQau^Rg9WX)aFjL2^_ z89i&}Kn=MeE{(?8Y;`dBK9{w0!r3|<3!Y{Qo!QmY8@dkk!y4>vYVf{Bo1g(ATi_InJStGW5Q(S3-h0qMsG#qN?ggnNj)D zZp0K?(?67`{3fCB)Qt~}xpN%!zCUyE6OaadjYnB{XMB0&uHQ$59i;IXSA*P4toYYj zv5MX98zX+ z>(0s2A-+o~f1Hly;pG%!sJKX}473Z+RTXBJkHU#-Z@2(R`huP!gnAx1;c z!4#CvY?7Nyeu(dS_+had1d8*ty_SRmR!_VaK@tEB`$W262qhH1;*c_aIC+HN z&E;;2ggo^|rQ6Pc031u-PR$dQ>1Y;ii4vUr zYiBFY$A0Cfj;gKalgAtRM2&GgY^}I0F6f_qoC#`2Lz-sIsPjI>l=eoM#dnDcSBb56 zom!P5^m8!yO1WvlBMYUzRt(hVJ0BEnKW^sK!ddoXL^Ns#LAU^9i@{LOZq%tp&c3XD z_k}c;^RQvRS9@g22xrW$!<$o1dIM=JpIP?cvdg|&_!3@E z(jjEboj$>eD5UtQQSvzs!kslScTX&U!eV-60b@+^wE))7U0q|-Hgz_aP+?zsY64cW z1R5%uu}JZ>E(qjORSK;l0OFsDn22Tm)&hsB@SmvX^Ys4q~d0O}Chbc_#Mlyg@C*>6!MqD-Ho} zjY!M98=AT^Pq=w_+Qda#J@#pA|XSHM8c@y&4)Ar@ME#i+O_AjpzFK z>C49WX&)_F!^84U#7do5fuJPg3puwjaMvi!6FYI-F}6Iz>an4hx$6S?>%C*>t=-15 z9?HS}pAYyLAl)(EE_=^sQe)+*LvaB(hGNk0 zDou_xqV(48I?Rw2T_v?hGc{0X3^uq=OR3HtdM3o6D@+I5O6flulBxmL7$Ob(>K2LO zc5@j~mz*c(3i4IqOW~k>-oEAYJ=LWm(S3jI$vFSi&`a;riJyVD0$JUW_tlYiQI>)6u8h9pzvDvV?^G%xYOl|AkZ$j5ZDSL63`9L-NH&oq6V 
z$$d-Sqrcq4AIFf|Ybog*-{1oFF7{xuXb-p*+=(g#8rNWCxeH9_`cAj?r=U0Ma#Zv- zbG?u}B4ln<)R?i_o~M&<_p~;4$TpRi4@s7je`>qbRzD-m~$A0TGbvZrsIS~=b(?OT5i4k?PpSmJO zr2iYsvG^|blR)G(c27G1cuTqX_}qK}rWM&nB$$xRwy64D-zj+E7Qi|jA9IR1BbX28 z%TorLmN|^Cr=e5;Z5RWW&g)@~`Dw(VJShw32`8*Sj?N8npd;!Z^1Gf9t#3Sw6GFmh zlg_3k@RAv^uOMi}JJjiw9{Ky7+7ok)le$bGMmwy6FX5i~ox1@Z^HX==rt&8c@ zBBTf5*5cB`69(LVi2LMAmxPlk(BY%0wf#Ic`hw*(PG zU8t_%6Hyxp;QwBP;e3&U`bd5|-tC^*?vVpvup;oNb}Owe2TZ#CXk+|DKGw_uB)gr_ z71RO`AiVbiw_(7mox8R%e3R4OyWd{Q9Z9w!LfIsh-FaBl)p4jsD2jB?d8^xWjnMj+ z`{={QfNxi;;-#buM-`V4x#owAerJ8AUkC=;BoyhFM1zJHA`U(AQ5_UJ9JHAtt^eM+ z`DUQ#rJbSRmKh&nlUPtsx}$#6VQaU7m;w=*zP3POZF|4KU!U>qb0XxLZ83z^<%ZS! zW(L}JsXkz#)=m^gQQdn;&fRW8Gx@dly}$$rp{)hC`vd4ZUExP9%PJmovr)n2vPW@c ziiH9wf~d-4cf)n$(rV};@4x011&3XenUat$vvM;79giEPOAdiMT916;U!AYUoqC4D zczE1n)%VPh2T^$m4W(uLUbGAA)}Q&uyOD8@vHdRIZO=<6E%N=~GE?Vq>eOM4w*9(M z9WWdFZX=2{k8wkp`J7h{hBC0kn|(Yi&PnwQ3#5=}@j*RlyUN9yfRcj2SkHr*Gr!`1 zbr&NZb%jkd_|9@owST?L=#)^*2vP7$y&asC+&3GcV3o(aYht>nA9`o{c{P+-eX4Cl z9a~{gijIE+I*5nR@%v1%@EEVCsNL>)2KuB!pnx&?7tn_ge}LECUfpR-SidwhLDDGO z>!pU2bsBo9w3->*=9iu%YebE3Ky`p1)e7CoC<@r}RvX{-_dwsBZ_FPSbAD?x0h zY*V)t{?$>z)xOmT@{jlFU!uJm%P|Wj*8zgP5fK~Loq5+aKSX~|!Gq})uls0xypI+# zSnVFO`BDUo5T$z)`xioJA! z>7c1+eKw{t@mx0JS-TUb@JF&s2NHbuur45k9vJ>ZaScFH8kO~9*gUs<MERgPC-% zdKm%<7G$J|V+sks`%T{0^57l}rBh{5#U$2#Uts`qqFs zr@o>TQBaRnim2EZ2>Fq|pi7~GV`o@1A8iv#M*lU-8*0-t?;+UecE3j%O^mB{srXFv z6vt<#K=m8QsmM5hPRtIe!1=6t0rsM&R`0wYPSCnx77R~+@{O}CwCFl?v_ z;w2<$#*BMarKNj8dY+hRP`pVoyerIV^7AYfS9E)@M^lcS+4=ex(46yI>jYCyY;=O* z%Fd0S#R+n(66b){cehiH|AGtBS>VDYtBfCxO-%SWmlDe#FBq{N<$Gp-LKrR0YV$hd z(6(b!!(@?;^5f;!OUo!tlk;Glg`$$ZdB6Ea- zeP_kDB@hg-xfPAiv(T$XgqQc;GHw?MR*cQ>eeEO*8YmiwM&op9VD2}WvPnZZ7%_ElKG!zDRr0_uUAwwB z`JEZVukHxBsBiREFM4(NWopQcpztP&tO*ZCkp0b(l4VboJ%LBlSOdqqVI&)r}b14^cMr=Ri3esICW-1!y0cKL!w68A7B?6r0%{8lmaLcJ#nT|Sl>{YEeF{a+o35dk6-f%Br6lJXH4 zpM9-pp#;CVnD7>8u?k(|;&5sjNpp1H?l9DMJuuq*5YZ~o*DywiQ+IZzvux63F||Z# zwPlS=FefMrFwt6n{E!%CHtyVUg>J?@n-lsiPM3w@FuXsEg%*^9ckDak+KtzWl&D#X zdfR(R!fFn*65M@CnPo=>X54B+dR4y(X#h8seOm-ftk9g&{;v7sb|lWYJ6P%%Wg;bx z_lH#9)tN>(g^`6%7-yH{)e&7WT{JnF?Jq=bQe!6zdj`eDP}fjBJ`=j8S$Hr-^^}8A znJPpZ%#pRE-uySjO1fTO`^ST6Xq$L8Vl;tHMWV@sfe#t`F>J=oH1ZNwoTJZp@#Owx zpJ&$%Hq#TBaQ?4kRRL%cN==vIO-!Fz3bIf*_O}EHl$bfVlx4N6s2v*v6BHNSOWIz0 zTw`@mx%6#?dbRp?`jSSOY}t{SsKTxV6{lYJY_yFKSbXalf6YW!OnziQL{K2O5sRh# z_+kgVd%h0_&CM|BvNgV(Nt@U4>6p<>^{_sPl91@>&e5cwo4f%+0In>5ZFBS*w07nw(UzmQu_i7S$) ztXz5H;kt55?9?nZZqR#NQ{dtWfAX$NS_FmRti0SrG|e$0i0mmUWT%LpQig( z#@>SB+VUJ4T&rmZh$?KZ`;&?nrH|fnf3G#YZf<;-uq6vu965nJI5CP^`rl4X@V{oTz15s&Kp=vd~~Dg*Odb3(c{JSgI+N$6bh zU?sFg^6q#)OL%JUMIHh=IlK3%ghi6xM~R2Ja(t%1$(!h~d`9jPtsuVZx*u8uu6?e= zbiv@uirKGVtCmnY@JOTwCYjM#`-MHM`NrJ+Sfx(HR6^h@cwT^(SAN$U{qUB)#2&qt z)&lE~0bQR*eje1jARiXn(kH(qp{b9BX_~h06ChaL`t%J$s6_Oz+_$1g`MaImCl;561re|_4ZMA12WiApvox#Z*Z&Y({rW$<47(oq8X@2mP` z(wF**d0UqjA~uS0wQ{kb;_LG)Ia8k!W+(aal2&pmK^JS2?@~4gKvJWp7?|Wkc!Fh~ zh6>qE)F^rJ0qPX0*2#NCcqy$DRn0)$o4!>dhDs%`IsbJtYNSSNOZLL})`qpWhXBNrMdPk#q2!mGi2VLQm^~ zSkpGzm=|{2%guzJPjy!R737g?LGEkOq@gm`xc@kzv_JjhJH1<3`73uX+Y6jZzlyqe zUr^2J#eG3>>%SM_T`5bz@Y6Sarq?huxMD71k81XRM2)-~|FNP~fM)MVx{WIKVHT&U zG`GNv&|&Mu_baNgw~<+Blz7THb!b$`zL)8(4y7dCJLdD{z0nrsw;-?puC)3zCy_g$KrT?xBFUlk11`+mdyM^(p03_)J^!1T97)?R?-bIU)sMu^SP7g< zpGi59zBf~FZ@NX)8K((%CZj_dFiw%O(ev^Og@M&1?YA2Zu|$xuVAa`~=GU#Ax5%nQ z&Y@kTLM6Zqan80aLdq`EWALeTnZbiHDMfnQhm$5Q@(8*cP(=rBg}O01N6Z6c0I!w^rijd^Z@&ndxgSe8Tjj6hXkG7B-+RNf zLUEXqy1eo#>wuzew#|in22tDRa#M}Q_ZE9o&8*Txm08IoU!n;}t|9=rI-Tx>bFRqP zF(u~w_*J!LpuCi0U^Arw7S@~x(bWZGZ!p@g|0?O)YWz6c>P5-;t+Z;LzB9J55UW}I 
zrfS|^#@MjJuT3Qc4Qvemw>I)9sXhq_+FNSDsNQGRmG>;D-S{AJS4&3NDI~?b^z8mzw4C zQCHH2)0487Y})3j0)dM0MwXl4cl+#2g}EnPMR^RQ;HgtSCYnF!eBuj=s)vCa{5{Fp z`LSrh@IE@X18JqLGW&sBBYzxFC(NVIobS;+1o9_vZHm-$p;36%@6WX3Z4Od>yK&J(3-+PZS6| z34=^_RWC_H{W|eL`shfd?r2k`_X&|I`@RVF8g0Je{o``B>dtgYniG~)jA-9n`dncJ z8@;1U(Wx|s<(T{3p!Yv`I9@ps=%@I6Q~f3FM}4a+l$kEm zt)z@W;nX&$ARD0FDeF~Y^KI(e!^DkxeZgn50`M4;h%?kSK6Tz9>{9Z%J&}%;6MI60 zgc_+7~cPtFo+@ifn$QJJ0P|dU)L2!7@3+Ji;iRb*m5NVW6r9o}65^{oy@@ z5GV78;ivtmhfI%`I6E?p88u5L+Nfj0vm`}rm=YH@G>6mD)i}^m1xlXwFieEq z?MlHIx)Y>tw4mX*3NRU6uKhKSd24!<0xSBTfApc>B+NJub3!w&>_co|17*frpN&pe zNS2Lp604$e4Y<&fs*x^WpQ)C&SLvZG?6UPkTa&H-p!M40)RxYwsue|>B zm%)n;%gz>*4qwUk3+4-OWnZYwQZ|Y~GV@kiE7W5PUAo8h>veFm)rt zX_qCrvieV{cZ=rN>GSk}VeHKO`CX7&9l`l(JCl*99jrrfG!-k%)|1+suhaW_!c>~w zx^ULzoR8(~VyFDu;rT1UQg&VUaV$g=tKP8v%B$WN?y)-jJ>YQ(^KdjWI7>Uw`H=Rc zfwBkD)@xp64rUSHD{LoX@8-J&wEo$q5^rb-?seNda5g#cih;jIHi5nEI`J zR#h}Kf6U9_C24)Eqsk7#67YG8glr+0MN6vMJ?`Sle8g{?{GzPmxvud9wvY;Cfn}7+ zUHVb?Qj3PDe$kSInlYo}29N+sA$ntg{_)bs4%sghX4i?7*f zMC{I6f)+MDOU*Pazbi%*8{=ZfiNa?U4LkziC5Rs3z?;QR8R~Za_exKR08)y=h#b`29oBz=(Nxm#(kQ`cE*d6d)tcc5W3TP(;s9TmE(MR zjnnm$j1-kw?+yhU_ho%4>H&;N_y1|{OT(#bzqhw!h)fA(CK;2GA<0aIWS%1lvA3ZR znMs9ulq9re4B6NeHnB;HWQvTLJ!KoSl`+%yUw8HVp5OC--;eM6?b#n34i4`7x~_Gt z>so7_>pYD%h@?c@K;>&zjh33JnCGwBenrZ=en!gle)I>O*sqlUT+(q-`TNs!-y@74 zbUV{gY)@WiH%&@*VrD&WY05p&e3&d}Y)2D9qa}xUP=jitx(g4Fm~Hl{V<)E~qF<72 zI<95TYSppa!tJFPJ=ddl!tTx6rO#lu7U>j9FLK-smq_jGy&bv2Nbo^|f8qodgUCI@ z92~fR(($&&W?czV&}xR>H3rIvCfW!;)YIcb*Sh(pRMY~qtZ4A2Gy8mvD2Ic5rH#}c zx6T&~9o#3oL@-;P9PT+5^SV_h3{y)#*cEFO>lEvr+Z1t!Z z37nPo3U>#oQF~&JhAe*HGY%Q~i&}Gu6lU{F8D}T6V-Bnn8^}U!TIsp~uU<9o4be4O z+Aytr$QmgWq_#Kfb+IDb)pgf-{*IJCUXs%~<_)LqAE$_95zejj)o+V1V zFQG^piEhi*%PQb*-47ubhOaQm#!l_iyR9)gwfBs`H#Rc7Odd`F;Di|mK^lFjo0et} zmo4Kb;xpe+O>>`8nFr;^b*un`@vuRJ#w}lh;9W>Irk-0qL2h6@uzTn}NIlu9Ro(Md znXc+w&S_}y))J%$pVq~B(G4CC=rOl^e34c9gfS4$qe*Sn+M4nx@P|p#QA1F4p*MzH z{&DT%)AH=n8JTd%OXTCMKsieEp*FO^fdO*nVw2J9`44blfVF5!DC^C21$B>I<$!s4 z@-9!#BB%UC{1p@SJc>^u=IIvz_Ll{RaNYslD3z zB`-A29DQV^F)VYEPRs(+VJJ19_bq?)X;Y6t@V5xdgdT-9Fg#SIs7=8>e3l2KglC>0 zS5R)jE{6=!G`G03#pY5+rU|nu9YL9nCuuH+J7vxq%_WLn4FMLt5$1gi>aH1QB3-z@ z!L^1e3@X2`P4oxw%JbD-?hixclU-Db_N;p(je1I;pdya_l;NjOj1b@Zh2@(iakz7% zKvw`;&(=p4Vlc4y>~#VEaWbp60;sQprU>YRA+xVU+-NsUvDb@sO@)eU6WPVT6~q`b z3}9}d4H8X0+1EMHkf#lWZucz54L<86WMNMA-76BZ)33@ci@sHK)DBZMv3G}VXT6r& zEre2B(d(JcPhHOea8esZO@ix#xwQaDMG2nkF3*cKRuTJj4l*IaTiC~Nb_;Zvhe-Q> zJkT?07GW?vOi+ZkWBUcT;9r{EdMq~OXBxjAhZ<_*L< z6efS4sZ(G|H04O9JkTlPStCY~=S0+vdAu4aA_ut47VF ztq6|-^Sow{nYQT@WO~TvZ~G?yl;Ql`R{>}DO0f<1vPby9!$D7TpY!L!s0$UWJqbM) z_O5RO&+E6rsntmG9+dtS^vC}W`nBl$>t)Wmc8sjs@8%t7Jakgb*r%iL=-#ZDy;!5n zn3BN74nKm;kSM?X3+$o>BCU+RzSrDbkF+(dx}A#cmbhEeY+xAxZ+8C0`?t zGZTBW=%5U$#L=%STPsW~PGtF6Hlgb?+i6~)r3 zd)b`a^yOZ78^VI<4uA4cKV#$&^Z}BAM8_9$B7R=g-%r`^(+=r%Ty?C9G95knBM9P} zFH?<^y%px{-{DL($OAjmRq3lR-!Nf1OE{vo6FR2^`^U>|t4@94PwLv|GTV!9EAO`q z6DVx7c;i6))uUJc>r{PyA~McS?#0>2MH?OQA2qJEjYTehDm6_oV@c;6J6}I$SmQ=@ zi!gGm%0Jd+myyfV)d@4GkK^8{UcWqp?O4v7)$Y5ic)Z-PTv5n~+KF;AXH3xAL{wTA z!!Zb?BPYvZDL^zm;qg>0bn{0%h#3b<`WFOqoci)kMs;>*OXFCWH^E}a@~8!Sq5SpS zh=Uw%@@&@B25u&kg+6_l4$MbPC#DP2jWK^=ot{anvUTHFgG;!RN#5`R-KNw>Pl?UT zgpV4Nt&cCMzRH<^5wrB69#clfnaOm6coF7=hRUGm9b#_%D%$C>s#3S~Ba1Ih+6_Kl z71iyn4P(5mnYC_OrMP!%0E_?11;^KMpIz0ek{7;S9A68-yrMD ztR;8vu+99jJuaqpw3f%_>U&95F+0pyp__0Q2(vbXFlFF6iZ$dow&de-sZiRjmWU&< zld5v5ZqkdTcBCh&C0n%HFZwjAwyeYPT#|ZVXdY8)5`gMUa3Ss@lvv3yytT@5}lUs z{A83?A!TofWc_pFEeSPL7Dg&2GjPrr`>Q~ z75yiw*NJ{^#PykiVFAiFI{#UGMHNDxx;)dhCF6=keT>QkN55ZiF znphen&};J&&vU(n693((yg4toSn*x7814L!>1DVLZKXba#T3V1=+^?jpR@28JREB* 
zIdzfHqrt&hmiBbdM7iCsy26v+=feT|ejUE6*IFB6c`k=40%yfNm+wl_v~(YCL|&F$ z+F-{9*VM^rFD(@_hQ-IL88>nBE)t3wZ1xlG%mH8W7w~dc7Ont7l-sJ4#h)u`!uKxf z;4}y#XKk1`)8)g*xiaLhi!N8FqcbE7)`2`=jO#I;O7>10YC1~^-fy4?Y74#Y#ZSA- zS9=%B>K%yYd2@QomBc8y%Q+uS&Nh>U)(e;V-B|shd6p&h8M0u;osMLcH2M7>=M6tT z(=2PNIlQTA{#=dYtEqLLqjZnYH;Yn z_PS;9#8Jn3dPQh5_L9GjA1FeWXoi>{W`8ckHe|y_DKULNPfN+?igr|Xs5wz;*FptH|i)i4Kw0q?=0K%EfgTZ)^*T7|3C z=Y9NHa1+`bA)ee{yLYBA9)2n4Yd?_LT6J3YY2Aj}x@mNqqQ{m5K7-G_Zyfh5jeEfD zJCv>bDd+Bj@bc;#L54oejcCdQ6$)sXVS>>UM2? zBK4i$gYE+c*WtaiABA^e_ia|xZnZHs;M!Fny;R}eFN@G$t^zA%tVG>_$kbP8w zRt~-F5=N5u>IS3zx`3HZ+C>#@n+Ow!Kq4tK6nm`L8Zr zoTyC*OF%5Yid8Pp zAz1eT>9qxnOtJ?b@*iG4>B+V8>vx7diC*;fck9|pCVve57AcT?ztk+mmQnQJwImD* zWpy8&i!Q~NS10F}BAnIP9KQjlC?VpHNM{f<4g)@}wkG{orJRNejGfWZy4^PN9sr)& z#gsUF_d0TgP%c&r`WuK!4vjc{<97&TsWDg-%RYNMbPDp}#F zsKCVSS&m%Gmz2yX{sq4=Zk52Y6nRXdZMpkn+fo-MqyWtTb4ySes1yI>2mL?wW&1f= zJ%+2U!AKw~FIcTT%^%=YIy1+msP?6EOb!H#JTOMBayQP9z(1|DP zpsj(uWLb?XXLH=K%Dkw(WLKeP}ylt2KyIyI#b`b>U*SUN_`SvUt=&s}V%QPe1_8X)}yTTMUHg z^)UncYG2{YC*j)3M7R)t<*;yWQ%iAWy?e{v)@6S;Wnc{$26a0$KUdB=@vAozD(R<&Z$-dt8C4qy!qPg(7pmo8AabIqgB8=RyEw^A{wf$)!SaFXMq>1Hy}Hn zMn@K2De{XW44l-i&&JuUM|vcKPKL)Ya>6kyjkEUUn@cV3YXPHeo9!x*Jc&|2Va(%6 zm~VS943YpSLHzH-bdj!K)c>HZM@}&ZAAUZ@Nk2sUY?U#45^%#^rUWIZZS=RF2c1Cc z22PY9$xLz|EXcIxpM19zDFB&xR~{67;}qpiK4oWp*w*X?G5{NigXjXh=6zEu^Gx$u z3Ju#)iF?p)(jD3ah^o3lj&W%Gx&rkkJn{QGo?#&L_>(_yN(io_?Uf+CMcjYP<>l&V z*y!LvU3t+S?35vux;#{q3z(`v=b^3qta6i$hXCxU?&D8-Zp&A^%3NQODz5JQWU>RL z&W)=t5`6WQ8QD8-H-EVYKDGA>+=m#?*$)6XrFH(!*Tv6WE)m>pFM82Nqv#6k7vS|E zA^9bX7c{6mYDvP+#m`Q8>BsUh{Oi$1-Y*oeJA~%=I(sJ4;kfu|i)Em_I!Z(P>$c<*AJ>BpK7b@di-uU0^6#3hYtD-P~#Yx75To?z{b$8AJHa1Kq?e~gL z-ae%@l1@>~d_x9`!{egoo=h<%xLp=OhBHHLu+%qZA%w4G>b+X28Nv?v7!9wQMbX7H zTDBJ!X}&YxU4jT7Pw`ah%hbpL*jx?bWipvKzXGUX@PsvX4?x!J3-EpS?k#$zMuX~k z2Q>~R6WeSBxA~e?XgYu_@#L+s!WCAw7o7Qkc(ZIc31ZFF8?rY5m9^XmfUmaovf^cb z4V%XGeDW?Zyg1R{O|C*aS?hYb%3^(qesit!tsv$!g^2qevN<4ZVM@@kb+5eDZ()so zL_Tou_?zLYjeH0;+!K^9JqOC%$Th1Yi+-m%^^}3pxK0)ozTUX5Ocke3CJTY9m8(5Q z3jA5}R)~)7u~6o;p6fAiEMJed$34hT!iaS}y5I_mDZe_v6r)ZrELsOjv;mhqEo_}DmwRoI2;u`# zfRWY*K$cptih>8M!VZWaUQ(rQmmS#VU*Jh*hDgCVtXjZ^vF$Uy2cI}VF4Wd4Oa^r) zlf#k_pXSHn25O6IDv~{iD=2lO))kd42~-{pI;fj=jTo8d(J3qlSqa%eB^pggTsTV4fm@gz z$TMhFkj&%sGF3CX%d4`~p@n2Y5EUN-;tlbgNkl37)l0DKvC9>jh>z)J9~uU1o_Z8( zmeIve1AK^>+|gRalD1y_@Bzc^o63OrswK|ih7VJ6<=9W^Th7zF7% zGLG`2p#q)Z-chFTiL70U10H9IQJ`p9w`tE|BDGK|DKBg>Xnj{K=wSxlPnX}fLxbF@ zFLMow@|Ax_VNhkOgtX-`?}f}-Df&MHQF$E5IDuU|OF_>gsi#-Br{Y5B98D=+Tl|+K09sbg>;LBJu>F zQr~I^d}kvJ<=B@?pb#P9^*R2Ar`M~sUwr7mGXJ4S5QW-bzs|FxV~;OOrV&nbdhBX_B3sMv9kB>E` z>~shT3%-AR{fLFJR=t%u&(rdE=*VD}<745~9PY}H{*t?j3tA;3#tt2f-Q|d?NDA+3 zWcdAW*Ohm4m~=lgd1=PZx)r{Kv<&Qu#&%o^na>t=VJ|ris>9BoGdXsC{BRoRTv8y@ zyrD@vB)i6nioXq9Fyv7DuC4*4Ui3z=xsQp%2ecL&n^fkF%gXZuAKW9y5$C|Hws{5V z5vr6bdim!3RG$C*$j`QmSjCZVoIa-MrXhyQ8rDBIgxH$g0tQE_RspO5YxTXjp|X z*m{576X^^?eX||}L-iIDlyuMUzzIALgEMLvFsPS9SA*q`MnA%_y@&?}(C5~f!|LNe zrjWq3(>NO7$9PQuSossdxpb?*b>XMD7EqoA{GNrI7e-g!R-y|i+jGOCEzi1$bl+Il z9xT|3i^Z>Sj#3}I96C4fS^Zo#@7KtXvZo?9U0s9~*xI_?Wo=_}Kb{eZKhCOa5dS_x zB!0XjVsT?|V+22))?O&TxVzoGw0Ckje>Ln|aBpeRtbL!2b?Jhp`?@wl&U=mTP}A5G zN_n)qi-LF4M~P=oN@{Cscz0HAQ#PK#0-MR4(8F6FJ|UaT_}m`7t-l8q{@~LvN=}@< zKU(f?i_bco!Gpo2MxOhc=}MFA1Km!{S1;y=>9_oF6%`JX-@ij?sEyDBr@X3wjSb(< zN{B8eHR8M@J<{6Yz^~vtzr(>|BYj6oCa-$y#V5NzCGp^n{ejKw2JWPy_0@{Aw>2sL ztJ`L)XM}}$bZ@K6p7(nx6gQ+E0x_2bod!RU)PrO7=z^rI|7+=PXoqcB+FBhISZdC~ zG92f0l5WPD+1Gxsc_c4|=|ePLYXH@^m^nLueR-C4dZ|KZyx8eL67NA^l<%cbR=ioW ztFLc$n;IH`xj*rnA5p12Sy??LF8}QAJyfsZ+_vrx)R8?zo^VYnP^1S$+qu*`IH{f?kn|B0f6NBt#CYC5=u{4=a7L 
z8&r#lP8jc4DBlisvR6c7n$Z>)tXc)n(9R}B3IclIYw*$cGt%9_I)2xZGvbm(|6S+U zW}8RUT_i$zp|f=t^yv)E47|jK)qK7%QOCOF!Pb99wmY5ip2EQBtEEnTmVM23uju17 z;ubzH1BUuU4Tuf-7))m!K%uuy|7-R0dRSc6-q&tAl*2xp2DI6y4r%m3rS}P<*HzO< zS2USJ#F*t{3N05>WNNnyX{^~x6nM$+2At0U(b+U0qEyE^Iv3uREeN04zcT>N^+_K0 zW8H~=D$pgN^0vYv+Tv>(7EaCdaB&^W#-^;aLsT#`>L{f&mRe(KvSR94$Z1_WuI?SW zt>kXidz06q2+xCm0y=|qyL)9WSOrgIMlo~m41gnnrRvuDeKm@aAys2x=jaC;!n%#E zXYXj~<#t~;>a zJyJ_=LaWhyw+|;_YMcAc_aocu_53X_T@>n+qd#>=hT!;_Zq?)vFzYU)G#w`B1Xs^4 z$QJl$3$$r#Q`}*eUQR>!kjHN$DbU;;>!u9Ido}ua%h+b7a_bgZ`p;u~UI!LzrAM@x z&EQCP?*^nEDo%J55yv~zBsdG)!**LKd)biBgR3QhO&?2Y?d&fPR$ZH20?Il=#6zpm zn*@D7LCA&L{j%AHz(W-3k9sm~hK9!b2WO}&hg?#gm2<+@2$lw5&bP29X|F=0^&8KBo~xZ(|%vrRg-A;oWiy zci$X0YKP-X0oheukQqCX3NYoKT7#eO$TqE5IqpiRO%;l(rZurAhC(yvGen3bG*w#O z5#7+E!75`<%}Uso--UX8=Bp=)d+$wNoEFo~@hjJuttX#fW44=2%tGSxs+s(P+^%!V z`1Ms$VQQiAEkeJ3P7xJEBhkpr(=r~+&J12z>TSbpwX#B3L%N?>D}(`gK0X`hdv zxBbUl^WeX;w!`&9DFjrD7t;;~B_U1=Nm@Sx#SZ5ai>=2d-)a(SO@9yJKu+y2_l~B6 zrvsRrS`wwZ*bPq}?YoI%TK(F8tOnXAx}e8Tw(lOT2FfabtNp)Bp&faY(KTkv$+&CG zwv#ARY!}?4v9R4$fhO71hkXNSjq`oC|FYTkincad3A$vTK_s}4K^3>~pm2A@5Ohf` zdLZ5{;`zr}LnY0mDOPBQUtl@V2!;N(fz zw^&5~XBu2#`U-hixwa~q>qA<=#N)M7pCC)?fjH^ix1)c4eB-)3b878^Nb8VC7*N}3 za#%@kE{XdH!2$iI`!|8Q;ryxB_XD-a>Zx0yOJEMMv*?f{Si?`$hUYmsK zLM*CwPrO)RdJwsC8meSZ$~}iq2z+|==WU)`c&oswsjR!Xj55_%PQV;IdwA}I+lfDK zvge1T2+O)Gq_lgfRb<&+siX0cJ?_^g-`6VeF7d6vnz{w8IA?kYe8QZ916LD&GSx=GZ(geqglp*TlGo>hUFJ-tQbe=d}TW z$jOWfyIHSMaNdjhg76+myg4iSOvzjafE{&i`ssaD?1c$KJWxNXaqpxW6C`XK{%zKq z94!%Xq}Xy2Vg{r~aYL=?Jb(B>^Hi9)NK`U^$kNAY$Y5bve4b6G4sLa!+6~UQ-JZ0_ z7HPZ627Yceogj$O?7dycW-V3X;xzZ3JN~|t(!R$+al6x;QPRJ5fJW51kqhh2a7~Z; z(Thr~3kWg}Ih?^YsI?Uzp3o_U42)ACgoP7A8xZSX02XLW+d2f zIfYa$SV`*T0P`2TCk|;X|2$GQeC;h;^^29RTk#aOI}29s6{!(i-rGTR4hstS(>UOT zG{&L#O<{~RPw6Qad@I&^eb8FJG}pzWjT<8)D%?^?Zb;mk>-NS?n+!e@it7%>ZJAI{ zydiYMMlwB+YtgE|h2vwofqHH>>mi0h9e6>cay4RIuzbYgf~P(kaL{S(;~^~}r93<4 zqa@sH@bQ+5WVxEf?}zBB7pbJ%z6ERRU#b=n=+<`HOc)LB4gYo8IK{rVc_9Ck*J|L! zC1~zRfXT`rm6r|WrLwFByqE$Hou-yRdi#pS8P%N^`-A9YXflKA(TWCxSf&VV?5=Vt zei*4yc~F6l?U-wN<|khMX2w7EC$vQfFDFFv=BG9PNkW)?Reh~)&-g@~WyJSZAI(rBd&~b>TNE3Y^_{++eNw!izS$c z*=p-G+q+i^=;ITy(ga)d%U^Rf2RW=z4p}Y1O4c#3t29hq_ss&W%Jo6m_RHn$e_zE8 z5vb1=2()>}2_n;!ss;JSNd?01i8{s`PkUW8gY?{AH7n<6+V~}Gbm3{(Z1ZZf>Z*S@ z8@wNglNZM5>_;^xBJ$(LkBdiD9uNda5bliM@4z>yJ~Id%t9!{p@R;y?LU@tzoZZ+dp4cAWq)2f zi8r89K8pF6gT?Kcu2O5}h;fo79ZSSD<5{oEOoxgc<_9ah_!*jBpRZgJ$GG-(7UHaJ zOW(QR+pu6pB_m2^Vamwl>|UF^@pSj&oOk;8B&=6JlQH@Y$*72;fRZPS`2FpkQgFNu zk^*fDdS^U8%lw=iy0WhKAMmHvt|Y#tRjN;oJnI&C`M3zvx82d%`_rgL*WwZ?mHj9zadiSRh5OL}Z9oTOkgfttz zl6HPEx*Oig?##jy9e%ej`nZY*J^lKC+0qb))9yV`ux!RGarw$2y2P#^T^3PlL8aCw zDP*6iBU;P*cQi#z4yoGNTb@-RjR`$J+#KqDOzYIJRWQ6+Uu4qxjDSPl3jRvrZvCc@#fj z-nxP#HPf8NeQ(wmyseHpZt{$CsM_EJ4j$!z3J}P-U zHX8h*gf$-L{prZtrOK;SyStx>J)|myp=d3`>rmQDAYx*WNaBAaG+ce#?yzbzcM3rP zQ&f}uK5+61IKC^^YKdIDc;@k{ar*Gse{5Z)fkc6}&X@LEKW*}Tf{VXuTF&=W+Uv}6 zqyskGpOuzU@4E??*f;%Uj-)SE%&HTSL2K&eyeJ8QCSIEzV`p%&$2s|QRm^W|dA|%I zq?wq;6kf1dHVc$-&IrARsy9-3U1za+sX(jB`AGIsMJZR;DCdI`E?aVAiRzRW^%c49 zz`_4Itio`7j(V4rqnJ9!4@W)j{PE%9O}RRsBSK8y7mgZe_e!nJUFJJeBI@Mvp8EkI zD_GFSVOMxtmcnPNzI>dX%A+mg{jA1@F=tHB`8Fy9Uewho*0FEQeWQnijzY8{ArbVV zXUnSp?tT=NL+L*s%-0-MI?z~3qAT;x!-*g(mYx9Xb)%4x5UJO~;-nf+_02LbcIp3?!7|GJ9*`__. + +.. image:: ./architecture.png + :align: center + :width: 800 + +Component +~~~~~~~~~ + +This section presents the meanings of key components in DF21, along with associated parameters. + +* :class:`Binner`: The class used to reduce the number of splitting candidates for building decision trees. 
+
+    * :obj:`n_bins`, :obj:`bin_subsample`
+
+* :class:`Estimator`: Base estimators used in cascades layer of DF21. Default estimators are RandomForestClassifier and ExtraTreesClassifier.
+
+    * :obj:`n_trees`, :obj:`max_depth`, :obj:`min_samples_leaf`, :obj:`criterion`, :obj:`backend`
+
+* :class:`Layer`: The cascade layer of DF21, which consists of multiple estimators.
+
+    * :obj:`max_layers`, :obj:`n_estimators`
+
+* :class:`Predictor`: The optional predictor concatenated to the DF21 model.
+
+    * :obj:`use_predictor`, :obj:`predictor`, :obj:`predictor_kwargs`
+
+Training
+~~~~~~~~
+
+The training stage of DF21 starts with discretizing feature-wise values of the training samples into ``n_bins`` unique values, a commonly-used technique for accelerating the construction of decision trees. After that, the first cascade layer in DF21 with ``n_estimators`` estimators is produced using the binned data (notice that by default, ``n_estimators`` is multiplied by 2 internally). Furthermore, each estimator consists of ``n_trees`` decision trees that adopt the splitting criterion ``criterion``, satisfying the constraints enforced by ``max_depth`` and ``min_samples_leaf``.
+
+After data binning and building the first cascade layer, DF21 enters the main training loop:
+
+#. Bin the out-of-bag predictions of the previous cascade layer (denoted by augmented features in the figure above) using a newly-fitted :obj:`binner`;
+
+#. Concatenate the augmented features to the binned training samples, serving as the new training data for the cascade layer to be built;
+
+#. Build a new :obj:`layer` using the concatenated training data, following the same training protocol as that used to build the first cascade layer;
+
+#. Get the out-of-bag predictions of the :obj:`layer` and estimate its generalization performance via out-of-bag estimation;
+
+#. If the estimated performance is better than that of all previously-built layers, DF21 continues to build a new layer. Otherwise, the early-stopping procedure is triggered, and DF21 will terminate the training stage before reaching ``max_layers`` if the performance does not improve for ``n_tolerant_rounds`` rounds.
+
+As an optional step, DF21 builds another predictor if ``use_predictor`` is set to ``True``. This predictor takes as input the concatenated training data from the last cascade layer, and outputs the predicted class probabilities for classification problems or the predicted values for regression problems. One can use predictors such as random forest or GBDT by setting ``predictor``, and further configure them through ``predictor_kwargs``.
+
+Evaluating
+~~~~~~~~~~
+
+The evaluating stage follows the sequential structure of DF21. First, the testing samples are binned using the first :obj:`binner` and passed into the first :obj:`layer`. DF21 then sets the augmented features to the output of the current cascade layer, and bins them using the subsequent :obj:`binner`. After concatenating the augmented features to the binned testing samples, DF21 moves to the next layer, until reaching the last cascade layer or the predictor.
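A minimal usage sketch of the training and evaluating stages described above; the dataset choice and the explicitly spelled-out parameter values are illustrative assumptions, not part of the patch:

.. code-block:: python

    from sklearn.datasets import load_digits
    from sklearn.model_selection import train_test_split

    from deepforest import CascadeForestClassifier

    X, y = load_digits(return_X_y=True)
    X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=1)

    # Each cascade layer holds n_estimators forests (doubled internally),
    # each made of n_trees trees; training stops early once the layer-wise
    # performance fails to improve for n_tolerant_rounds rounds.
    model = CascadeForestClassifier(
        n_estimators=2,
        n_trees=100,
        max_layers=20,
        n_tolerant_rounds=2,
        use_predictor=False,  # set True to append the optional predictor
        random_state=1,
    )

    model.fit(X_train, y_train)             # training stage
    y_proba = model.predict_proba(X_test)   # evaluating stage
    y_pred = model.predict(X_test)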
\ No newline at end of file From ef419918e149671f42787cca7e635fc145b2958a Mon Sep 17 00:00:00 2001 From: xuyxu Date: Mon, 19 Apr 2021 20:12:20 +0800 Subject: [PATCH 73/94] doc: update README.rst --- README.rst | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/README.rst b/README.rst index d3b6589..25f5162 100644 --- a/README.rst +++ b/README.rst @@ -111,3 +111,11 @@ Reference Pages = {3553-3559}, Title = {{Deep Forest:} Towards an alternative to deep neural networks}, Year = {2017}} + +Thanks to all our contributors +------------------------------ + +|contributors| + +.. |contributors| image:: https://contributors-img.web.app/image?repo=LAMDA-NJU/Deep-Forest +.. _contributors: https://github.com/LAMDA-NJU/Deep-Forest/graphs/contributors From 35eceda0eb32b490885cb1e24fd5eb3da231d388 Mon Sep 17 00:00:00 2001 From: Yi-Xuan Xu Date: Wed, 28 Apr 2021 13:41:28 +0800 Subject: [PATCH 74/94] feat: recover the parameter min_samples_split (#73) * initial update * Update CHANGELOG.rst * Update CHANGELOG.rst * Update _estimator.py --- CHANGELOG.rst | 3 ++- deepforest/_estimator.py | 13 +++++++++++++ deepforest/_layer.py | 7 +++++++ deepforest/cascade.py | 18 ++++++++++++++++++ tests/test_model_classifier.py | 2 ++ tests/test_model_regressor.py | 2 ++ 6 files changed, 44 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.rst b/CHANGELOG.rst index af83210..7057351 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -26,10 +26,11 @@ Version 0.1.* .. |Feature| replace:: :raw-html:`Feature` :raw-latex:`{\small\sc [Feature]}` .. |Efficiency| replace:: :raw-html:`Efficiency` :raw-latex:`{\small\sc [Efficiency]}` -.. |Enhancement| replace:: :raw-html:`Enhancement` :raw-latex:`{\small\sc [Enhancement]}` +.. |Enhancement| replace:: :raw-html:`Enhancement` :raw-latex:`{\small\sc [Enhancement]}` .. |Fix| replace:: :raw-html:`Fix` :raw-latex:`{\small\sc [Fix]}` .. 
|API| replace:: :raw-html:`API Change` :raw-latex:`{\small\sc [API Change]}` +- |Feature| |API| recover the parameter ``min_samples_split`` (`#73 `__) @xuyxu - |Fix| fix the breakdown under the corner case where no internal node exists (`#70 `__) @xuyxu - |Feature| support python 3.9 (`#69 `__) @xuyxu - |Fix| fix inconsistency on array shape for :obj:`CascadeForestRegressor` in customized mode (`#67 `__) @xuyxu diff --git a/deepforest/_estimator.py b/deepforest/_estimator.py index 679c356..c8ce2dc 100644 --- a/deepforest/_estimator.py +++ b/deepforest/_estimator.py @@ -23,6 +23,7 @@ def make_classifier_estimator( criterion, n_trees=100, max_depth=None, + min_samples_split=2, min_samples_leaf=1, backend="custom", n_jobs=None, @@ -35,6 +36,7 @@ def make_classifier_estimator( criterion=criterion, n_estimators=n_trees, max_depth=max_depth, + min_samples_split=min_samples_split, min_samples_leaf=min_samples_leaf, n_jobs=n_jobs, random_state=random_state, @@ -44,6 +46,7 @@ def make_classifier_estimator( criterion=criterion, n_estimators=n_trees, max_depth=max_depth, + min_samples_split=min_samples_split, min_samples_leaf=min_samples_leaf, bootstrap=True, oob_score=True, @@ -57,6 +60,7 @@ def make_classifier_estimator( criterion=criterion, n_estimators=n_trees, max_depth=max_depth, + min_samples_split=min_samples_split, min_samples_leaf=min_samples_leaf, n_jobs=n_jobs, random_state=random_state, @@ -66,6 +70,7 @@ def make_classifier_estimator( criterion=criterion, n_estimators=n_trees, max_depth=max_depth, + min_samples_split=min_samples_split, min_samples_leaf=min_samples_leaf, bootstrap=True, oob_score=True, @@ -84,6 +89,7 @@ def make_regressor_estimator( criterion, n_trees=100, max_depth=None, + min_samples_split=2, min_samples_leaf=1, backend="custom", n_jobs=None, @@ -96,6 +102,7 @@ def make_regressor_estimator( criterion=criterion, n_estimators=n_trees, max_depth=max_depth, + min_samples_split=min_samples_split, min_samples_leaf=min_samples_leaf, n_jobs=n_jobs, random_state=random_state, @@ -105,6 +112,7 @@ def make_regressor_estimator( criterion=criterion, n_estimators=n_trees, max_depth=max_depth, + min_samples_split=min_samples_split, min_samples_leaf=min_samples_leaf, bootstrap=True, oob_score=True, @@ -118,6 +126,7 @@ def make_regressor_estimator( criterion=criterion, n_estimators=n_trees, max_depth=max_depth, + min_samples_split=min_samples_split, min_samples_leaf=min_samples_leaf, n_jobs=n_jobs, random_state=random_state, @@ -127,6 +136,7 @@ def make_regressor_estimator( criterion=criterion, n_estimators=n_trees, max_depth=max_depth, + min_samples_split=min_samples_split, min_samples_leaf=min_samples_leaf, bootstrap=True, oob_score=True, @@ -147,6 +157,7 @@ def __init__( criterion, n_trees=100, max_depth=None, + min_samples_split=2, min_samples_leaf=1, backend="custom", n_jobs=None, @@ -162,6 +173,7 @@ def __init__( criterion, n_trees, max_depth, + min_samples_split, min_samples_leaf, backend, n_jobs, @@ -173,6 +185,7 @@ def __init__( criterion, n_trees, max_depth, + min_samples_split, min_samples_leaf, backend, n_jobs, diff --git a/deepforest/_layer.py b/deepforest/_layer.py index 722312d..e0015fc 100644 --- a/deepforest/_layer.py +++ b/deepforest/_layer.py @@ -59,6 +59,7 @@ def __init__( n_estimators=2, n_trees=100, max_depth=None, + min_samples_split=2, min_samples_leaf=1, backend="custom", partial_mode=False, @@ -73,6 +74,7 @@ def __init__( self.n_estimators = n_estimators * 2 # internal conversion self.n_trees = n_trees self.max_depth = max_depth + self.min_samples_split = 
min_samples_split self.min_samples_leaf = min_samples_leaf self.backend = backend self.partial_mode = partial_mode @@ -116,6 +118,7 @@ def _make_estimator(self, estimator_idx, estimator_name): criterion=self.criterion, n_trees=self.n_trees, max_depth=self.max_depth, + min_samples_split=self.min_samples_split, min_samples_leaf=self.min_samples_leaf, backend=self.backend, n_jobs=self.n_jobs, @@ -169,6 +172,7 @@ def __init__( n_estimators=2, n_trees=100, max_depth=None, + min_samples_split=2, min_samples_leaf=1, backend="custom", partial_mode=False, @@ -184,6 +188,7 @@ def __init__( n_estimators=n_estimators, n_trees=n_trees, max_depth=max_depth, + min_samples_split=min_samples_split, min_samples_leaf=min_samples_leaf, backend=backend, partial_mode=partial_mode, @@ -260,6 +265,7 @@ def __init__( n_estimators=2, n_trees=100, max_depth=None, + min_samples_split=2, min_samples_leaf=1, backend="custom", partial_mode=False, @@ -275,6 +281,7 @@ def __init__( n_estimators=n_estimators, n_trees=n_trees, max_depth=max_depth, + min_samples_split=min_samples_split, min_samples_leaf=min_samples_leaf, backend=backend, partial_mode=partial_mode, diff --git a/deepforest/cascade.py b/deepforest/cascade.py index 6e8a44d..61457cf 100644 --- a/deepforest/cascade.py +++ b/deepforest/cascade.py @@ -36,6 +36,7 @@ def _build_classifier_predictor( n_estimators, n_outputs, max_depth=None, + min_samples_split=2, min_samples_leaf=1, n_jobs=None, random_state=None, @@ -54,6 +55,7 @@ def _build_classifier_predictor( criterion=criterion, n_estimators=n_estimators, max_depth=max_depth, + min_samples_split=min_samples_split, min_samples_leaf=min_samples_leaf, n_jobs=n_jobs, random_state=random_state, @@ -120,6 +122,7 @@ def _build_regressor_predictor( n_estimators, n_outputs, max_depth=None, + min_samples_split=2, min_samples_leaf=1, n_jobs=None, random_state=None, @@ -138,6 +141,7 @@ def _build_regressor_predictor( criterion=criterion, n_estimators=n_estimators, max_depth=max_depth, + min_samples_split=min_samples_split, min_samples_leaf=min_samples_leaf, n_jobs=n_jobs, random_state=random_state, @@ -226,6 +230,8 @@ def _build_regressor_predictor( The number of trees in each estimator. max_depth : :obj:`int`, default=None The maximum depth of each tree. ``None`` indicates no constraint. + min_samples_split : :obj:`int`, default=2 + The minimum number of samples required to split an internal node. min_samples_leaf : :obj:`int`, default=1 The minimum number of samples required to be at a leaf node. use_predictor : :obj:`bool`, default=False @@ -340,6 +346,8 @@ def _build_regressor_predictor( The number of trees in each estimator. max_depth : :obj:`int`, default=None The maximum depth of each tree. ``None`` indicates no constraint. + min_samples_split : :obj:`int`, default=2 + The minimum number of samples required to split an internal node. min_samples_leaf : :obj:`int`, default=1 The minimum number of samples required to be at a leaf node. 
use_predictor : :obj:`bool`, default=False @@ -469,6 +477,7 @@ def __init__( n_estimators=2, n_trees=100, max_depth=None, + min_samples_split=2, min_samples_leaf=1, use_predictor=False, predictor="forest", @@ -489,6 +498,7 @@ def __init__( self.n_estimators = n_estimators self.n_trees = n_trees self.max_depth = max_depth + self.min_samples_split = min_samples_split self.min_samples_leaf = min_samples_leaf self.predictor_kwargs = predictor_kwargs self.backend = backend @@ -771,6 +781,7 @@ def fit(self, X, y, sample_weight=None): n_estimators=self.n_estimators, n_trees=self._set_n_trees(0), max_depth=self.max_depth, + min_samples_split=self.min_samples_split, min_samples_leaf=self.min_samples_leaf, backend=self.backend, partial_mode=self.partial_mode, @@ -847,6 +858,7 @@ def fit(self, X, y, sample_weight=None): n_estimators=self.n_estimators, n_trees=self._set_n_trees(0), max_depth=self.max_depth, + min_samples_split=self.min_samples_split, min_samples_leaf=self.min_samples_leaf, backend=self.backend, partial_mode=self.partial_mode, @@ -944,6 +956,7 @@ def fit(self, X, y, sample_weight=None): self.n_trees, self.n_outputs_, self.max_depth, + self.min_samples_split, self.min_samples_leaf, self.n_jobs, self.random_state, @@ -956,6 +969,7 @@ def fit(self, X, y, sample_weight=None): self.n_trees, self.n_outputs_, self.max_depth, + self.min_samples_split, self.min_samples_leaf, self.n_jobs, self.random_state, @@ -1313,6 +1327,7 @@ def __init__( n_estimators=2, n_trees=100, max_depth=None, + min_samples_split=2, min_samples_leaf=1, use_predictor=False, predictor="forest", @@ -1334,6 +1349,7 @@ def __init__( n_estimators=n_estimators, n_trees=n_trees, max_depth=max_depth, + min_samples_split=min_samples_split, min_samples_leaf=min_samples_leaf, use_predictor=use_predictor, predictor=predictor, @@ -1512,6 +1528,7 @@ def __init__( n_estimators=2, n_trees=100, max_depth=None, + min_samples_split=2, min_samples_leaf=1, use_predictor=False, predictor="forest", @@ -1533,6 +1550,7 @@ def __init__( n_estimators=n_estimators, n_trees=n_trees, max_depth=max_depth, + min_samples_split=min_samples_split, min_samples_leaf=min_samples_leaf, use_predictor=use_predictor, predictor=predictor, diff --git a/tests/test_model_classifier.py b/tests/test_model_classifier.py index fdc8afe..2fa147e 100644 --- a/tests/test_model_classifier.py +++ b/tests/test_model_classifier.py @@ -28,6 +28,7 @@ "criterion": "gini", "n_trees": 100, "max_depth": 3, + "min_samples_split": 2, "min_samples_leaf": 1, "use_predictor": True, "predictor": "forest", @@ -47,6 +48,7 @@ "criterion": "gini", "n_trees": 100, "max_depth": None, + "min_samples_split": 2, "min_samples_leaf": 1, "use_predictor": True, "predictor": "forest", diff --git a/tests/test_model_regressor.py b/tests/test_model_regressor.py index c3b7887..ea98fcc 100644 --- a/tests/test_model_regressor.py +++ b/tests/test_model_regressor.py @@ -28,6 +28,7 @@ "n_estimators": 1, "n_trees": 100, "max_depth": 3, + "min_samples_split": 2, "min_samples_leaf": 1, "use_predictor": True, "predictor": "forest", @@ -47,6 +48,7 @@ "n_estimators": 2, "n_trees": 100, "max_depth": None, + "min_samples_split": 2, "min_samples_leaf": 1, "use_predictor": True, "predictor": "forest", From 872c43e658a2029d868b7d92f3b6c84e0827e275 Mon Sep 17 00:00:00 2001 From: Yi-Xuan Xu Date: Wed, 28 Apr 2021 14:03:56 +0800 Subject: [PATCH 75/94] fix(doc): add docstrings for bin_type (#74) * Update cascade.py * Update CHANGELOG.rst --- CHANGELOG.rst | 1 + deepforest/cascade.py | 12 ++++++++++++ 2 files changed, 13 
insertions(+) diff --git a/CHANGELOG.rst b/CHANGELOG.rst index 7057351..86b3b39 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -30,6 +30,7 @@ Version 0.1.* .. |Fix| replace:: :raw-html:`Fix` :raw-latex:`{\small\sc [Fix]}` .. |API| replace:: :raw-html:`API Change` :raw-latex:`{\small\sc [API Change]}` +- |Fix| |API| add docstrings for parameter ``bin_type`` (`#74 `__) @xuyxu - |Feature| |API| recover the parameter ``min_samples_split`` (`#73 `__) @xuyxu - |Fix| fix the breakdown under the corner case where no internal node exists (`#70 `__) @xuyxu - |Feature| support python 3.9 (`#69 `__) @xuyxu diff --git a/deepforest/cascade.py b/deepforest/cascade.py index 61457cf..480f13d 100644 --- a/deepforest/cascade.py +++ b/deepforest/cascade.py @@ -213,6 +213,12 @@ def _build_regressor_predictor( The number of samples used to construct feature discrete bins. If the size of training set is smaller than ``bin_subsample``, then all training samples will be used. + bin_type : :obj:`{"percentile", "interval"}`, default= :obj:`"percentile"` + The type of binner used to bin feature values into integer-valued bins. + + - If ``"percentile"``, each bin will have approximately the same + number of distinct feature values. + - If ``"interval"``, each bin will have approximately the same size. max_layers : :obj:`int`, default=20 The maximum number of cascade layers in the deep forest. Notice that the actual number of layers can be smaller than ``max_layers`` because @@ -329,6 +335,12 @@ def _build_regressor_predictor( The number of samples used to construct feature discrete bins. If the size of training set is smaller than ``bin_subsample``, then all training samples will be used. + bin_type : :obj:`{"percentile", "interval"}`, default= :obj:`"percentile"` + The type of binner used to bin feature values into integer-valued bins. + + - If ``"percentile"``, each bin will have approximately the same + number of distinct feature values. + - If ``"interval"``, each bin will have approximately the same size. max_layers : :obj:`int`, default=20 The maximum number of cascade layers in the deep forest. Notice that the actual number of layers can be smaller than ``max_layers`` because From 3ea296e914985897a1ef2f27c232b45347597603 Mon Sep 17 00:00:00 2001 From: xuyxu Date: Wed, 28 Apr 2021 16:57:45 +0800 Subject: [PATCH 76/94] doc: update related parameters --- docs/advanced_topics/architecture.rst | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/advanced_topics/architecture.rst b/docs/advanced_topics/architecture.rst index 16a8f1d..9140554 100644 --- a/docs/advanced_topics/architecture.rst +++ b/docs/advanced_topics/architecture.rst @@ -14,11 +14,11 @@ This section presents the meanings of key components in DF21, along with associa * :class:`Binner`: The class used to reduce the number of splitting candidates for building decision trees. - * :obj:`n_bins`, :obj:`bin_subsample` + * :obj:`n_bins`, :obj:`bin_subsample`, :obj:`bin_type` * :class:`Estimator`: Base estimators used in cascades layer of DF21. Default estimators are RandomForestClassifier and ExtraTreesClassifier. - * :obj:`n_trees`, :obj:`max_depth`, :obj:`min_samples_leaf`, :obj:`criterion`, :obj:`backend` + * :obj:`n_trees`, :obj:`max_depth`, :obj:`min_samples_split `, :obj:`min_samples_leaf`, :obj:`criterion`, :obj:`backend` * :class:`Layer`: The cascade layer of DF21, which consists of multiple estimators. 
From bba4b7a99d4d98369eae1fafc395c4e7315f15dc Mon Sep 17 00:00:00 2001 From: xuyxu Date: Wed, 28 Apr 2021 20:05:06 +0800 Subject: [PATCH 77/94] doc: update related parameters --- docs/advanced_topics/architecture.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/advanced_topics/architecture.rst b/docs/advanced_topics/architecture.rst index 9140554..191e065 100644 --- a/docs/advanced_topics/architecture.rst +++ b/docs/advanced_topics/architecture.rst @@ -18,7 +18,7 @@ This section presents the meanings of key components in DF21, along with associa * :class:`Estimator`: Base estimators used in cascades layer of DF21. Default estimators are RandomForestClassifier and ExtraTreesClassifier. - * :obj:`n_trees`, :obj:`max_depth`, :obj:`min_samples_split `, :obj:`min_samples_leaf`, :obj:`criterion`, :obj:`backend` + * :obj:`n_trees`, :obj:`max_depth`, :obj:`min_samples_split`, :obj:`min_samples_leaf`, :obj:`criterion`, :obj:`backend` * :class:`Layer`: The cascade layer of DF21, which consists of multiple estimators. From fb0e35e4f54ffe5018d3d404f3924e649c9281f9 Mon Sep 17 00:00:00 2001 From: xuyxu Date: Thu, 29 Apr 2021 10:28:38 +0800 Subject: [PATCH 78/94] fix: missing functionality of `_set_n_trees` --- CHANGELOG.rst | 1 + deepforest/cascade.py | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.rst b/CHANGELOG.rst index 86b3b39..641c925 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -30,6 +30,7 @@ Version 0.1.* .. |Fix| replace:: :raw-html:`Fix` :raw-latex:`{\small\sc [Fix]}` .. |API| replace:: :raw-html:`API Change` :raw-latex:`{\small\sc [API Change]}` +- |Fix| fix missing functionality of :meth:`_set_n_trees` @xuyxu - |Fix| |API| add docstrings for parameter ``bin_type`` (`#74 `__) @xuyxu - |Feature| |API| recover the parameter ``min_samples_split`` (`#73 `__) @xuyxu - |Fix| fix the breakdown under the corner case where no internal node exists (`#70 `__) @xuyxu diff --git a/deepforest/cascade.py b/deepforest/cascade.py index 480f13d..6e93f2d 100644 --- a/deepforest/cascade.py +++ b/deepforest/cascade.py @@ -868,7 +868,7 @@ def fit(self, X, y, sample_weight=None): n_outputs=self.n_outputs_, criterion=self.criterion, n_estimators=self.n_estimators, - n_trees=self._set_n_trees(0), + n_trees=self._set_n_trees(layer_idx), max_depth=self.max_depth, min_samples_split=self.min_samples_split, min_samples_leaf=self.min_samples_leaf, From 3a4cde2a9b91e1124913073b99e88d6b4c42ba2e Mon Sep 17 00:00:00 2001 From: xuyxu Date: Sat, 1 May 2021 15:49:55 +0800 Subject: [PATCH 79/94] doc(exp): add regression dataset wine --- docs/experiments.rst | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/docs/experiments.rst b/docs/experiments.rst index 803c2be..66d83d8 100644 --- a/docs/experiments.rst +++ b/docs/experiments.rst @@ -151,6 +151,8 @@ We have also collected four datasets on univariate regression for a comparison o +------------------+------------+-----------+------------+ | Name | # Training | # Testing | # Features | +==================+============+===========+============+ +| `wine`_ | 1,071 | 528 | 11 | ++------------------+------------+-----------+------------+ | `abalone`_ | 2,799 | 1,378 | 8 | +------------------+------------+-----------+------------+ | `cpusmall`_ | 5,489 | 2,703 | 12 | @@ -168,6 +170,8 @@ The table below shows the testing mean squared error of each method, with the be +----------+-----------+---------+-----------+----------+----------+-------------+ | Name | RF | HGBDT | XGB EXACT | XGB HIST | LightGBM | Deep Forest | 
+==========+===========+=========+===========+==========+==========+=============+ +| wine | 0.35 | 0.40 | 0.41 | 0.41 | 0.39 | **0.34** | ++----------+-----------+---------+-----------+----------+----------+-------------+ | abalone | 4.79 | 5.40 | 5.73 | 5.75 | 5.60 | **4.66** | +----------+-----------+---------+-----------+----------+----------+-------------+ | cpusmall | 8.31 | 9.01 | 9.86 | 11.82 | 8.99 | **7.15** | @@ -185,6 +189,8 @@ Runtime in seconds reported in the table below covers both the training stage an +----------+------+-------+-----------+----------+----------+-------------+ | Name | RF | HGBDT | XGB EXACT | XGB HIST | LightGBM | Deep Forest | +==========+======+=======+===========+==========+==========+=============+ +| wine | 0.76 | 2.88 | 0.30 | 0.30 | 0.30 | 1.26 | ++----------+------+-------+-----------+----------+----------+-------------+ | abalone | 0.53 | 1.57 | 0.47 | 0.50 | 0.17 | 1.29 | +----------+------+-------+-----------+----------+----------+-------------+ | cpusmall | 1.87 | 3.59 | 1.71 | 1.25 | 0.36 | 2.06 | @@ -228,6 +234,8 @@ Runtime in seconds reported in the table below covers both the training stage an .. _`fashion mnist`: https://keras.io/api/datasets/fashion_mnist/ +.. _`wine`: https://www.kaggle.com/uciml/red-wine-quality-cortez-et-al-2009 + .. _`abalone`: https://www.csie.ntu.edu.tw/~cjlin/libsvmtools/datasets/regression.html#abalone .. _`cpusmall`: https://www.csie.ntu.edu.tw/~cjlin/libsvmtools/datasets/regression.html#cpusmall From d7127ec8999846fba79e453c639bd8607d96761a Mon Sep 17 00:00:00 2001 From: Yi-Xuan Xu Date: Sat, 8 May 2021 11:05:23 +0800 Subject: [PATCH 80/94] doc: add sphinx autosummary (#80) --- docs/api_reference.rst | 31 ++++++++++++++++++++++++++++++- docs/conf.py | 2 ++ 2 files changed, 32 insertions(+), 1 deletion(-) diff --git a/docs/api_reference.rst b/docs/api_reference.rst index aefe0ae..f371364 100644 --- a/docs/api_reference.rst +++ b/docs/api_reference.rst @@ -1,11 +1,26 @@ API Reference ============= -Below is the class and function reference for :mod:`deepforest`. Notice that the package is under active development, and some features may not be stable yet. +Below is the class and function reference for :mod:`deepforest`. Notice that the package is still under active development, and some features may not be stable yet. + +.. currentmodule:: deepforest.CascadeForestClassifier CascadeForestClassifier ----------------------- +.. autosummary:: + + fit + predict_proba + predict + clean + get_estimator + get_layer_feature_importances + load + save + set_estimator + set_predictor + .. autoclass:: deepforest.CascadeForestClassifier :members: :inherited-members: @@ -14,9 +29,23 @@ CascadeForestClassifier :exclude-members: set_params, get_params, score :member-order: bysource +.. currentmodule:: deepforest.CascadeForestRegressor + CascadeForestRegressor ----------------------- +.. autosummary:: + + fit + predict + clean + get_estimator + get_layer_feature_importances + load + save + set_estimator + set_predictor + .. 
autoclass:: deepforest.CascadeForestRegressor :members: :inherited-members: diff --git a/docs/conf.py b/docs/conf.py index 9f754fc..4e9ff5d 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -49,6 +49,8 @@ "m2r2" ] +autosummary_generate = True + source_suffix = ['.rst', '.md'] autoapi_dirs = ['../deepforest'] From f9b54f0911bd9d37b09797f5ccce4e795c2731da Mon Sep 17 00:00:00 2001 From: Yi-Xuan Xu Date: Wed, 12 May 2021 11:16:26 +0800 Subject: [PATCH 81/94] doc: simplify docstrings for autosummary (#81) --- deepforest/cascade.py | 21 +++++++-------------- 1 file changed, 7 insertions(+), 14 deletions(-) diff --git a/deepforest/cascade.py b/deepforest/cascade.py index 6e93f2d..96e18a3 100644 --- a/deepforest/cascade.py +++ b/deepforest/cascade.py @@ -1028,8 +1028,7 @@ def fit(self, X, y, sample_weight=None): def set_estimator(self, estimators, n_splits=5): """ - Specify custom base estimators, which will override estimators used - by default. + Specify the custom base estimators for cascade layers. Parameters ---------- @@ -1079,8 +1078,7 @@ def set_estimator(self, estimators, n_splits=5): def set_predictor(self, predictor): """ - Specify the custom predictor, which will override the predictor - used by default. + Specify the custom predictor concatenated to deep forest. Parameters ---------- @@ -1114,9 +1112,7 @@ def set_predictor(self, predictor): def get_layer_feature_importances(self, layer_idx): """ - Return the impurity-based feature importances of the ``layer_idx``-th - cascade layer, defined as the average over feature importances from - all base estimators in the cascade layer. + Return the feature importances of ``layer_idx``-th cascade layer. Parameters ---------- @@ -1149,8 +1145,7 @@ def get_layer_feature_importances(self, layer_idx): def get_estimator(self, layer_idx, est_idx, estimator_type): """ - Get the `est_idx`-th estimator from the `layer_idx`-th cascade layer - in the deep forest. + Get estimator from a cascade layer in the deep forest. Parameters ---------- @@ -1217,7 +1212,7 @@ def get_estimator(self, layer_idx, est_idx, estimator_type): def save(self, dirname="model"): """ - Save the model to the specified directory. + Save the model to the directory ``dirname``. Parameters ---------- @@ -1268,7 +1263,7 @@ def save(self, dirname="model"): def load(self, dirname): """ - Load the model from the specified directory. + Load the model from the directory ``dirname``. Parameters ---------- @@ -1317,9 +1312,7 @@ def load(self, dirname): self.is_fitted_ = True def clean(self): - """ - Clean the buffer created by the model if ``partial_mode`` is True. 
- """ + """Clean the buffer created by the model.""" if self.partial_mode: self.buffer_.close() From 1d1097ca9068fd2ef8812e5b6d9d394970d3fb86 Mon Sep 17 00:00:00 2001 From: xuyxu Date: Mon, 31 May 2021 20:07:48 +0800 Subject: [PATCH 82/94] doc: update README.rst --- README.rst | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/README.rst b/README.rst index 25f5162..9a176d1 100644 --- a/README.rst +++ b/README.rst @@ -88,7 +88,7 @@ Resources --------- * `Documentation `__ -* Deep Forest: `[Paper] `__ +* Deep Forest: `[Conference] `__ | `[Journal] `__ * Keynote at AISTATS 2019: `[Slides] `__ Reference @@ -106,11 +106,11 @@ Reference year={2019}} @inproceedings{zhou2017deep, - Author = {Zhi-Hua Zhou and Ji Feng}, - Booktitle = {IJCAI}, - Pages = {3553-3559}, - Title = {{Deep Forest:} Towards an alternative to deep neural networks}, - Year = {2017}} + title = {{Deep Forest:} Towards an alternative to deep neural networks}, + author = {Zhi-Hua Zhou and Ji Feng}, + booktitle = {IJCAI}, + pages = {3553--3559}, + year = {2017}} Thanks to all our contributors ------------------------------ From 51a8e704cbde10adf3cde5c0ffd8442e82a3dbc0 Mon Sep 17 00:00:00 2001 From: Hao Lyu <20434183+IncubatorShokuhou@users.noreply.github.com> Date: Thu, 15 Jul 2021 21:29:38 +0800 Subject: [PATCH 83/94] fix: correct error type (#88) * fix raise error problem * fix CI --- deepforest/cascade.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/deepforest/cascade.py b/deepforest/cascade.py index 96e18a3..79d9df4 100644 --- a/deepforest/cascade.py +++ b/deepforest/cascade.py @@ -578,7 +578,7 @@ def _get_layer(self, layer_idx): "The layer index should be in the range [0, {}], but got {}" " instead." ) - raise ValueError(msg.format(self.n_layers_ - 1, layer_idx)) + raise IndexError(msg.format(self.n_layers_ - 1, layer_idx)) layer_key = "layer_{}".format(layer_idx) @@ -1598,7 +1598,7 @@ def _check_target_values(self, y): def _check_array_numeric(self, y): """Check the input numpy array y is all numeric.""" - numeric_types = np.typecodes['AllInteger'] + np.typecodes["AllFloat"] + numeric_types = np.typecodes["AllInteger"] + np.typecodes["AllFloat"] if y.dtype.kind in numeric_types: return True else: From 640722dcc9dce8db7bdf231ebb739229b30b7117 Mon Sep 17 00:00:00 2001 From: xuyxu Date: Thu, 15 Jul 2021 21:40:23 +0800 Subject: [PATCH 84/94] mnt: update unit tests --- tests/test_model_classifier.py | 2 +- tests/test_model_regressor.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/test_model_classifier.py b/tests/test_model_classifier.py index 2fa147e..10b57f9 100644 --- a/tests/test_model_classifier.py +++ b/tests/test_model_classifier.py @@ -91,7 +91,7 @@ def test_model_properties_after_fitting(): assert model[0] is model._get_layer(0) - with pytest.raises(ValueError) as excinfo: + with pytest.raises(IndexError) as excinfo: model._get_layer(model.n_layers_) assert "The layer index should be in the range" in str(excinfo.value) diff --git a/tests/test_model_regressor.py b/tests/test_model_regressor.py index ea98fcc..1149019 100644 --- a/tests/test_model_regressor.py +++ b/tests/test_model_regressor.py @@ -91,7 +91,7 @@ def test_model_properties_after_fitting(): assert model[0] is model._get_layer(0) - with pytest.raises(ValueError) as excinfo: + with pytest.raises(IndexError) as excinfo: model._get_layer(model.n_layers_) assert "The layer index should be in the range" in str(excinfo.value) From 91540a333d6933d56370dc3d7d31ba9cd0b45089 Mon Sep 17 
00:00:00 2001 From: Hao Lyu <20434183+IncubatorShokuhou@users.noreply.github.com> Date: Tue, 20 Jul 2021 23:29:37 +0800 Subject: [PATCH 85/94] feat: add support for pandas.DataFrame and list (#86) * add support for pandas.DataFrame and list * remove check_array of y * format deepforest/cascade.py * add check_array for all the predict and fix method * fix annotation * add support for dataframe in y, change annotation * fix multi-output in check_X_y * update contributors Co-authored-by: xuyxu --- .all-contributorsrc | 9 +++++++ CONTRIBUTORS.md | 1 + deepforest/cascade.py | 62 +++++++++++++++++++++++++++++++++---------- 3 files changed, 58 insertions(+), 14 deletions(-) diff --git a/.all-contributorsrc b/.all-contributorsrc index f7ce66c..f553bef 100644 --- a/.all-contributorsrc +++ b/.all-contributorsrc @@ -129,6 +129,15 @@ "contributions": [ "bug" ] + }, + { + "login": "IncubatorShokuhou", + "name": "Hao Lyu", + "avatar_url": "https://avatars.githubusercontent.com/u/20434183?v=4", + "profile": "https://github.com/IncubatorShokuhou", + "contributions": [ + "code" + ] } ], } diff --git a/CONTRIBUTORS.md b/CONTRIBUTORS.md index e546219..d78a316 100644 --- a/CONTRIBUTORS.md +++ b/CONTRIBUTORS.md @@ -10,6 +10,7 @@ Thanks goes to these wonderful people ([emoji key](https://allcontributors.org/d
 [contributor table cells, HTML markup lost in extraction: Alex-Medium 💻 ⚠️; Dwaipayan Munshi 📖]
+[added contributor table cell: Hao Lyu 💻]
 [contributor table cells, HTML markup lost in extraction: Joey Gao 💻; Mr-memorandum 🐛; NiMaZi]
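The ``deepforest/cascade.py`` diff that follows routes ``X`` and ``y`` through scikit-learn's ``check_X_y`` and ``check_array``, which is what allows ``fit`` and ``predict`` to accept inputs such as a ``pandas.DataFrame`` or a plain list. A minimal sketch of the behaviour this patch enables; the toy dataset and parameter values are assumptions for illustration:

.. code-block:: python

    import pandas as pd
    from sklearn.datasets import load_iris

    from deepforest import CascadeForestClassifier

    # Wrap the iris data in pandas containers to exercise the new input checks.
    data = load_iris()
    X = pd.DataFrame(data.data, columns=data.feature_names)
    y = pd.Series(data.target)

    model = CascadeForestClassifier(verbose=0, random_state=0)
    model.fit(X, y)                 # check_X_y converts X and y internally
    proba = model.predict_proba(X)  # check_array does the same at predict time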
💻 ⚠️ diff --git a/deepforest/cascade.py b/deepforest/cascade.py index 79d9df4..686f311 100644 --- a/deepforest/cascade.py +++ b/deepforest/cascade.py @@ -3,23 +3,28 @@ __all__ = ["CascadeForestClassifier", "CascadeForestRegressor"] -import time import numbers -import numpy as np +import time from abc import ABCMeta, abstractmethod + +import numpy as np +from sklearn.base import ( + BaseEstimator, + ClassifierMixin, + RegressorMixin, + is_classifier, +) from sklearn.preprocessing import LabelEncoder +from sklearn.utils import check_array, check_X_y from sklearn.utils.multiclass import type_of_target -from sklearn.base import BaseEstimator, ClassifierMixin, RegressorMixin -from sklearn.base import is_classifier -from . import _utils -from . import _io +from . import _io, _utils +from ._binner import Binner from ._layer import ( ClassificationCascadeLayer, - RegressionCascadeLayer, CustomCascadeLayer, + RegressionCascadeLayer, ) -from ._binner import Binner def _get_predictor_kwargs(predictor_kwargs, **kwargs) -> dict: @@ -315,7 +320,7 @@ def _build_regressor_predictor( Parameters ---------- - X : :obj:`numpy.ndarray` of shape (n_samples, n_features) + X : :obj: array-like of shape (n_samples, n_features) The training data. Internally, it will be converted to ``np.uint8``. y : :obj:`numpy.ndarray` of shape (n_samples,) @@ -436,7 +441,7 @@ def _build_regressor_predictor( Parameters ---------- - X : :obj:`numpy.ndarray` of shape (n_samples, n_features) + X : :obj: array-like of shape (n_samples, n_features) The training data. Internally, it will be converted to ``np.uint8``. y : :obj:`numpy.ndarray` of shape (n_samples,) or (n_samples, n_outputs) @@ -762,6 +767,14 @@ def n_aug_features_(self): # flake8: noqa: E501 def fit(self, X, y, sample_weight=None): + X, y = check_X_y( + X, + y, + multi_output=True + if type_of_target(y) + in ("continuous-multioutput", "multiclass-multioutput") + else False, + ) self._check_input(X, y) self._validate_params() @@ -1411,7 +1424,14 @@ def _repr_performance(self, pivot): """Build a deep forest using the training data.""", "classifier_fit" ) def fit(self, X, y, sample_weight=None): - + X, y = check_X_y( + X, + y, + multi_output=True + if type_of_target(y) + in ("continuous-multioutput", "multiclass-multioutput") + else False, + ) # Check the input for classification y = self._encode_class_labels(y) @@ -1423,7 +1443,7 @@ def predict_proba(self, X): Parameters ---------- - X : :obj:`numpy.ndarray` of shape (n_samples, n_features) + X : :obj: array-like of shape (n_samples, n_features) The input samples. Internally, its dtype will be converted to ``np.uint8``. @@ -1432,6 +1452,8 @@ def predict_proba(self, X): proba : :obj:`numpy.ndarray` of shape (n_samples, n_classes) The class probabilities of the input samples. """ + X = check_array(X) + if not self.is_fitted_: raise AttributeError("Please fit the model first.") self._check_input(X) @@ -1505,7 +1527,7 @@ def predict(self, X): Parameters ---------- - X : :obj:`numpy.ndarray` of shape (n_samples, n_features) + X : :obj: array-like of shape (n_samples, n_features) The input samples. Internally, its dtype will be converted to ``np.uint8``. @@ -1514,6 +1536,8 @@ def predict(self, X): y : :obj:`numpy.ndarray` of shape (n_samples,) The predicted classes. 
""" + X = check_array(X) + proba = self.predict_proba(X) y = self._decode_class_labels(np.argmax(proba, axis=1)) return y @@ -1612,6 +1636,14 @@ def _repr_performance(self, pivot): """Build a deep forest using the training data.""", "regressor_fit" ) def fit(self, X, y, sample_weight=None): + X, y = check_X_y( + X, + y, + multi_output=True + if type_of_target(y) + in ("continuous-multioutput", "multiclass-multioutput") + else False, + ) # Check the input for regression self._check_target_values(y) @@ -1624,7 +1656,7 @@ def predict(self, X): Parameters ---------- - X : :obj:`numpy.ndarray` of shape (n_samples, n_features) + X : :obj: array-like of shape (n_samples, n_features) The input samples. Internally, its dtype will be converted to ``np.uint8``. @@ -1633,6 +1665,8 @@ def predict(self, X): y : :obj:`numpy.ndarray` of shape (n_samples,) or (n_samples, n_outputs) The predicted values. """ + X = check_array(X) + if not self.is_fitted_: raise AttributeError("Please fit the model first.") self._check_input(X) From 8335bdaf8abdc4ec52a708d4a92ff811cfd30b1a Mon Sep 17 00:00:00 2001 From: xuyxu Date: Fri, 23 Jul 2021 23:17:30 +0800 Subject: [PATCH 86/94] doc: update CHANGELOG.rst --- CHANGELOG.rst | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.rst b/CHANGELOG.rst index 641c925..2ec2629 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -30,6 +30,7 @@ Version 0.1.* .. |Fix| replace:: :raw-html:`Fix` :raw-latex:`{\small\sc [Fix]}` .. |API| replace:: :raw-html:`API Change` :raw-latex:`{\small\sc [API Change]}` +- |Feature| |API| add support on :obj:`pandas.DataFrame` for ``X`` and ``y`` (`#86 `__) @IncubatorShokuhou - |Fix| fix missing functionality of :meth:`_set_n_trees` @xuyxu - |Fix| |API| add docstrings for parameter ``bin_type`` (`#74 `__) @xuyxu - |Feature| |API| recover the parameter ``min_samples_split`` (`#73 `__) @xuyxu From 689cee38ce18df9cc5d568b9d6da22d18dd7af11 Mon Sep 17 00:00:00 2001 From: xuyxu Date: Thu, 12 May 2022 21:47:12 +0800 Subject: [PATCH 87/94] doc: update README.rst --- README.rst | 2 ++ 1 file changed, 2 insertions(+) diff --git a/README.rst b/README.rst index 9a176d1..bb3db5f 100644 --- a/README.rst +++ b/README.rst @@ -32,6 +32,8 @@ DF21 offers an effective & powerful option to the tree-based machine learning al For a quick start, please refer to `How to Get Started `__. For a detailed guidance on parameter tunning, please refer to `Parameters Tunning `__. +DF21 is optimized for what a tree-based ensemble excels at (i.e., tabular data), if you want to use the multi-grained scanning part to better handle structured data like images, please refer to the `origin implementation `__ for details. 
+ Installation ------------ From bdc07326581e14b0e46c68d87c580463de1b71ba Mon Sep 17 00:00:00 2001 From: xuyxu Date: Sat, 14 May 2022 14:49:26 +0800 Subject: [PATCH 88/94] mnt: pin click version for build tools --- build_tools/requirements.txt | 1 + 1 file changed, 1 insertion(+) diff --git a/build_tools/requirements.txt b/build_tools/requirements.txt index c0f047e..6fb9f1a 100644 --- a/build_tools/requirements.txt +++ b/build_tools/requirements.txt @@ -2,6 +2,7 @@ pytest pre-commit black==20.8b1 +click==8.0.3 flake8==3.8.4 pytest-cov lightgbm From d51d74ffeb40c1586046ed82cf80ff0aa644ab2f Mon Sep 17 00:00:00 2001 From: Yi-Xuan Xu Date: Sat, 17 Sep 2022 22:03:40 +0800 Subject: [PATCH 89/94] fix: drop support on py36 (#115) * Update CHANGELOG.rst * remove `_joblib_parallel_args` * update dependency * update code * update doc --- .github/workflows/build-and-test.yml | 2 +- CHANGELOG.rst | 1 + deepforest/forest.py | 16 ++++------------ docs/installation_guide.rst | 4 ---- docs/requirements.txt | 4 +++- requirements.txt | 6 +++--- 6 files changed, 12 insertions(+), 21 deletions(-) diff --git a/.github/workflows/build-and-test.yml b/.github/workflows/build-and-test.yml index c27d57d..9757219 100644 --- a/.github/workflows/build-and-test.yml +++ b/.github/workflows/build-and-test.yml @@ -12,7 +12,7 @@ jobs: strategy: matrix: os: [ubuntu-latest, windows-latest, macos-latest] - python-version: [3.6, 3.7, 3.8, 3.9] + python-version: [3.7, 3.8, 3.9] steps: - uses: actions/checkout@v2 - name: Set up Python diff --git a/CHANGELOG.rst b/CHANGELOG.rst index 2ec2629..3af9ee1 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -30,6 +30,7 @@ Version 0.1.* .. |Fix| replace:: :raw-html:`Fix` :raw-latex:`{\small\sc [Fix]}` .. |API| replace:: :raw-html:`API Change` :raw-latex:`{\small\sc [API Change]}` +- |Feature| support the latest version of scikit-learn and drop support on python 3.6 (`#115 `__) @xuyxu - |Feature| |API| add support on :obj:`pandas.DataFrame` for ``X`` and ``y`` (`#86 `__) @IncubatorShokuhou - |Fix| fix missing functionality of :meth:`_set_n_trees` @xuyxu - |Fix| |API| add docstrings for parameter ``bin_type`` (`#74 `__) @xuyxu diff --git a/deepforest/forest.py b/deepforest/forest.py index 2b039c6..90eb02c 100644 --- a/deepforest/forest.py +++ b/deepforest/forest.py @@ -31,7 +31,6 @@ from sklearn.base import ClassifierMixin, RegressorMixin, MultiOutputMixin from sklearn.utils import check_random_state, compute_sample_weight from sklearn.exceptions import DataConversionWarning -from sklearn.utils.fixes import _joblib_parallel_args from sklearn.utils.validation import check_is_fitted, _check_sample_weight from sklearn.utils.validation import _deprecate_positional_args @@ -463,7 +462,8 @@ def fit(self, X, y, sample_weight=None): rets = Parallel( n_jobs=n_jobs, verbose=self.verbose, - **_joblib_parallel_args(prefer="threads", require="sharedmem") + prefer="threads", + require="sharedmem", )( delayed(_parallel_build_trees)( t, @@ -609,11 +609,7 @@ def predict_proba(self, X): for j in np.atleast_1d(self.n_classes_) ] lock = threading.Lock() - Parallel( - n_jobs=n_jobs, - verbose=self.verbose, - **_joblib_parallel_args(require="sharedmem") - )( + Parallel(n_jobs=n_jobs, verbose=self.verbose, require="sharedmem",)( delayed(_accumulate_prediction)( self.features[i], self.thresholds[i], @@ -796,11 +792,7 @@ def predict(self, X): # Parallel loop lock = threading.Lock() - Parallel( - n_jobs=n_jobs, - verbose=self.verbose, - **_joblib_parallel_args(require="sharedmem") - )( + Parallel(n_jobs=n_jobs, 
verbose=self.verbose, require="sharedmem",)( delayed(_accumulate_prediction)( self.features[i], self.thresholds[i], diff --git a/docs/installation_guide.rst b/docs/installation_guide.rst index 850c0ae..1a2659f 100644 --- a/docs/installation_guide.rst +++ b/docs/installation_guide.rst @@ -52,10 +52,6 @@ Building from source is required to work on a contribution (bug fix, new feature $ cd tests % pytest -.. warning:: - - **[Jan 31, 2021]** The Numpy developers have released the version 1.20.0 of `Numpy `__, which makes many changes on the C-APIs, and can be incompatible with those used in the package. You are at your own risks to build the package from source with the version 1.20.0 of Numpy installed. - Acknowledgement --------------- diff --git a/docs/requirements.txt b/docs/requirements.txt index 2dbaa5f..803df12 100644 --- a/docs/requirements.txt +++ b/docs/requirements.txt @@ -2,4 +2,6 @@ sphinx==3.1.2 sphinx_rtd_theme==0.5.0 sphinx-panels==0.5.* sphinx-copybutton -m2r2==0.2.7 \ No newline at end of file +m2r2==0.2.7 +jinja2<3.1.0 +mistune==0.8.4 \ No newline at end of file diff --git a/requirements.txt b/requirements.txt index 74fb246..749d1ee 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,4 +1,4 @@ -numpy>=1.16.0,<1.20.0 -scipy>=0.19.1 +numpy>=1.14.6 +scipy>=1.1.0 joblib>=0.11 -scikit-learn>=0.22 \ No newline at end of file +scikit-learn>=1.0 \ No newline at end of file From f6606e4febc0c368b8e23645ac00d96f6e52c145 Mon Sep 17 00:00:00 2001 From: Yi-Xuan Xu Date: Sat, 17 Sep 2022 22:43:24 +0800 Subject: [PATCH 90/94] mnt: bump version 0.1.6 (#116) * Update CHANGELOG.rst * remove `_joblib_parallel_args` * update dependency * update code * update doc * bump version --- LICENSE | 4 ++-- pyproject.toml | 4 ++-- setup.py | 11 +++++------ 3 files changed, 9 insertions(+), 10 deletions(-) diff --git a/LICENSE b/LICENSE index 4997f31..afb43bd 100644 --- a/LICENSE +++ b/LICENSE @@ -1,8 +1,8 @@ COPYRIGHT ========= -Copyright (c) 2021 LAMDA (http://www.lamda.nju.edu.cn), Nanjing University, China -All rights reserved. +Copyright (c) 2021-2023 LAMDA (http://www.lamda.nju.edu.cn), Nanjing +University, China All rights reserved. 
LICENSE ======= diff --git a/pyproject.toml b/pyproject.toml index 80498c5..a0093c2 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -3,8 +3,8 @@ requires = [ "setuptools>=42", "wheel", "Cython>=0.28.5", - "numpy>=1.16.0,<1.20.0", - "scipy>=0.19.1" + "numpy>=1.14.6", + "scipy>=1.1.0" ] [tool.black] line-length = 79 diff --git a/setup.py b/setup.py index 9c9081a..65e0d49 100644 --- a/setup.py +++ b/setup.py @@ -12,7 +12,7 @@ MAINTAINER = "Yi-Xuan Xu" MAINTAINER_EMAIL = "xuyx@lamda.nju.edu.cn" URL = "https://github.com/LAMDA-NJU/Deep-Forest" -VERSION = "0.1.5" +VERSION = "0.1.6" def configuration(parent_package="", top_path=None): @@ -56,17 +56,16 @@ def configuration(parent_package="", top_path=None): "Topic :: Scientific/Engineering", "Operating System :: Microsoft :: Windows", "Operating System :: Unix", - "Programming Language :: Python :: 3.6", "Programming Language :: Python :: 3.7", "Programming Language :: Python :: 3.8", "Programming Language :: Python :: 3.9", ], - python_requires=">=3.6", + python_requires=">=3.7", install_requires=[ - "numpy>=1.16.0,<1.20.0", - "scipy>=0.19.1", + "numpy>=1.14.6", + "scipy>=1.1.0", "joblib>=0.11", - "scikit-learn>=0.22", + "scikit-learn>=1.0", ], setup_requires=["cython"], ) From eafa6ac1137948079b66b5c04d5bd19f2eb7b13b Mon Sep 17 00:00:00 2001 From: Yi-Xuan Xu Date: Sat, 17 Sep 2022 22:52:09 +0800 Subject: [PATCH 91/94] mnt: drop support on py36 --- .github/workflows/build-wheels.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/build-wheels.yml b/.github/workflows/build-wheels.yml index 260862d..14a8ff7 100644 --- a/.github/workflows/build-wheels.yml +++ b/.github/workflows/build-wheels.yml @@ -31,7 +31,7 @@ jobs: CIBW_ARCHS_WINDOWS: "AMD64" CIBW_ARCHS_MACOS: "x86_64" CIBW_BUILD: cp3*-macosx_x86_64 cp3*-win_amd64 cp3*-manylinux_x86_64 cp3*-manylinux_aarch64 - CIBW_SKIP: cp35-* + CIBW_SKIP: cp35-* cp36-* - name: Store artifacts uses: actions/upload-artifact@v2 with: From 851c854d8e99d10db77f9b8fbca71053cbf05b67 Mon Sep 17 00:00:00 2001 From: Yi-Xuan Xu Date: Sun, 18 Sep 2022 23:05:37 +0800 Subject: [PATCH 92/94] mnt: fix wheel build environment (#117) --- pyproject.toml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index a0093c2..e72d738 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,10 +1,10 @@ [build-system] requires = [ - "setuptools>=42", + "setuptools<60.0", "wheel", "Cython>=0.28.5", - "numpy>=1.14.6", - "scipy>=1.1.0" + "oldest-supported-numpy", + "scipy>=1.3.2", ] [tool.black] line-length = 79 From bcd625775f46b37f74db3dbe06662a4e7b9aaf90 Mon Sep 17 00:00:00 2001 From: xuyxu Date: Sat, 1 Oct 2022 11:16:30 +0800 Subject: [PATCH 93/94] mnt: bump version 0.1.7 --- setup.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/setup.py b/setup.py index 65e0d49..90158c6 100644 --- a/setup.py +++ b/setup.py @@ -12,7 +12,7 @@ MAINTAINER = "Yi-Xuan Xu" MAINTAINER_EMAIL = "xuyx@lamda.nju.edu.cn" URL = "https://github.com/LAMDA-NJU/Deep-Forest" -VERSION = "0.1.6" +VERSION = "0.1.7" def configuration(parent_package="", top_path=None): From 96e762b91a9205d9980b7013ea854392e5556ab8 Mon Sep 17 00:00:00 2001 From: David Pratella <36034699+dprat@users.noreply.github.com> Date: Thu, 30 Mar 2023 16:47:07 +0200 Subject: [PATCH 94/94] fix: correct `np.int` to `int` for numpy compatibility (#123) Co-authored-by: David Pratella --- deepforest/forest.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/deepforest/forest.py 
b/deepforest/forest.py index 90eb02c..a917729 100644 --- a/deepforest/forest.py +++ b/deepforest/forest.py @@ -200,7 +200,7 @@ def _partition_estimators(n_estimators, n_jobs): # Partition estimators between jobs n_estimators_per_job = np.full( - n_jobs, n_estimators // n_jobs, dtype=np.int + n_jobs, n_estimators // n_jobs, dtype=int ) n_estimators_per_job[: n_estimators % n_jobs] += 1 starts = np.cumsum(n_estimators_per_job) @@ -563,7 +563,7 @@ def _validate_y_class_weight(self, y): self.classes_ = [] self.n_classes_ = [] - y_store_unique_indices = np.zeros(y.shape, dtype=np.int) + y_store_unique_indices = np.zeros(y.shape, dtype=int) for k in range(self.n_outputs_): classes_k, y_store_unique_indices[:, k] = np.unique( y[:, k], return_inverse=True
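The `np.int` alias removed by this patch was deprecated in NumPy 1.20 and removed entirely in NumPy 1.24, so `dtype=np.int` now raises `AttributeError: module 'numpy' has no attribute 'int'`, while the builtin `int` yields the same integer dtype. A minimal sketch of the partitioning logic touched in `_partition_estimators`, with illustrative values for `n_estimators` and `n_jobs` (not taken from the diff):

    import numpy as np

    # Illustrative inputs; in deepforest these come from the forest object.
    n_estimators, n_jobs = 10, 4

    # Same partitioning as the patched _partition_estimators, using the
    # builtin int dtype instead of the removed np.int alias.
    n_estimators_per_job = np.full(n_jobs, n_estimators // n_jobs, dtype=int)
    n_estimators_per_job[: n_estimators % n_jobs] += 1  # spread the remainder
    starts = np.cumsum(n_estimators_per_job)

    print(n_estimators_per_job)  # [3 3 2 2]
    print(starts)                # [ 3  6  8 10]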
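For the `pandas.DataFrame` and list support added in [PATCH 85/94] via `check_X_y` and `check_array`, the sketch below shows the intended call pattern; the dataset, split, and hyper-parameters are illustrative assumptions rather than part of the patch:

    import pandas as pd
    from sklearn.datasets import load_iris
    from sklearn.model_selection import train_test_split

    from deepforest import CascadeForestClassifier

    # Load a small tabular dataset as a DataFrame / Series pair.
    X, y = load_iris(return_X_y=True, as_frame=True)
    X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)

    model = CascadeForestClassifier(n_trees=50, random_state=0, verbose=0)
    model.fit(X_train, y_train)          # DataFrame/Series validated by check_X_y
    proba = model.predict_proba(X_test)  # DataFrame validated by check_array
    preds = model.predict(X_test.values.tolist())  # nested Python lists work too

    print("test accuracy:", (preds == y_test.values).mean())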