From 1d3564becbb6356a5a20d6d5006cf82f12560d9a Mon Sep 17 00:00:00 2001 From: Vadim Levin Date: Tue, 13 Dec 2022 23:25:34 +0300 Subject: [PATCH 1/6] config: timm version bump 0.4.12 -> 0.6.12 --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index f645db4d..aa55ff5f 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,7 +1,7 @@ torchvision>=0.5.0 pretrainedmodels==0.7.4 efficientnet-pytorch==0.7.1 -timm==0.4.12 +timm==0.6.12 tqdm pillow From aca487d2b9062bc0ed1e5f9536ee4f0c1f597252 Mon Sep 17 00:00:00 2001 From: Vadim Levin Date: Tue, 13 Dec 2022 23:47:22 +0300 Subject: [PATCH 2/6] fix: timm based EfficientNetBaseEncoder after timm upgrade to 0.6.12 EfficientNet act1 layer in timm is merged with bn1 layer after ab49d275de8a9c344aea086fd86d04c4cabb6098 commit --- segmentation_models_pytorch/encoders/timm_efficientnet.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/segmentation_models_pytorch/encoders/timm_efficientnet.py b/segmentation_models_pytorch/encoders/timm_efficientnet.py index 2358daf0..1af1824d 100644 --- a/segmentation_models_pytorch/encoders/timm_efficientnet.py +++ b/segmentation_models_pytorch/encoders/timm_efficientnet.py @@ -105,7 +105,7 @@ def __init__(self, stage_idxs, out_channels, depth=5, **kwargs): def get_stages(self): return [ nn.Identity(), - nn.Sequential(self.conv_stem, self.bn1, self.act1), + nn.Sequential(self.conv_stem, self.bn1), self.blocks[: self._stage_idxs[0]], self.blocks[self._stage_idxs[0] : self._stage_idxs[1]], self.blocks[self._stage_idxs[1] : self._stage_idxs[2]], From 9549ca96734791451faf3da3460017712919316f Mon Sep 17 00:00:00 2001 From: Vadim Levin Date: Tue, 13 Dec 2022 23:58:53 +0300 Subject: [PATCH 3/6] fix: use "public" API for timm models availability check --- segmentation_models_pytorch/encoders/__init__.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git 
a/segmentation_models_pytorch/encoders/__init__.py b/segmentation_models_pytorch/encoders/__init__.py index 3a40be56..0e1bffd2 100644 --- a/segmentation_models_pytorch/encoders/__init__.py +++ b/segmentation_models_pytorch/encoders/__init__.py @@ -97,9 +97,9 @@ def get_preprocessing_params(encoder_name, pretrained="imagenet"): if encoder_name.startswith("tu-"): encoder_name = encoder_name[3:] - if encoder_name not in timm.models.registry._model_has_pretrained: + if not timm.models.is_model_pretrained(encoder_name): raise ValueError(f"{encoder_name} does not have pretrained weights and preprocessing parameters") - settings = timm.models.registry._model_default_cfgs[encoder_name] + settings = timm.models.get_pretrained_cfg(encoder_name) else: all_settings = encoders[encoder_name]["pretrained_settings"] if pretrained not in all_settings.keys(): From c4f02e287040f675ea8137fad401469a113859e8 Mon Sep 17 00:00:00 2001 From: Vadim Levin Date: Wed, 14 Dec 2022 00:08:34 +0300 Subject: [PATCH 4/6] fix: timm SkNetEncoder after timm upgrade to 0.6.12 `zero_init_last_bn` was renamed to `zero_init_last` in 372ad5fa0dbeb74dcec81db06e9ff69b3d5a2eb6 commit --- segmentation_models_pytorch/encoders/timm_sknet.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/segmentation_models_pytorch/encoders/timm_sknet.py b/segmentation_models_pytorch/encoders/timm_sknet.py index 9969c90a..0b5e4776 100644 --- a/segmentation_models_pytorch/encoders/timm_sknet.py +++ b/segmentation_models_pytorch/encoders/timm_sknet.py @@ -73,7 +73,7 @@ def load_state_dict(self, state_dict, **kwargs): "out_channels": (3, 64, 64, 128, 256, 512), "block": SelectiveKernelBasic, "layers": [2, 2, 2, 2], - "zero_init_last_bn": False, + "zero_init_last": False, "block_args": {"sk_kwargs": {"rd_ratio": 1 / 8, "split_input": True}}, }, }, @@ -84,7 +84,7 @@ def load_state_dict(self, state_dict, **kwargs): "out_channels": (3, 64, 64, 128, 256, 512), "block": SelectiveKernelBasic, "layers": [3, 4, 6, 3], 
- "zero_init_last_bn": False, + "zero_init_last": False, "block_args": {"sk_kwargs": {"rd_ratio": 1 / 8, "split_input": True}}, }, }, @@ -95,7 +95,7 @@ def load_state_dict(self, state_dict, **kwargs): "out_channels": (3, 64, 256, 512, 1024, 2048), "block": SelectiveKernelBottleneck, "layers": [3, 4, 6, 3], - "zero_init_last_bn": False, + "zero_init_last": False, "cardinality": 32, "base_width": 4, }, From 5e15774cd98588ba6a9c4362bc47bada8531ad3c Mon Sep 17 00:00:00 2001 From: Vadim Levin Date: Wed, 14 Dec 2022 00:42:32 +0300 Subject: [PATCH 5/6] fix: timm RegNet encoder after timm upgrade to 0.6.12 Instead of a plain dict config, RegNet uses the RegNetCfg dataclass. The dataclasses module was added in Python 3.7, which is why the minimum required Python version for the package is also increased. --- pyproject.toml | 4 +- .../encoders/timm_regnet.py | 51 ++++++++++--------- setup.py | 2 +- 3 files changed, 29 insertions(+), 28 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index 7cd083f0..cca7d480 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [tool.black] line-length = 119 -target-version = ['py36', 'py37', 'py38'] +target-version = ['py37', 'py38'] include = '\.pyi?$' exclude = ''' /( @@ -16,4 +16,4 @@ exclude = ''' | build | dist )/ -''' \ No newline at end of file +''' diff --git a/segmentation_models_pytorch/encoders/timm_regnet.py b/segmentation_models_pytorch/encoders/timm_regnet.py index 1c11c586..e7b37997 100644 --- a/segmentation_models_pytorch/encoders/timm_regnet.py +++ b/segmentation_models_pytorch/encoders/timm_regnet.py @@ -1,10 +1,11 @@ from ._base import EncoderMixin -from timm.models.regnet import RegNet +from timm.models.regnet import RegNet, RegNetCfg import torch.nn as nn class RegNetEncoder(RegNet, EncoderMixin): def __init__(self, out_channels, depth=5, **kwargs): + kwargs["cfg"] = RegNetCfg(**kwargs["cfg"]) super().__init__(**kwargs) self._depth = depth self._out_channels = out_channels @@ -141,7 +142,7 @@ def _mcfg(**kwargs):
"pretrained_settings": pretrained_settings["timm-regnetx_002"], "params": { "out_channels": (3, 32, 24, 56, 152, 368), - "cfg": _mcfg(w0=24, wa=36.44, wm=2.49, group_w=8, depth=13), + "cfg": _mcfg(w0=24, wa=36.44, wm=2.49, group_size=8, depth=13), }, }, "timm-regnetx_004": { @@ -149,7 +150,7 @@ def _mcfg(**kwargs): "pretrained_settings": pretrained_settings["timm-regnetx_004"], "params": { "out_channels": (3, 32, 32, 64, 160, 384), - "cfg": _mcfg(w0=24, wa=24.48, wm=2.54, group_w=16, depth=22), + "cfg": _mcfg(w0=24, wa=24.48, wm=2.54, group_size=16, depth=22), }, }, "timm-regnetx_006": { @@ -157,7 +158,7 @@ def _mcfg(**kwargs): "pretrained_settings": pretrained_settings["timm-regnetx_006"], "params": { "out_channels": (3, 32, 48, 96, 240, 528), - "cfg": _mcfg(w0=48, wa=36.97, wm=2.24, group_w=24, depth=16), + "cfg": _mcfg(w0=48, wa=36.97, wm=2.24, group_size=24, depth=16), }, }, "timm-regnetx_008": { @@ -165,7 +166,7 @@ def _mcfg(**kwargs): "pretrained_settings": pretrained_settings["timm-regnetx_008"], "params": { "out_channels": (3, 32, 64, 128, 288, 672), - "cfg": _mcfg(w0=56, wa=35.73, wm=2.28, group_w=16, depth=16), + "cfg": _mcfg(w0=56, wa=35.73, wm=2.28, group_size=16, depth=16), }, }, "timm-regnetx_016": { @@ -173,7 +174,7 @@ def _mcfg(**kwargs): "pretrained_settings": pretrained_settings["timm-regnetx_016"], "params": { "out_channels": (3, 32, 72, 168, 408, 912), - "cfg": _mcfg(w0=80, wa=34.01, wm=2.25, group_w=24, depth=18), + "cfg": _mcfg(w0=80, wa=34.01, wm=2.25, group_size=24, depth=18), }, }, "timm-regnetx_032": { @@ -181,7 +182,7 @@ def _mcfg(**kwargs): "pretrained_settings": pretrained_settings["timm-regnetx_032"], "params": { "out_channels": (3, 32, 96, 192, 432, 1008), - "cfg": _mcfg(w0=88, wa=26.31, wm=2.25, group_w=48, depth=25), + "cfg": _mcfg(w0=88, wa=26.31, wm=2.25, group_size=48, depth=25), }, }, "timm-regnetx_040": { @@ -189,7 +190,7 @@ def _mcfg(**kwargs): "pretrained_settings": pretrained_settings["timm-regnetx_040"], "params": { 
"out_channels": (3, 32, 80, 240, 560, 1360), - "cfg": _mcfg(w0=96, wa=38.65, wm=2.43, group_w=40, depth=23), + "cfg": _mcfg(w0=96, wa=38.65, wm=2.43, group_size=40, depth=23), }, }, "timm-regnetx_064": { @@ -197,7 +198,7 @@ def _mcfg(**kwargs): "pretrained_settings": pretrained_settings["timm-regnetx_064"], "params": { "out_channels": (3, 32, 168, 392, 784, 1624), - "cfg": _mcfg(w0=184, wa=60.83, wm=2.07, group_w=56, depth=17), + "cfg": _mcfg(w0=184, wa=60.83, wm=2.07, group_size=56, depth=17), }, }, "timm-regnetx_080": { @@ -205,7 +206,7 @@ def _mcfg(**kwargs): "pretrained_settings": pretrained_settings["timm-regnetx_080"], "params": { "out_channels": (3, 32, 80, 240, 720, 1920), - "cfg": _mcfg(w0=80, wa=49.56, wm=2.88, group_w=120, depth=23), + "cfg": _mcfg(w0=80, wa=49.56, wm=2.88, group_size=120, depth=23), }, }, "timm-regnetx_120": { @@ -213,7 +214,7 @@ def _mcfg(**kwargs): "pretrained_settings": pretrained_settings["timm-regnetx_120"], "params": { "out_channels": (3, 32, 224, 448, 896, 2240), - "cfg": _mcfg(w0=168, wa=73.36, wm=2.37, group_w=112, depth=19), + "cfg": _mcfg(w0=168, wa=73.36, wm=2.37, group_size=112, depth=19), }, }, "timm-regnetx_160": { @@ -221,7 +222,7 @@ def _mcfg(**kwargs): "pretrained_settings": pretrained_settings["timm-regnetx_160"], "params": { "out_channels": (3, 32, 256, 512, 896, 2048), - "cfg": _mcfg(w0=216, wa=55.59, wm=2.1, group_w=128, depth=22), + "cfg": _mcfg(w0=216, wa=55.59, wm=2.1, group_size=128, depth=22), }, }, "timm-regnetx_320": { @@ -229,7 +230,7 @@ def _mcfg(**kwargs): "pretrained_settings": pretrained_settings["timm-regnetx_320"], "params": { "out_channels": (3, 32, 336, 672, 1344, 2520), - "cfg": _mcfg(w0=320, wa=69.86, wm=2.0, group_w=168, depth=23), + "cfg": _mcfg(w0=320, wa=69.86, wm=2.0, group_size=168, depth=23), }, }, # regnety @@ -238,7 +239,7 @@ def _mcfg(**kwargs): "pretrained_settings": pretrained_settings["timm-regnety_002"], "params": { "out_channels": (3, 32, 24, 56, 152, 368), - "cfg": _mcfg(w0=24, 
wa=36.44, wm=2.49, group_w=8, depth=13, se_ratio=0.25), + "cfg": _mcfg(w0=24, wa=36.44, wm=2.49, group_size=8, depth=13, se_ratio=0.25), }, }, "timm-regnety_004": { @@ -246,7 +247,7 @@ def _mcfg(**kwargs): "pretrained_settings": pretrained_settings["timm-regnety_004"], "params": { "out_channels": (3, 32, 48, 104, 208, 440), - "cfg": _mcfg(w0=48, wa=27.89, wm=2.09, group_w=8, depth=16, se_ratio=0.25), + "cfg": _mcfg(w0=48, wa=27.89, wm=2.09, group_size=8, depth=16, se_ratio=0.25), }, }, "timm-regnety_006": { @@ -254,7 +255,7 @@ def _mcfg(**kwargs): "pretrained_settings": pretrained_settings["timm-regnety_006"], "params": { "out_channels": (3, 32, 48, 112, 256, 608), - "cfg": _mcfg(w0=48, wa=32.54, wm=2.32, group_w=16, depth=15, se_ratio=0.25), + "cfg": _mcfg(w0=48, wa=32.54, wm=2.32, group_size=16, depth=15, se_ratio=0.25), }, }, "timm-regnety_008": { @@ -262,7 +263,7 @@ def _mcfg(**kwargs): "pretrained_settings": pretrained_settings["timm-regnety_008"], "params": { "out_channels": (3, 32, 64, 128, 320, 768), - "cfg": _mcfg(w0=56, wa=38.84, wm=2.4, group_w=16, depth=14, se_ratio=0.25), + "cfg": _mcfg(w0=56, wa=38.84, wm=2.4, group_size=16, depth=14, se_ratio=0.25), }, }, "timm-regnety_016": { @@ -270,7 +271,7 @@ def _mcfg(**kwargs): "pretrained_settings": pretrained_settings["timm-regnety_016"], "params": { "out_channels": (3, 32, 48, 120, 336, 888), - "cfg": _mcfg(w0=48, wa=20.71, wm=2.65, group_w=24, depth=27, se_ratio=0.25), + "cfg": _mcfg(w0=48, wa=20.71, wm=2.65, group_size=24, depth=27, se_ratio=0.25), }, }, "timm-regnety_032": { @@ -278,7 +279,7 @@ def _mcfg(**kwargs): "pretrained_settings": pretrained_settings["timm-regnety_032"], "params": { "out_channels": (3, 32, 72, 216, 576, 1512), - "cfg": _mcfg(w0=80, wa=42.63, wm=2.66, group_w=24, depth=21, se_ratio=0.25), + "cfg": _mcfg(w0=80, wa=42.63, wm=2.66, group_size=24, depth=21, se_ratio=0.25), }, }, "timm-regnety_040": { @@ -286,7 +287,7 @@ def _mcfg(**kwargs): "pretrained_settings": 
pretrained_settings["timm-regnety_040"], "params": { "out_channels": (3, 32, 128, 192, 512, 1088), - "cfg": _mcfg(w0=96, wa=31.41, wm=2.24, group_w=64, depth=22, se_ratio=0.25), + "cfg": _mcfg(w0=96, wa=31.41, wm=2.24, group_size=64, depth=22, se_ratio=0.25), }, }, "timm-regnety_064": { @@ -294,7 +295,7 @@ def _mcfg(**kwargs): "pretrained_settings": pretrained_settings["timm-regnety_064"], "params": { "out_channels": (3, 32, 144, 288, 576, 1296), - "cfg": _mcfg(w0=112, wa=33.22, wm=2.27, group_w=72, depth=25, se_ratio=0.25), + "cfg": _mcfg(w0=112, wa=33.22, wm=2.27, group_size=72, depth=25, se_ratio=0.25), }, }, "timm-regnety_080": { @@ -302,7 +303,7 @@ def _mcfg(**kwargs): "pretrained_settings": pretrained_settings["timm-regnety_080"], "params": { "out_channels": (3, 32, 168, 448, 896, 2016), - "cfg": _mcfg(w0=192, wa=76.82, wm=2.19, group_w=56, depth=17, se_ratio=0.25), + "cfg": _mcfg(w0=192, wa=76.82, wm=2.19, group_size=56, depth=17, se_ratio=0.25), }, }, "timm-regnety_120": { @@ -310,7 +311,7 @@ def _mcfg(**kwargs): "pretrained_settings": pretrained_settings["timm-regnety_120"], "params": { "out_channels": (3, 32, 224, 448, 896, 2240), - "cfg": _mcfg(w0=168, wa=73.36, wm=2.37, group_w=112, depth=19, se_ratio=0.25), + "cfg": _mcfg(w0=168, wa=73.36, wm=2.37, group_size=112, depth=19, se_ratio=0.25), }, }, "timm-regnety_160": { @@ -318,7 +319,7 @@ def _mcfg(**kwargs): "pretrained_settings": pretrained_settings["timm-regnety_160"], "params": { "out_channels": (3, 32, 224, 448, 1232, 3024), - "cfg": _mcfg(w0=200, wa=106.23, wm=2.48, group_w=112, depth=18, se_ratio=0.25), + "cfg": _mcfg(w0=200, wa=106.23, wm=2.48, group_size=112, depth=18, se_ratio=0.25), }, }, "timm-regnety_320": { @@ -326,7 +327,7 @@ def _mcfg(**kwargs): "pretrained_settings": pretrained_settings["timm-regnety_320"], "params": { "out_channels": (3, 32, 232, 696, 1392, 3712), - "cfg": _mcfg(w0=232, wa=115.89, wm=2.53, group_w=232, depth=20, se_ratio=0.25), + "cfg": _mcfg(w0=232, wa=115.89, wm=2.53, 
group_size=232, depth=20, se_ratio=0.25), }, }, } diff --git a/setup.py b/setup.py index a472352f..ae50ef0d 100644 --- a/setup.py +++ b/setup.py @@ -17,7 +17,7 @@ URL = "https://github.com/qubvel/segmentation_models.pytorch" EMAIL = "qubvel@gmail.com" AUTHOR = "Pavel Iakubovskii" -REQUIRES_PYTHON = ">=3.6.0" +REQUIRES_PYTHON = ">=3.7.0" VERSION = None # The rest you shouldn't have to touch too much :) From a545e6fb5d1e1fa6915923aeff2dcf282a29d277 Mon Sep 17 00:00:00 2001 From: Vadim Levin Date: Wed, 14 Dec 2022 12:35:35 +0300 Subject: [PATCH 6/6] feat: bump Python version used in Github Actions to 3.7 --- .github/workflows/pypi.yml | 2 +- .github/workflows/tests.yml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/pypi.yml b/.github/workflows/pypi.yml index 496bb7b4..f7bc352d 100644 --- a/.github/workflows/pypi.yml +++ b/.github/workflows/pypi.yml @@ -12,7 +12,7 @@ jobs: - name: Set up Python uses: actions/setup-python@v2 with: - python-version: '3.6' + python-version: '3.7' - name: Install dependencies run: | python -m pip install --upgrade pip diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml index 3821b358..e5ab2adc 100644 --- a/.github/workflows/tests.yml +++ b/.github/workflows/tests.yml @@ -18,7 +18,7 @@ jobs: - name: Set up Python ${{ matrix.python-version }} uses: actions/setup-python@v2 with: - python-version: 3.6 + python-version: 3.7 - name: Install dependencies run: | python -m pip install --upgrade pip