
Commit d84c08e

LysandreJik authored and mfuntowicz committed
Solve circular dependency & __main__
1 parent e2d4895 commit d84c08e

26 files changed: +420 −408 lines
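The circular dependency in question: `configuration_utils.py` both defined the ONNX export classes (note the `dataclass` import it drops below) and was imported by the `transformers.onnx` package, while the model configuration files imported those classes back through `configuration_utils`. The commit breaks the cycle by letting the ONNX classes live under `transformers.onnx` and importing them from there. A minimal sketch of the failure mode and of the fix, with illustrative module names rather than the exact repo layout:

# BEFORE (cycle): pkg/config.py and pkg/onnx.py import from each other.
# Loading either one executes the other before it has finished
# initializing, and Python raises:
#   ImportError: cannot import name 'OnnxConfig' from partially
#   initialized module ...
#
# AFTER (no cycle): the shared classes move to a leaf module that
# imports nothing from the rest of the package.

# pkg/onnx_defs.py -- leaf module, safe for everyone to import
class OnnxVariable:
    def __init__(self, name, axes, repeated=1, value=None):
        self.name = name          # tensor name, e.g. "last_hidden_state"
        self.axes = axes          # dynamic axes, e.g. {0: "batch"}
        self.repeated = repeated  # count, or a "$config.<attr>" expression
        self.value = value

# pkg/config.py and pkg/onnx.py now both do:
#   from pkg.onnx_defs import OnnxVariable
# and neither triggers the other's import at load time.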

src/transformers/__init__.py

Lines changed: 2 additions & 2 deletions
@@ -60,7 +60,7 @@
 
 # Base objects, independent of any specific backend
 _import_structure = {
-    "configuration_utils": ["PretrainedConfig", "OnnxConfig", "OnnxVariable"],
+    "configuration_utils": ["OnnxConfig", "OnnxVariable", "PretrainedConfig"],
     "data": [
         "DataProcessor",
         "InputExample",
@@ -1640,7 +1640,7 @@
 # Direct imports for type-checking
 if TYPE_CHECKING:
     # Configuration
-    from .configuration_utils import PretrainedConfig, OnnxConfig, OnnxVariable
+    from .configuration_utils import OnnxConfig, OnnxVariable, PretrainedConfig
 
     # Data
     from .data import (
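The `_import_structure` dict maps each submodule to its public symbols; at the bottom of every such `__init__.py`, transformers feeds it to a lazy-module helper (`_LazyModule`, living in `file_utils.py` around this time) so submodules only load on first attribute access, while the `TYPE_CHECKING` branch gives static checkers real imports. A simplified stand-in for that helper, to show the mechanics rather than the actual implementation:

import importlib
import types

class LazyModule(types.ModuleType):
    """Minimal sketch of a lazy module driven by an _import_structure dict."""

    def __init__(self, name, import_structure):
        super().__init__(name)
        # invert {submodule: [symbols]} into {symbol: submodule}
        self._symbol_to_module = {
            symbol: submodule
            for submodule, symbols in import_structure.items()
            for symbol in symbols
        }

    def __getattr__(self, symbol):
        # import the defining submodule only when the symbol is first touched
        submodule = self._symbol_to_module[symbol]
        module = importlib.import_module(f".{submodule}", self.__name__)
        return getattr(module, symbol)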

src/transformers/configuration_utils.py

Lines changed: 1 addition & 2 deletions
@@ -19,8 +19,7 @@
 import copy
 import json
 import os
-from dataclasses import dataclass
-from typing import Any, Dict, Tuple, Union, Optional, Set, List, NamedTuple
+from typing import Any, Dict, Tuple, Union
 
 from . import __version__
 from .file_utils import CONFIG_NAME, PushToHubMixin, cached_path, hf_bucket_url, is_offline_mode, is_remote_url

src/transformers/models/albert/__init__.py

Lines changed: 2 additions & 2 deletions
@@ -28,7 +28,7 @@
 
 
 _import_structure = {
-    "configuration_albert": ["ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "ALBERT_ONNX_CONFIG", "AlbertConfig"],
+    "configuration_albert": ["ALBERT_ONNX_CONFIG", "ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "AlbertConfig"],
 }
 
 if is_sentencepiece_available():
@@ -67,7 +67,7 @@
 
 
 if TYPE_CHECKING:
-    from .configuration_albert import ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ALBERT_ONNX_CONFIG, AlbertConfig
+    from .configuration_albert import ALBERT_ONNX_CONFIG, ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, AlbertConfig
 
     if is_sentencepiece_available():
         from .tokenization_albert import AlbertTokenizer

src/transformers/models/albert/configuration_albert.py

Lines changed: 3 additions & 5 deletions
@@ -18,6 +18,7 @@
 from ...configuration_utils import PretrainedConfig
 from ...onnx import OnnxConfig, OnnxVariable
 
+
 ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
     "albert-base-v1": "https://huggingface.co/albert-base-v1/resolve/main/config.json",
     "albert-large-v1": "https://huggingface.co/albert-large-v1/resolve/main/config.json",
@@ -177,8 +178,5 @@ def __init__(
         "enable_bias_gelu": True,
         "enable_gelu_approximation": False,
     },
-    optimizer_additional_args={
-        "num_heads": "$config.num_attention_heads",
-        "hidden_size": "$config.hidden_size"
-    }
-)
+    optimizer_additional_args={"num_heads": "$config.num_attention_heads", "hidden_size": "$config.hidden_size"},
+)

src/transformers/models/bart/__init__.py

Lines changed: 12 additions & 2 deletions
@@ -27,7 +27,12 @@
 
 
 _import_structure = {
-    "configuration_bart": ["BART_PRETRAINED_CONFIG_ARCHIVE_MAP", "BartConfig", "BART_ONNX_CONFIG", "BART_ONNX_CONFIG_WITH_PAST"],
+    "configuration_bart": [
+        "BART_ONNX_CONFIG",
+        "BART_ONNX_CONFIG_WITH_PAST",
+        "BART_PRETRAINED_CONFIG_ARCHIVE_MAP",
+        "BartConfig",
+    ],
     "tokenization_bart": ["BartTokenizer"],
 }
 
@@ -59,7 +64,12 @@
     ]
 
 if TYPE_CHECKING:
-    from .configuration_bart import BART_PRETRAINED_CONFIG_ARCHIVE_MAP, BART_ONNX_CONFIG, BART_ONNX_CONFIG_WITH_PAST, BartConfig
+    from .configuration_bart import (
+        BART_ONNX_CONFIG,
+        BART_ONNX_CONFIG_WITH_PAST,
+        BART_PRETRAINED_CONFIG_ARCHIVE_MAP,
+        BartConfig,
+    )
     from .tokenization_bart import BartTokenizer
 
     if is_tokenizers_available():

src/transformers/models/bart/configuration_bart.py

Lines changed: 4 additions & 14 deletions
@@ -198,17 +198,12 @@ def hidden_size(self) -> int:
         OnnxVariable("last_hidden_state", {0: "batch", 1: "sequence"}, repeated=1, value=None),
         OnnxVariable("encoder_last_hidden_state", {0: "batch", 1: "sequence"}, repeated=1, value=None),
     ],
-    runtime_config_overrides={
-        "use_cache": False
-    },
+    runtime_config_overrides={"use_cache": False},
     use_external_data_format=False,
     minimum_required_onnx_opset=11,
     optimizer="bert",
     optimizer_features=None,
-    optimizer_additional_args={
-        "num_heads": "$config.decoder_attention_heads",
-        "hidden_size": "$config.d_model"
-    }
+    optimizer_additional_args={"num_heads": "$config.decoder_attention_heads", "hidden_size": "$config.d_model"},
 )
 
 BART_ONNX_CONFIG_WITH_PAST = OnnxConfig(
@@ -221,15 +216,10 @@ def hidden_size(self) -> int:
         OnnxVariable("past_keys", {0: "batch", 2: "sequence"}, repeated="$config.decoder_layers * 4", value=None),
         OnnxVariable("encoder_last_hidden_state", {0: "batch", 1: "sequence"}, repeated=1, value=None),
     ],
-    runtime_config_overrides={
-        "use_cache": True
-    },
+    runtime_config_overrides={"use_cache": True},
     use_external_data_format=False,
     minimum_required_onnx_opset=11,
     optimizer="bert",
     optimizer_features=None,
-    optimizer_additional_args={
-        "num_heads": "$config.decoder_attention_heads",
-        "hidden_size": "$config.d_model"
-    }
+    optimizer_additional_args={"num_heads": "$config.decoder_attention_heads", "hidden_size": "$config.d_model"},
 )

src/transformers/models/bert/__init__.py

Lines changed: 2 additions & 2 deletions
@@ -28,7 +28,7 @@
 
 
 _import_structure = {
-    "configuration_bert": ["BERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "BERT_ONNX_CONFIG", "BertConfig"],
+    "configuration_bert": ["BERT_ONNX_CONFIG", "BERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "BertConfig"],
     "tokenization_bert": ["BasicTokenizer", "BertTokenizer", "WordpieceTokenizer"],
 }
 
@@ -83,7 +83,7 @@
 ]
 
 if TYPE_CHECKING:
-    from .configuration_bert import BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, BERT_ONNX_CONFIG, BertConfig
+    from .configuration_bert import BERT_ONNX_CONFIG, BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, BertConfig
     from .tokenization_bert import BasicTokenizer, BertTokenizer, WordpieceTokenizer
 
     if is_tokenizers_available():

src/transformers/models/bert/configuration_bert.py

Lines changed: 5 additions & 6 deletions
@@ -14,7 +14,9 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 """ BERT model configuration """
-from ...onnx import OnnxConfig, OnnxVariable
+
+from ...configuration_utils import PretrainedConfig
+from ...onnx.config import OnnxConfig, OnnxVariable
 from ...utils import logging
 
 
@@ -179,8 +181,5 @@ def __init__(
         "enable_bias_gelu": True,
         "enable_gelu_approximation": False,
     },
-    optimizer_additional_args={
-        "num_heads": "$config.num_attention_heads",
-        "hidden_size": "$config.hidden_size"
-    }
-)
+    optimizer_additional_args={"num_heads": "$config.num_attention_heads", "hidden_size": "$config.hidden_size"},
+)
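One detail worth flagging in this file: BERT pulls the ONNX classes from `...onnx.config` while the other models import from `...onnx`. Both bind the same names, but importing from the defining submodule is more robust inside an import cycle, since `from pkg import X` fails whenever `pkg/__init__.py` is still mid-execution and has not bound `X` yet. A sketch of the distinction, assuming the classes are defined in `transformers/onnx/config.py` as this hunk suggests:

# Resolved via the package __init__: requires transformers/onnx/__init__.py
# to have already bound OnnxConfig -- brittle while a cycle is unwinding.
from transformers.onnx import OnnxConfig, OnnxVariable

# Resolved via the defining module itself: Python loads onnx/config.py
# directly, so a partially initialized package __init__ is not a problem.
from transformers.onnx.config import OnnxConfig, OnnxVariable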

src/transformers/models/distilbert/__init__.py

Lines changed: 12 additions & 2 deletions
@@ -22,7 +22,12 @@
 
 
 _import_structure = {
-    "configuration_distilbert": ["DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "DISTILBERT_ONNX_CONFIG", "DISTILBERT_TOKEN_CLASSIFICATION_ONNX_CONFIG", "DistilBertConfig"],
+    "configuration_distilbert": [
+        "DISTILBERT_ONNX_CONFIG",
+        "DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP",
+        "DISTILBERT_TOKEN_CLASSIFICATION_ONNX_CONFIG",
+        "DistilBertConfig",
+    ],
     "tokenization_distilbert": ["DistilBertTokenizer"],
 }
 
@@ -56,7 +61,12 @@
 
 
 if TYPE_CHECKING:
-    from .configuration_distilbert import DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, DISTILBERT_ONNX_CONFIG, DISTILBERT_TOKEN_CLASSIFICATION_ONNX_CONFIG, DistilBertConfig
+    from .configuration_distilbert import (
+        DISTILBERT_ONNX_CONFIG,
+        DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
+        DISTILBERT_TOKEN_CLASSIFICATION_ONNX_CONFIG,
+        DistilBertConfig,
+    )
     from .tokenization_distilbert import DistilBertTokenizer
 
     if is_tokenizers_available():

src/transformers/models/distilbert/configuration_distilbert.py

Lines changed: 5 additions & 10 deletions
@@ -14,7 +14,8 @@
 # limitations under the License.
 """ DistilBERT model configuration """
 
-from ...configuration_utils import PretrainedConfig, OnnxConfig, OnnxVariable
+from ...configuration_utils import PretrainedConfig
+from ...onnx import OnnxConfig, OnnxVariable
 from ...utils import logging
 
 
@@ -159,10 +160,7 @@ def num_hidden_layers(self):
         "enable_bias_gelu": True,
         "enable_gelu_approximation": False,
     },
-    optimizer_additional_args={
-        "num_heads": "$config.num_attention_heads",
-        "hidden_size": "$config.hidden_size"
-    }
+    optimizer_additional_args={"num_heads": "$config.num_attention_heads", "hidden_size": "$config.hidden_size"},
 )
 
 
@@ -188,8 +186,5 @@ def num_hidden_layers(self):
         "enable_bias_gelu": True,
         "enable_gelu_approximation": False,
     },
-    optimizer_additional_args={
-        "num_heads": "$config.num_attention_heads",
-        "hidden_size": "$config.hidden_size"
-    }
-)
+    optimizer_additional_args={"num_heads": "$config.num_attention_heads", "hidden_size": "$config.hidden_size"},
+)

src/transformers/models/gpt2/__init__.py

Lines changed: 12 additions & 2 deletions
@@ -28,7 +28,12 @@
 
 
 _import_structure = {
-    "configuration_gpt2": ["GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPT2_ONNX_CONFIG", "GPT2_ONNX_CONFIG_WITH_PAST", "GPT2Config"],
+    "configuration_gpt2": [
+        "GPT2_ONNX_CONFIG",
+        "GPT2_ONNX_CONFIG_WITH_PAST",
+        "GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP",
+        "GPT2Config",
+    ],
     "tokenization_gpt2": ["GPT2Tokenizer"],
 }
 
@@ -61,7 +66,12 @@
     _import_structure["modeling_flax_gpt2"] = ["FlaxGPT2LMHeadModel", "FlaxGPT2Model", "FlaxGPT2PreTrainedModel"]
 
 if TYPE_CHECKING:
-    from .configuration_gpt2 import GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP, GPT2_ONNX_CONFIG, GPT2_ONNX_CONFIG_WITH_PAST, GPT2Config
+    from .configuration_gpt2 import (
+        GPT2_ONNX_CONFIG,
+        GPT2_ONNX_CONFIG_WITH_PAST,
+        GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
+        GPT2Config,
+    )
     from .tokenization_gpt2 import GPT2Tokenizer
 
     if is_tokenizers_available():

src/transformers/models/gpt2/configuration_gpt2.py

Lines changed: 5 additions & 15 deletions
@@ -206,17 +206,12 @@ def num_hidden_layers(self):
     outputs=[
         OnnxVariable("last_hidden_state", {0: "sequence", 1: "batch"}, repeated=1, value=None),
     ],
-    runtime_config_overrides={
-        "use_cache": False
-    },
+    runtime_config_overrides={"use_cache": False},
     use_external_data_format=False,
     minimum_required_onnx_opset=11,
     optimizer="gpt2",
     optimizer_features=None,
-    optimizer_additional_args={
-        "num_heads": "$config.num_attention_heads",
-        "hidden_size": "$config.hidden_size"
-    }
+    optimizer_additional_args={"num_heads": "$config.num_attention_heads", "hidden_size": "$config.hidden_size"},
 )
 
 GPT2_ONNX_CONFIG_WITH_PAST = OnnxConfig(
@@ -228,15 +223,10 @@ def num_hidden_layers(self):
         OnnxVariable("last_hidden_state", {0: "sequence", 1: "batch"}, repeated=1, value=None),
         OnnxVariable("past_key_values", {0: "batch", 2: "sequence"}, repeated="$config.n_layer * 2", value=None),
     ],
-    runtime_config_overrides={
-        "use_cache": True
-    },
+    runtime_config_overrides={"use_cache": True},
     use_external_data_format=False,
     minimum_required_onnx_opset=11,
     optimizer="gpt2",
     optimizer_features=None,
-    optimizer_additional_args={
-        "num_heads": "$config.num_attention_heads",
-        "hidden_size": "$config.hidden_size"
-    }
-)
+    optimizer_additional_args={"num_heads": "$config.num_attention_heads", "hidden_size": "$config.hidden_size"},
+)
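The "$config.n_layer * 2" and "$config.num_attention_heads" strings above are deferred expressions, resolved against the concrete model config when the export actually runs. The real resolver lives in the new `transformers.onnx` package and is not shown in this diff, so the following is only a plausible sketch of the convention:

import re

def resolve(expr, config):
    """Substitute $config.<attr> with getattr(config, attr), then evaluate
    the leftover integer arithmetic (e.g. "12 * 2" -> 24)."""
    if not isinstance(expr, str):
        return expr  # already a literal, e.g. repeated=1
    substituted = re.sub(
        r"\$config\.(\w+)",
        lambda m: str(getattr(config, m.group(1))),
        expr,
    )
    return eval(substituted)  # config-derived input only

class DummyGPT2Config:
    n_layer = 12
    num_attention_heads = 12

assert resolve("$config.n_layer * 2", DummyGPT2Config()) == 24
assert resolve(1, DummyGPT2Config()) == 1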

src/transformers/models/longformer/__init__.py

Lines changed: 10 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -22,7 +22,11 @@
2222

2323

2424
_import_structure = {
25-
"configuration_longformer": ["LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "LONGFORMER_ONNX_CONFIG", "LongformerConfig"],
25+
"configuration_longformer": [
26+
"LONGFORMER_ONNX_CONFIG",
27+
"LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
28+
"LongformerConfig",
29+
],
2630
"tokenization_longformer": ["LongformerTokenizer"],
2731
}
2832

@@ -57,7 +61,11 @@
5761

5862

5963
if TYPE_CHECKING:
60-
from .configuration_longformer import LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, LONGFORMER_ONNX_CONFIG, LongformerConfig
64+
from .configuration_longformer import (
65+
LONGFORMER_ONNX_CONFIG,
66+
LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
67+
LongformerConfig,
68+
)
6169
from .tokenization_longformer import LongformerTokenizer
6270

6371
if is_tokenizers_available():

src/transformers/models/longformer/configuration_longformer.py

Lines changed: 2 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -95,8 +95,5 @@ def __init__(self, attention_window: Union[List[int], int] = 512, sep_token_id:
9595
"enable_bias_gelu": True,
9696
"enable_gelu_approximation": False,
9797
},
98-
optimizer_additional_args={
99-
"num_heads": "$config.num_attention_heads",
100-
"hidden_size": "$config.hidden_size"
101-
}
102-
)
98+
optimizer_additional_args={"num_heads": "$config.num_attention_heads", "hidden_size": "$config.hidden_size"},
99+
)

src/transformers/models/roberta/__init__.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -28,7 +28,7 @@
2828

2929

3030
_import_structure = {
31-
"configuration_roberta": ["ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP", "ROBERTA_ONNX_CONFIG", "RobertaConfig"],
31+
"configuration_roberta": ["ROBERTA_ONNX_CONFIG", "ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP", "RobertaConfig"],
3232
"tokenization_roberta": ["RobertaTokenizer"],
3333
}
3434

@@ -74,7 +74,7 @@
7474

7575

7676
if TYPE_CHECKING:
77-
from .configuration_roberta import ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, ROBERTA_ONNX_CONFIG, RobertaConfig
77+
from .configuration_roberta import ROBERTA_ONNX_CONFIG, ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, RobertaConfig
7878
from .tokenization_roberta import RobertaTokenizer
7979

8080
if is_tokenizers_available():

src/transformers/models/roberta/configuration_roberta.py

Lines changed: 2 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -87,8 +87,5 @@ def __init__(self, pad_token_id=1, bos_token_id=0, eos_token_id=2, **kwargs):
8787
"enable_bias_gelu": True,
8888
"enable_gelu_approximation": False,
8989
},
90-
optimizer_additional_args={
91-
"num_heads": "$config.num_attention_heads",
92-
"hidden_size": "$config.hidden_size"
93-
}
94-
)
90+
optimizer_additional_args={"num_heads": "$config.num_attention_heads", "hidden_size": "$config.hidden_size"},
91+
)

src/transformers/models/t5/__init__.py

Lines changed: 2 additions & 2 deletions
@@ -30,7 +30,7 @@
 
 _import_structure = {
     # "configuration_t5": ["T5_PRETRAINED_CONFIG_ARCHIVE_MAP", "T5_ONNX_CONFIG", "T5_ONNX_CONFIG_WITH_PAST", "T5Config"],
-    "configuration_t5": ["T5_PRETRAINED_CONFIG_ARCHIVE_MAP", "T5_ONNX_CONFIG", "T5Config"],
+    "configuration_t5": ["T5_ONNX_CONFIG", "T5_PRETRAINED_CONFIG_ARCHIVE_MAP", "T5Config"],
 }
 
 if is_sentencepiece_available():
@@ -68,7 +68,7 @@
 
 if TYPE_CHECKING:
     # from .configuration_t5 import T5_PRETRAINED_CONFIG_ARCHIVE_MAP, T5_ONNX_CONFIG, T5_ONNX_CONFIG_WITH_PAST, T5Config
-    from .configuration_t5 import T5_PRETRAINED_CONFIG_ARCHIVE_MAP, T5_ONNX_CONFIG, T5Config
+    from .configuration_t5 import T5_ONNX_CONFIG, T5_PRETRAINED_CONFIG_ARCHIVE_MAP, T5Config
 
     if is_sentencepiece_available():
         from .tokenization_t5 import T5Tokenizer
