# coding=utf-8
# Copyright 2021 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import argparse
import collections
import importlib.util
import os
import re
import tempfile

import pandas as pd
from datasets import Dataset

from huggingface_hub import Repository


# All paths are set with the intent that you run this script from the root of the repo with the command
# python utils/update_metadata.py
TRANSFORMERS_PATH = "src/transformers"

# This is to make sure the transformers module imported is the one in the repo.
spec = importlib.util.spec_from_file_location(
    "transformers",
    os.path.join(TRANSFORMERS_PATH, "__init__.py"),
    submodule_search_locations=[TRANSFORMERS_PATH],
)
transformers_module = spec.loader.load_module()

# Regexes that match TF/Flax/PT model names.
_re_tf_models = re.compile(r"TF(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
_re_flax_models = re.compile(r"Flax(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
# This one will also match any TF or Flax model, so it needs to be tried in an else branch after the two previous
# regexes.
_re_pt_models = re.compile(r"(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
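# A quick illustration of the intended matches (class names here are just examples):
#     _re_tf_models.match("TFBertModel").groups()[0]     -> "Bert"
#     _re_flax_models.match("FlaxBertModel").groups()[0] -> "Bert"
#     _re_pt_models.match("BertModel").groups()[0]       -> "Bert"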


# Fill this with tuples (pipeline_tag, model_mapping, auto_class)
PIPELINE_TAGS_AND_AUTO_MODELS = [
    ("pretraining", "MODEL_FOR_PRETRAINING_MAPPING_NAMES", "AutoModelForPreTraining"),
    ("feature-extraction", "MODEL_MAPPING_NAMES", "AutoModel"),
    ("audio-classification", "MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES", "AutoModelForAudioClassification"),
    ("text-generation", "MODEL_FOR_CAUSAL_LM_MAPPING_NAMES", "AutoModelForCausalLM"),
    ("automatic-speech-recognition", "MODEL_FOR_CTC_MAPPING_NAMES", "AutoModelForCTC"),
    ("image-classification", "MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES", "AutoModelForImageClassification"),
    ("image-segmentation", "MODEL_FOR_IMAGE_SEGMENTATION_MAPPING_NAMES", "AutoModelForImageSegmentation"),
    ("fill-mask", "MODEL_FOR_MASKED_LM_MAPPING_NAMES", "AutoModelForMaskedLM"),
    ("object-detection", "MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES", "AutoModelForObjectDetection"),
    ("question-answering", "MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES", "AutoModelForQuestionAnswering"),
    ("text2text-generation", "MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES", "AutoModelForSeq2SeqLM"),
    ("text-classification", "MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES", "AutoModelForSequenceClassification"),
    ("automatic-speech-recognition", "MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES", "AutoModelForSpeechSeq2Seq"),
    (
        "table-question-answering",
        "MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING_NAMES",
        "AutoModelForTableQuestionAnswering",
    ),
    ("token-classification", "MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES", "AutoModelForTokenClassification"),
    ("multiple-choice", "MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES", "AutoModelForMultipleChoice"),
    (
        "next-sentence-prediction",
        "MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES",
        "AutoModelForNextSentencePrediction",
    ),
]
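# For instance (illustrative reading), the ("fill-mask", ...) tuple above means every model class listed in
# MODEL_FOR_MASKED_LM_MAPPING_NAMES gets the pipeline tag "fill-mask" and the auto class AutoModelForMaskedLM.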


# Thanks to https://stackoverflow.com/questions/29916065/how-to-do-camelcase-split-in-python
def camel_case_split(identifier):
    "Split a camelcased `identifier` into words."
    matches = re.finditer(".+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)", identifier)
    return [m.group(0) for m in matches]
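# For example (not executed):
#     camel_case_split("CamelCaseIdentifier") -> ["Camel", "Case", "Identifier"]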


def get_frameworks_table():
    """
    Generates a dataframe containing the supported auto classes for each model type, using the content of the auto
    modules.
    """
    # Dictionary mapping model types to config names.
    config_mapping_names = transformers_module.models.auto.configuration_auto.CONFIG_MAPPING_NAMES
    model_prefix_to_model_type = {
        config.replace("Config", ""): model_type for model_type, config in config_mapping_names.items()
    }

    # Dictionaries flagging if each model prefix has a backend in PT/TF/Flax.
    pt_models = collections.defaultdict(bool)
    tf_models = collections.defaultdict(bool)
    flax_models = collections.defaultdict(bool)

    # Let's loop through all the transformers objects (once) and find out which models are supported by a given
    # backend.
    for attr_name in dir(transformers_module):
        lookup_dict = None
        if _re_tf_models.match(attr_name) is not None:
            lookup_dict = tf_models
            attr_name = _re_tf_models.match(attr_name).groups()[0]
        elif _re_flax_models.match(attr_name) is not None:
            lookup_dict = flax_models
            attr_name = _re_flax_models.match(attr_name).groups()[0]
        elif _re_pt_models.match(attr_name) is not None:
            lookup_dict = pt_models
            attr_name = _re_pt_models.match(attr_name).groups()[0]

        if lookup_dict is not None:
            while len(attr_name) > 0:
                if attr_name in model_prefix_to_model_type:
                    lookup_dict[model_prefix_to_model_type[attr_name]] = True
                    break
                # Try again after removing the last word in the name
                attr_name = "".join(camel_case_split(attr_name)[:-1])

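    # Illustration of the trimming above: "BertLMHeadModel" yields the prefix "BertLMHead" via _re_pt_models,
    # which the loop trims to "BertLM", then "Bert", which matches the model type "bert".
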
    all_models = set(list(pt_models.keys()) + list(tf_models.keys()) + list(flax_models.keys()))
    all_models = list(all_models)
    all_models.sort()

    data = {"model_type": all_models}
    data["pytorch"] = [pt_models[t] for t in all_models]
    data["tensorflow"] = [tf_models[t] for t in all_models]
    data["flax"] = [flax_models[t] for t in all_models]

    # Now let's use the auto-mapping names to make sure each model type gets the right preprocessing class:
    # a processor first, then a tokenizer, then a feature extractor.
    processors = {}
    for t in all_models:
        if t in transformers_module.models.auto.processing_auto.PROCESSOR_MAPPING_NAMES:
            processors[t] = "AutoProcessor"
        elif t in transformers_module.models.auto.tokenization_auto.TOKENIZER_MAPPING_NAMES:
            processors[t] = "AutoTokenizer"
        elif t in transformers_module.models.auto.feature_extraction_auto.FEATURE_EXTRACTOR_MAPPING_NAMES:
            processors[t] = "AutoFeatureExtractor"
        else:
            # Default to AutoTokenizer if a model has nothing, for backward compatibility.
            processors[t] = "AutoTokenizer"

    data["processor"] = [processors[t] for t in all_models]

    return pd.DataFrame(data)
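
# For reference (illustrative values), a row of the resulting dataframe looks like:
#     model_type="bert", pytorch=True, tensorflow=True, flax=True, processor="AutoTokenizer"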


def update_pipeline_and_auto_class_table(table):
    """
    Update the table mapping model classes to (pipeline_tag, auto_class), keeping old keys even if the classes
    don't exist anymore.
    """
    auto_modules = [
        transformers_module.models.auto.modeling_auto,
        transformers_module.models.auto.modeling_tf_auto,
        transformers_module.models.auto.modeling_flax_auto,
    ]
    for pipeline_tag, model_mapping, auto_class in PIPELINE_TAGS_AND_AUTO_MODELS:
        model_mappings = [model_mapping, f"TF_{model_mapping}", f"FLAX_{model_mapping}"]
        auto_classes = [auto_class, f"TF{auto_class}", f"Flax{auto_class}"]
        # Loop through all three frameworks
        for module, cls, mapping in zip(auto_modules, auto_classes, model_mappings):
            # The type of pipeline may not exist in this framework
            if not hasattr(module, mapping):
                continue
            # First extract all model_names
            model_names = []
            for name in getattr(module, mapping).values():
                if isinstance(name, str):
                    model_names.append(name)
                else:
                    model_names.extend(list(name))

            # Add pipeline tag and auto model class for those models
            table.update({model_name: (pipeline_tag, cls) for model_name in model_names})

    return table
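
# For example (illustrative entry), after the update the table may contain:
#     "BertForMaskedLM": ("fill-mask", "AutoModelForMaskedLM")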


def update_metadata(token, commit_sha):
    """
    Update the metadata for the Transformers repo.
    """
    with tempfile.TemporaryDirectory() as tmp_dir:
        repo = Repository(
            tmp_dir, clone_from="huggingface/transformers-metadata", repo_type="dataset", use_auth_token=token
        )

        frameworks_table = get_frameworks_table()
        frameworks_dataset = Dataset.from_pandas(frameworks_table)
        frameworks_dataset.to_json(os.path.join(tmp_dir, "frameworks.json"))

        tags_dataset = Dataset.from_json(os.path.join(tmp_dir, "pipeline_tags.json"))
        table = {
            tags_dataset[i]["model_class"]: (tags_dataset[i]["pipeline_tag"], tags_dataset[i]["auto_class"])
            for i in range(len(tags_dataset))
        }
        table = update_pipeline_and_auto_class_table(table)

        # Sort the model classes to avoid nondeterministic ordering creating spurious update commits.
        model_classes = sorted(list(table.keys()))
        tags_table = pd.DataFrame(
            {
                "model_class": model_classes,
                "pipeline_tag": [table[m][0] for m in model_classes],
                "auto_class": [table[m][1] for m in model_classes],
            }
        )
        tags_dataset = Dataset.from_pandas(tags_table)
        tags_dataset.to_json(os.path.join(tmp_dir, "pipeline_tags.json"))

        if repo.is_repo_clean():
            print("Nothing to commit!")
        else:
            commit_message = f"Update with commit {commit_sha}" if commit_sha is not None else "Update"
            repo.push_to_hub(commit_message)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--token", type=str, help="The token to use to push to the transformers-metadata dataset.")
    parser.add_argument("--commit_sha", type=str, help="The sha of the commit going with this update.")
    args = parser.parse_args()

    update_metadata(args.token, args.commit_sha)
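
# Example invocation (illustrative; the token needs write access to the
# huggingface/transformers-metadata dataset):
#     python utils/update_metadata.py --token <hf_token> --commit_sha <sha>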