23 | 23 | """
24 | 24 |
25 | 25 | import transformers.models as hf_models
26 |    | -from .blip.modelings_blip import BlipWrapper
27 |    | -from .blip.modelings_blip_itm import BlipITMWrapper
28 | 26 | from .mlp.modelings_mlp import MLPModel, MLPForClassification
29 | 27 | from .gru.modelings_gru import GRUModel, GRULMHeadModel, GRUForClassification
30 | 28 | from .backpack_gpt2.modelings_backpack_gpt2 import BackpackGPT2LMHeadModel
31 | 29 |
   | 30 | +enable_blip = True
   | 31 | +try:
   | 32 | +    from .blip.modelings_blip import BlipWrapper
   | 33 | +    from .blip.modelings_blip_itm import BlipITMWrapper
   | 34 | +except Exception:
   | 35 | +    print("Failed to import blip model, skipping.")
   | 36 | +    enable_blip = False
   | 37 | +
32 | 38 | global type_to_module_mapping
33 | 39 | global type_to_dimension_mapping
34 | 40 | global output_to_subcomponent_fn_mapping

54 | 60 |     hf_models.gemma.modeling_gemma.GemmaForSequenceClassification: gemma_classifier_type_to_module_mapping,
55 | 61 |     hf_models.blip.modeling_blip.BlipForQuestionAnswering: blip_type_to_module_mapping,
56 | 62 |     hf_models.blip.modeling_blip.BlipForImageTextRetrieval: blip_itm_type_to_module_mapping,
57 |    | -    BlipWrapper: blip_wrapper_type_to_module_mapping,
58 |    | -    BlipITMWrapper: blip_itm_wrapper_type_to_module_mapping,
59 | 63 |     MLPModel: mlp_type_to_module_mapping,
60 | 64 |     MLPForClassification: mlp_classifier_type_to_module_mapping,
61 | 65 |     GRUModel: gru_type_to_module_mapping,

64 | 68 |     BackpackGPT2LMHeadModel: backpack_gpt2_lm_type_to_module_mapping,
65 | 69 |     # new model type goes here after defining the model files
66 | 70 | }
67 |    | -
   | 71 | +if enable_blip:
   | 72 | +    type_to_module_mapping[BlipWrapper] = blip_wrapper_type_to_module_mapping
   | 73 | +    type_to_module_mapping[BlipITMWrapper] = blip_itm_wrapper_type_to_module_mapping
68 | 74 |
69 | 75 | type_to_dimension_mapping = {
70 | 76 |     hf_models.gpt2.modeling_gpt2.GPT2Model: gpt2_type_to_dimension_mapping,

85 | 91 |     hf_models.gemma.modeling_gemma.GemmaForSequenceClassification: gemma_classifier_type_to_dimension_mapping,
86 | 92 |     hf_models.blip.modeling_blip.BlipForQuestionAnswering: blip_type_to_dimension_mapping,
87 | 93 |     hf_models.blip.modeling_blip.BlipForImageTextRetrieval: blip_itm_type_to_dimension_mapping,
88 |    | -    BlipWrapper: blip_wrapper_type_to_dimension_mapping,
89 |    | -    BlipITMWrapper: blip_itm_wrapper_type_to_dimension_mapping,
90 | 94 |     MLPModel: mlp_type_to_dimension_mapping,
91 | 95 |     MLPForClassification: mlp_classifier_type_to_dimension_mapping,
92 | 96 |     GRUModel: gru_type_to_dimension_mapping,

95 | 99 |     BackpackGPT2LMHeadModel: backpack_gpt2_lm_type_to_dimension_mapping,
96 | 100 |     # new model type goes here after defining the model files
97 | 101 | }
   | 102 | +if enable_blip:
   | 103 | +    type_to_dimension_mapping[BlipWrapper] = blip_wrapper_type_to_dimension_mapping
   | 104 | +    type_to_dimension_mapping[BlipITMWrapper] = blip_itm_wrapper_type_to_dimension_mapping
98 | 105 | #########################################################################
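
For context, here is a minimal, self-contained sketch of the optional-import pattern this diff introduces: guard the BLIP wrapper imports with try/except, record whether they loaded, and register the BLIP classes only when the import succeeded. The module name `blip_extras` and the `MODEL_REGISTRY` dict are illustrative stand-ins, not pyvene's actual API.

# Minimal sketch of the optional-dependency registration pattern (illustrative names only).
MODEL_REGISTRY = {}  # maps model classes to their mapping metadata

enable_blip = True
try:
    # Stand-in for `from .blip.modelings_blip import BlipWrapper`; any failure
    # (missing package, transformers API drift) simply disables BLIP support.
    from blip_extras import BlipWrapper, blip_wrapper_mapping  # hypothetical module
except Exception:
    print("Failed to import blip model, skipping.")
    enable_blip = False

if enable_blip:
    # Register BLIP only when the import succeeded, so every other model type
    # in the registry keeps working without the optional dependency installed.
    MODEL_REGISTRY[BlipWrapper] = blip_wrapper_mapping

Downstream code can then check `enable_blip` (or membership in the registry) instead of assuming the BLIP classes are always importable.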