Commit 1b2f76f

Merge pull request #180 from stanfordnlp/zen/blip_fix
[Minor] Updating the import to bypass blip in case of failure
2 parents d083c82 + 54ae03e commit 1b2f76f

File tree

2 files changed: +15 -8 lines changed


pyvene/models/intervenable_modelcard.py

+14-7
@@ -23,12 +23,18 @@
 """
 
 import transformers.models as hf_models
-from .blip.modelings_blip import BlipWrapper
-from .blip.modelings_blip_itm import BlipITMWrapper
 from .mlp.modelings_mlp import MLPModel, MLPForClassification
 from .gru.modelings_gru import GRUModel, GRULMHeadModel, GRUForClassification
 from .backpack_gpt2.modelings_backpack_gpt2 import BackpackGPT2LMHeadModel
 
+enable_blip = True
+try:
+    from .blip.modelings_blip import BlipWrapper
+    from .blip.modelings_blip_itm import BlipITMWrapper
+except:
+    print("Failed to import blip model, skipping.")
+    enable_blip = False
+
 global type_to_module_mapping
 global type_to_dimension_mapping
 global output_to_subcomponent_fn_mapping
@@ -54,8 +60,6 @@
     hf_models.gemma.modeling_gemma.GemmaForSequenceClassification: gemma_classifier_type_to_module_mapping,
     hf_models.blip.modeling_blip.BlipForQuestionAnswering: blip_type_to_module_mapping,
     hf_models.blip.modeling_blip.BlipForImageTextRetrieval: blip_itm_type_to_module_mapping,
-    BlipWrapper: blip_wrapper_type_to_module_mapping,
-    BlipITMWrapper: blip_itm_wrapper_type_to_module_mapping,
     MLPModel: mlp_type_to_module_mapping,
     MLPForClassification: mlp_classifier_type_to_module_mapping,
     GRUModel: gru_type_to_module_mapping,
@@ -64,7 +68,9 @@
     BackpackGPT2LMHeadModel: backpack_gpt2_lm_type_to_module_mapping,
     # new model type goes here after defining the model files
 }
-
+if enable_blip:
+    type_to_module_mapping[BlipWrapper] = blip_wrapper_type_to_module_mapping
+    type_to_module_mapping[BlipITMWrapper] = blip_wrapper_type_to_module_mapping
 
 type_to_dimension_mapping = {
     hf_models.gpt2.modeling_gpt2.GPT2Model: gpt2_type_to_dimension_mapping,
@@ -85,8 +91,6 @@
     hf_models.gemma.modeling_gemma.GemmaForSequenceClassification: gemma_classifier_type_to_dimension_mapping,
     hf_models.blip.modeling_blip.BlipForQuestionAnswering: blip_type_to_dimension_mapping,
     hf_models.blip.modeling_blip.BlipForImageTextRetrieval: blip_itm_type_to_dimension_mapping,
-    BlipWrapper: blip_wrapper_type_to_dimension_mapping,
-    BlipITMWrapper: blip_itm_wrapper_type_to_dimension_mapping,
     MLPModel: mlp_type_to_dimension_mapping,
     MLPForClassification: mlp_classifier_type_to_dimension_mapping,
     GRUModel: gru_type_to_dimension_mapping,
@@ -95,4 +99,7 @@
     BackpackGPT2LMHeadModel: backpack_gpt2_lm_type_to_dimension_mapping,
     # new model type goes here after defining the model files
 }
+if enable_blip:
+    type_to_dimension_mapping[BlipWrapper] = blip_wrapper_type_to_dimension_mapping
+    type_to_dimension_mapping[BlipITMWrapper] = blip_itm_wrapper_type_to_dimension_mapping
 #########################################################################

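The change above is a standard optional-dependency guard: attempt the import, remember whether it succeeded, and register the related entries only when it did. A minimal standalone sketch of the same pattern follows; "optional_extra" and the registry are hypothetical placeholders, not part of pyvene, and the sketch uses "except ImportError" where the commit itself uses a bare "except".

# Minimal sketch of the optional-import guard used in the diff above.
# "optional_extra" and "registry" are illustrative names only.
enable_extra = True
try:
    import optional_extra
except ImportError:
    print("Failed to import optional_extra, skipping.")
    enable_extra = False

registry = {"core_feature": "always registered"}
if enable_extra:
    # Entries that need the optional package are added only when it imported cleanly.
    registry["extra_feature"] = optional_extra

This keeps the module importable on systems where the optional package (here, the BLIP modeling code) cannot be loaded, at the cost of silently omitting those registry entries.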
setup.py

+1-1
@@ -10,7 +10,7 @@
 
 setup(
     name="pyvene",
-    version="0.1.3",
+    version="0.1.4",
     description="Use Activation Intervention to Interpret Causal Mechanism of Model",
     long_description=long_description,
     long_description_content_type='text/markdown',

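The only other change is the version bump from 0.1.3 to 0.1.4. A small sketch for verifying which release is installed, assuming pyvene was installed under the same distribution name given in the name= field above:

# Print the installed pyvene release using the standard library.
from importlib.metadata import version

print(version("pyvene"))  # expected to show "0.1.4" once this commit is released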