4 changes: 2 additions & 2 deletions colpali_engine/__init__.py
@@ -16,7 +16,7 @@
ColQwen2,
ColQwen2_5,
ColQwen2_5_Processor,
# ColQwen2_5Omni,
# ColQwen2_5OmniProcessor,
ColQwen2_5Omni,
ColQwen2_5OmniProcessor,
ColQwen2Processor,
)
1 change: 1 addition & 0 deletions colpali_engine/models/__init__.py
@@ -3,3 +3,4 @@
from .paligemma import BiPali, BiPaliProcessor, BiPaliProj, ColPali, ColPaliProcessor
from .qwen2 import BiQwen2, BiQwen2Processor, ColQwen2, ColQwen2Processor
from .qwen2_5 import BiQwen2_5, BiQwen2_5_Processor, ColQwen2_5, ColQwen2_5_Processor
from .qwen_omni import ColQwen2_5Omni, ColQwen2_5OmniProcessor
1 change: 1 addition & 0 deletions colpali_engine/models/qwen_omni/__init__.py
@@ -0,0 +1 @@
from .colqwen_omni import ColQwen2_5Omni, ColQwen2_5OmniProcessor
2 changes: 2 additions & 0 deletions colpali_engine/models/qwen_omni/colqwen_omni/__init__.py
@@ -0,0 +1,2 @@
from .modeling_colqwen_omni import ColQwen2_5Omni
from .processing_colqwen_omni import ColQwen2_5OmniProcessor
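
With these exports in place, the omni classes can be imported directly from colpali_engine.models (and from the top-level colpali_engine package). A minimal loading sketch follows; the checkpoint id is an illustrative placeholder, not something defined by this PR:

import torch
from colpali_engine.models import ColQwen2_5Omni, ColQwen2_5OmniProcessor

# NOTE: the checkpoint id below is an assumed placeholder, not taken from this PR.
model = ColQwen2_5Omni.from_pretrained(
    "vidore/colqwen-omni-v0.1",
    torch_dtype=torch.bfloat16,
    device_map="auto",
).eval()
processor = ColQwen2_5OmniProcessor.from_pretrained("vidore/colqwen-omni-v0.1")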
colpali_engine/models/qwen_omni/colqwen_omni/modeling_colqwen_omni.py
@@ -21,9 +21,17 @@ def __init__(self, config: Qwen2_5OmniThinkerConfig, mask_non_image_embeddings:
self.lm_head = nn.Identity() # Disable the original lm_head
self.padding_side = "left"
self.mask_non_image_embeddings = mask_non_image_embeddings
self.lm_head = nn.Identity() # Disable the original lm_head
self.post_init()

def get_output_embeddings(self) -> None:
"""
Transformers >= 4.54.0 fails during resize_token_embeddings() because of a new
get_output_embeddings() implementation: it used to return None unless overridden,
but now tries harder to return *something*, and the change was not flagged as
breaking. The responsible PR is https://github.com/huggingface/transformers/pull/39339.
Returning None explicitly restores the previous behaviour, which is safe here since
lm_head is replaced by nn.Identity().
"""
return None

def forward(self, *args, **kwargs) -> torch.Tensor:
# # Handle the custom "pixel_values" input obtained with `ColQwen2Processor` through unpadding
# if "pixel_values" in kwargs:
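
For reference, a sketch of the call path that the get_output_embeddings() override protects, assuming `model` is a ColQwen2_5Omni instance (e.g. loaded as in the earlier snippet):

# On Transformers >= 4.54.0, resize_token_embeddings() also resizes whatever
# get_output_embeddings() returns. With lm_head replaced by nn.Identity(), the
# override above returns None, so only the input embeddings are resized and the
# call no longer fails.
new_vocab_size = model.get_input_embeddings().num_embeddings + 1  # e.g. one added special token
model.resize_token_embeddings(new_vocab_size)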