Skip to content

Commit 288c0cc

Browse files
committed
add check for state_dict, required to load TIs
1 parent 1cf14ab commit 288c0cc

File tree

1 file changed

+3
-2
lines changed

1 file changed

+3
-2
lines changed

invokeai/backend/model_manager/load/model_cache/cached_model/cached_model_only_full_load.py

Lines changed: 3 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -78,7 +78,8 @@ def full_load_to_vram(self) -> int:
7878
new_state_dict[k] = v.to(self._compute_device, copy=True)
7979
self._model.load_state_dict(new_state_dict, assign=True)
8080

81-
check_for_gguf = self._model.state_dict().get("img_in.weight")
81+
82+
check_for_gguf = hasattr(self._model, 'state_dict') and self._model.state_dict().get("img_in.weight")
8283
if isinstance(check_for_gguf, GGMLTensor):
8384
old_value = torch.__future__.get_overwrite_module_params_on_conversion()
8485
torch.__future__.set_overwrite_module_params_on_conversion(True)
@@ -102,7 +103,7 @@ def full_unload_from_vram(self) -> int:
102103
if self._cpu_state_dict is not None:
103104
self._model.load_state_dict(self._cpu_state_dict, assign=True)
104105

105-
check_for_gguf = self._model.state_dict().get("img_in.weight")
106+
check_for_gguf = hasattr(self._model, 'state_dict') and self._model.state_dict().get("img_in.weight")
106107
if isinstance(check_for_gguf, GGMLTensor):
107108
old_value = torch.__future__.get_overwrite_module_params_on_conversion()
108109
torch.__future__.set_overwrite_module_params_on_conversion(True)

0 commit comments

Comments
 (0)