[bug]: Model Manager fails to properly identify and load Prodigy optimized Flux.2 Klein LoRAs #9021

@NiceDragon8066

Description

Is there an existing issue for this problem?

  • I have searched the existing issues

Install method

Invoke's Launcher

Operating system

Windows

GPU vendor

Nvidia (CUDA)

GPU model

RTX 5090 Laptop GPU

GPU VRAM

24GB

Version number

6.12.0

Browser

Microsoft Edge

System Information

{
"version": "6.12.0",
"dependencies": {
"absl-py" : "2.4.0",
"accelerate" : "1.13.0",
"annotated-types" : "0.7.0",
"anyio" : "4.12.1",
"attrs" : "26.1.0",
"bcrypt" : "3.2.2",
"bidict" : "0.23.1",
"bitsandbytes" : "0.49.2",
"blake3" : "1.0.8",
"certifi" : "2022.12.7",
"cffi" : "2.0.0",
"charset-normalizer" : "2.1.1",
"click" : "8.3.1",
"colorama" : "0.4.6",
"coloredlogs" : "15.0.1",
"compel" : "2.1.1",
"contourpy" : "1.3.3",
"cryptography" : "46.0.5",
"CUDA" : "12.8",
"cycler" : "0.12.1",
"Deprecated" : "1.3.1",
"diffusers" : "0.36.0",
"dnspython" : "2.8.0",
"dynamicprompts" : "0.31.0",
"ecdsa" : "0.19.1",
"einops" : "0.8.2",
"email-validator" : "2.3.0",
"fastapi" : "0.118.3",
"fastapi-events" : "0.12.2",
"filelock" : "3.20.0",
"flatbuffers" : "25.12.19",
"fonttools" : "4.62.1",
"fsspec" : "2025.12.0",
"gguf" : "0.18.0",
"h11" : "0.16.0",
"httpcore" : "1.0.9",
"httptools" : "0.7.1",
"httpx" : "0.28.1",
"huggingface_hub" : "0.36.2",
"humanfriendly" : "10.0",
"idna" : "3.4",
"importlib_metadata" : "7.1.0",
"InvokeAI" : "6.12.0",
"jax" : "0.7.1",
"jaxlib" : "0.7.1",
"Jinja2" : "3.1.6",
"kiwisolver" : "1.5.0",
"MarkupSafe" : "3.0.2",
"matplotlib" : "3.10.8",
"mediapipe" : "0.10.14",
"ml_dtypes" : "0.5.4",
"mpmath" : "1.3.0",
"networkx" : "3.6.1",
"numpy" : "1.26.4",
"onnx" : "1.16.1",
"onnxruntime" : "1.19.2",
"opencv-contrib-python": "4.11.0.86",
"opt_einsum" : "3.4.0",
"packaging" : "24.1",
"passlib" : "1.7.4",
"picklescan" : "1.0.4",
"pillow" : "12.0.0",
"prompt_toolkit" : "3.0.52",
"protobuf" : "4.25.8",
"psutil" : "7.2.2",
"pyasn1" : "0.6.3",
"pycparser" : "3.0",
"pydantic" : "2.12.5",
"pydantic-settings" : "2.13.1",
"pydantic_core" : "2.41.5",
"pyparsing" : "3.3.2",
"PyPatchMatch" : "1.0.2",
"pyreadline3" : "3.5.4",
"python-dateutil" : "2.9.0.post0",
"python-dotenv" : "1.2.2",
"python-engineio" : "4.13.1",
"python-jose" : "3.5.0",
"python-multipart" : "0.0.22",
"python-socketio" : "5.16.1",
"PyWavelets" : "1.9.0",
"PyYAML" : "6.0.3",
"regex" : "2026.2.28",
"requests" : "2.28.1",
"rsa" : "4.9.1",
"safetensors" : "0.7.0",
"scipy" : "1.17.1",
"semver" : "3.0.4",
"sentencepiece" : "0.2.0",
"setuptools" : "70.2.0",
"simple-websocket" : "1.1.0",
"six" : "1.17.0",
"sounddevice" : "0.5.5",
"spandrel" : "0.4.2",
"starlette" : "0.48.0",
"sympy" : "1.14.0",
"tokenizers" : "0.22.2",
"torch" : "2.7.1+cu128",
"torchsde" : "0.2.6",
"torchvision" : "0.22.1+cu128",
"tqdm" : "4.66.5",
"trampoline" : "0.1.2",
"transformers" : "4.57.6",
"typing-inspection" : "0.4.2",
"typing_extensions" : "4.15.0",
"urllib3" : "1.26.13",
"uvicorn" : "0.42.0",
"watchfiles" : "1.1.1",
"wcwidth" : "0.6.0",
"websockets" : "16.0",
"wrapt" : "2.1.2",
"wsproto" : "1.3.2",
"zipp" : "3.19.2"
},
"config": {
"schema_version": "4.0.2",
"legacy_models_yaml_path": null,
"host": "127.0.0.1",
"port": 9090,
"allow_origins": [],
"allow_credentials": true,
"allow_methods": [""],
"allow_headers": ["
"],
"ssl_certfile": null,
"ssl_keyfile": null,
"log_tokenization": false,
"patchmatch": true,
"models_dir": "models",
"convert_cache_dir": "models\.convert_cache",
"download_cache_dir": "models\.download_cache",
"legacy_conf_dir": "configs",
"db_dir": "databases",
"outputs_dir": "outputs",
"custom_nodes_dir": "nodes",
"style_presets_dir": "style_presets",
"workflow_thumbnails_dir": "workflow_thumbnails",
"log_handlers": ["console"],
"log_format": "color",
"log_level": "info",
"log_sql": false,
"log_level_network": "warning",
"use_memory_db": false,
"dev_reload": false,
"profile_graphs": false,
"profile_prefix": null,
"profiles_dir": "profiles",
"max_cache_ram_gb": null,
"max_cache_vram_gb": null,
"log_memory_usage": false,
"model_cache_keep_alive_min": 0,
"device_working_mem_gb": 3,
"enable_partial_loading": true,
"keep_ram_copy_of_weights": true,
"ram": null,
"vram": null,
"lazy_offload": true,
"pytorch_cuda_alloc_conf": "backend:cudaMallocAsync",
"device": "auto",
"precision": "auto",
"sequential_guidance": false,
"attention_type": "auto",
"attention_slice_size": "auto",
"force_tiled_decode": false,
"pil_compress_level": 1,
"max_queue_size": 10000,
"clear_queue_on_startup": false,
"allow_nodes": null,
"deny_nodes": null,
"node_cache_size": 512,
"hashing_algorithm": "blake3_single",
"remote_api_tokens": [ {"url_regex": "huggingface.co", "token": "REDACTED"} ],
"scan_models_on_startup": false,
"unsafe_disable_picklescan": false,
"allow_unknown_models": true,
"multiuser": false,
"strict_password_checking": false
},
"set_config_fields": [
"enable_partial_loading", "legacy_models_yaml_path", "pytorch_cuda_alloc_conf", "remote_api_tokens"
]
}

What happened

I imported about two dozen Flux.2 Klein 9B LoRAs downloaded from Hugging Face into InvokeAI. The Model Manager imported roughly half of them successfully; for the rest, the import failed with an "Unable to Identify" message, and the files were added to the models list with the model type "Unknown", which makes them unusable. I noticed that the failures only affect LoRAs trained with the Prodigy optimizer: all of the files are in safetensors format, but only the _prodigy.safetensors files failed to import or load. I am not sure of the cause; it may be an incorrect read of, or a conflict with, the file's header and metadata. Other platforms (Wan2GP, ComfyUI, Forge Neo) import these Prodigy-trained LoRAs without any problem, so the InvokeAI Model Manager may be overly restrictive or may simply not recognize the layout these files use. I would greatly appreciate a fix.
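For anyone triaging this, below is a minimal diagnostic sketch (not InvokeAI code) that dumps a LoRA's safetensors header metadata and tensor key prefixes, which is the kind of information model-identification logic typically matches against. The filename is a placeholder, and the idea that Prodigy-trained files differ in their metadata or key layout is my assumption, not something confirmed by the source.

from safetensors import safe_open

PATH = "flux2_klein_prodigy.safetensors"  # hypothetical path to a failing LoRA

with safe_open(PATH, framework="pt") as f:
    # Trainer-written metadata lives in the "__metadata__" block of the
    # safetensors header; optimizer details (e.g. Prodigy settings) would
    # usually end up here rather than in the tensor keys themselves.
    print("metadata:", f.metadata())

    # Key prefixes are what format-detection code generally keys off of;
    # comparing these between a failing and a working LoRA should show
    # what the Model Manager trips over.
    print("key prefixes:", sorted({k.split(".")[0] for k in f.keys()}))

    # Full key list with tensor shapes, for a closer look.
    for key in f.keys():
        print(key, f.get_slice(key).get_shape())

Running this on one _prodigy.safetensors file and one file that imports cleanly, then diffing the two outputs, should narrow down whether it is the header metadata or the key naming that the probe rejects.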

What you expected to happen

I expect Prodigy-trained Flux.2 Klein 9B LoRAs to import and load in the Model Manager the same as any other LoRA.

How to reproduce the problem

Add a Prodigy-trained Flux.2 Klein 9B LoRA in the Model Manager using the "URL/Local Path" option.

Additional context

I have only run into this issue with the Flux.2 Klein 9B model; I don't know whether it affects other model families.

Discord username

No response

Labels

bug (Something isn't working)