I was able to run the workflow provided by Comfy on MacOS - but this seems to be better, so I gave it a try. I'm now stuck with this error message from the LTXVGemmaCLIPModelLoader:
Could not load processor from /Users/user/ComfyUI/ltx2/models/text_encoders: expected value at line 1 column 1
Loading checkpoint shards: 100%|██████████████████████████████████████████████████████████| 2/2 [00:00<00:00, 17.78it/s]
!!! Exception during processing !!! expected value at line 1 column 1
Traceback (most recent call last):
File "/Users/user/ComfyUI/ltx2/execution.py", line 518, in execute
output_data, output_ui, has_subgraph, has_pending_tasks = await get_output_data(prompt_id, unique_id, obj, input_data_all, execution_block_cb=execution_block_cb, pre_execute_cb=pre_execute_cb, v3_data=v3_data)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/Users/user/ComfyUI/ltx2/execution.py", line 329, in get_output_data
return_values = await _async_map_node_over_list(prompt_id, unique_id, obj, input_data_all, obj.FUNCTION, allow_interrupt=True, execution_block_cb=execution_block_cb, pre_execute_cb=pre_execute_cb, v3_data=v3_data)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/Users/user/ComfyUI/ltx2/execution.py", line 303, in _async_map_node_over_list
await process_inputs(input_dict, i)
File "/Users/user/ComfyUI/ltx2/execution.py", line 291, in process_inputs
result = f(**inputs)
^^^^^^^^^^^
File "/Users/user/ComfyUI/ltx2/custom_nodes/ComfyUI-LTXVideo/gemma_encoder.py", line 599, in load_model
return (comfy.sd.CLIP(clip_target),)
^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/Users/user/ComfyUI/ltx2/comfy/sd.py", line 130, in __init__
self.tokenizer = tokenizer(embedding_directory=embedding_directory, tokenizer_data=tokenizer_data)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/Users/user/ComfyUI/ltx2/custom_nodes/ComfyUI-LTXVideo/gemma_encoder.py", line 488, in __init__
super().__init__(tokenizer_path, max_length=max_length)
File "/Users/user/ComfyUI/ltx2/custom_nodes/ComfyUI-LTXVideo/gemma_encoder.py", line 150, in __init__
self.tokenizer = AutoTokenizer.from_pretrained(
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/Users/user/ComfyUI/ltx2/venv/lib/python3.12/site-packages/transformers/models/auto/tokenization_auto.py", line 1156, in from_pretrained
return tokenizer_class.from_pretrained(pretrained_model_name_or_path, *inputs, **kwargs)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/Users/user/ComfyUI/ltx2/venv/lib/python3.12/site-packages/transformers/tokenization_utils_base.py", line 2113, in from_pretrained
return cls._from_pretrained(
^^^^^^^^^^^^^^^^^^^^^
File "/Users/user/ComfyUI/ltx2/venv/lib/python3.12/site-packages/transformers/tokenization_utils_base.py", line 2359, in _from_pretrained
tokenizer = cls(*init_inputs, **init_kwargs)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/Users/user/ComfyUI/ltx2/venv/lib/python3.12/site-packages/transformers/models/gemma/tokenization_gemma_fast.py", line 100, in __init__
super().__init__(
File "/Users/user/ComfyUI/ltx2/venv/lib/python3.12/site-packages/transformers/tokenization_utils_fast.py", line 117, in __init__
fast_tokenizer = TokenizerFast.from_file(fast_tokenizer_file)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Exception: expected value at line 1 column 1
I cloned the complete model folder, and I also tried with a different 4bit variant, same result.
I was able to run the workflow provided by Comfy on MacOS - but this seems to be better, so I gave it a try. I'm now stuck with this error message from the LTXVGemmaCLIPModelLoader:
Could not load processor from /Users/user/ComfyUI/ltx2/models/text_encoders: expected value at line 1 column 1
Loading checkpoint shards: 100%|██████████████████████████████████████████████████████████| 2/2 [00:00<00:00, 17.78it/s]
!!! Exception during processing !!! expected value at line 1 column 1
Traceback (most recent call last):
File "/Users/user/ComfyUI/ltx2/execution.py", line 518, in execute
output_data, output_ui, has_subgraph, has_pending_tasks = await get_output_data(prompt_id, unique_id, obj, input_data_all, execution_block_cb=execution_block_cb, pre_execute_cb=pre_execute_cb, v3_data=v3_data)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/Users/user/ComfyUI/ltx2/execution.py", line 329, in get_output_data
return_values = await _async_map_node_over_list(prompt_id, unique_id, obj, input_data_all, obj.FUNCTION, allow_interrupt=True, execution_block_cb=execution_block_cb, pre_execute_cb=pre_execute_cb, v3_data=v3_data)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/Users/user/ComfyUI/ltx2/execution.py", line 303, in _async_map_node_over_list
await process_inputs(input_dict, i)
File "/Users/user/ComfyUI/ltx2/execution.py", line 291, in process_inputs
result = f(**inputs)
^^^^^^^^^^^
File "/Users/user/ComfyUI/ltx2/custom_nodes/ComfyUI-LTXVideo/gemma_encoder.py", line 599, in load_model
return (comfy.sd.CLIP(clip_target),)
^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/Users/user/ComfyUI/ltx2/comfy/sd.py", line 130, in __init__
self.tokenizer = tokenizer(embedding_directory=embedding_directory, tokenizer_data=tokenizer_data)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/Users/user/ComfyUI/ltx2/custom_nodes/ComfyUI-LTXVideo/gemma_encoder.py", line 488, in __init__
super().__init__(tokenizer_path, max_length=max_length)
File "/Users/user/ComfyUI/ltx2/custom_nodes/ComfyUI-LTXVideo/gemma_encoder.py", line 150, in __init__
self.tokenizer = AutoTokenizer.from_pretrained(
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/Users/user/ComfyUI/ltx2/venv/lib/python3.12/site-packages/transformers/models/auto/tokenization_auto.py", line 1156, in from_pretrained
return tokenizer_class.from_pretrained(pretrained_model_name_or_path, *inputs, **kwargs)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/Users/user/ComfyUI/ltx2/venv/lib/python3.12/site-packages/transformers/tokenization_utils_base.py", line 2113, in from_pretrained
return cls._from_pretrained(
^^^^^^^^^^^^^^^^^^^^^
File "/Users/user/ComfyUI/ltx2/venv/lib/python3.12/site-packages/transformers/tokenization_utils_base.py", line 2359, in _from_pretrained
tokenizer = cls(*init_inputs, **init_kwargs)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/Users/user/ComfyUI/ltx2/venv/lib/python3.12/site-packages/transformers/models/gemma/tokenization_gemma_fast.py", line 100, in __init__
super().__init__(
File "/Users/user/ComfyUI/ltx2/venv/lib/python3.12/site-packages/transformers/tokenization_utils_fast.py", line 117, in __init__
fast_tokenizer = TokenizerFast.from_file(fast_tokenizer_file)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Exception: expected value at line 1 column 1
I cloned the complete model folder, and I also tried with a different 4bit variant, same result.