Issues running the main script.
Hi, I'm having issues running this on my own. Does anyone have any tips or ideas? I'm essentially running it 1:1 as it is in the Gradio app, but somehow it fails.
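For context, this is roughly what my Modal function in inpaint_flux.py does. It is a simplified sketch, not the exact script: the class names come from the custom files referenced in the traceback (controlnet_flux.py, transformer_flux.py, pipeline_flux_controlnet_inpaint.py), while the model IDs, prompt, and argument values are placeholders/assumptions and may not match the Space exactly.

```python
# Rough sketch of the Modal function body (inpaint_flux.py).
# Class names follow the custom fluxinpaint files from the traceback;
# model IDs, prompt, and argument values are placeholders, not the Space's code.
import torch
from diffusers.utils import load_image

# Assumes /root/fluxinpaint is importable as a package.
from fluxinpaint.controlnet_flux import FluxControlNetModel
from fluxinpaint.transformer_flux import FluxTransformer2DModel
from fluxinpaint.pipeline_flux_controlnet_inpaint import FluxControlNetInpaintingPipeline

controlnet = FluxControlNetModel.from_pretrained(
    "alimama-creative/FLUX.1-dev-Controlnet-Inpainting-Beta",  # assumed checkpoint
    torch_dtype=torch.bfloat16,
)
transformer = FluxTransformer2DModel.from_pretrained(
    "black-forest-labs/FLUX.1-dev", subfolder="transformer", torch_dtype=torch.bfloat16
)
pipe = FluxControlNetInpaintingPipeline.from_pretrained(
    "black-forest-labs/FLUX.1-dev",
    controlnet=controlnet,
    transformer=transformer,
    torch_dtype=torch.bfloat16,
).to("cuda")

size = (768, 768)  # matches the (768, 768) printed below
image = load_image("input.png").convert("RGB").resize(size)
mask = load_image("mask.png").convert("RGB").resize(size)

result = pipe(
    prompt="a red sofa in a bright living room",  # placeholder prompt
    height=size[1],
    width=size[0],
    control_image=image,
    control_mask=mask,
    num_inference_steps=28,  # matches the 0/28 progress bar below
    generator=torch.Generator(device="cuda").manual_seed(0),
    controlnet_conditioning_scale=0.9,
    guidance_scale=3.5,
).images[0]
result.save("output.png")
```

The run prints the following and then fails: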
Inpainting image
(768, 768)
(768, 768)
0%| | 0/28 [00:00<?, ?it/s]
Traceback (most recent call last):
File "/pkg/modal/_container_io_manager.py", line 774, in handle_input_exception
yield
File "/pkg/modal/_container_entrypoint.py", line 394, in run_input_sync
res = io_context.call_finalized_function()
File "/pkg/modal/_container_io_manager.py", line 195, in call_finalized_function
res = self.finalized_function.callable(*args, **kwargs)
File "/root/inpaint_flux.py", line 94, in inpaint_flux
result = pipe(
File "/usr/local/lib/python3.10/site-packages/torch/utils/_contextlib.py", line 116, in decorate_context
return func(*args, **kwargs)
File "/root/fluxinpaint/pipeline_flux_controlnet_inpaint.py", line 959, in call
) = self.controlnet(
File "/usr/local/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1736, in _wrapped_call_impl
return self._call_impl(*args, **kwargs)
File "/usr/local/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1747, in _call_impl
return forward_call(*args, **kwargs)
File "/root/fluxinpaint/controlnet_flux.py", line 329, in forward
encoder_hidden_states, hidden_states = block(
File "/usr/local/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1736, in _wrapped_call_impl
return self._call_impl(*args, **kwargs)
File "/usr/local/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1747, in _call_impl
return forward_call(*args, **kwargs)
File "/root/fluxinpaint/transformer_flux.py", line 213, in forward
attn_output, context_attn_output = self.attn(
File "/usr/local/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1736, in _wrapped_call_impl
return self._call_impl(*args, **kwargs)
File "/usr/local/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1747, in _call_impl
return forward_call(*args, **kwargs)
File "/usr/local/lib/python3.10/site-packages/diffusers/models/attention_processor.py", line 495, in forward
return self.processor(
File "/usr/local/lib/python3.10/site-packages/diffusers/models/attention_processor.py", line 1778, in call
query = apply_rotary_emb(query, image_rotary_emb)
File "/usr/local/lib/python3.10/site-packages/diffusers/models/embeddings.py", line 734, in apply_rotary_emb
out = (x.float() * cos + x_rotated.float() * sin).to(x.dtype)
RuntimeError: The size of tensor a (128) must match the size of tensor b (2) at non-singleton dimension 6