Update worker_runpod.py
worker_runpod.py  +3 -2
@@ -13,6 +13,8 @@ with torch.inference_mode():
     vae = AutoencoderKLCogVideoX.from_pretrained(model_id, subfolder="vae", torch_dtype=torch.float16)
     tokenizer = T5Tokenizer.from_pretrained(model_id, subfolder="tokenizer")
     pipe = CogVideoXImageToVideoPipeline.from_pretrained(model_id, tokenizer=tokenizer, text_encoder=text_encoder, transformer=transformer, vae=vae, torch_dtype=torch.float16).to("cuda")
+    lora_path = "/content/shirtlift.safetensors"
+    lora_weight = 1.0
     pipe = merge_lora(pipe, lora_path, lora_weight)
     # pipe.enable_model_cpu_offload()

@@ -30,8 +32,7 @@ def download_file(url, save_dir, file_name):
 @torch.inference_mode()
 def generate(input):
     values = input["input"]
-
-    lora_weight = 1.0
+
     input_image = values['input_image_check']
     input_image = download_file(url=input_image, save_dir='/content/input', file_name='input_image_tost')
     prompt = values['prompt']