import io
import os

import fal_client
import requests
from PIL import Image

class FalModel:
    def __init__(self, model_name, model_type):
        self.model_name = model_name
        self.model_type = model_type
        # fal_client reads its credentials from the FAL_KEY environment variable,
        # so copy the key stored under FalAPI into it.
        os.environ['FAL_KEY'] = os.environ['FalAPI']
    def __call__(self, *args, **kwargs):
        if self.model_type == "text2image":
            assert "prompt" in kwargs, "prompt is required for text2image model"
            handler = fal_client.submit(
                f"fal-ai/{self.model_name}",
                arguments={
                    "prompt": kwargs["prompt"]
                },
            )
            # Stream queue events and print logs while the request is running.
            for event in handler.iter_events(with_logs=True):
                if isinstance(event, fal_client.InProgress):
                    print('Request in progress')
                    print(event.logs)
            result = handler.get()
            # Download the first generated image and return it as a PIL Image.
            result_url = result['images'][0]['url']
            response = requests.get(result_url)
            result = Image.open(io.BytesIO(response.content))
            return result
elif self.model_type == "image2image":
raise NotImplementedError("image2image model is not implemented yet")
# assert "image" in kwargs or "image_url" in kwargs, "image or image_url is required for image2image model"
# if "image" in kwargs:
# image_url = None
# pass
# handler = fal_client.submit(
# f"fal-ai/{self.model_name}",
# arguments={
# "image_url": image_url
# },
# )
#
# for event in handler.iter_events():
# if isinstance(event, fal_client.InProgress):
# print('Request in progress')
# print(event.logs)
#
# result = handler.get()
# return result
elif self.model_type == "text2video":
assert "prompt" in kwargs, "prompt is required for text2video model"
if self.model_name == 'AnimateDiff':
fal_model_name = 'fast-animatediff/text-to-video'
elif self.model_name == 'AnimateDiffTurbo':
fal_model_name = 'fast-animatediff/turbo/text-to-video'
else:
raise NotImplementedError(f"text2video model of {self.model_name} in fal is not implemented yet")
handler = fal_client.submit(
f"fal-ai/{fal_model_name}",
arguments={
"prompt": kwargs["prompt"]
},
)
for event in handler.iter_events(with_logs=True):
if isinstance(event, fal_client.InProgress):
print('Request in progress')
print(event.logs)
result = handler.get()
print("result video: ====")
print(result)
result_url = result['video']['url']
return result_url
        else:
            raise ValueError("model_type must be text2image, image2image, or text2video")

def load_fal_model(model_name, model_type):
    return FalModel(model_name, model_type)
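

# ---------------------------------------------------------------------------
# Minimal usage sketch (illustrative, not part of the original module). It
# assumes the FalAPI environment variable holds a valid fal.ai API key and
# that the referenced model names are available on fal.ai; "fast-sdxl" below
# is a hypothetical example name, while "AnimateDiff" comes from the mapping
# in FalModel.__call__ above.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    # Text-to-image: returns a PIL.Image.Image.
    t2i_model = load_fal_model("fast-sdxl", "text2image")  # hypothetical model name
    image = t2i_model(prompt="a watercolor painting of a lighthouse at dawn")
    image.save("output.png")

    # Text-to-video: returns the URL of the generated video.
    t2v_model = load_fal_model("AnimateDiff", "text2video")
    video_url = t2v_model(prompt="a timelapse of clouds over a mountain range")
    print(video_url)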