import io
import time

import gradio as gr
import requests
from PIL import Image
url = "https://api.prodia.com/v1/job"

# Default model used for the start-up test request below.
payload = {"model": "timeless-1.0.ckpt [7c4971d4]"}

headers = {
    "accept": "application/json",
    "content-type": "application/json",
    "X-Prodia-Key": "69e66898-010d-4cd1-9e22-090f73ad007b",
}

# Models exposed in the UI; "url" holds the Prodia checkpoint identifier.
models = [
    {"name": "Timeless", "url": "timeless-1.0.ckpt [7c4971d4]"},
    {"name": "Dreamlike-diffusion-2.0", "url": "dreamlike-diffusion-2.0.safetensors [fdcf65e7]"},
    {"name": "Deliberate_v2", "url": "deliberate_v2.safetensors [10ec4b29]"},
    {"name": "Anything-v4.5-pruned", "url": "anything-v4.5-pruned.ckpt [65745d25]"},
]

# Model currently selected in the dropdown; defaults to the first entry.
current_model = models[0]

# Quick start-up check that the API key and endpoint are reachable.
response = requests.post(url, json=payload, headers=headers)
print(response.text)

def generate(prompt):
    # Submit a text-to-image job for the currently selected model.
    job_payload = {"model": current_model["url"], "prompt": prompt}
    job = requests.post(url, json=job_payload, headers=headers).json()

    # Poll the job until it finishes; a succeeded job carries an "imageUrl".
    while True:
        job = requests.get(f"{url}/{job['job']}", headers=headers).json()
        if job["status"] == "succeeded":
            image_bytes = requests.get(job["imageUrl"]).content
            return Image.open(io.BytesIO(image_bytes))
        if job["status"] == "failed":
            raise gr.Error("Prodia job failed")
        time.sleep(1)

def set_model(current_model_index):
    # Track the dropdown selection globally so generate() uses the right model.
    global current_model
    current_model = models[current_model_index]
    return gr.update(value=current_model["name"])

with gr.Blocks() as demo:
    gr.HTML()
    with gr.Row():
        input_text = gr.Textbox(label="Input Prompt", placeholder="", lines=1)
        # Model selection dropdown; type="index" passes the chosen index to set_model.
        model_name1 = gr.Dropdown(
            label="Choose Model",
            choices=[m["name"] for m in models],
            type="index",
            value=current_model["name"],
            interactive=True,
        )
    generate_btn = gr.Button("Generate")
    output_image = gr.Image(label="Generated Image")

    # Wire the UI: keep current_model in sync and run a job on button click.
    model_name1.change(set_model, inputs=model_name1, outputs=model_name1)
    generate_btn.click(generate, inputs=input_text, outputs=output_image)

if __name__ == "__main__":
    demo.launch()