Update app.py
app.py
CHANGED
@@ -32,8 +32,8 @@ JS = """function () {
|
|
32 |
}
|
33 |
}"""
|
34 |
|
35 |
-
|
36 |
-
|
37 |
|
38 |
def enable_lora(lora_in, lora_add):
|
39 |
if not lora_in and not lora_add:
|
@@ -61,7 +61,7 @@ async def generate_image(
|
|
61 |
|
62 |
#generator = torch.Generator().manual_seed(seed)
|
63 |
|
64 |
-
image1 = await
|
65 |
prompt=text,
|
66 |
height=height,
|
67 |
width=width,
|
@@ -69,7 +69,7 @@ async def generate_image(
|
|
69 |
num_inference_steps=steps,
|
70 |
model=basemodel,
|
71 |
)
|
72 |
-
image2 = await
|
73 |
prompt=text,
|
74 |
height=height,
|
75 |
width=width,
|
|
|
32 |
}
|
33 |
}"""
|
34 |
|
35 |
+
client1 = AsyncInferenceClient()
|
36 |
+
client2 = AsyncInferenceClient()
|
37 |
|
38 |
def enable_lora(lora_in, lora_add):
|
39 |
if not lora_in and not lora_add:
|
|
|
61 |
|
62 |
#generator = torch.Generator().manual_seed(seed)
|
63 |
|
64 |
+
image1 = await client1.text_to_image(
|
65 |
prompt=text,
|
66 |
height=height,
|
67 |
width=width,
|
|
|
69 |
num_inference_steps=steps,
|
70 |
model=basemodel,
|
71 |
)
|
72 |
+
image2 = await client2.text_to_image(
|
73 |
prompt=text,
|
74 |
height=height,
|
75 |
width=width,
|