updates to UI
Files changed:
- main.py +4 -5
- static/index.html +19 -1
- static/script.js +5 -3
- static/style.css +8 -3
main.py
CHANGED

@@ -18,11 +18,10 @@ print(f"Is CUDA available: {torch.cuda.is_available()}")
app = FastAPI()

@app.get("/generate")
-def generate_image(prompt):
+def generate_image(prompt, inference_steps, model):
    print(f"Is CUDA available: {torch.cuda.is_available()}")

    #model_id = "CompVis/stable-diffusion-v1-4" #stabilityai/stable-diffusion-2-1
-   model_id = "runwayml/stable-diffusion-v1-5"

    # Use the DPMSolverMultistepScheduler (DPM-Solver++) scheduler here instead
    #pipe = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.float16)
@@ -31,12 +30,12 @@ def generate_image(prompt):
    #pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
    #pipe = pipe.to("cuda")

-   pipeline = DiffusionPipeline.from_pretrained(
+   pipeline = DiffusionPipeline.from_pretrained(model)
    #pipeline = pipeline.to("cuda")
    generator = torch.Generator("cpu").manual_seed(0)
    pipeline.scheduler = DPMSolverMultistepScheduler.from_config(pipeline.scheduler.config)
    #image = pipeline(prompt, generator=generator).images[0]
-   image = pipeline(prompt, generator=generator, num_inference_steps=
+   image = pipeline(prompt, generator=generator, num_inference_steps=inference_steps).images[0]


    #prompt = "a photo of an astronaut riding a horse on mars"
@@ -53,7 +52,7 @@ def generate_image(prompt):
        {
            "label": "com.truepic.custom.ai",
            "data": {
-               "model_name":
+               "model_name": model,
                "model_version": "1.0",
                "prompt": prompt
            }
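As committed, `inference_steps` and `model` are untyped query parameters, so FastAPI passes them to the handler as strings, while `num_inference_steps` in diffusers generally expects an integer. Below is a minimal sketch of the same endpoint with type annotations so the conversion happens at the framework level. This is not the Space's actual code: the default values and the output path are assumptions, the Truepic signing step is omitted, and the `output` return key is only inferred from `inferJson.output` in static/script.js.

```python
from fastapi import FastAPI
import torch
from diffusers import DiffusionPipeline, DPMSolverMultistepScheduler

app = FastAPI()

@app.get("/generate")
def generate_image(
    prompt: str,
    inference_steps: int = 20,                      # assumed default; the commit leaves the parameter untyped
    model: str = "runwayml/stable-diffusion-v1-5",  # assumed default, matching the first <option> in the UI
):
    # Load the selected checkpoint and swap in the DPM-Solver++ scheduler,
    # as in the commit (CPU generator with a fixed seed for reproducibility).
    pipeline = DiffusionPipeline.from_pretrained(model)
    pipeline.scheduler = DPMSolverMultistepScheduler.from_config(pipeline.scheduler.config)
    generator = torch.Generator("cpu").manual_seed(0)

    image = pipeline(prompt, generator=generator, num_inference_steps=inference_steps).images[0]
    image.save("output.jpg")  # path is a guess based on <img src="/output.jpg"> in index.html

    # (The Space's Truepic signing/metadata step is omitted in this sketch.)
    # script.js reads `inferJson.output`, so return something under that key.
    return {"output": "output.jpg"}
```

With these annotations, a request like `/generate?prompt=astronaut&inference_steps=10&model=CompVis/stable-diffusion-v1-4` is validated and coerced to the right types before generation starts.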
static/index.html
CHANGED

@@ -14,13 +14,31 @@
      <h1>Text to Image with Signed Attribution</h1>
      <truepic-display><img src="/output.jpg" /></truepic-display>
      <form class="text-gen-form" style="padding:40px 0;">
-       <label for="text-gen-input">Enter
+       <label for="text-gen-input">Enter prompt: </label>
        <input
          id="text-gen-input"
          type="text"
          value=""
          style="margin:40px 0;"
        />
+
+       <label>Inference Steps: </label>
+
+       <select style="width:20%" id="inference_steps">
+         <option value="5">5 (1m 10s)</option>
+         <option value="10">10</option>
+         <option value="20">20</option>
+         <option value="50">50</option>
+       </select>
+
+       <label>Model: </label>
+
+       <select style="width:20%" id="model">
+         <option value="runwayml/stable-diffusion-v1-5">runwayml/stable-diffusion-v1-5</option>
+         <option value="CompVis/stable-diffusion-v1-4">CompVis/stable-diffusion-v1-4</option>
+         <option value="stabilityai/stable-diffusion-2-1">stabilityai/stable-diffusion-2-1</option>
+       </select>
+
        <button id="text-gen-submit">Submit</button>
      </form>

static/script.js
CHANGED

@@ -23,8 +23,8 @@ textGenForm.addEventListener('submit', async (event) => {

*/

-const generateImage = async (text) => {
-  const inferResponse = await fetch(`generate?prompt=${text}`);
+const generateImage = async (text, inference_steps, model) => {
+  const inferResponse = await fetch(`generate?prompt=${text}&inference_steps=${inference_steps}&model=${model}`);
  const inferJson = await inferResponse.json();

  return inferJson.output;
@@ -35,9 +35,11 @@ textGenForm.addEventListener('submit', async (event) => {
  event.preventDefault();

  const textGenInput = document.getElementById('text-gen-input');
+  const inferenceSteps = document.getElementById('inference_steps');
+  const model = document.getElementById('model');

  try {
-    const resp = await generateImage(textGenInput.value);
+    const resp = await generateImage(textGenInput.value, inferenceSteps.value, model.value);
    document.getElementById("redirect-form").submit();
  } catch (err) {
    console.error(err);
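On the client side, the prompt and the two select values are interpolated straight into the fetch URL, so a prompt containing spaces or `&` relies on the browser's lenient handling unless it is passed through `encodeURIComponent`. As a quick way to see the query string the backend now receives, here is a small Python sketch that builds and sends an equivalent request; the host/port and timeout are placeholders, and the example prompt is the one commented out in main.py.

```python
from urllib.parse import urlencode

import requests  # any HTTP client works; the Space's frontend uses fetch() instead

params = {
    "prompt": "a photo of an astronaut riding a horse on mars",  # example prompt from main.py's comment
    "inference_steps": 10,
    "model": "runwayml/stable-diffusion-v1-5",
}

# urlencode percent-encodes spaces and '&' in the prompt, which the raw
# template-literal interpolation in script.js does not do by itself.
url = "http://localhost:7860/generate?" + urlencode(params)  # host/port are placeholders
print(url)
# -> http://localhost:7860/generate?prompt=a+photo+of+an+astronaut+riding+a+horse+on+mars&inference_steps=10&model=runwayml%2Fstable-diffusion-v1-5

response = requests.get(url, timeout=600)  # CPU generation can take minutes
print(response.json()["output"])           # script.js reads the same "output" field
```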
static/style.css
CHANGED

@@ -11,8 +11,7 @@ body {
}

main {
-  max-width:
-  text-align: center;
+  max-width: 680rem;
}

section {
@@ -30,12 +29,18 @@ body {
  margin: 0 auto;
}

+label {
+  display: inline-block;
+  width: 18%;
+}
+
input {
-  width:
+  width: 80%;
}

button {
  cursor: pointer;
+  display: block;
}

.text-gen-output {