dgoot committed
Commit 98307a5
Parent: f50acd6

Update app.py

Files changed (1):
  1. app.py +21 -8
app.py CHANGED
@@ -15,10 +15,7 @@ DEFAULT_MODEL = "stabilityai/stable-diffusion-xl-base-1.0"
 
 def load_pipeline(model):
     return DiffusionPipeline.from_pretrained(
-        model,
-        torch_dtype=torch.float16,
-        use_safetensors=True,
-        variant="fp16"
+        model, torch_dtype=torch.float16, use_safetensors=True, variant="fp16"
     )
 
 
@@ -29,12 +26,17 @@ loaded_models = {DEFAULT_MODEL}
 def generate_image(
     model: str,
     prompt: str,
+    negative_prompt: str,
+    num_inference_steps: int,
+    guidance_scale: float,
     progress,
 ):
     logger.debug(f"Loading pipeline: {dict(model=model)}")
     pipe = load_pipeline(model).to("cuda")
 
-    logger.debug(f"Generating image: {dict(prompt=prompt)}")
+    logger.debug(
+        f"Generating image: {dict(prompt=prompt, negative_prompt=negative_prompt, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale)}"
+    )
 
     def progress_callback(pipe, step_index, timestep, callback_kwargs):
         logger.trace(
@@ -45,6 +47,9 @@ def generate_image(
 
     images = pipe(
         prompt=prompt,
+        negative_prompt=negative_prompt,
+        num_inference_steps=num_inference_steps,
+        guidance_scale=guidance_scale,
         callback_on_step_end=progress_callback,
     ).images
     return images[0]
@@ -59,10 +64,13 @@ def gpu(*args, **kwargs):
 def generate(
     model: str,
     prompt: str,
+    negative_prompt: str,
+    num_inference_steps: int,
+    guidance_scale: float,
     progress=gr.Progress(),
 ):
     logger.info(
-        f"Starting image generation: {dict(model=model, prompt=prompt)}"
+        f"Starting image generation: {dict(model=model, prompt=prompt, negative_prompt=negative_prompt, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale)}"
     )
 
     # Cache the model files for the pipeline
@@ -72,7 +80,9 @@ def generate(
     load_pipeline(model)
     loaded_models.add(model)
 
-    return gpu(model, prompt, progress)
+    return gpu(
+        model, prompt, negative_prompt, num_inference_steps, guidance_scale, progress
+    )
 
 
 demo = gr.Interface(
@@ -82,8 +92,11 @@ demo = gr.Interface(
             label="Model", choices=models, value=DEFAULT_MODEL, allow_custom_value=True
         ),
         gr.Text(label="Prompt"),
+        gr.Text(label="Negative Prompt"),
+        gr.Slider(label="Num Inference Steps", minimum=0, maximum=200, value=50),
+        gr.Slider(label="Guidance Scale", minimum=0.0, maximum=300.0, value=7.5),
     ],
     outputs=[gr.Image(label="Output")],
 )
 
-demo.launch()
+demo.launch()
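For review, the generation path reads more easily assembled than as hunks. Below is a minimal runnable sketch of the core of app.py after this commit; the imports, the use of loguru for `logger`, and the body of `progress_callback` (which the diff truncates) are assumptions, while the rest mirrors the diff above:

# Minimal sketch of the post-commit generation path (not the full app.py).
# Assumptions: loguru supplies `logger`, and progress_callback's body is
# reconstructed, since the diff cuts it off after logger.trace(.
import torch
from diffusers import DiffusionPipeline
from loguru import logger


def load_pipeline(model):
    # fp16 weights in safetensors format keep the download and load light
    return DiffusionPipeline.from_pretrained(
        model, torch_dtype=torch.float16, use_safetensors=True, variant="fp16"
    )


def generate_image(
    model: str,
    prompt: str,
    negative_prompt: str,
    num_inference_steps: int,
    guidance_scale: float,
    progress,
):
    logger.debug(f"Loading pipeline: {dict(model=model)}")
    pipe = load_pipeline(model).to("cuda")

    def progress_callback(pipe, step_index, timestep, callback_kwargs):
        # Assumed body: forward per-step progress to the gr.Progress callable.
        progress((step_index + 1) / num_inference_steps)
        # diffusers requires callback_on_step_end to return the kwargs dict
        return callback_kwargs

    images = pipe(
        prompt=prompt,
        negative_prompt=negative_prompt,
        num_inference_steps=num_inference_steps,
        guidance_scale=guidance_scale,
        callback_on_step_end=progress_callback,
    ).images
    return images[0]

Note that diffusers expects the step-end callback to return the kwargs dict; a callback that returns None fails at the first denoising step.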
 
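Because gr.Interface derives the Space's HTTP API from its input list, the three new fields also extend the programmatic signature. A hypothetical gradio_client call against the updated app (the Space id is a placeholder, not something this commit specifies):

from gradio_client import Client

client = Client("user/space-name")  # placeholder Space id
result = client.predict(
    "stabilityai/stable-diffusion-xl-base-1.0",  # Model
    "a watercolor fox in the snow",              # Prompt
    "blurry, low quality",                       # Negative Prompt (new)
    50,                                          # Num Inference Steps (new)
    7.5,                                         # Guidance Scale (new)
    api_name="/predict",
)
print(result)  # local path of the generated image file

Since the new parameters have no defaults, existing callers that passed only (model, prompt) will need updating.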