KingNish committed on
Commit
d50653b
1 Parent(s): 43225a5

Update app.py

Files changed (1)
  1. app.py +36 -14
app.py CHANGED
@@ -17,10 +17,35 @@ dtype = torch.float16
  vae = AutoencoderKL.from_pretrained("madebyollin/sdxl-vae-fp16-fix", torch_dtype=torch.float16)
 
  repo = "fluently/Fluently-XL-Final"
- pipe = StableDiffusionXLPipeline.from_pretrained(repo, torch_dtype=torch.float16, vae=vae)
- pipe.load_lora_weights("KingNish/Better-Image-XL-Lora", weight_name="example-03.safetensors", adapter_name="lora")
- pipe.set_adapters("lora")
- pipe.to("cuda")
+
+ pipe_best = StableDiffusionXLPipeline.from_pretrained(repo, torch_dtype=torch.float16, vae=vae)
+ pipe_best.load_lora_weights("ehristoforu/dalle-3-xl-v2", weight_name="dalle-3-xl-lora-v2.safetensors", adapter_name="dalle2")
+ pipe_best.load_lora_weights("KingNish/Better-Image-XL-Lora", weight_name="example-03.safetensors", adapter_name="lora")
+ pipe_best.set_adapters(["lora", "dalle2"], adapter_weights=[1.5, 0.7])
+ pipe_best.to("cuda")
+
+ pipe_ori = StableDiffusionXLPipeline.from_pretrained(repo, torch_dtype=torch.float16, vae=vae)
+ pipe_ori.load_lora_weights("RalFinger/origami-style-sdxl-lora", weight_name="ral-orgmi-sdxl.safetensors", adapter_name="origami")
+ pipe_ori.set_adapters(["origami"], adapter_weights=[2])
+ pipe_ori.to("cuda")
+
+ pipe_3D = StableDiffusionXLPipeline.from_pretrained(repo, torch_dtype=torch.float16, vae=vae)
+ pipe_3D.load_lora_weights("artificialguybr/3DRedmond-V1", weight_name="3DRedmond-3DRenderStyle-3DRenderAF.safetensors", adapter_name="dalle2")
+ pipe_3D.load_lora_weights("goofyai/3d_render_style_xl", weight_name="3d_render_style_xl.safetensors", adapter_name="dalle1")
+ pipe_3D.set_adapters(["dalle2", "dalle1"], adapter_weights=[1.1, 0.8])
+ pipe_3D.to("cuda")
+
+ pipe_pixel = StableDiffusionXLPipeline.from_pretrained(repo, torch_dtype=torch.float16, vae=vae)
+ pipe_pixel.load_lora_weights("artificialguybr/PixelArtRedmond", weight_name="PixelArtRedmond-Lite64.safetensors", adapter_name="lora")
+ pipe_pixel.load_lora_weights("nerijs/pixel-art-xl", weight_name="pixel-art-xl.safetensors", adapter_name="pixel")
+ pipe_pixel.set_adapters(["lora", "pixel"], adapter_weights=[1.0, 1.2])
+ pipe_pixel.to("cuda")
+
+ pipe_logo = StableDiffusionXLPipeline.from_pretrained(repo, torch_dtype=torch.float16, vae=vae)
+ pipe_logo.load_lora_weights("artificialguybr/StickersRedmond", weight_name="StickersRedmond.safetensors", adapter_name="lora")
+ pipe_logo.load_lora_weights("artificialguybr/LogoRedmond-LogoLoraForSDXL", weight_name="LogoRedmond_LogoRedAF.safetensors", adapter_name="pixel")
+ pipe_logo.set_adapters(["lora", "pixel"], adapter_weights=[0.5, 1.2])
+ pipe_logo.to("cuda")
 
  help_text = """
  To optimize image results:
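For context, here is a minimal, self-contained sketch of the multi-LoRA pattern this hunk introduces, using the same base checkpoint, VAE, and LoRA repositories as the commit. The adapter names ("dalle", "better") are illustrative rather than the app's exact identifiers, and a CUDA GPU with enough VRAM for fp16 SDXL is assumed.

```python
# Sketch: one SDXL base checkpoint with two LoRA adapters active at once.
# Assumes diffusers with the PEFT integration installed and a CUDA device.
import torch
from diffusers import AutoencoderKL, StableDiffusionXLPipeline

vae = AutoencoderKL.from_pretrained("madebyollin/sdxl-vae-fp16-fix", torch_dtype=torch.float16)
pipe = StableDiffusionXLPipeline.from_pretrained(
    "fluently/Fluently-XL-Final", torch_dtype=torch.float16, vae=vae
)

# Each LoRA is registered under its own adapter name...
pipe.load_lora_weights("ehristoforu/dalle-3-xl-v2",
                       weight_name="dalle-3-xl-lora-v2.safetensors", adapter_name="dalle")
pipe.load_lora_weights("KingNish/Better-Image-XL-Lora",
                       weight_name="example-03.safetensors", adapter_name="better")

# ...and both are activated together, each scaled by its own weight.
pipe.set_adapters(["better", "dalle"], adapter_weights=[1.5, 0.7])
pipe.to("cuda")
```

Note that the commit builds five such pipelines (pipe_best, pipe_ori, pipe_3D, pipe_pixel, pipe_logo) and keeps them all resident on the GPU; an alternative would be to keep a single base pipeline and switch adapter sets per request, trading a small adapter-switch latency for a much lower VRAM footprint.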
@@ -88,14 +113,7 @@ def king(type ,
      if randomize_seed:
          seed = random.randint(0, 99999)
      generator = torch.Generator().manual_seed(seed)
-     image = pipe(
-         prompt = instruction,
-         guidance_scale = 5,
-         num_inference_steps = steps,
-         width = width,
-         height = height,
-         generator = generator
-     ).images[0]
+     image = pipe_best(prompt = instruction, guidance_scale = 5, num_inference_steps = steps, width = width, height = height, generator = generator).images[0]
      return seed, image
 
  client = InferenceClient()
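The rewritten call routes through pipe_best and collapses the keyword arguments onto one line. A hedged sketch of the same seeded-generation pattern, reusing the `pipe` from the sketch above; the prompt, step count, and output path are placeholders standing in for the app's `instruction`, `steps`, and Gradio output:

```python
import random
import torch

# Reproducible generation: pick (or accept) a seed, then pin it with a
# torch.Generator so the same seed reproduces the same image.
seed = random.randint(0, 99999)
generator = torch.Generator().manual_seed(seed)

image = pipe(
    prompt="a lighthouse at dusk",   # illustrative prompt
    guidance_scale=5,
    num_inference_steps=25,          # the app passes its `steps` value here
    width=1024,
    height=1024,
    generator=generator,
).images[0]
image.save("out.png")
```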
@@ -169,6 +187,10 @@ with gr.Blocks(css=css) as demo:
      with gr.Row():
          input_image = gr.Image(label="Image", type="pil", interactive=True)
 
+     with gr.Row():
+         width = gr.Number(value=1024, step=16, label="Width", interactive=True)
+         height = gr.Number(value=1024, step=16, label="Height", interactive=True)
+
      with gr.Row():
          text_cfg_scale = gr.Number(value=7.3, step=0.1, label="Text CFG", interactive=True)
          image_cfg_scale = gr.Number(value=1.7, step=0.1, label="Image CFG", interactive=True)
@@ -191,9 +213,7 @@ with gr.Blocks(css=css) as demo:
      )
 
      gr.Markdown(help_text)
-
      instruction.change(fn=response, inputs=[instruction,input_image], outputs=type, queue=False)
-
      input_image.upload(fn=response, inputs=[instruction,input_image], outputs=type, queue=False)
 
      gr.on(triggers=[
@@ -209,6 +229,8 @@ with gr.Blocks(css=css) as demo:
              seed,
              text_cfg_scale,
              image_cfg_scale,
+             width,
+             height
          ],
          outputs=[seed, input_image],
      )
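The new width and height fields only take effect because they are both rendered in the UI and appended to the `inputs` list of the `gr.on` handler. A minimal Gradio sketch of that wiring, with a hypothetical `generate` stub in place of the app's `king` function and a single `instruction.submit` trigger standing in for the app's full trigger list:

```python
import gradio as gr

def generate(instruction, width, height):
    # Stub standing in for the app's `king` function.
    return f"{instruction} @ {int(width)}x{int(height)}"

with gr.Blocks() as demo:
    instruction = gr.Textbox(label="Instruction")
    with gr.Row():
        width = gr.Number(value=1024, step=16, label="Width", interactive=True)
        height = gr.Number(value=1024, step=16, label="Height", interactive=True)
    output = gr.Textbox(label="Output")

    # Components listed in `inputs` are passed positionally to the handler,
    # which is why width and height must be added here as well as to the UI.
    gr.on(
        triggers=[instruction.submit],
        fn=generate,
        inputs=[instruction, width, height],
        outputs=[output],
    )

demo.launch()
```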
 