KingNish committed
Commit dd6d711
1 parent: ad0113c

Update app.py

Files changed (1)
  1. app.py +26 -36
app.py CHANGED
@@ -10,7 +10,18 @@ from diffusers import StableDiffusionXLPipeline, EDMEulerScheduler, StableDiffus
  from custom_pipeline import CosStableDiffusionXLInstructPix2PixPipeline
  from huggingface_hub import hf_hub_download
  from huggingface_hub import InferenceClient
+ import gradio as gr
+ import numpy as np
+ import random
+ import torch
+ from diffusers import StableDiffusion3Pipeline, SD3Transformer2DModel, FlowMatchEulerDiscreteScheduler
+ import spaces
+
+ device = "cuda" if torch.cuda.is_available() else "cpu"
+ dtype = torch.float16

+ repo = "stabilityai/stable-diffusion-3-medium-diffusers"
+ pipe = StableDiffusion3Pipeline.from_pretrained(repo, torch_dtype=torch.float16).to(device)

  help_text = """
  To optimize image results:
@@ -56,28 +67,11 @@ if not torch.cuda.is_available():

  device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

-
- # Image Generator
- if torch.cuda.is_available():
-     pipe = StableDiffusionXLPipeline.from_pretrained(
-         "fluently/Fluently-XL-Final",
-         torch_dtype=torch.float16,
-         use_safetensors=True,
-     )
-     pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
-     pipe.load_lora_weights("ehristoforu/dalle-3-xl-v2", weight_name="dalle-3-xl-lora-v2.safetensors", adapter_name="dalle")
-     pipe.set_adapters("dalle")
-
- def randomize_seed_fn(seed: int, randomize_seed: bool) -> int:
-     if randomize_seed:
-         seed = random.randint(0, 999999)
-     return seed
-
  # Generator
  @spaces.GPU(duration=30, queue=False)
  def king(type = "Image Generation",
          input_image = None,
-         instruction: str = "Eiffel tower",
+         instruction: str ,
          steps: int = 8,
          randomize_seed: bool = False,
          seed: int = 25,
@@ -90,7 +84,8 @@ def king(type = "Image Generation",
          progress=gr.Progress(track_tqdm=True),
  ):
      if type=="Image Editing" :
-         seed = int(randomize_seed_fn(seed, randomize_seed))
+         if randomize_seed:
+             seed = random.randint(0, 99999)
          text_cfg_scale = text_cfg_scale
          image_cfg_scale = image_cfg_scale
          input_image = input_image
@@ -103,23 +98,18 @@ def king(type = "Image Generation",
              num_inference_steps=steps, generator=generator).images[0]
          return seed, output_image
      else :
-         pipe.to(device)
-         seed = int(randomize_seed_fn(seed, randomize_seed))
-         generator = torch.Generator().manual_seed(seed)
-
-         options = {
-             "prompt":instruction,
-             "width":width,
-             "height":height,
-             "guidance_scale":guidance_scale,
-             "num_inference_steps":steps,
-             "generator":generator,
-             "use_resolution_binning":use_resolution_binning,
-             "output_type":"pil",
-         }
-
-         output_image = pipe(**options).images[0]
-         return seed, output_image
+         if randomize_seed:
+             seed = random.randint(0, 99999)
+         generator = torch.Generator().manual_seed(seed)
+         image = pipe(
+             prompt = prompt,
+             guidance_scale = guidance_scale,
+             num_inference_steps = steps,
+             width = width,
+             height = height,
+             generator = generator
+         ).images[0]
+         return seed, image

  # Prompt classifier
  def response(instruction, input_image=None):
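For reference, here is the added module-level setup plus a single generation call, assembled as a runnable sketch. It assumes a diffusers release with Stable Diffusion 3 support and access to the gated stabilityai/stable-diffusion-3-medium-diffusers checkpoint; the prompt and the save call are illustrative and not part of the commit. Note that SD3Transformer2DModel and FlowMatchEulerDiscreteScheduler are imported but not used in the hunks shown, and that the new dtype variable is defined while from_pretrained is passed torch.float16 directly; the sketch passes dtype instead.

import random

import torch
from diffusers import StableDiffusion3Pipeline

# Module-level setup, as added in this commit.
device = "cuda" if torch.cuda.is_available() else "cpu"
dtype = torch.float16

repo = "stabilityai/stable-diffusion-3-medium-diffusers"
pipe = StableDiffusion3Pipeline.from_pretrained(repo, torch_dtype=dtype).to(device)

# One text-to-image call, mirroring the new branch in king().
seed = random.randint(0, 99999)                    # same range the commit uses
generator = torch.Generator().manual_seed(seed)
image = pipe(
    prompt="a watercolor painting of the Eiffel tower",  # illustrative prompt
    guidance_scale=7.0,                            # illustrative value
    num_inference_steps=8,                         # matches the app's default steps
    width=1024,                                    # illustrative value
    height=1024,                                   # illustrative value
    generator=generator,
).images[0]
image.save("sd3_output.png")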
 
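Two things in the new king() body look worth a second pass. First, instruction: str has no default but follows parameters that do (type and input_image), which Python rejects with a SyntaxError at import time; keeping the previous default ("Eiffel tower") or reordering the parameters would avoid that. Second, the new branch calls pipe(prompt = prompt, ...), but the only prompt-like parameter visible in these hunks is instruction, which the removed SDXL branch passed as the prompt. Below is a sketch of the branch as a standalone helper with both points addressed; the helper name and the default width, height, and guidance_scale values are illustrative, not from the commit, and pipe is the module-level StableDiffusion3Pipeline created above.

import random

import torch

def generate(pipe, instruction: str, steps: int = 8, randomize_seed: bool = False,
             seed: int = 25, width: int = 1024, height: int = 1024,
             guidance_scale: float = 7.0):
    # Required parameter first, defaults after it, so the signature is valid.
    if randomize_seed:
        seed = random.randint(0, 99999)
    generator = torch.Generator().manual_seed(seed)
    image = pipe(
        prompt=instruction,            # pass the user instruction, not an undefined name
        guidance_scale=guidance_scale,
        num_inference_steps=steps,
        width=width,
        height=height,
        generator=generator,
    ).images[0]
    return seed, image

Inside king() itself the same effect can be had by keeping a default on instruction (or moving it ahead of the defaulted parameters) and by passing prompt=instruction in the pipe call.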