lucianosb committed
Commit ab54b88
1 Parent(s): a5d42f0

update for Sintetico Models

Files changed (1)
  1. app.py +21 -62
app.py CHANGED
@@ -19,7 +19,7 @@ import stone
 import os
 import spaces
 
-access_token = os.getenv("AccessTokenSD3")
+# access_token = os.getenv("AccessTokenSD3")
 
 
 from huggingface_hub import login
@@ -28,71 +28,34 @@ login(token = access_token)
 
 # Define model initialization functions
 def load_model(model_name):
-    if model_name == "stabilityai/sdxl-turbo":
-        pipeline = DiffusionPipeline.from_pretrained(
-            model_name,
-            torch_dtype=torch.float16,
-            variant="fp16"
+    if model_name == "sinteticoXL":
+        pipeline = StableDiffusionXLPipeline.from_single_file(
+            "https://huggingface.co/lucianosb/sinteticoXL-models/blob/main/sinteticoXL_v1dot2.safetensors",
+            torch_dtype=torch.float16,
+            variant="fp16",
+            use_safetensors=True,
         ).to("cuda")
-    elif model_name == "runwayml/stable-diffusion-v1-5":
-        pipeline = StableDiffusionPipeline.from_pretrained(
-            model_name,
-            torch_dtype=torch.float16
-        ).to("cuda")
-    elif model_name == "ByteDance/SDXL-Lightning":
-        base = "stabilityai/stable-diffusion-xl-base-1.0"
-        ckpt = "sdxl_lightning_4step_unet.safetensors"
-        unet = UNet2DConditionModel.from_config(base, subfolder="unet").to("cuda", torch.float16)
-        unet.load_state_dict(load_file(hf_hub_download(model_name, ckpt), device="cuda"))
-        pipeline = StableDiffusionXLPipeline.from_pretrained(
-            base,
-            unet=unet,
-            torch_dtype=torch.float16,
-            variant="fp16"
-        ).to("cuda")
-        pipeline.scheduler = EulerDiscreteScheduler.from_config(pipeline.scheduler.config, timestep_spacing="trailing")
-    elif model_name == "segmind/SSD-1B":
-        pipeline = StableDiffusionXLPipeline.from_pretrained(
-            model_name,
-            torch_dtype=torch.float16,
-            use_safetensors=True,
-            variant="fp16"
-        ).to("cuda")
-    elif model_name == "stabilityai/stable-diffusion-3-medium-diffusers":
-        pipeline = StableDiffusion3Pipeline.from_pretrained(
-            model_name,
-            torch_dtype=torch.float16
-        ).to("cuda")
-    elif model_name == "stabilityai/stable-diffusion-2":
-        scheduler = EulerDiscreteScheduler.from_pretrained(model_name, subfolder="scheduler")
-        pipeline = StableDiffusionPipeline.from_pretrained(
-            model_name,
-            scheduler=scheduler,
-            torch_dtype=torch.float16
+    elif model_name == "sinteticoXL_Prude":
+        pipeline = StableDiffusionXLPipeline.from_single_file(
+            "https://huggingface.co/lucianosb/sinteticoXL-models/blob/main/sinteticoXL_prude_v1dot2.safetensors",
+            torch_dtype=torch.float16,
+            variant="fp16",
+            use_safetensors=True,
        ).to("cuda")
     else:
         raise ValueError("Unknown model name")
     return pipeline
 
 # Initialize the default model
-default_model = "stabilityai/stable-diffusion-3-medium-diffusers"
+default_model = "sinteticoXL"
 pipeline_text2image = load_model(default_model)
 
 @spaces.GPU
 def getimgen(prompt, model_name):
-    if model_name == "stabilityai/sdxl-turbo":
-        return pipeline_text2image(prompt=prompt, guidance_scale=0.0, num_inference_steps=2).images[0]
-    elif model_name == "runwayml/stable-diffusion-v1-5":
-        return pipeline_text2image(prompt).images[0]
-    elif model_name == "ByteDance/SDXL-Lightning":
-        return pipeline_text2image(prompt, num_inference_steps=4, guidance_scale=0).images[0]
-    elif model_name == "segmind/SSD-1B":
-        neg_prompt = "ugly, blurry, poor quality"
-        return pipeline_text2image(prompt=prompt, negative_prompt=neg_prompt).images[0]
-    elif model_name == "stabilityai/stable-diffusion-3-medium-diffusers":
-        return pipeline_text2image(prompt=prompt, negative_prompt="", num_inference_steps=28, guidance_scale=7.0).images[0]
-    elif model_name == "stabilityai/stable-diffusion-2":
-        return pipeline_text2image(prompt=prompt).images[0]
+    if model_name == "sinteticoXL":
+        return pipeline_text2image(prompt=prompt, guidance_scale=6.0, num_inference_steps=20).images[0]
+    elif model_name == "sinteticoXL_Prude":
+        return pipeline_text2image(prompt=prompt, guidance_scale=6.0, num_inference_steps=20).images[0]
 
 blip_processor = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-large")
 blip_model = BlipForConditionalGeneration.from_pretrained("Salesforce/blip-image-captioning-large", torch_dtype=torch.float16).to("cuda")
@@ -181,16 +144,12 @@ This demo provides an insightful look into how current text-to-image models hand
     model_dropdown = gr.Dropdown(
         label="Choose a model",
         choices=[
-            "stabilityai/stable-diffusion-3-medium-diffusers",
-            "stabilityai/sdxl-turbo",
-            "ByteDance/SDXL-Lightning",
-            "stabilityai/stable-diffusion-2",
-            "runwayml/stable-diffusion-v1-5",
-            "segmind/SSD-1B"
+            "sinteticoXL",
+            "sinteticoXL_Prude"
         ],
         value=default_model
     )
-    prompt = gr.Textbox(label="Enter the Prompt", value = "photo of a doctor in india, detailed, 8k, sharp, high quality, good lighting")
+    prompt = gr.Textbox(label="Enter the Prompt", value = "photo of a beautiful Brazilian woman, high quality, good lighting")
     gallery = gr.Gallery(
         label="Generated images",
         show_label=False,
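
For reference, a minimal standalone sketch of the new loading path outside the Space. It assumes a recent diffusers release with single-file SDXL support; the checkpoint URL and sampling settings are taken from the diff above, while the output filename is illustrative only.

import torch
from diffusers import StableDiffusionXLPipeline

# Load the sinteticoXL checkpoint directly from its .safetensors file on the Hub,
# as the updated load_model() does, instead of a full from_pretrained() repo.
pipe = StableDiffusionXLPipeline.from_single_file(
    "https://huggingface.co/lucianosb/sinteticoXL-models/blob/main/sinteticoXL_v1dot2.safetensors",
    torch_dtype=torch.float16,
    use_safetensors=True,
).to("cuda")

# Same sampling settings the updated getimgen() uses for both Sintetico models.
image = pipe(
    prompt="photo of a beautiful Brazilian woman, high quality, good lighting",
    guidance_scale=6.0,
    num_inference_steps=20,
).images[0]
image.save("sinteticoXL_sample.png")  # illustrative output path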