saicharan1234 committed
Commit
edb8520
1 Parent(s): 12f2adb

Update main.py

Files changed (1)
  main.py +60 -107
main.py CHANGED
@@ -3,80 +3,76 @@ from fastapi import FastAPI, File, UploadFile, Form
 from fastapi.responses import StreamingResponse
 import torch
 from diffusers import StableDiffusionPipeline, StableDiffusionXLPipeline, EulerAncestralDiscreteScheduler, DPMSolverSinglestepScheduler
-from diffusers.pipelines import StableDiffusionInpaintPipeline, StableDiffusionXLInpaintPipeline
+from diffusers.pipelines import StableDiffusionInpaintPipeline
 from huggingface_hub import hf_hub_download
 import numpy as np
 import random
 from PIL import Image
 import io
 import os

 app = FastAPI()

 MAX_SEED = np.iinfo(np.int32).max
-
 device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

 # Load HF token from environment variable
 HF_TOKEN = os.getenv("HF_TOKEN")

-# Load pipelines
-pipe_xl_final = StableDiffusionXLPipeline.from_single_file(
-    hf_hub_download(repo_id="fluently/Fluently-XL-Final", filename="FluentlyXL-Final.safetensors", token=HF_TOKEN),
-    torch_dtype=torch.float16,
-    use_safetensors=True,
-)
-pipe_xl_final.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe_xl_final.scheduler.config)
-pipe_xl_final.to(device)
-
-pipe_anime = StableDiffusionPipeline.from_pretrained(
-    "fluently/Fluently-anime",
-    torch_dtype=torch.float16,
-    use_safetensors=True,
-)
-pipe_anime.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe_anime.scheduler.config)
-pipe_anime.to(device)
-
-pipe_epic = StableDiffusionPipeline.from_pretrained(
-    "fluently/Fluently-epic",
-    torch_dtype=torch.float16,
-    use_safetensors=True,
-)
-pipe_epic.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe_epic.scheduler.config)
-pipe_epic.to(device)
-
-
-
-pipe_inpaint = StableDiffusionInpaintPipeline.from_pretrained(
-    "fluently/Fluently-v4-inpainting",
-    torch_dtype=torch.float16,
-    use_safetensors=True,
-)
-pipe_inpaint.to(device)
-
-pipe_xl = StableDiffusionXLPipeline.from_pretrained(
-    "fluently/Fluently-XL-v4",
-    torch_dtype=torch.float16,
-    use_safetensors=True,
-)
-pipe_xl.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe_xl.scheduler.config)
-pipe_xl.to(device)
-
-pipe_xl_lightning = StableDiffusionXLPipeline.from_pretrained(
-    "fluently/Fluently-XL-v3-lightning",
-    torch_dtype=torch.float16,
-    use_safetensors=True,
-)
-pipe_xl_lightning.scheduler = DPMSolverSinglestepScheduler.from_config(pipe_xl_lightning.scheduler.config, use_karras_sigmas=False, timestep_spacing="trailing", lower_order_final=True)
-pipe_xl_lightning.to(device)
-
+# Function to load a pipeline dynamically
+def load_pipeline(model_name: str):
+    if model_name == "Fluently XL Final":
+        pipe = StableDiffusionXLPipeline.from_single_file(
+            hf_hub_download(repo_id="fluently/Fluently-XL-Final", filename="FluentlyXL-Final.safetensors", token=HF_TOKEN),
+            torch_dtype=torch.float16,
+            use_safetensors=True,
+        )
+        pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
+    elif model_name == "Fluently Anime":
+        pipe = StableDiffusionPipeline.from_pretrained(
+            "fluently/Fluently-anime",
+            torch_dtype=torch.float16,
+            use_safetensors=True,
+        )
+        pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
+    elif model_name == "Fluently Epic":
+        pipe = StableDiffusionPipeline.from_pretrained(
+            "fluently/Fluently-epic",
+            torch_dtype=torch.float16,
+            use_safetensors=True,
+        )
+        pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
+    elif model_name == "Fluently XL v4":
+        pipe = StableDiffusionXLPipeline.from_pretrained(
+            "fluently/Fluently-XL-v4",
+            torch_dtype=torch.float16,
+            use_safetensors=True,
+        )
+        pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
+    elif model_name == "Fluently XL v3 Lightning":
+        pipe = StableDiffusionXLPipeline.from_pretrained(
+            "fluently/Fluently-XL-v3-lightning",
+            torch_dtype=torch.float16,
+            use_safetensors=True,
+        )
+        pipe.scheduler = DPMSolverSinglestepScheduler.from_config(pipe.scheduler.config, use_karras_sigmas=False, timestep_spacing="trailing", lower_order_final=True)
+    elif model_name == "Fluently v4 inpaint":
+        pipe = StableDiffusionInpaintPipeline.from_pretrained(
+            "fluently/Fluently-v4-inpainting",
+            torch_dtype=torch.float16,
+            use_safetensors=True,
+        )
+    else:
+        raise ValueError(f"Unknown model: {model_name}")
+
+    pipe.to(device)
+    return pipe

 def randomize_seed_fn(seed: int, randomize_seed: bool) -> int:
     if randomize_seed:
         seed = random.randint(0, MAX_SEED)
     return seed

-
 @app.post("/generate")
 async def generate(
     model: str = Form(...),
@@ -101,73 +97,31 @@ async def generate(
     inpaint_image_pil = Image.open(io.BytesIO(await inpaint_image.read())) if inpaint_image else None
     mask_image_pil = Image.open(io.BytesIO(await mask_image.read())) if mask_image else None

-    if model == "Fluently XL Final":
-        images = pipe_xl_final(
-            prompt=prompt,
-            negative_prompt=negative_prompt,
-            width=width,
-            height=height,
-            guidance_scale=guidance_scale,
-            num_inference_steps=25,
-            num_images_per_prompt=1,
-            output_type="pil",
-        ).images
-    elif model == "Fluently Anime":
-        images = pipe_anime(
-            prompt=prompt,
-            negative_prompt=negative_prompt,
-            width=width,
-            height=height,
-            guidance_scale=guidance_scale,
-            num_inference_steps=30,
-            num_images_per_prompt=1,
-            output_type="pil",
-        ).images
-    elif model == "Fluently Epic":
-        images = pipe_epic(
-            prompt=prompt,
-            negative_prompt=negative_prompt,
-            width=width,
-            height=height,
-            guidance_scale=guidance_scale,
-            num_inference_steps=30,
-            num_images_per_prompt=1,
-            output_type="pil",
-        ).images
-    elif model == "Fluently XL v4":
-        images = pipe_xl(
-            prompt=prompt,
-            negative_prompt=negative_prompt,
-            width=width,
-            height=height,
-            guidance_scale=guidance_scale,
-            num_inference_steps=25,
-            num_images_per_prompt=1,
-            output_type="pil",
-        ).images
-    elif model == "Fluently XL v3 Lightning":
-        images = pipe_xl_lightning(
-            prompt=prompt,
-            negative_prompt=negative_prompt,
-            width=width,
-            height=height,
-            guidance_scale=2,
-            num_inference_steps=5,
-            num_images_per_prompt=1,
-            output_type="pil",
-        ).images
-    elif model == "Fluently v4 inpaint" or model == "Fluently XL v3 inpaint":
-        blurred_mask = pipe_inpaint.mask_processor.blur(mask_image_pil, blur_factor=blur_factor)
-        images = pipe_inpaint(
-            prompt=prompt,
-            image=inpaint_image_pil,
-            mask_image=blurred_mask,
-            negative_prompt=negative_prompt,
-            width=width,
-            height=height,
-            guidance_scale=guidance_scale,
-            num_inference_steps=30,
-            strength=strength,
-            num_images_per_prompt=1,
-            output_type="pil",
-        ).images
+    pipe = load_pipeline(model)
+
+    if model in ["Fluently v4 inpaint"]:
+        blurred_mask = pipe.mask_processor.blur(mask_image_pil, blur_factor=blur_factor)
+        images = pipe(
+            prompt=prompt,
+            image=inpaint_image_pil,
+            mask_image=blurred_mask,
+            negative_prompt=negative_prompt,
+            width=width,
+            height=height,
+            guidance_scale=guidance_scale,
+            num_inference_steps=30,
+            strength=strength,
+            num_images_per_prompt=1,
+            output_type="pil",
+        ).images
+    else:
+        images = pipe(
+            prompt=prompt,
+            negative_prompt=negative_prompt,
+            width=width,
+            height=height,
+            guidance_scale=guidance_scale,
+            num_inference_steps=25 if model == "Fluently XL Final" else 30,
+            num_images_per_prompt=1,
+            output_type="pil",
+        ).images
@@ -179,7 +133,6 @@ async def generate(

     return StreamingResponse(img_byte_arr, media_type="image/png")

-
 if __name__ == "__main__":
     import uvicorn
     uvicorn.run(app, host="0.0.0.0", port=7860)
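
For context, a minimal sketch of a client calling the /generate endpoint after this change. Only the `model` field is visible in the truncated `generate(...)` signature above; the remaining form field names (`prompt`, `negative_prompt`, `width`, `height`, `guidance_scale`, and the inpainting-only `strength`, `blur_factor`, `inpaint_image`, `mask_image`) are assumptions inferred from the handler body and should be checked against the full signature.

# Hypothetical client sketch for the /generate endpoint above.
# Assumes the server runs locally on port 7860 (per uvicorn.run) and that
# form field names match those used in the handler body; the full
# Form(...) signature is truncated in this diff, so verify them.
import requests

resp = requests.post(
    "http://localhost:7860/generate",
    data={
        "model": "Fluently XL v4",  # any name handled by load_pipeline
        "prompt": "a lighthouse at dawn, golden hour",
        "negative_prompt": "blurry, low quality",
        "width": 1024,
        "height": 1024,
        "guidance_scale": 5.5,
    },
    timeout=600,  # first request pays the full model download/load cost
)
resp.raise_for_status()

# The endpoint streams back a single PNG.
with open("out.png", "wb") as f:
    f.write(resp.content)

Note the trade-off this commit makes: because `load_pipeline(model)` now runs inside the request handler, only one checkpoint has to fit in memory at a time, but every request pays the load (and, on first use, download) cost for its model. An inpainting request ("Fluently v4 inpaint") would additionally upload `inpaint_image` and `mask_image` as files.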