Spaces: Running on Zero

AlekseyCalvin committed • Commit 327757d • 1 Parent(s): 11dd587
Update app.py

app.py CHANGED
@@ -3,6 +3,9 @@ import os
 import gradio as gr
 import numpy as np
 import json
+from accelerate import dispatch_model, infer_auto_device_map
+from accelerate.utils import get_balanced_memory
+from torch.cuda.amp import autocast
 import torch
 import random
 import time
@@ -27,11 +30,30 @@ with open('loras.json', 'r') as f:
 
 # Initialize the base model with authentication and specify the device
 # Initialize the base model with authentication and specify the device
-pipe = DiffusionPipeline.from_pretrained("sayakpaul/FLUX.1-merged", torch_dtype=dtype, token=hf_token).to(device)
+pipe = DiffusionPipeline.from_pretrained("sayakpaul/FLUX.1-merged", torch_dtype=dtype, token=hf_token, trust_remote_code=True, device_map='auto').to(device)
 
 MAX_SEED = np.iinfo(np.int32).max
 MAX_IMAGE_SIZE = 2048
 
+max_memory = get_balanced_memory(
+    model,
+    max_memory=None,
+    no_split_module_classes=["DecoderLayer", "Attention", "MLP", "LayerNorm", "Linear"],
+    dtype='float16',
+    low_zero=False,
+)
+
+device_map = infer_auto_device_map(
+    model,
+    max_memory=max_memory,
+    no_split_module_classes=["DecoderLayer", "Attention", "MLP", "LayerNorm", "Linear"],
+    dtype='float16'
+)
+
+model = dispatch_model(model, device_map=device_map)
+
+device = 'cuda:0'
+
 class calculateDuration:
     def __init__(self, activity_name=""):
         self.activity_name = activity_name
@@ -48,7 +70,7 @@ class calculateDuration:
         else:
            print(f"Elapsed time: {self.elapsed_time:.6f} seconds")
 
-@spaces.GPU(duration=
+@spaces.GPU(duration=200)
 def generate_images(prompt, trigger_word, steps, seed, cfg_scale, width, height, lora_scale, num_images, progress):
     generator = torch.Generator(device=device).manual_seed(seed)
     images = []
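Note on the second hunk: the added accelerate calls operate on a name `model` that never appears in the visible hunks (the pipeline object is `pipe`), so the intended target module is not spelled out by the commit. Below is a minimal sketch of the same get_balanced_memory → infer_auto_device_map → dispatch_model flow, assuming the target is the pipeline's transformer; that choice is an illustrative assumption, not something the commit specifies, and the names in no_split_module_classes only take effect if they match real submodule classes of the target.

# Sketch only: balanced device-map dispatch for a diffusers pipeline module.
# Assumption: the commit's undefined `model` refers to pipe.transformer.
import torch
from accelerate import dispatch_model, infer_auto_device_map
from accelerate.utils import get_balanced_memory

model = pipe.transformer  # hypothetical target; `model` is never assigned in the diff

max_memory = get_balanced_memory(
    model,
    max_memory=None,  # let accelerate probe the available GPU/CPU memory
    no_split_module_classes=["DecoderLayer", "Attention", "MLP", "LayerNorm", "Linear"],  # submodule classes to keep whole on one device
    dtype=torch.float16,
    low_zero=False,
)
device_map = infer_auto_device_map(
    model,
    max_memory=max_memory,
    no_split_module_classes=["DecoderLayer", "Attention", "MLP", "LayerNorm", "Linear"],
    dtype=torch.float16,
)
pipe.transformer = dispatch_model(model, device_map=device_map)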