JOY-Huang and fffiloni committed on
Commit 8de4d6e
0 Parent(s):

Duplicate from gradio-templates/text-to-image-gradio-template


Co-authored-by: Sylvain Filoni <[email protected]>

Files changed (4)
  1. .gitattributes +35 -0
  2. README.md +12 -0
  3. app.py +142 -0
  4. requirements.txt +6 -0
.gitattributes ADDED
@@ -0,0 +1,35 @@
+ *.7z filter=lfs diff=lfs merge=lfs -text
+ *.arrow filter=lfs diff=lfs merge=lfs -text
+ *.bin filter=lfs diff=lfs merge=lfs -text
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
+ *.ckpt filter=lfs diff=lfs merge=lfs -text
+ *.ftz filter=lfs diff=lfs merge=lfs -text
+ *.gz filter=lfs diff=lfs merge=lfs -text
+ *.h5 filter=lfs diff=lfs merge=lfs -text
+ *.joblib filter=lfs diff=lfs merge=lfs -text
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
+ *.mlmodel filter=lfs diff=lfs merge=lfs -text
+ *.model filter=lfs diff=lfs merge=lfs -text
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
+ *.npy filter=lfs diff=lfs merge=lfs -text
+ *.npz filter=lfs diff=lfs merge=lfs -text
+ *.onnx filter=lfs diff=lfs merge=lfs -text
+ *.ot filter=lfs diff=lfs merge=lfs -text
+ *.parquet filter=lfs diff=lfs merge=lfs -text
+ *.pb filter=lfs diff=lfs merge=lfs -text
+ *.pickle filter=lfs diff=lfs merge=lfs -text
+ *.pkl filter=lfs diff=lfs merge=lfs -text
+ *.pt filter=lfs diff=lfs merge=lfs -text
+ *.pth filter=lfs diff=lfs merge=lfs -text
+ *.rar filter=lfs diff=lfs merge=lfs -text
+ *.safetensors filter=lfs diff=lfs merge=lfs -text
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
+ *.tar filter=lfs diff=lfs merge=lfs -text
+ *.tflite filter=lfs diff=lfs merge=lfs -text
+ *.tgz filter=lfs diff=lfs merge=lfs -text
+ *.wasm filter=lfs diff=lfs merge=lfs -text
+ *.xz filter=lfs diff=lfs merge=lfs -text
+ *.zip filter=lfs diff=lfs merge=lfs -text
+ *.zst filter=lfs diff=lfs merge=lfs -text
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
README.md ADDED
@@ -0,0 +1,12 @@
+ ---
+ title: Text-to-Image Gradio Template
+ emoji: 🖼
+ colorFrom: purple
+ colorTo: red
+ sdk: gradio
+ sdk_version: 4.42.0
+ app_file: app.py
+ pinned: false
+ ---
+
+ Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
app.py ADDED
@@ -0,0 +1,142 @@
+ import gradio as gr
+ import numpy as np
+ import random
+ # import spaces  # [uncomment to use ZeroGPU]
+ from diffusers import DiffusionPipeline
+ import torch
+
+ device = "cuda" if torch.cuda.is_available() else "cpu"
+ model_repo_id = "stabilityai/sdxl-turbo"  # Replace with the model you would like to use
+
+ if torch.cuda.is_available():
+     torch_dtype = torch.float16
+ else:
+     torch_dtype = torch.float32
+
+ pipe = DiffusionPipeline.from_pretrained(model_repo_id, torch_dtype=torch_dtype)
+ pipe = pipe.to(device)
+
+ MAX_SEED = np.iinfo(np.int32).max
+ MAX_IMAGE_SIZE = 1024
+
+ # @spaces.GPU  # [uncomment to use ZeroGPU]
+ def infer(prompt, negative_prompt, seed, randomize_seed, width, height, guidance_scale, num_inference_steps, progress=gr.Progress(track_tqdm=True)):
+
+     if randomize_seed:
+         seed = random.randint(0, MAX_SEED)
+
+     generator = torch.Generator().manual_seed(seed)
+
+     image = pipe(
+         prompt=prompt,
+         negative_prompt=negative_prompt,
+         guidance_scale=guidance_scale,
+         num_inference_steps=num_inference_steps,
+         width=width,
+         height=height,
+         generator=generator,
+     ).images[0]
+
+     return image, seed
+
+ examples = [
+     "Astronaut in a jungle, cold color palette, muted colors, detailed, 8k",
+     "An astronaut riding a green horse",
+     "A delicious ceviche cheesecake slice",
+ ]
+
+ css = """
+ #col-container {
+     margin: 0 auto;
+     max-width: 640px;
+ }
+ """
+
+ with gr.Blocks(css=css) as demo:
+
+     with gr.Column(elem_id="col-container"):
+         gr.Markdown("""
+         # Text-to-Image Gradio Template
+         """)
+
+         with gr.Row():
+
+             prompt = gr.Text(
+                 label="Prompt",
+                 show_label=False,
+                 max_lines=1,
+                 placeholder="Enter your prompt",
+                 container=False,
+             )
+
+             run_button = gr.Button("Run", scale=0)
+
+         result = gr.Image(label="Result", show_label=False)
+
+         with gr.Accordion("Advanced Settings", open=False):
+
+             negative_prompt = gr.Text(
+                 label="Negative prompt",
+                 max_lines=1,
+                 placeholder="Enter a negative prompt",
+                 visible=False,
+             )
+
+             seed = gr.Slider(
+                 label="Seed",
+                 minimum=0,
+                 maximum=MAX_SEED,
+                 step=1,
+                 value=0,
+             )
+
+             randomize_seed = gr.Checkbox(label="Randomize seed", value=True)
+
+             with gr.Row():
+
+                 width = gr.Slider(
+                     label="Width",
+                     minimum=256,
+                     maximum=MAX_IMAGE_SIZE,
+                     step=32,
+                     value=1024,  # Replace with defaults that work for your model
+                 )
+
+                 height = gr.Slider(
+                     label="Height",
+                     minimum=256,
+                     maximum=MAX_IMAGE_SIZE,
+                     step=32,
+                     value=1024,  # Replace with defaults that work for your model
+                 )
+
+             with gr.Row():
+
+                 guidance_scale = gr.Slider(
+                     label="Guidance scale",
+                     minimum=0.0,
+                     maximum=10.0,
+                     step=0.1,
+                     value=0.0,  # Replace with defaults that work for your model
+                 )
+
+                 num_inference_steps = gr.Slider(
+                     label="Number of inference steps",
+                     minimum=1,
+                     maximum=50,
+                     step=1,
+                     value=2,  # Replace with defaults that work for your model
+                 )
+
+         gr.Examples(
+             examples=examples,
+             inputs=[prompt]
+         )
+     gr.on(
+         triggers=[run_button.click, prompt.submit],
+         fn=infer,
+         inputs=[prompt, negative_prompt, seed, randomize_seed, width, height, guidance_scale, num_inference_steps],
+         outputs=[result, seed]
+     )
+
+ demo.queue().launch()
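
Aside (not part of the commit): a minimal headless sketch that mirrors what infer() above does, useful as a smoke test without the Gradio UI. It assumes the stabilityai/sdxl-turbo weights can be downloaded; the 512x512 size, the fixed seed 42, and the output filename are illustrative choices, not values from the commit.

# Hedged sketch mirroring the template's infer() pipeline call.
import torch
from diffusers import DiffusionPipeline

device = "cuda" if torch.cuda.is_available() else "cpu"
dtype = torch.float16 if torch.cuda.is_available() else torch.float32

pipe = DiffusionPipeline.from_pretrained("stabilityai/sdxl-turbo", torch_dtype=dtype).to(device)

image = pipe(
    prompt="An astronaut riding a green horse",
    negative_prompt="",
    guidance_scale=0.0,        # template default, suited to sdxl-turbo
    num_inference_steps=2,     # template default step count
    width=512,
    height=512,
    generator=torch.Generator().manual_seed(42),  # fixed seed for a reproducible run
).images[0]
image.save("astronaut.png")    # arbitrary output path for the smoke test
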
requirements.txt ADDED
@@ -0,0 +1,6 @@
+ accelerate
+ diffusers
+ invisible_watermark
+ torch
+ transformers
+ xformers
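
Aside (not part of the commit): a quick, hedged way to confirm the dependencies above resolve in a local environment before launching the Space. invisible_watermark is skipped because its import name differs from the package name (it typically imports as imwatermark).

# Hedged sketch: check that the listed packages are importable locally.
import importlib

for module in ("accelerate", "diffusers", "torch", "transformers", "xformers"):
    try:
        importlib.import_module(module)
        print(f"{module}: ok")
    except Exception as err:  # xformers may fail to import on CPU-only setups
        print(f"{module}: unavailable ({err})")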