Ricercar committed
Commit 5436b58
1 Parent(s): f32c8cf
Files changed (1)
  1. app.py +265 -0
app.py ADDED
@@ -0,0 +1,265 @@
+ import os
+
+ import gradio as gr
+ import numpy as np
+ import torch
+ import torchvision.transforms as T
+
+ from clip_interrogator import Config, Interrogator
+
+ from ditail import DitailDemo, seed_everything
+
+ BASE_MODEL = {
+     'sd1.5': 'runwayml/stable-diffusion-v1-5',
+     # 'sd1.5': './ditail/model/stable-diffusion-v1-5'
+     'realistic vision': 'stablediffusionapi/realistic-vision-v51',
+     'pastel mix (anime)': 'stablediffusionapi/pastel-mix-stylized-anime',
+     'chaos (abstract)': 'MAPS-research/Chaos3.0',
+ }
+
+ # LoRA trigger words, prepended to the positive prompt when the matching LoRA is selected
+ LORA_TRIGGER_WORD = {
+     'none': [],
+     'film': ['film overlay', 'film grain'],
+     'snow': ['snow'],
+     'flat': ['sdh', 'flat illustration'],
+     'minecraft': ['minecraft square style', 'cg, computer graphics'],
+     'animeoutline': ['lineart', 'monochrome'],
+     # 'caravaggio': ['oil painting', 'in the style of caravaggio'],
+     'impressionism': ['impressionist', 'in the style of Monet'],
+     'pop': ['POP ART'],
+     'shinkai_makoto': ['shinkai makoto', 'kimi no na wa.', 'tenki no ko', 'kotonoha no niwa'],
+ }
+
+
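+ # Gradio front end for the Diffusion Cocktail (Ditail) demo: it collects the content
+ # image, prompt, target base model and LoRA choice, then hands the merged arguments
+ # to DitailDemo.run_ditail().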
+ class WebApp():
+     def __init__(self, debug_mode=False):
+         # Default Ditail arguments; values chosen in the UI override these on submit.
+         self.args_base = {
+             "seed": 42,
+             "device": "cuda",
+             "output_dir": "output_demo",
+             "caption_model_name": "blip-large",
+             "clip_model_name": "ViT-L-14/openai",
+             "inv_model": "stablediffusionapi/realistic-vision-v51",
+             "spl_model": "runwayml/stable-diffusion-v1-5",
+             "inv_steps": 50,
+             "spl_steps": 50,
+             "img": None,
+             "pos_prompt": '',
+             "neg_prompt": 'worst quality, blurry, NSFW',
+             "alpha": 3.0,    # positive prompt scaling weight
+             "beta": 0.5,     # negative prompt scaling weight
+             "omega": 15,     # classifier-free guidance (cfg) scale
+             "mask": None,
+             "lora": "none",
+             "lora_dir": "./ditail/lora",
+             "lora_scale": 0.7,
+             "no_injection": False,
+         }
+
+         self.args_input = {}  # for gr.components only
+         self.gr_loras = list(LORA_TRIGGER_WORD.keys())
+
+         # Google Analytics: the tag id comes from the GTag environment variable
+         self.gtag = os.environ.get('GTag')
+
+         self.ga_script = f"""
+             <script async src="https://www.googletagmanager.com/gtag/js?id={self.gtag}"></script>
+         """
+         self.ga_load = f"""
+             function() {{
+                 window.dataLayer = window.dataLayer || [];
+                 function gtag(){{dataLayer.push(arguments);}}
+                 gtag('js', new Date());
+
+                 gtag('config', '{self.gtag}');
+             }}
+         """
+
+         self.debug_mode = debug_mode  # turn off the CLIP interrogator while debugging for faster startup
+         if not self.debug_mode:
+             self.init_interrogator()
+
+
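+     # The interrogator is only used to auto-caption uploaded images; loading it pulls
+     # the BLIP caption model and the CLIP model configured in args_base.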
+     def init_interrogator(self):
+         # init clip interrogator
+         config = Config()
+         config.clip_model_name = self.args_base['clip_model_name']
+         config.caption_model_name = self.args_base['caption_model_name']
+         self.ci = Interrogator(config)
+         self.ci.config.chunk_size = 2048 if self.ci.config.clip_model_name == "ViT-L-14/openai" else 1024
+         self.ci.config.flavor_intermediate_count = 2048 if self.ci.config.clip_model_name == "ViT-L-14/openai" else 1024
+
+     def title(self):
+         gr.HTML(
+             """
+             <div style="display: flex; justify-content: center; align-items: center; text-align: center;">
+                 <div>
+                     <h1>Diffusion Cocktail 🍸: Fused Generation from Diffusion Models</h1>
+                     <div style="display: flex; justify-content: center; align-items: center; text-align: center; margin: 20px; gap: 10px;">
+                         <a class="flex-item" href="https://arxiv.org/abs/your-arxiv-id" target="_blank">
+                             <img src="https://img.shields.io/badge/arXiv-paper-darkred.svg" alt="arXiv Paper">
+                         </a>
+                         <a class="flex-item" href="https://MAPS-research.github.io/Ditail" target="_blank">
+                             <img src="https://img.shields.io/badge/Project_Page-Diffusion_Cocktail-yellow.svg" alt="Project Page">
+                         </a>
+                         <a class="flex-item" href="https://github.com/MAPS-research/Ditail" target="_blank">
+                             <img src="https://img.shields.io/badge/Github-Code-green.svg" alt="GitHub Code">
+                         </a>
+                     </div>
+                 </div>
+             </div>
+             """
+         )
+
+
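+     # Input widgets: the content image and a prompt box. When "generate prompt with clip"
+     # is checked, the CLIP Interrogator fills the prompt automatically on image upload.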
+     def get_image(self):
+         self.args_input['img'] = gr.Image(label='content image', type='pil', show_share_button=False, elem_classes="input_image")
+
+     def get_prompts(self):
+         # with gr.Row():
+         generate_prompt = gr.Checkbox(label='generate prompt with clip', value=True)
+         self.args_input['pos_prompt'] = gr.Textbox(label='prompt')
+
+         # event listeners
+         self.args_input['img'].upload(self._interrogate_image, inputs=[self.args_input['img'], generate_prompt], outputs=[self.args_input['pos_prompt']])
+         generate_prompt.change(self._interrogate_image, inputs=[self.args_input['img'], generate_prompt], outputs=[self.args_input['pos_prompt']])
+
+     def _interrogate_image(self, image, generate_prompt):
+         # self.init_interrogator()
+         if hasattr(self, 'ci') and generate_prompt:
+             # keep only the first clause of the fast caption and drop the spurious BLIP token "arafed"
+             return self.ci.interrogate_fast(image).split(',')[0].replace('arafed', '')
+         else:
+             return ''
+
+
+     def get_base_model(self):
+         self.args_input['spl_model'] = gr.Radio(choices=list(BASE_MODEL.keys()), value=list(BASE_MODEL.keys())[0], label='target base model')
+
+     def get_lora(self, num_cols=3):
+         # the gallery itself is not passed to run_ditail, so the selected LoRA name is mirrored into a gr.State
+         self.args_input['lora'] = gr.State('none')
+         lora_gallery = gr.Gallery(label='target LoRA (optional)', columns=num_cols, value=[(os.path.join(self.args_base['lora_dir'], f"{lora}.jpeg"), lora) for lora in self.gr_loras], allow_preview=False, show_share_button=False, selected_index=0)
+         lora_gallery.select(self._update_lora_selection, inputs=[], outputs=[self.args_input['lora']])
+
+     def _update_lora_selection(self, selected_state: gr.SelectData):
+         return self.gr_loras[selected_state.index]
+
+     def get_params(self):
+         with gr.Row():
+             with gr.Column():
+                 self.args_input['inv_model'] = gr.Radio(choices=list(BASE_MODEL.keys()), value=list(BASE_MODEL.keys())[1], label='inversion base model')
+                 self.args_input['neg_prompt'] = gr.Textbox(label='negative prompt', value=self.args_base['neg_prompt'])
+                 # with gr.Row():
+                 self.args_input['alpha'] = gr.Number(label='positive prompt scaling weight (alpha)', value=self.args_base['alpha'], interactive=True)
+                 self.args_input['beta'] = gr.Number(label='negative prompt scaling weight (beta)', value=self.args_base['beta'], interactive=True)
+
+             with gr.Column():
+                 self.args_input['omega'] = gr.Slider(label='cfg', value=self.args_base['omega'], maximum=25, interactive=True)
+
+                 self.args_input['inv_steps'] = gr.Slider(minimum=1, maximum=100, label='edit steps', interactive=True, value=self.args_base['inv_steps'], step=1)
+                 self.args_input['spl_steps'] = gr.Slider(minimum=1, maximum=100, label='sample steps', interactive=False, value=self.args_base['spl_steps'], step=1, visible=False)
+                 # sync inv_steps with spl_steps
+                 self.args_input['inv_steps'].change(lambda x: x, inputs=self.args_input['inv_steps'], outputs=self.args_input['spl_steps'])
+
+                 self.args_input['lora_scale'] = gr.Slider(minimum=0, maximum=1, label='LoRA scale', value=0.7)
+                 self.args_input['seed'] = gr.Number(label='seed', value=self.args_base['seed'], interactive=True, precision=0, step=1)
+
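+     # run_ditail receives the component values positionally (see submit_btn.click in ui()),
+     # so the zip below relies on self.args_input preserving the registration order of the components.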
+     def run_ditail(self, *values):
+         self.args = self.args_base.copy()
+         print(self.args_input.keys())
+         for k, v in zip(list(self.args_input.keys()), values):
+             self.args[k] = v
+         # quick fix for the example row, where the LoRA value may arrive wrapped in gr.State instead of a plain string
+         self.args['lora'] = 'none' if not isinstance(self.args['lora'], str) else self.args['lora']
+         print('selected lora: ', self.args['lora'])
+         # prepend the LoRA trigger words, then map the model names to their repo ids
+         self.args['pos_prompt'] = ', '.join(LORA_TRIGGER_WORD.get(self.args['lora'], []) + [self.args['pos_prompt']])
+         self.args['inv_model'] = BASE_MODEL[self.args['inv_model']]
+         self.args['spl_model'] = BASE_MODEL[self.args['spl_model']]
+         print('selected model: ', self.args['inv_model'], self.args['spl_model'])
+
+         seed_everything(self.args['seed'])
+         ditail = DitailDemo(self.args)
+
+         metadata_to_show = ['inv_model', 'spl_model', 'lora', 'lora_scale', 'inv_steps', 'spl_steps', 'pos_prompt', 'alpha', 'neg_prompt', 'beta', 'omega']
+         self.args_to_show = {}
+         for key in metadata_to_show:
+             self.args_to_show[key] = self.args[key]
+
+         return ditail.run_ditail(), self.args_to_show
+         # return self.args['img'], self.args
+
+     def run_example(self, img, prompt, inv_model, spl_model, lora):
+         # reorder the example fields to match the registration order of self.args_input
+         # (img, pos_prompt, spl_model, lora, inv_model)
+         return self.run_ditail(img, prompt, spl_model, gr.State(lora), inv_model)
+
+     def show_credits(self):
+         # gr.Markdown(
+         #     """
+         #     ### About Diffusion Cocktail (Ditail)
+         #     * This is a research project by [MAPS Lab](https://whongyi.github.io/MAPS-research), [NYU Shanghai](https://shanghai.nyu.edu)
+         #     * Authors: Haoming Liu ([email protected]), Yuanhe Guo ([email protected]), Hongyi Wen ([email protected])
+         #     """
+         # )
+         gr.Markdown(
+             """
+             ### Model Credits
+             * Diffusion Models are downloaded from [huggingface](https://huggingface.co) and [civitai](https://civitai.com): [stable diffusion 1.5](https://huggingface.co/runwayml/stable-diffusion-v1-5), [realistic vision](https://huggingface.co/stablediffusionapi/realistic-vision-v51), [pastel mix](https://huggingface.co/stablediffusionapi/pastel-mix-stylized-anime), [chaos3.0](https://civitai.com/models/91534/chaos30)
+             * LoRA Models are downloaded from [civitai](https://civitai.com) and [liblib](https://www.liblib.art): [film](https://civitai.com/models/90393/japan-vibes-film-color), [snow](https://www.liblib.art/modelinfo/f732b23b02f041bdb7f8f3f8a256ca8b), [flat](https://www.liblib.art/modelinfo/76dcb8b59d814960b0244849f2747a15), [minecraft](https://civitai.com/models/113741/minecraft-square-style), [animeoutline](https://civitai.com/models/16014/anime-lineart-manga-like-style), [impressionism](https://civitai.com/models/113383/y5-impressionism-style), [pop](https://civitai.com/models/161450?modelVersionId=188417), [shinkai_makoto](https://civitai.com/models/10626?modelVersionId=12610)
+             """
+         )
+
+
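+     # Top-level Blocks layout: inputs and the LoRA gallery on top, advanced options in an
+     # accordion, then the output image alongside the resolved run arguments shown as JSON.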
+     def ui(self):
+         with gr.Blocks(css='.input_image img {object-fit: contain;}', head=self.ga_script) as demo:
+             self.title()
+             with gr.Row():
+                 # with gr.Column():
+                 self.get_image()
+
+                 with gr.Column():
+                     self.get_prompts()
+                     self.get_base_model()
+                     self.get_lora(num_cols=3)
+                     submit_btn = gr.Button("Generate", variant='primary')
+
+             with gr.Accordion("advanced options", open=False):
+                 self.get_params()
+
+             with gr.Row():
+                 output_image = gr.Image(label="output image")
+                 # expected_output_image = gr.Image(label="expected output image", visible=False)
+                 metadata = gr.JSON(label='metadata')
+
+             submit_btn.click(self.run_ditail,
+                 inputs=list(self.args_input.values()),
+                 outputs=[output_image, metadata],
+                 scroll_to_output=True,
+             )
+
+             with gr.Row():
+                 cache_examples = not self.debug_mode
+                 gr.Examples(
+                     examples=[[os.path.join(os.path.dirname(__file__), "example", "Lenna.png"), 'a woman called Lenna wearing a feathered hat', list(BASE_MODEL.keys())[1], list(BASE_MODEL.keys())[2], 'none']],
+                     inputs=[self.args_input['img'], self.args_input['pos_prompt'], self.args_input['inv_model'], self.args_input['spl_model'], gr.Textbox(label='LoRA', visible=False)],
+                     fn=self.run_example,
+                     outputs=[output_image, metadata],
+                     run_on_click=True,
+                     cache_examples=cache_examples,
+                 )
+
+             self.show_credits()
+
+             demo.load(None, js=self.ga_load)
+         return demo
+
+
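+ # Module-level `demo` so the file can be run directly with `python app.py`; hosting
+ # platforms such as Hugging Face Spaces typically pick up this `demo` object as well.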
+ app = WebApp(debug_mode=False)
+ demo = app.ui()
+
+
+ if __name__ == "__main__":
+     demo.launch(share=True)
+     # demo.launch()