# Thanks to AK: https://huggingface.co/spaces/akhaliq/cool-japan-diffusion-2-1-0/blob/main/app.py
from diffusers import StableDiffusionPipeline, StableDiffusionImg2ImgPipeline, EulerAncestralDiscreteScheduler, StableDiffusionLatentUpscalePipeline
from transformers import CLIPFeatureExtractor
import gradio as gr
import torch
from PIL import Image
import random

model_id = 'aipicasso/cool-japan-diffusion-2-1-2-beta'

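# Load the Euler Ancestral sampler and the CLIP feature extractor from the model repository.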
scheduler = EulerAncestralDiscreteScheduler.from_pretrained(model_id, subfolder="scheduler")
feature_extractor = CLIPFeatureExtractor.from_pretrained(model_id)

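# Text-to-image pipeline; fp16 weights on GPU, fp32 on CPU. xformers memory-efficient attention reduces VRAM use.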
pipe = StableDiffusionPipeline.from_pretrained(
  model_id,
  torch_dtype=torch.float16 if torch.cuda.is_available() else torch.float32,
  scheduler=scheduler)
pipe.enable_xformers_memory_efficient_attention()

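# Image-to-image pipeline sharing the same scheduler; the safety checker is disabled here, as in the original Space.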
pipe_i2i = StableDiffusionImg2ImgPipeline.from_pretrained(
  model_id,
  torch_dtype=torch.float16 if torch.cuda.is_available() else torch.float32,
  scheduler=scheduler,
  requires_safety_checker=False,
  safety_checker=None,
  feature_extractor=feature_extractor
)
pipe_i2i.enable_xformers_memory_efficient_attention()

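# Stability AI's 2x latent upscaler, used when the "Superreso." size option is selected.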
upscaler = StableDiffusionLatentUpscalePipeline.from_pretrained("stabilityai/sd-x2-latent-upscaler", torch_dtype=torch.float16 if torch.cuda.is_available() else torch.float32)
upscaler.enable_xformers_memory_efficient_attention()

if torch.cuda.is_available():
  pipe = pipe.to("cuda")
  pipe_i2i = pipe_i2i.to("cuda")
  upscaler = upscaler.to("cuda")

def error_str(error, title="Error"):
    return f"""#### {title}
            {error}"""  if error else ""


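# Gradio callback: applies auto prompt correction, resolves the output size, and runs either
# text-to-image or image-to-image depending on whether an input image was provided.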
def inference(prompt, guidance, steps, image_size="Square", seed=0, img=None, strength=0.5, neg_prompt="", cool_japan_type="Anime", disable_auto_prompt_correction=False):

  device = "cuda" if torch.cuda.is_available() else "cpu"
  generator = torch.Generator(device).manual_seed(seed) if seed != 0 else None

  prompt,neg_prompt=auto_prompt_correction(prompt,neg_prompt,cool_japan_type,disable_auto_prompt_correction)

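  # Map the UI size choice to the generation resolution; "Superreso." renders 1024x1024 latents
  # and then passes them through the 2x latent upscaler.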
  size_map = {
      "Portrait": (1024, 768, False),
      "Landscape": (768, 1024, False),
      "Highreso.": (1024, 1024, False),
      "Superreso.": (1024, 1024, True),
  }
  height, width, superreso = size_map.get(image_size, (768, 768, False))

  print(prompt,neg_prompt)
    
  try:
    if img is not None:
      return img_to_img(prompt, neg_prompt, img, strength, guidance, steps, width, height, generator,superreso), None
    else:
      return txt_to_img(prompt, neg_prompt, guidance, steps, width, height, generator,superreso), None
  except Exception as e:
    return None, error_str(e)

def auto_prompt_correction(prompt_ui, neg_prompt_ui, cool_japan_type_ui, disable_auto_prompt_correction):
    # Auto prompt correction: expand short tag-style prompts into fuller prompts with quality tags and a matching negative prompt.
    cool_japan_type=str(cool_japan_type_ui)
    if(cool_japan_type=="Manga"):
        cool_japan_type="manga, monochrome, white and black manga"
    elif(cool_japan_type=="Game"):
        cool_japan_type="game"
    else:
        cool_japan_type="anime"
        
    prompt=str(prompt_ui)
    neg_prompt=str(neg_prompt_ui)
    prompt=prompt.lower()
    neg_prompt=neg_prompt.lower()
    if(disable_auto_prompt_correction):
        prompt=f"{cool_japan_type}, {prompt}"
        return prompt, neg_prompt

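    # With no prompt at all, fall back to a default character prompt and negative prompt.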
    if(prompt=="" and neg_prompt==""):
        #prefix=["masterpiece","evangelion, mika pikazo", "konosuba, mika pikazo","steins; gate, ilya kuvshinov",
        #        "ghibli, shinkai makoto", "evangelion, madoka magica"]
        #suffix=["","ayanami rei, asuka langrey", "megumin, aqua from konosuba","mayuri shiina from steins gate, kurisu makise steins gate anime",
        #        "hakurei reimu","kirisame marisa", "kaname madoka, megumin"]
        #prefix_index=random.randrange(len(prefix))
        #suffix_index=random.randrange(len(suffix))
        #prompt=f"{cool_japan_type}, {prefix[prefix_index]}, portrait, a good girl, {suffix[suffix_index]}, good pupil, 4k, detailed"
        prompt=f"{cool_japan_type}, masterpiece, upper body, a girl, good pupil, 4k, detailed"
        neg_prompt=f"(((deformed))), blurry, ((((bad anatomy)))), bad pupil, disfigured, poorly drawn face, mutation, mutated, (extra limb), (ugly), (poorly drawn hands), bad hands, fused fingers, messy drawing, broken legs censor, low quality, (mutated hands and fingers:1.5), (long body :1.3), (mutation, poorly drawn :1.2), ((bad eyes)), ui, error, missing fingers, fused fingers, one hand with more than 5 fingers, one hand with less than 5 fingers, one hand with more than 5 digit, one hand with less than 5 digit, extra digit, fewer digits, fused digit, missing digit, bad digit, liquid digit, long body, uncoordinated body, unnatural body, lowres, jpeg artifacts, 3d, cg, text"
        return prompt, neg_prompt
        
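    # Normalize booru-style tags (solo, 1girl, 2girls, 2boys) before keyword matching.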
    splited_prompt=prompt.replace(","," ").replace("_"," ").split(" ")
    splited_prompt=["a person" if p=="solo" else p for p in splited_prompt]
    splited_prompt=["girl" if p=="1girl" else p for p in splited_prompt]
    splited_prompt=["a couple of girls" if p=="2girls" else p for p in splited_prompt]
    splited_prompt=["a couple of boys" if p=="2boys" else p for p in splited_prompt]
    human_words=["girl","maid","maids","female","woman","girls","a couple of girls","women","boy","boys","a couple of boys","male","man","men","guy","guys"]
    for word in human_words:
        if( word in splited_prompt):
            prompt=f"{cool_japan_type}, masterpiece, {prompt}, good pupil, 4k, detailed"
            neg_prompt=f"(((deformed))), blurry, ((((bad anatomy)))), {neg_prompt}, bad pupil, disfigured, poorly drawn face, mutation, mutated, (extra limb), (ugly), (poorly drawn hands), bad hands, fused fingers, messy drawing, broken legs censor, low quality, (mutated hands and fingers:1.5), (long body :1.3), (mutation, poorly drawn :1.2), ((bad eyes)), ui, error, missing fingers, fused fingers, one hand with more than 5 fingers, one hand with less than 5 fingers, one hand with more than 5 digit, one hand with less than 5 digit, extra digit, fewer digits, fused digit, missing digit, bad digit, liquid digit, long body, uncoordinated body, unnatural body, lowres, jpeg artifacts, 3d, cg, text"

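    # Animal keywords replace the prompt with a simple subject-focused prompt.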
    animal_words=["cat","dog","bird"]
    for word in animal_words:
        if( word in splited_prompt):
            prompt=f"{cool_japan_type}, a {word}, 4k, detailed"
            neg_prompt=f"(((deformed))), blurry, ((((bad anatomy)))), {neg_prompt}, bad pupil, disfigured, poorly drawn face, mutation, mutated, (extra limb), (ugly), (poorly drawn hands), bad hands, fused fingers, messy drawing, broken legs censor, low quality, (mutated hands and fingers:1.5), (long body :1.3), (mutation, poorly drawn :1.2), ((bad eyes)), ui, error, missing fingers, fused fingers, one hand with more than 5 fingers, one hand with less than 5 fingers, one hand with more than 5 digit, one hand with less than 5 digit, extra digit, fewer digits, fused digit, missing digit, bad digit, liquid digit, long body, uncoordinated body, unnatural body, lowres, jpeg artifacts, 3d, cg, text"

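    # Scenery keywords switch to a Shinkai Makoto style landscape prompt and exclude people via the negative prompt.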
    background_words=["mount fuji","mt. fuji","building", "buildings", "tokyo", "kyoto", "nara", "shibuya", "shinjuku"]
    for word in background_words:
        if( word in splited_prompt):
            prompt=f"{cool_japan_type}, shinkai makoto, {word}, 4k, 8k, highly detailed"
            neg_prompt=f"(((deformed))), {neg_prompt}, girl, boy, photo, people, low quality, ui, error, lowres, jpeg artifacts, 2d, 3d, cg, text"

    return prompt,neg_prompt
    
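# Text-to-image: either render directly, or (for "Superreso.") produce latents and feed them to the latent upscaler.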
def txt_to_img(prompt, neg_prompt, guidance, steps, width, height, generator,superreso=False):
    if(superreso):
      low_res_latents = pipe(
        prompt,
        negative_prompt = neg_prompt,
        num_inference_steps = int(steps),
        guidance_scale = guidance,
        width = width,
        height = height,
        output_type="latent",
        generator = generator).images

      result = upscaler(
        prompt=prompt,
        negative_prompt = neg_prompt,
        image=low_res_latents,
        num_inference_steps=20,
        guidance_scale=guidance,
        generator=generator,
      )
    else:  
      result = pipe(
        prompt,
        negative_prompt = neg_prompt,
        num_inference_steps = int(steps),
        guidance_scale = guidance,
        width = width,
        height = height,
        generator = generator)
    
    return result.images[0]

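# Image-to-image: resize the input to fit the requested size, then follow the same optional upscaling path.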
def img_to_img(prompt, neg_prompt, img, strength, guidance, steps, width, height, generator,superreso=False):
    ratio = min(height / img.height, width / img.width)
    img = img.resize((int(img.width * ratio), int(img.height * ratio)), Image.LANCZOS)
    if(superreso):
      low_res_latents = pipe_i2i(
        prompt,
        negative_prompt = neg_prompt,
        init_image = img,
        num_inference_steps = int(steps),
        strength = strength,
        guidance_scale = guidance,
        #width = width,
        #height = height,
        output_type="latent",
        generator = generator).images

      result = upscaler(
        prompt=prompt,
        negative_prompt = neg_prompt,
        image=low_res_latents,
        num_inference_steps=20,
        guidance_scale=guidance,
        generator=generator,
      )
    else:  
      result = pipe_i2i(
        prompt,
        negative_prompt = neg_prompt,
        init_image = img,
        num_inference_steps = int(steps),
        strength = strength,
        guidance_scale = guidance,
        #width = width,
        #height = height,
        generator = generator)

        
    return result.images[0]

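# Gradio Blocks UI: prompt and output image on the left, generation options and an image-to-image tab on the right.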
css = """.main-div div{display:inline-flex;align-items:center;gap:.8rem;font-size:1.75rem}.main-div div h1{font-weight:900;margin-bottom:7px}.main-div p{margin-bottom:10px;font-size:94%}a{text-decoration:underline}.tabs{margin-top:0;margin-bottom:0}#gallery{min-height:20rem}
"""
with gr.Blocks(css=css) as demo:
    gr.HTML(
        f"""
            <div class="main-div">
              <div>
                <h1>Cool Japan Diffusion 2.1.2 Beta</h1>
              </div>
              <p>
               Demo for <a href="https://huggingface.co/aipicasso/cool-japan-diffusion-2-1-2-beta">Cool Japan Diffusion 2.1.2 Beta</a>.<br>
              </p>
              <p>
              Sample: click the "Generate" button without entering any prompt.
              </p>
              <p>
              Sample prompt 1: girl, kimono
              </p>
              <p>
              Sample prompt 2: boy, school uniform
              </p>
              Running on {"<b>GPU 🔥</b>" if torch.cuda.is_available() else f"<b>CPU 🥶</b>. For faster inference it is recommended to <b>upgrade to GPU in <a href='https://huggingface.co/spaces/akhaliq/cool-japan-diffusion-2-1-0/settings'>Settings</a></b>"} <br>
              <a style="display:inline-block" href="https://huggingface.co/spaces/aipicasso/cool-japan-diffusion-latest-demo?duplicate=true"><img src="https://bit.ly/3gLdBN6" alt="Duplicate Space"></a> to skip the generation queue.
            </div>
        """
    )
    with gr.Row():
        
        with gr.Column(scale=55):
          with gr.Group():
              with gr.Row():
                cool_japan_type = gr.Radio(["Anime", "Manga", "Game"], value="Anime", show_label=False)
                
              with gr.Row():
                prompt = gr.Textbox(label="Prompt", show_label=False, max_lines=2,placeholder="[your prompt]").style(container=False)
                generate = gr.Button(value="Generate").style(rounded=(False, True, True, False))

              image_out = gr.Image(height=768,width=768)
          error_output = gr.Markdown()

        with gr.Column(scale=45):
          with gr.Tab("Options"):
            with gr.Group():
              neg_prompt = gr.Textbox(label="Negative prompt", placeholder="What to exclude from the image")
              disable_auto_prompt_correction = gr.Checkbox(label="Disable auto prompt correction.")
              with gr.Row():
                image_size = gr.Radio(["Portrait", "Landscape", "Square", "Highreso.", "Superreso."], value="Square", show_label=False)
                
              with gr.Row():
                guidance = gr.Slider(label="Guidance scale", value=7.5, maximum=15)
                steps = gr.Slider(label="Steps", value=20, minimum=2, maximum=75, step=1)

              seed = gr.Slider(0, 2147483647, label='Seed (0 = random)', value=0, step=1)

          with gr.Tab("Image to image"):
              with gr.Group():
                image = gr.Image(label="Image", height=256, tool="editor", type="pil")
                strength = gr.Slider(label="Transformation strength", minimum=0, maximum=1, step=0.01, value=0.5)
                  
    inputs = [prompt, guidance, steps, image_size, seed, image, strength, neg_prompt, cool_japan_type, disable_auto_prompt_correction]

    outputs = [image_out, error_output]
    prompt.submit(inference, inputs=inputs, outputs=outputs)
    generate.click(inference, inputs=inputs, outputs=outputs,api_name="generate")

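# Serve one request at a time and launch the demo.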
demo.queue(concurrency_count=1)
demo.launch()