File size: 1,882 Bytes
94ee4bb
 
 
 
 
150f210
 
94ee4bb
 
 
5493baa
 
94ee4bb
 
 
 
 
 
 
5493baa
94ee4bb
 
97126d6
 
 
 
 
 
94ee4bb
 
c05b820
 
 
 
 
 
 
 
5493baa
c05b820
42e4838
94ee4bb
5493baa
94ee4bb
 
 
 
 
 
 
 
 
 
 
 
 
 
 
97126d6
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
import gradio as gr
#import torch
#from torch import autocast // only for GPU

from PIL import Image
import numpy as np
#from io import BytesIO
import os
MY_SECRET_TOKEN=os.environ.get('HF_TOKEN_SD')

#from diffusers import StableDiffusionPipeline
from diffusers import StableDiffusionImg2ImgPipeline

print("hello sylvain")

# Hugging Face auth token (read from the HF_TOKEN_SD env var above); needed to
# download the gated CompVis/stable-diffusion-v1-4 weights.
YOUR_TOKEN=MY_SECRET_TOKEN

# CPU-only inference — no CUDA device is requested anywhere in this script.
device="cpu"

# Load the img2img pipeline once at module import; inference on CPU is slow
# (~5-10 min per request, per the UI description below).
pipe = StableDiffusionImg2ImgPipeline.from_pretrained("CompVis/stable-diffusion-v1-4", use_auth_token=YOUR_TOKEN)
pipe.to(device)

# Upload widget for the source image. type="file" hands infer() a file object,
# which resize() then opens with PIL.
# NOTE(review): source=/type="file"/.style(grid=...) are Gradio 3.x-era API —
# confirm against the pinned gradio version before upgrading.
source_img = gr.Image(image_mode="RGB",
        source="upload",
        type="file",
        shape=None,
        invert_colors=False)

# Output gallery for the 2 generated (or NSFW-placeholder) images.
gallery = gr.Gallery(label="Generated images", show_label=False, elem_id="gallery").style(grid=[2], height="auto")

def resize(width, img):
  """Open *img* (a file path or file-like object), scale it to *width* px
  wide preserving the aspect ratio, and return the resized PIL image.

  Parameters:
    width: target width in pixels.
    img: anything Image.open accepts (path string or file object).
  """
  basewidth = width
  img = Image.open(img)
  # Scale height by the same factor as width to keep the aspect ratio.
  wpercent = basewidth / float(img.size[0])
  hsize = int(float(img.size[1]) * wpercent)
  # FIX: Image.ANTIALIAS was deprecated in Pillow 9.1 and removed in Pillow 10;
  # LANCZOS is the same filter under its current name and exists in all
  # modern Pillow releases, so this is backward-compatible.
  img = img.resize((basewidth, hsize), Image.LANCZOS)
  return img

def infer(prompt, init_image): 
    """Run img2img generation for *prompt*, seeded from *init_image*.

    Parameters:
      prompt: text prompt (duplicated to produce 2 samples).
      init_image: uploaded file object from the Gradio Image widget.

    Returns a list of 2 PIL images; any sample flagged by the pipeline's
    NSFW filter is replaced with the local placeholder "unsafe.png".
    """
    init_image = resize(512, init_image)
    # BUG FIX: Image.save() returns None, so the original
    # `init_image = init_image.save(...)` rebound init_image to None and the
    # pipeline received init_image=None. Save for inspection, keep the object.
    init_image.save("init_image.png")
    images_list = pipe([prompt] * 2, init_image=init_image, strength=0.75)
    images = []
    safe_image = Image.open(r"unsafe.png")
    # Swap in the placeholder wherever the safety checker fired.
    for i, image in enumerate(images_list["sample"]):
        if images_list["nsfw_content_detected"][i]:
            images.append(safe_image)
        else:
            images.append(image)
    return images

print("Great sylvain ! Everything is working fine !")

# UI copy shown at the top of the interface.
title="Stable Diffusion CPU"
description="Stable Diffusion example using CPU and HF token. <br />Warning: Slow process... ~5/10 min inference time. <b>NSFW filter enabled.</b>" 

# Wire the prompt textbox and the image upload widget into infer() and serve.
# Queuing is enabled because CPU inference runs far longer than a plain
# HTTP request would tolerate.
demo = gr.Interface(
    fn=infer,
    inputs=["text", source_img],
    outputs=gallery,
    title=title,
    description=description,
)
demo.launch(enable_queue=True)