nicolaleo committed on
Commit
e7a5dcc
1 Parent(s): 8a54bce

Upload app.py

Browse files
Files changed (1) hide show
  1. app.py +94 -0
app.py ADDED
@@ -0,0 +1,94 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
import os
import io
import base64

from PIL import Image
from dotenv import load_dotenv, find_dotenv

# HTTP client + JSON for the Inference API calls, Gradio for the web UI.
import requests, json
import gradio as gr

# Pull variables from a local .env file so HF_API_KEY is in the environment.
_ = load_dotenv(find_dotenv())
hf_api_key = os.environ['HF_API_KEY']
13
+
def base64_to_pil(img_base64):
    """Decode a base64-encoded image string into a PIL Image.

    The endpoint used by `generate` returns the picture as a base64
    string, so it must be decoded before Gradio can display it.
    """
    raw_bytes = base64.b64decode(img_base64)
    return Image.open(io.BytesIO(raw_bytes))
21
+
22
def generate(prompt, negative_prompt, steps, guidance, width, height):
    """Request an image from the Stable Diffusion endpoint and return it as PIL.

    Packs the advanced options into the request's `parameters` payload,
    sends it via `get_completion`, and decodes the base64 answer with
    `base64_to_pil`.
    """
    endpoint = "https://api-inference.huggingface.co/models/runwayml/stable-diffusion-v1-5"
    options = {
        "negative_prompt": negative_prompt,
        "num_inference_steps": steps,
        "guidance_scale": guidance,
        "width": width,
        "height": height,
    }
    encoded_image = get_completion(prompt, options, endpoint)
    return base64_to_pil(encoded_image)
35
+
36
# Text-to-image endpoint
def get_completion(inputs, parameters=None, end_point_url=None):
    """POST a text-to-image request to a Hugging Face Inference API endpoint.

    Parameters
    ----------
    inputs : str
        Text prompt sent as the API's `inputs` field.
    parameters : dict, optional
        Extra generation options (negative prompt, steps, ...) forwarded
        under the API's `parameters` key when provided.
    end_point_url : str
        Full URL of the model's inference endpoint.

    Returns
    -------
    The JSON-decoded response body (this endpoint is expected to answer
    with the image as a base64 string — see `generate`).

    Raises
    ------
    requests.HTTPError
        If the endpoint answers with an error status.
    requests.Timeout
        If the endpoint does not answer within the timeout.
    """
    headers = {
        "Authorization": f"Bearer {hf_api_key}",
        "Content-Type": "application/json"
    }
    data = {"inputs": inputs}
    if parameters is not None:
        data["parameters"] = parameters
    # timeout so a stuck endpoint cannot hang the UI forever
    response = requests.post(end_point_url,
                             headers=headers,
                             data=json.dumps(data),
                             timeout=120)
    print("response:", response)
    print("response.content:", response.content)
    # Fail loudly on HTTP errors instead of crashing later inside
    # json.loads with a confusing message about the error body.
    response.raise_for_status()

    return json.loads(response.content.decode("utf-8"))
53
+
54
+
55
# Endpoint + auth header used by `my_generate`, the callback the Submit
# button is actually wired to.
API_URL = "https://api-inference.huggingface.co/models/runwayml/stable-diffusion-v1-5"
headers = {"Authorization": f"Bearer {hf_api_key}"}
57
+
58
+
59
# NOTE: the course's `generate` pointed at a different endpoint and failed
# here; this call mirrors the site's "Use this model with the Inference API"
# snippet (tried out in simple_api_call.py).
def my_generate(payload, negative_prompt, steps, guidance, width, height):
    """Generate an image via the HF Inference API and return it as a PIL Image.

    Bug fix: the advanced options (negative prompt, steps, guidance, width,
    height) were accepted but silently dropped, so the UI sliders had no
    effect. They are now forwarded under the API's `parameters` key as
    documented for text-to-image tasks.
    """
    request_body = {
        "inputs": payload,
        "parameters": {
            "negative_prompt": negative_prompt,
            "num_inference_steps": steps,
            "guidance_scale": guidance,
            "width": width,
            "height": height,
        },
    }
    # timeout so a stuck endpoint cannot hang the UI forever
    response = requests.post(API_URL, headers=headers, json=request_body,
                             timeout=120)
    # The serverless text-to-image endpoint answers with raw image bytes.
    return Image.open(io.BytesIO(response.content))
68
+
69
+
70
+
71
with gr.Blocks() as demo:
    gr.Markdown("# Image Generation with Stable Diffusion")

    # Prompt row: wide textbox beside a narrow submit button.
    with gr.Row():
        with gr.Column(scale=4):
            prompt = gr.Textbox(label="Your prompt")
        with gr.Column(scale=1, min_width=50):
            btn = gr.Button("Submit")

    # Generation options, collapsed by default.
    with gr.Accordion("Advanced options", open=False):
        negative_prompt = gr.Textbox(label="Negative prompt")
        with gr.Row():
            with gr.Column():
                steps = gr.Slider(
                    label="Inference Steps", minimum=1, maximum=100, value=25,
                    info="In many steps will the denoiser denoise the image?")
                guidance = gr.Slider(
                    label="Guidance Scale", minimum=1, maximum=20, value=7,
                    info="Controls how much the text prompt influences the result")
            with gr.Column():
                width = gr.Slider(label="Width", minimum=64, maximum=512, step=64, value=512)
                height = gr.Slider(label="Height", minimum=64, maximum=512, step=64, value=512)

    output = gr.Image(label="Result")

    btn.click(fn=my_generate,
              inputs=[prompt, negative_prompt, steps, guidance, width, height],
              outputs=[output])


demo.launch(share=False, server_port=8081)