Ruben committed on
Commit 6da9d0a
1 Parent(s): b7b124d
Files changed (8)
  1. Makefile +0 -24
  2. README copy.md +0 -10
  3. README.md +1 -1
  4. app.py +15 -41
  5. gradio-app.py +0 -42
  6. gradio-mock-app.py +0 -12
  7. launch_full_interface.ipynb +0 -107
  8. main.py +0 -3
Makefile DELETED
@@ -1,24 +0,0 @@
-install-node:
-	./install-node.sh
-
-build-client:
-	cd frontend && npm install && npm run build && rm -rf ../static && cp -r build/ ../static/
-
-build-dev:
-	cd frontend && npm install && NODE_ENV=development npm run build && rm -rf ../static 2>&1 && cp -rv build/ ../static/
-
-run-dev:
-	FLASK_DEBUG=development python3 app.py
-
-run-prod:
-	python3 app.py & python3 gradio-app.py
-
-run-mock:
-	python3 app.py & python3 gradio-mock-app.py
-
-stop-server:
-	killall python
-
-all: run-prod
-
-mock: run-mock
README copy.md DELETED
@@ -1,10 +0,0 @@
----
-title: Drawing2Map
-emoji: 🌍
-colorFrom: red
-colorTo: red
-sdk: docker
-pinned: false
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
README.md CHANGED
@@ -1,5 +1,5 @@
 ---
-title: Drawing2Map
+title: Drawing2Map-api
 emoji: 🎨
 colorFrom: blue
 colorTo: green
app.py CHANGED
@@ -4,36 +4,16 @@ import requests
 from gradio_client import Client
 import base64
 
-from PIL import Image
-from io import BytesIO
-import base64
-import os
-
-from diffusers import StableDiffusionControlNetPipeline, ControlNetModel, UniPCMultistepScheduler
-from diffusers.utils import load_image
-import torch
-
-import gradio as gr
-
-controlnet = ControlNetModel.from_pretrained("rgres/sd-controlnet-aerialdreams", torch_dtype=torch.float16)
-pipe = StableDiffusionControlNetPipeline.from_pretrained(
-    "stabilityai/stable-diffusion-2-1-base", controlnet=controlnet, torch_dtype=torch.float16
-)
 
-pipe = pipe.to("cuda")
-
-# CPU offloading for faster inference times
-pipe.scheduler = UniPCMultistepScheduler.from_config(pipe.scheduler.config)
-pipe.enable_model_cpu_offload()
+base_gradio_url = os.getenv('URL_GRADIO', 'http://localhost:7860')
+client = None
 
 app = Flask(__name__, static_url_path='/static')
 
-
 @app.route('/')
 def index():
     return app.send_static_file('index.html')
 
-
 def save_base64_image(base64Image):
     image_data = base64.b64decode(base64Image)
     path = "input_image.jpg"
@@ -41,28 +21,17 @@ def save_base64_image(base64Image):
         f.write(image_data)
     return path
 
-
 def encode_image_to_base64(filepath):
     with open(filepath, "rb") as image_file:
        encoded_image = base64.b64encode(image_file.read()).decode("utf-8")
     return encoded_image
 
-
-def generate_map(image, prompt, steps, seed):
-    #image = Image.open(BytesIO(base64.b64decode(image_base64)))
-    generator = torch.manual_seed(seed)
-
-    image = pipe(
-        prompt=prompt,
-        num_inference_steps=steps,
-        image=image
-    ).images[0]
-
-    return image
-
-
 @app.route('/predict', methods=['POST'])
 def predict():
+    global client
+    if not client:
+        client = Client(base_gradio_url)
+
     data = request.get_json()
 
     base64Image = data['data'][0]
@@ -71,11 +40,16 @@ def predict():
     seed = data['data'][3]
 
     b64meta, b64_data = base64Image.split(',')
-    image = Image.open(BytesIO(base64.b64decode(b64_data)))
-
-    return generate_map(image, prompt, steps, seed)
+
+    image_path = save_base64_image(b64_data)
+
+    result = client.predict(
+        image_path, prompt, steps, seed, fn_index=0
+    )
+
+    return b64meta + ',' + encode_image_to_base64(result)
 
 
 if __name__ == '__main__':
     app.run(host='0.0.0.0', port=int(
-        os.environ.get('D2M_PORT', 8000)), debug=True)
+        os.environ.get('PORT', 8000)), debug=True)
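
After this change, `app.py` no longer runs the ControlNet pipeline itself: `/predict` saves the incoming drawing, forwards it to a Gradio backend through `gradio_client`, and returns the result as a data URI. Below is a minimal sketch of exercising the new endpoint, assuming the Flask app runs locally on the default port 8000; the file names, prompt, steps, and seed are placeholders, and only the payload shape (`{"data": [image, prompt, steps, seed]}`) comes from the handler above.

```python
import base64
import requests

# Assumed local deployment; the PORT env var in app.py defaults to 8000.
url = "http://localhost:8000/predict"

# "drawing.png" is a placeholder input file; the handler expects a data URI.
with open("drawing.png", "rb") as f:
    data_uri = "data:image/png;base64," + base64.b64encode(f.read()).decode("utf-8")

# Payload shape mirrors predict(): [image, prompt, steps, seed].
payload = {"data": [data_uri, "aerial view of a small village", 20, 42]}
resp = requests.post(url, json=payload)

# The endpoint returns "<metadata>,<base64 image>"; strip the prefix and decode.
_, b64_image = resp.text.split(",", 1)
with open("generated_map.png", "wb") as f:
    f.write(base64.b64decode(b64_image))
```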
gradio-app.py DELETED
@@ -1,42 +0,0 @@
1
- from PIL import Image
2
- from io import BytesIO
3
- import base64
4
- import os
5
-
6
- from diffusers import StableDiffusionControlNetPipeline, ControlNetModel, UniPCMultistepScheduler
7
- from diffusers.utils import load_image
8
- import torch
9
-
10
- import gradio as gr
11
-
12
- controlnet = ControlNetModel.from_pretrained("rgres/sd-controlnet-aerialdreams", torch_dtype=torch.float16)
13
- pipe = StableDiffusionControlNetPipeline.from_pretrained(
14
- "stabilityai/stable-diffusion-2-1-base", controlnet=controlnet, torch_dtype=torch.float16
15
- )
16
-
17
- pipe = pipe.to("cuda")
18
-
19
- # CPU offloading for faster inference times
20
- pipe.scheduler = UniPCMultistepScheduler.from_config(pipe.scheduler.config)
21
- pipe.enable_model_cpu_offload()
22
-
23
- def generate_map(image, prompt, steps, seed):
24
- #image = Image.open(BytesIO(base64.b64decode(image_base64)))
25
- generator = torch.manual_seed(seed)
26
-
27
- image = Image.fromarray(image)
28
-
29
- image = pipe(
30
- prompt=prompt,
31
- num_inference_steps=steps,
32
- image=image
33
- ).images[0]
34
-
35
- return image
36
-
37
- demo = gr.Interface(
38
- fn=generate_map,
39
- inputs=["image", "text", gr.Slider(0,100), "number"],
40
- outputs="image")
41
-
42
- demo.launch(server_port=int(os.getenv('GRADIO_PORT', '7860')))
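
The deleted `gradio-app.py` (the GPU-bound diffusers/ControlNet service) is now expected to run as a separate Gradio app; the Flask side only needs its address via the `URL_GRADIO` environment variable. A minimal sketch of talking to such a backend directly with `gradio_client`, assuming it is reachable at `http://localhost:7860` and that `input_image.jpg` exists locally; the prompt, steps, and seed are placeholder values, while `fn_index=0` matches the call in the updated `app.py`.

```python
import os
from gradio_client import Client

# Assumed backend location; app.py reads the same variable and falls back
# to http://localhost:7860 when it is unset.
backend_url = os.getenv("URL_GRADIO", "http://localhost:7860")
client = Client(backend_url)

result = client.predict(
    "input_image.jpg",          # path to the drawn input image (placeholder file)
    "aerial view of farmland",  # prompt (placeholder)
    20,                         # inference steps (placeholder)
    42,                         # seed (placeholder)
    fn_index=0,                 # first endpoint of the backend Interface
)
print(result)  # local path of the generated image returned by gradio_client
```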
gradio-mock-app.py DELETED
@@ -1,12 +0,0 @@
-from PIL import Image
-from io import BytesIO
-import base64
-import gradio as gr
-
-def generate_map(image, prompt, steps, seed):
-    return image
-
-with gr.Blocks() as demo:
-    button = gr.Button(label="Generate Image")
-    button.click(fn=generate_map, inputs=[gr.Image(), gr.Textbox(), gr.Number(), gr.Number()], outputs=gr.Image())
-demo.launch()
launch_full_interface.ipynb DELETED
@@ -1,107 +0,0 @@
1
- {
2
- "cells": [
3
- {
4
- "attachments": {},
5
- "cell_type": "markdown",
6
- "metadata": {
7
- "colab_type": "text",
8
- "id": "view-in-github"
9
- },
10
- "source": [
11
- "<a href=\"https://colab.research.google.com/github/RubenGres/Drawing2Map-hfspace/blob/main/Launch_interface.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>"
12
- ]
13
- },
14
- {
15
- "cell_type": "code",
16
- "execution_count": null,
17
- "metadata": {
18
- "id": "hsn_87UXrUn5"
19
- },
20
- "outputs": [],
21
- "source": [
22
- "!git clone https://github.com/RubenGres/Drawing2Map-hfspace.git\n",
23
- "%cd Drawing2Map-hfspace/ui"
24
- ]
25
- },
26
- {
27
- "cell_type": "code",
28
- "execution_count": null,
29
- "metadata": {
30
- "id": "uVUasUqwr_Br"
31
- },
32
- "outputs": [],
33
- "source": [
34
- "!pip install -r requirements.txt\n",
35
- "!npm install -g localtunnel"
36
- ]
37
- },
38
- {
39
- "cell_type": "code",
40
- "execution_count": null,
41
- "metadata": {
42
- "id": "9OIMlcM9OQWk"
43
- },
44
- "outputs": [],
45
- "source": [
46
- "!export URL_GRADIO='http://localhost:7860'"
47
- ]
48
- },
49
- {
50
- "cell_type": "code",
51
- "execution_count": null,
52
- "metadata": {
53
- "id": "yhLLdro5s-rf"
54
- },
55
- "outputs": [],
56
- "source": [
57
- "!echo \" \"\n",
58
- "!echo \"Click the next link and when prompted enter:\"\n",
59
- "!curl ipv4.icanhazip.com\n",
60
- "!echo \" \"\n",
61
- "!lt --port 8000 & make run-prod"
62
- ]
63
- },
64
- {
65
- "cell_type": "code",
66
- "execution_count": null,
67
- "metadata": {
68
- "colab": {
69
- "base_uri": "https://localhost:8080/"
70
- },
71
- "id": "G2USPEr_4YOm",
72
- "outputId": "36de5b2b-0cf7-410b-a225-2869138e5523"
73
- },
74
- "outputs": [
75
- {
76
- "name": "stdout",
77
- "output_type": "stream",
78
- "text": [
79
- "killall python\n",
80
- "python: no process found\n",
81
- "make: *** [Makefile:20: stop-server] Error 1\n"
82
- ]
83
- }
84
- ],
85
- "source": [
86
- "!make stop-server"
87
- ]
88
- }
89
- ],
90
- "metadata": {
91
- "accelerator": "GPU",
92
- "colab": {
93
- "include_colab_link": true,
94
- "provenance": []
95
- },
96
- "gpuClass": "standard",
97
- "kernelspec": {
98
- "display_name": "Python 3",
99
- "name": "python3"
100
- },
101
- "language_info": {
102
- "name": "python"
103
- }
104
- },
105
- "nbformat": 4,
106
- "nbformat_minor": 0
107
- }
main.py DELETED
@@ -1,3 +0,0 @@
1
- import subprocess
2
-
3
- subprocess.run(["make", "run-prod"], shell=False)