ldhldh's picture
Update app.py
7a95186
raw
history blame contribute delete
No virus
2.75 kB
from threading import Thread
import gradio as gr
from gradio_client import Client as GrClient
import inspect
from gradio import routes
from typing import List, Type
import requests, os, re, asyncio, queue
import math
import time
import datetime
import requests, json
# Grab the current asyncio event loop (NOTE(review): `loop` appears unused in this file).
loop = asyncio.get_event_loop()
# Client for the remote Gradio backend; the target URL comes from the GrClient_url env var.
gradio_client = GrClient(os.environ.get('GrClient_url'), serialize=False)
# Monkey patch
def get_types(cls_set: List[Type], component: str):
    """Extract description text and type names from gradio component docstrings.

    Inputs document their payload on the second docstring line; outputs on the
    last line.  Each relevant line looks like ``name (type): description``.

    Returns a pair of parallel lists: (descriptions, type names).
    """
    descriptions = []
    type_names = []
    # Pick which docstring line carries the payload information.
    line_index = 1 if component == "input" else -1
    for cls in cls_set:
        doc_line = inspect.getdoc(cls).split("\n")[line_index]
        # Text after the last colon is the human-readable description.
        descriptions.append(doc_line.split(":")[-1])
        # Text between the innermost parentheses is the type name.
        type_names.append(doc_line.split(")")[0].split("(")[-1])
    return descriptions, type_names
# Install the patched docstring parser over gradio's route type introspection.
routes.get_types = get_types
# Shared work queue between the HTTP handler (res) and the background worker (chat).
q = queue.Queue()
def chat():
    """Daemon worker: take queued requests, call the remote Gradio model, and
    POST the reply to the request's callback URL.

    Queue items are lists ``[x, id, cdata, callback_url]`` as produced by
    ``res()``.  Runs forever; intended to be started as a daemon thread.
    """
    global q
    while True:
        # Blocking get() replaces the original sleep(1)/q.empty() polling loop:
        # the thread simply sleeps until work arrives.
        arr = q.get()
        try:
            start = time.time()
            print("응닡 μ‹œμž‘\nx:"+ arr[0] +"\nid:"+ arr[1] +"\ncdata:" + arr[2] + "\ncollback_url : " + arr[3])
            result = gradio_client.predict(
                arr[0],
                # str representing input in 'User input' Textbox component
                arr[1],
                arr[2],
                fn_index=0
            )
            end = time.time()
            sec = (end - start)
            result_list = str(datetime.timedelta(seconds=sec)).split(".")
            print("응닡 μ‹œκ°„ : " + result_list[0] +"\nresult:"+ result)
            # Strip the "상담원" speaker prefix.  maxsplit=1 fixes a bug in the
            # original (split(":")[1]) that truncated any reply containing a
            # second colon.
            if result.split(":")[0] == "상담원":
                result = result.split(":", 1)[1]
            headers = {'Content-Type': 'application/json'}
            body = {
                "url" : arr[3],
                "data" : result
            }
            # "Asia/Seoul" is presumably a sentinel meaning "no callback URL" —
            # TODO confirm against the caller that fills this field.
            if arr[3] != "Asia/Seoul":
                response_collback = requests.post("https://9174963504.for-seoul.synctreengine.com/call", headers=headers, data=json.dumps(body))
                print(response_collback)
        except Exception as e:
            # Keep the worker alive: one failed request (network error, remote
            # failure, malformed item) must not silently kill the thread.
            print("chat worker error:", e)
# Start the background worker; daemon=True so it terminates with the main process.
th_a = Thread(target = chat)
th_a.daemon = True
th_a.start()
# App code
def res(x, id, cdata, url):
    """Enqueue one chat request for the background worker and return at once.

    Only the first comma-separated field of ``cdata`` is forwarded; the reply
    is delivered asynchronously via the callback ``url``, not returned here.
    """
    global q
    first_field = str(cdata.split(",", 1)[0])
    q.put([x, id, first_field, url])
    print("\n_Done\n\n")
    return "Done"
# Build the web UI: a single text-in/text-out interface backed by res().
with gr.Blocks() as demo:
    count = 0  # NOTE(review): appears unused.
    aa = gr.Interface(
        fn=res,
        inputs=["text","text", "text", "text"],
        outputs="text",
        description="chat",
    )
# Queue requests (max 32 waiting) and start the server.
demo.queue(max_size=32).launch(enable_queue=True)