kenken999 committed on
Commit 024edca
1 Parent(s): d58f5a2
mysite/asgi_config.py ADDED
@@ -0,0 +1,8 @@
+ from fastapi import FastAPI
+ from fastapi.staticfiles import StaticFiles
+ from django.conf import settings
+
+ def init_django_app(app: FastAPI, application):
+     if settings.MOUNT_DJANGO_APP:
+         app.mount("/django", application)  # type:ignore
+         app.mount("/static", StaticFiles(directory="staticfiles"), name="static")
mysite/database.py ADDED
@@ -0,0 +1,56 @@
+ import duckdb
+ import pandas as pd
+ from fastapi import FastAPI
+ import gradio as gr
+
+ con = duckdb.connect(database="./workspace/mydatabase.duckdb")
+ con.execute("CREATE TABLE IF NOT EXISTS items (id INTEGER, name VARCHAR);")
+
+ def setup_database_routes(app: FastAPI):
+     def create_item(name):
+         con.execute("INSERT INTO items (name) VALUES (?);", (name,))
+         con.commit()
+         return "Item created successfully!"
+
+     def read_items():
+         cursor = con.cursor()
+         cursor.execute("SELECT * FROM items;")
+         items = cursor.fetchall()
+         df = pd.DataFrame(items, columns=["ID", "Name"])
+         return df
+
+     def update_item(id, name):
+         con.execute("UPDATE items SET name = ? WHERE id = ?;", (name, id))
+         con.commit()
+         return "Item updated successfully!"
+
+     def delete_item(id):
+         con.execute("DELETE FROM items WHERE id = ?;", (id,))
+         con.commit()
+         return "Item deleted successfully!"
+
+     with gr.Blocks() as appdb:
+         gr.Markdown("CRUD Application")
+         with gr.Row():
+             with gr.Column():
+                 create_name = gr.Textbox(label="Create Item")
+                 create_btn = gr.Button("Create")
+             with gr.Column():
+                 read_btn = gr.Button("Read Items")
+         with gr.Row():
+             with gr.Column():
+                 update_id = gr.Textbox(label="Update Item ID")
+                 update_name = gr.Textbox(label="Update Item Name")
+                 update_btn = gr.Button("Update")
+             with gr.Column():
+                 delete_id = gr.Textbox(label="Delete Item ID")
+                 delete_btn = gr.Button("Delete")
+         output_text = gr.Textbox(label="Output")
+         output_table = gr.DataFrame(label="Items")
+
+         create_btn.click(fn=create_item, inputs=create_name, outputs=output_text)
+         read_btn.click(fn=read_items, outputs=output_table)
+         update_btn.click(fn=update_item, inputs=[update_id, update_name], outputs=output_text)
+         delete_btn.click(fn=delete_item, inputs=delete_id, outputs=output_text)
+
+     # A Gradio Blocks object is not an ASGI app, so mount it with gr.mount_gradio_app instead of app.mount
+     gr.mount_gradio_app(app, appdb, path="/db")
mysite/gradio_config.py ADDED
@@ -0,0 +1,79 @@
+ import gradio as gr
+ from utilities import chat_with_interpreter, completion, process_file
+
+ def setup_gradio_interfaces():
+     chat_interface = gr.ChatInterface(
+         fn=chat_with_interpreter,
+         examples=["サンプルHTMLの作成", "google spreadの読み込み作成", "merhaba"],
+         title="Auto Program",
+         css=".chat-container { height: 1500px; }",
+     )
+
+     chat_interface2 = gr.ChatInterface(
+         fn=chat_with_interpreter,
+         examples=["こんにちは", "どうしたの?"],
+         title="Auto Program 2",
+     )
+     chat_interface2.queue()
+
+     demo4 = gr.ChatInterface(
+         chat_with_interpreter,
+         additional_inputs=[
+             gr.Textbox(value="You are a friendly Chatbot.", label="System message"),
+             gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
+             gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
+             gr.Slider(minimum=0.1, maximum=1.0, value=0.95, step=0.05, label="Top-p (nucleus sampling)"),
+         ],
+     )
+
+     democs = gr.Interface(
+         fn=process_file,
+         inputs=[
+             "file",
+             gr.Textbox(label="Additional Notes", lines=10),
+             gr.Textbox(label="Folder Name"),
+         ],
+         outputs="text",
+     )
+
+     with gr.Blocks(fill_height=True, css="") as demo:
+         demo = gr.ChatInterface(
+             fn=chat_with_interpreter,
+             chatbot=gr.Chatbot(height=650, placeholder="PLACEHOLDER", label="Gradio ChatInterface"),
+             fill_height=True,
+             additional_inputs_accordion=gr.Accordion(label="⚙️ Parameters", open=False, render=False),
+             additional_inputs=[
+                 gr.Slider(minimum=0, maximum=1, step=0.1, value=0.95, label="Temperature", render=False),
+                 gr.Slider(minimum=128, maximum=4096, step=1, value=512, label="Max new tokens", render=False),
+             ],
+             examples=[
+                 ["HTMLのサンプルを作成して"],
+                 ["CUDA_VISIBLE_DEVICES=0 llamafactory-cli train examples/lora_single_gpu/llama3_lora_sft.yaml"],
+             ],
+             cache_examples=False,
+         )
+
+     with gr.Blocks(fill_height=True, css="") as democ:
+         gr.ChatInterface(
+             fn=completion,
+             chatbot=gr.Chatbot(height=450, placeholder="PLACEHOLDER", label="Gradio ChatInterface"),
+             fill_height=True,
+             additional_inputs_accordion=gr.Accordion(label="⚙️ Parameters", open=False, render=False),
+             additional_inputs=[
+                 gr.Slider(minimum=0, maximum=1, step=0.1, value=0.95, label="Temperature", render=False),
+                 gr.Slider(minimum=128, maximum=4096, step=1, value=512, label="Max new tokens", render=False),
+             ],
+             examples=[
+                 ["HTMLのサンプルを作成して"],
+                 ["CUDA_VISIBLE_DEVICES=0 llamafactory-cli train examples/lora_single_gpu/llama3_lora_sft.yaml"],
+             ],
+             cache_examples=False,
+         )
+         gr.Markdown("--- Built with Meta Llama 3")
+
+     default_interfaces = [demo, demo4, democ, democs]
+     default_names = ["AIで開発", "FineTuning", "Chat", "仕様書から作成"]
+
+     tabs = gr.TabbedInterface(default_interfaces, default_names)
+     tabs.queue()
+     return tabs
mysite/main.py ADDED
@@ -0,0 +1,66 @@
+ import os
+ from django.core.asgi import get_asgi_application
+ from fastapi import FastAPI, Request
+ from fastapi.staticfiles import StaticFiles
+ from fastapi.templating import Jinja2Templates
+ from starlette.middleware.cors import CORSMiddleware
+ import logging
+ import gradio as gr
+ from gradio_config import setup_gradio_interfaces
+ from webhook import setup_webhook_routes
+ from database import setup_database_routes
+ from asgi_config import init_django_app
+
+ # Logger setup
+ logging.basicConfig(level=logging.INFO)
+ logger = logging.getLogger(__name__)
+ file_handler = logging.FileHandler("app.log")
+ file_handler.setLevel(logging.INFO)
+ formatter = logging.Formatter("%(asctime)s - %(name)s - %(levelname)s - %(message)s")
+ file_handler.setFormatter(formatter)
+ logger.addHandler(file_handler)
+
+ os.environ.setdefault("DJANGO_SETTINGS_MODULE", "mysite.settings")
+ application = get_asgi_application()
+
+ app = FastAPI()
+
+ # Initialize the Django application
+ init_django_app(app, application)
+
+ # Middleware setup
+ app.add_middleware(
+     CORSMiddleware,
+     allow_origins=["*"],
+     allow_credentials=True,
+     allow_methods=["*"],
+     allow_headers=["*"],
+ )
+
+ # Gradio interface setup
+ gradio_interfaces = setup_gradio_interfaces()
+
+ # Webhook routes
+ setup_webhook_routes(app)
+
+ # Database routes
+ setup_database_routes(app)
+
+ # Mount static files and the Gradio app
+ app.mount("/static", StaticFiles(directory="static", html=True), name="static")
+ app = gr.mount_gradio_app(app, gradio_interfaces, "/")
+
+ # Directory that holds the template files
+ templates = Jinja2Templates(directory="static")
+
+ @app.get("/test")
+ def get_some_page(request: Request):
+     return templates.TemplateResponse("index.html", {"request": request})
+
+ @app.get("/groq")
+ def hello_world():
+     return "Hello World"
+
+ if __name__ == "__main__":
+     import uvicorn
+     uvicorn.run(app, host="0.0.0.0", port=7860)
mysite/utilities.py ADDED
@@ -0,0 +1,154 @@
+ import os
+ import hmac
+ import hashlib
+ import base64
+ import subprocess
+ import shutil
+ import time
+ import asyncio
+
+ import duckdb
+ import async_timeout
+ from fastapi import HTTPException
+
+ # Timeout for streamed completions (assumed default; not defined in the original module)
+ GENERATION_TIMEOUT_SEC = 60
+
+ def validate_signature(body: str, signature: str, secret: str) -> bool:
+     hash = hmac.new(secret.encode("utf-8"), body.encode("utf-8"), hashlib.sha256).digest()
+     expected_signature = base64.b64encode(hash).decode("utf-8")
+     return hmac.compare_digest(expected_signature, signature)
+
+ def no_process_file(prompt, foldername):
+     set_environment_variables()
+     try:
+         proc = subprocess.Popen(["mkdir", f"/home/user/app/routers/{foldername}"])
+     except subprocess.CalledProcessError as e:
+         return f"Processed Content:\n{e.stdout}\n\nMake Command Error:\n{e.stderr}"
+
+     no_extension_path = f"/home/user/app/routers/{foldername}/prompt"
+     time.sleep(1)
+     with open(no_extension_path, "a") as f:
+         f.write(prompt)
+     time.sleep(1)
+     try:
+         prompt_file_path = no_extension_path
+         with open(prompt_file_path, "w") as prompt_file:
+             prompt_file.write(prompt)
+     except Exception as e:
+         return f"Error writing prompt to file: {str(e)}"
+     time.sleep(1)
+     try:
+         proc = subprocess.Popen(
+             ["make", "run", foldername],
+             stdin=subprocess.PIPE,
+             stdout=subprocess.PIPE,
+             stderr=subprocess.PIPE,
+             text=True,
+         )
+         stdout, stderr = proc.communicate(input="n\ny\ny\n")
+         return f"Processed Content:\n{stdout}\n\nMake Command Output:\n{stdout}\n\nMake Command Error:\n{stderr}"
+     except subprocess.CalledProcessError as e:
+         return f"Processed Content:\n{e.stdout}\n\nMake Command Error:\n{e.stderr}"
+
+ def set_environment_variables():
+     os.environ["OPENAI_API_BASE"] = "https://api.groq.com/openai/v1"
+     os.environ["OPENAI_API_KEY"] = "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"
+     os.environ["MODEL_NAME"] = "llama3-8b-8192"
+     os.environ["LOCAL_MODEL"] = "true"
+
+ def chat_with_interpreter(message, history, a=None, b=None, c=None, d=None):
+     from interpreter import interpreter
+     full_response = ""
+     messages = []
+     user_entry = {"role": "user", "type": "message", "content": message}
+     messages.append(user_entry)
+     for chunk in interpreter.chat(message, display=False, stream=True):
+         # format_response is expected to be provided elsewhere in the project
+         full_response = format_response(chunk, full_response)
+         yield full_response
+
+     age = 28
+     con = duckdb.connect(database="./workspace/sample.duckdb")
+     con.execute(
+         """
+         CREATE SEQUENCE IF NOT EXISTS sample_id_seq START 1;
+         CREATE TABLE IF NOT EXISTS samples (
+             id INTEGER DEFAULT nextval('sample_id_seq'),
+             name VARCHAR,
+             age INTEGER,
+             PRIMARY KEY(id)
+         );
+         """
+     )
+     cur = con.cursor()
+     con.execute("INSERT INTO samples (name, age) VALUES (?, ?)", (full_response, age))
+     con.execute("INSERT INTO samples (name, age) VALUES (?, ?)", (message, age))
+     con.execute("COPY samples TO 'sample.csv' (FORMAT CSV, HEADER)")
+     con.commit()
+     cur = con.execute("SELECT * FROM samples")
+     res = cur.fetchall()
+     rows = "\n".join([f"name: {row[0]}, age: {row[1]}" for row in res])
+     con.close()
+     yield full_response + rows
+
+ async def completion(message: str, history, c=None, d=None):
+     from groq import Groq
+     client = Groq(api_key=os.getenv("api_key"))
+     messages = []
+     recent_messages = history[-20:]
+     for conversation in recent_messages:
+         user_message = conversation[0]
+         user_entry = {"role": "user", "content": user_message}
+         messages.append(user_entry)
+         assistant_message = conversation[1]
+         assistant_entry = {"role": "assistant", "content": assistant_message}
+         messages.append(assistant_entry)
+
+     user_entry = {"role": "user", "content": message}
+     messages.append(user_entry)
+     system_prompt = {"role": "system", "content": "あなたは日本語の優秀なアシスタントです。"}
+     messages.insert(0, system_prompt)
+     async with async_timeout.timeout(GENERATION_TIMEOUT_SEC):
+         try:
+             stream = client.chat.completions.create(
+                 model="llama3-8b-8192",
+                 messages=messages,
+                 temperature=1,
+                 max_tokens=1024,
+                 top_p=1,
+                 stream=True,
+                 stop=None,
+             )
+             all_result = ""
+             for chunk in stream:
+                 current_content = chunk.choices[0].delta.content or ""
+                 all_result += current_content
+                 yield current_content
+             yield all_result
+         except asyncio.TimeoutError:
+             raise HTTPException(status_code=504, detail="Stream timed out")
+
+ def process_file(fileobj, prompt, foldername):
+     set_environment_variables()
+     try:
+         proc = subprocess.Popen(["mkdir", f"/home/user/app/routers/{foldername}"])
+     except subprocess.CalledProcessError as e:
+         return f"Processed Content:\n{e.stdout}\n\nMake Command Error:\n{e.stderr}"
+     time.sleep(2)
+     path = f"/home/user/app/routers/{foldername}/" + os.path.basename(fileobj.name)
+     shutil.copyfile(fileobj.name, path)
+     base_name = os.path.splitext(os.path.basename(fileobj.name))[0]
+     no_extension_path = f"/home/user/app/routers/{foldername}/{base_name}"
+     shutil.copyfile(fileobj.name, no_extension_path)
+     with open(no_extension_path, "a") as f:
+         f.write(prompt)
+     try:
+         prompt_file_path = no_extension_path
+         with open(prompt_file_path, "w") as prompt_file:
+             prompt_file.write(prompt)
+     except Exception as e:
+         return f"Error writing prompt to file: {str(e)}"
+     time.sleep(1)
+     try:
+         proc = subprocess.Popen(
+             ["make", "run", foldername],
+             stdin=subprocess.PIPE,
+             stdout=subprocess.PIPE,
+             stderr=subprocess.PIPE,
+             text=True,
+         )
+         stdout, stderr = proc.communicate(input="n\ny\ny\n")
+         return f"Processed Content:\n{stdout}\n\nMake Command Output:\n{stdout}\n\nMake Command Error:\n{stderr}"
+     except subprocess.CalledProcessError as e:
+         return f"Processed Content:\n{e.stdout}\n\nMake Command Error:\n{e.stderr}"
mysite/webhook.py ADDED
@@ -0,0 +1,76 @@
+ import os
+ import subprocess
+ import logging
+ from fastapi import FastAPI, Request, HTTPException
+ import requests
+ import json
+ from datetime import datetime
+ from utilities import validate_signature, no_process_file
+
+ logger = logging.getLogger(__name__)
+
+ def setup_webhook_routes(app: FastAPI):
+     @app.post("/webhook")
+     async def webhook(request: Request):
+         logger.info("[Start] ====== LINE webhook ======")
+         try:
+             body = await request.body()
+             received_headers = dict(request.headers)
+             body_str = body.decode("utf-8")
+             logger.info("Received Body: %s", body_str)
+             body_json = json.loads(body_str)
+             events = body_json.get("events", [])
+
+             for event in events:
+                 if event["type"] == "message" and event["message"]["type"] == "text":
+                     user_id = event["source"]["userId"]
+                     text = event["message"]["text"]
+                     logger.info("------------------------------------------")
+                     logger.info(f"User ID: {user_id}, Text: {text}")
+                     no_process_file(text, "ai")
+
+             for event in events:
+                 if event["type"] == "message" and event["message"]["type"] == "text":
+                     user_id = event["source"]["userId"]
+                     text = event["message"]["text"]
+                     logger.info(event)
+                     logger.info(f"User ID: {user_id}, Text: {text}")
+                     now = datetime.now().strftime("%Y%m%d%H%M%S")
+                     title = text[:10]
+                     user_id_with_timestamp = f"{now}_{title}_{user_id}"
+                     no_process_file(text, user_id_with_timestamp)
+
+             logger.info("Received Headers: %s", received_headers)
+             logger.info("Received Body: %s", body.decode("utf-8"))
+
+             line_signature = received_headers.get("x-line-signature")
+             if not line_signature:
+                 raise HTTPException(status_code=400, detail="X-Line-Signature header is missing.")
+
+             if not validate_signature(body.decode("utf-8"), line_signature, os.getenv("CHANNEL_SECRET")):
+                 raise HTTPException(status_code=400, detail="Invalid signature.")
+
+             if not os.getenv("WEBHOOK_URL") or not os.getenv("WEBHOOK_URL").startswith("https://"):
+                 raise HTTPException(status_code=400, detail="Invalid webhook URL")
+
+             headers = {
+                 "Content-Type": "application/json",
+                 "X-Line-Signature": line_signature,
+                 "Authorization": f"Bearer {os.getenv('CHANNEL_ACCESS_TOKEN')}",
+             }
+
+             logger.info("Forwarding to URL: %s", os.getenv("WEBHOOK_URL"))
+             logger.info("Forwarding Headers: %s", headers)
+             logger.info("Forwarding Body: %s", body.decode("utf-8"))
+
+             response = requests.post(os.getenv("WEBHOOK_URL"), headers=headers, data=body)
+
+             logger.info("Response Code: %s", response.status_code)
+             logger.info("Response Content: %s", response.text)
+             logger.info("Response Headers: %s", response.headers)
+
+             return {"status": "success", "response_content": response.text}, response.status_code
+
+         except Exception as e:
+             logger.error("Error: %s", str(e))
+             raise HTTPException(status_code=500, detail=str(e))