Update app.py
app.py CHANGED
@@ -154,7 +154,7 @@ def rag_chain(llm, prompt, db):
 
 ###################################################
 # Function invoked from Gradio that takes the prompt entered there and processes it further
-def invoke(openai_api_key, rag_option,
+def invoke(prompt, chatbot, openai_api_key, rag_option, temperature=0.9, max_new_tokens=512, top_p=0.6, repetition_penalty=1.3,):
     global splittet
 
     if (openai_api_key == "" or openai_api_key == "sk-"):
@@ -241,6 +241,9 @@ def vote(data: gr.LikeData):
     else: print("You downvoted this response: " + data.value)
 
 additional_inputs = [
+    gr.Textbox(label = "OpenAI API Key", value = "sk-", lines = 1),
+    #gr.Radio(["Off", "Chroma", "MongoDB"], label="Retrieval Augmented Generation", value = "Off"),
+    gr.Radio(["Off", "Chroma"], label="Retrieval Augmented Generation", value = "Off"),
     gr.Slider(label="Temperature", value=0.9, minimum=0.0, maximum=1.0, step=0.05, interactive=True, info="Höhere Werte erzeugen diversere Antworten"),
     gr.Slider(label="Max new tokens", value=256, minimum=0, maximum=4096, step=64, interactive=True, info="Maximale Anzahl neuer Tokens"),
     gr.Slider(label="Top-p (nucleus sampling)", value=0.6, minimum=0.0, maximum=1, step=0.05, interactive=True, info="Höhere Werte verwenden auch Tokens mit niedrigerer Wahrscheinlichkeit."),
@@ -252,17 +255,19 @@ chatbot_stream = gr.Chatbot(avatar_images=(
     "https://drive.google.com/uc?id=1tfELAQW_VbPCy6QTRbexRlwAEYo8rSSv"
     ), bubble_full_width = False)
 
-chat_interface_stream = gr.
-
-
-
-
-
-
-
-
-
-
+chat_interface_stream = gr.Interface(fn=invoke,
+    inputs = [gr.Textbox(label = "OpenAI API Key", value = "sk-", lines = 1),
+        #gr.Radio(["Off", "Chroma", "MongoDB"], label="Retrieval Augmented Generation", value = "Off"),
+        gr.Radio(["Off", "Chroma"], label="Retrieval Augmented Generation", value = "Off"),
+        gr.Textbox(label = "Prompt", value = "What is GPT-4?", lines = 1),
+        gr.Slider(label="Temperature", value=0.9, minimum=0.0, maximum=1.0, step=0.05, interactive=True, info="Höhere Werte erzeugen diversere Antworten"),
+        gr.Slider(label="Max new tokens", value=256, minimum=0, maximum=4096, step=64, interactive=True, info="Maximale Anzahl neuer Tokens"),
+        gr.Slider(label="Top-p (nucleus sampling)", value=0.6, minimum=0.0, maximum=1, step=0.05, interactive=True, info="Höhere Werte verwenden auch Tokens mit niedrigerer Wahrscheinlichkeit."),
+        gr.Slider(label="Repetition penalty", value=1.2, minimum=1.0, maximum=2.0, step=0.05, interactive=True, info="Strafe für wiederholte Tokens")
+        ],
+    outputs = [chatbot_stream],
+    title = "Generative AI - LLM & RAG",
+    description = description)
 
 with gr.Blocks() as demo:
     with gr.Tab("General LLM"):