ILyaz03 committed • 53a5645
Duplicate from ILyaz03/DhikrLabs_Ai

Files changed:
- .gitattributes    +34 -0
- README.md         +13 -0
- app.py            +88 -0
- requirements.txt   +2 -0
.gitattributes
ADDED
@@ -0,0 +1,34 @@
+*.7z filter=lfs diff=lfs merge=lfs -text
+*.arrow filter=lfs diff=lfs merge=lfs -text
+*.bin filter=lfs diff=lfs merge=lfs -text
+*.bz2 filter=lfs diff=lfs merge=lfs -text
+*.ckpt filter=lfs diff=lfs merge=lfs -text
+*.ftz filter=lfs diff=lfs merge=lfs -text
+*.gz filter=lfs diff=lfs merge=lfs -text
+*.h5 filter=lfs diff=lfs merge=lfs -text
+*.joblib filter=lfs diff=lfs merge=lfs -text
+*.lfs.* filter=lfs diff=lfs merge=lfs -text
+*.mlmodel filter=lfs diff=lfs merge=lfs -text
+*.model filter=lfs diff=lfs merge=lfs -text
+*.msgpack filter=lfs diff=lfs merge=lfs -text
+*.npy filter=lfs diff=lfs merge=lfs -text
+*.npz filter=lfs diff=lfs merge=lfs -text
+*.onnx filter=lfs diff=lfs merge=lfs -text
+*.ot filter=lfs diff=lfs merge=lfs -text
+*.parquet filter=lfs diff=lfs merge=lfs -text
+*.pb filter=lfs diff=lfs merge=lfs -text
+*.pickle filter=lfs diff=lfs merge=lfs -text
+*.pkl filter=lfs diff=lfs merge=lfs -text
+*.pt filter=lfs diff=lfs merge=lfs -text
+*.pth filter=lfs diff=lfs merge=lfs -text
+*.rar filter=lfs diff=lfs merge=lfs -text
+*.safetensors filter=lfs diff=lfs merge=lfs -text
+saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+*.tar.* filter=lfs diff=lfs merge=lfs -text
+*.tflite filter=lfs diff=lfs merge=lfs -text
+*.tgz filter=lfs diff=lfs merge=lfs -text
+*.wasm filter=lfs diff=lfs merge=lfs -text
+*.xz filter=lfs diff=lfs merge=lfs -text
+*.zip filter=lfs diff=lfs merge=lfs -text
+*.zst filter=lfs diff=lfs merge=lfs -text
+*tfevents* filter=lfs diff=lfs merge=lfs -text
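Each line added here tells Git to store files matching that glob pattern with Git LFS rather than in the regular object database, which is how Spaces handle large binaries such as model weights and archives. As a rough illustration only (fnmatch only approximates Git's wildmatch rules, and the file names and pattern subset below are made up for the example), the patterns map onto file names like this:

from fnmatch import fnmatch

# Illustrative subset of the patterns added above.
LFS_PATTERNS = ["*.bin", "*.safetensors", "*.zip", "*tfevents*"]

def tracked_by_lfs(filename):
    # True if the name matches any LFS-tracked pattern.
    return any(fnmatch(filename, pattern) for pattern in LFS_PATTERNS)

print(tracked_by_lfs("model.safetensors"))          # True
print(tracked_by_lfs("events.out.tfevents.12345"))  # True
print(tracked_by_lfs("app.py"))                     # False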
README.md
ADDED
@@ -0,0 +1,13 @@
+---
+title: ILyaz
+emoji: 🔥
+colorFrom: blue
+colorTo: red
+sdk: gradio
+sdk_version: 3.20.0
+app_file: app.py
+pinned: false
+duplicated_from: ILyaz03/DhikrLabs_Ai
+---
+
+Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
app.py
ADDED
@@ -0,0 +1,88 @@
+import os
+import gradio as gr
+import openai
+from gtts import gTTS
+
+# Load the OpenAI API key from the environment (set as a Space secret)
+openai.api_key = os.environ["OPEN_AI_KEY"]
+
+# Takes an audio file recorded from the microphone and returns the Whisper transcript
+def transcribe(audio):
+    with open(audio, "rb") as audio_file:
+        transcript = openai.Audio.transcribe("whisper-1", audio_file)
+
+    return transcript["text"]
+
+
+# Create a Gradio app using Blocks
+with gr.Blocks() as demo:
+    gr.Markdown(
+    """
+    # Welcome to ILyaz Ai Bot
+    """
+    )
+    with gr.Accordion("Click for Instructions:"):
+        gr.Markdown(
+        """
+        * Record your query.
+        * Submit your query, and follow the chat or listen to the advice.
+        * When you are ready to respond, clear your last recording and resubmit.
+        Note: Transcribe Audio does not work on iOS.
+        """)
+
+    # First message: system instructions that set the assistant's persona
+    messages = gr.State(value=[{"role": "system", "content": "This skilled gamer has honed their strategic mind through countless hours spent mastering FIFA, Call of Duty, and Rocket League, allowing them to quickly adapt to changing circumstances and outmaneuver their opponents with ease."}])
+
+    # Takes the user's transcribed audio as a string, queries the chat model, and returns the chat log
+    def botResponse(user_input, messages):
+        messages.append({"role": "user", "content": user_input})
+        response = openai.ChatCompletion.create(
+            model="gpt-3.5-turbo-0301",
+            messages=messages
+        )
+
+        # Parse the response from OpenAI and store it in the history
+        system_message = response["choices"][0]["message"]["content"]
+        messages.append({"role": "assistant", "content": system_message})
+
+        # Flatten the messages list into a readable chat transcript
+        chat_transcript = ""
+        for message in messages:
+            if message["role"] != "system":
+                chat_transcript += message["role"] + ": " + message["content"] + "\n\n"
+
+        return chat_transcript
+
+    # Gets the last message in the chat log and uses gTTS to turn it into an audio file
+    def giveVoice(messages):
+        bot_message = messages[-1]
+
+        myobj = gTTS(text=bot_message["content"])
+        myobj.save("temp.mp3")
+
+        cwd = os.getcwd()
+        new_path = os.path.join(cwd, "temp.mp3")
+
+        return new_path
+
+    # Create the Gradio interface objects
+    with gr.Row():
+        with gr.Column(scale=1):
+            user_audio = gr.Audio(source="microphone", type="filepath", label="Input Phrase")
+            submit_btn = gr.Button(value="Transcribe Audio")
+            submit_btn2 = gr.Button(value="Submit Text")
+            gpt_voice = gr.Audio(label="Listen to Advice")
+        with gr.Column(scale=2):
+            user_transcript = gr.Text(label="Audio Translation", interactive=False)
+            user_text = gr.Text(label="Text Input")
+            gpt_transcript = gr.Text(label="Chat Transcript")
+    submit_btn.click(transcribe, user_audio, user_transcript)
+    submit_btn2.click(botResponse, [user_text, messages], gpt_transcript)
+    user_transcript.change(botResponse, [user_transcript, messages], gpt_transcript)
+    gpt_transcript.change(giveVoice, messages, gpt_voice)
+
+
+# Launch the web server with basic authentication
+demo.launch(auth=("ILyaz", "hello"))
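For anyone who wants to exercise the same transcribe, chat, and speech pipeline outside Gradio, the sketch below uses the same pre-1.0 openai SDK calls and the same gtts call that app.py relies on. The helper name, system prompt, and file names are assumptions made for the example, not part of the Space:

import os

import openai
from gtts import gTTS

openai.api_key = os.environ["OPEN_AI_KEY"]  # same secret name the Space reads

def ask_once(audio_path, history):
    # 1. Speech -> text with Whisper (same call app.py makes in transcribe()).
    with open(audio_path, "rb") as audio_file:
        user_text = openai.Audio.transcribe("whisper-1", audio_file)["text"]

    # 2. Text -> reply, keeping the running conversation history.
    history.append({"role": "user", "content": user_text})
    reply = openai.ChatCompletion.create(
        model="gpt-3.5-turbo-0301",
        messages=history,
    )["choices"][0]["message"]["content"]
    history.append({"role": "assistant", "content": reply})

    # 3. Reply text -> spoken audio with gTTS, as giveVoice() does.
    gTTS(text=reply).save("reply.mp3")
    return reply

history = [{"role": "system", "content": "You are a concise gaming coach."}]
print(ask_once("question.wav", history))  # question.wav is a placeholder recording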
requirements.txt
ADDED
@@ -0,0 +1,2 @@
+openai
+gtts
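Since app.py relies on the pre-1.0 openai interface (openai.ChatCompletion, openai.Audio), an unpinned openai requirement will break once the Space rebuilds against a 1.x release; gradio itself is already pinned by sdk_version in README.md. A pinned variant might look like the lines below, where the exact version numbers are illustrative assumptions rather than values tested with this Space:

openai==0.27.2
gtts==2.3.1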