Update app.py
app.py CHANGED
@@ -1,67 +1,29 @@
import streamlit as st
-from streamlit_webrtc import webrtc_streamer, WebRtcMode, ClientSettings
import numpy as np
import io
import wave
import requests
from audio_to_text import audio_to_text
-
+from streamlit_mic_recorder import mic_recorder

# Initialize Streamlit app layout
st.title("Microphone Input in Streamlit")

-#
-
-
-
-
-
-
-
-
-
-.
-
-
-recordButton.onclick = function() {
-    mediaRecorder.start();
-    console.log("Recording started...");
-};
-
-stopButton.onclick = function() {
-    mediaRecorder.stop();
-    console.log("Recording stopped...");
-};
-
-mediaRecorder.ondataavailable = function(e) {
-    audioChunks.push(e.data);
-};
-
-mediaRecorder.onstop = function(e) {
-    const audioBlob = new Blob(audioChunks, { type: "audio/wav" });
-    const reader = new FileReader();
-    reader.readAsDataURL(audioBlob);
-    reader.onloadend = function() {
-        const base64data = reader.result.split(',')[1];
-        Streamlit.setComponentValue(base64data);
-    };
-};
-});
-</script>
-<button id="recordButton">Start Recording</button>
-<button id="stopButton">Stop Recording</button>
-"""
-
-# Embed the JavaScript and HTML in Streamlit
-components.html(audio_recorder_html, height=300)
-
-# Retrieve the audio data from the component (if available)
-audio_data = st.query_params().get("value")
-
-
-def audio_callback(frame):
+# Record audio
+audio = mic_recorder(
+    start_prompt="Start recording",
+    stop_prompt="Stop recording",
+    just_once=False,
+    use_container_width=True
+)
+
+# Check if audio is recorded
+if audio:
+    st.audio(audio['bytes'], format='audio/wav')
+    byte_data=audio['bytes']
+
    # Get raw audio data from the frame
-    audio_data =
+    audio_data = byte_data.to_ndarray().astype(np.int16)

    # Convert audio to text
    transcription = audio_to_text(audio_data)
@@ -82,22 +44,4 @@ def audio_callback(frame):
        st.write("Assistant:", response.json())
    else:
        st.write("Error:", response.status_code, response.text)
-
-
-webrtc_ctx = webrtc_streamer(
-    key="audio-only",
-    mode=WebRtcMode.SENDRECV,
-    rtc_configuration={"iceServers": [{"urls": ["stun:stun1.l.google.com:19302"]}]},
-    media_stream_constraints={
-        "audio": True,
-        "video": False
-    },
-    audio_frame_callback=audio_callback
-)
-
-# Placeholder for capturing audio
-if webrtc_ctx.state.playing:
-    st.write("Microphone is active. Speak into the microphone...")
-else:
-    st.write("Click to start microphone input.")

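A note on the new code path: mic_recorder returns its recording as a plain WAV-encoded bytes object (the diff itself reads it as audio['bytes']), so the byte_data.to_ndarray() call carried over from the old av.AudioFrame-based callback will raise an AttributeError. Below is a minimal decoder sketch built on the io, wave, and numpy modules the app already imports; the helper name wav_bytes_to_int16 is illustrative, and 16-bit PCM input is an assumption checked at runtime.

import io
import wave

import numpy as np

def wav_bytes_to_int16(wav_bytes: bytes) -> np.ndarray:
    # Decode in-memory WAV bytes into a flat int16 sample array.
    # Assumes 16-bit PCM, the usual output of browser recorders.
    with wave.open(io.BytesIO(wav_bytes), "rb") as wf:
        if wf.getsampwidth() != 2:  # 2 bytes per sample == int16
            raise ValueError("expected 16-bit PCM audio")
        frames = wf.readframes(wf.getnframes())
    return np.frombuffer(frames, dtype=np.int16)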
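With a helper like that in place, the added conversion line could become, as a sketch rather than the committed code:

if audio:
    audio_data = wav_bytes_to_int16(audio['bytes'])
    transcription = audio_to_text(audio_data)

If audio_to_text also needs the sample rate, the same WAV header exposes it through wf.getframerate(), so the helper could return it alongside the samples.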