# NOTE: "Spaces: Running" is HuggingFace Spaces UI status text that was
# pasted into the file by accident; kept here only as a comment so the
# module remains valid Python.
import io
import wave

import numpy as np
import requests
import streamlit as st
import streamlit.components.v1 as components
from streamlit_webrtc import webrtc_streamer, WebRtcMode, ClientSettings

from audio_to_text import audio_to_text
# --- Page layout -----------------------------------------------------------
st.title("Microphone Input in Streamlit")

# Inline HTML for the custom microphone widget: a record button plus the
# script that wires it up.  Rendered inside a Streamlit component iframe.
_MIC_WIDGET_HTML = """
<button>Record Audio</button>
<script src="mic_component.js"></script>
"""
components.html(_MIC_WIDGET_HTML)
def audio_callback(frame):
    """Handle one incoming WebRTC audio frame.

    Converts the frame to 16-bit PCM, transcribes it via ``audio_to_text``,
    shows the transcription, forwards it to the assistant API, and returns
    the frame so playback continues.

    Parameters:
        frame: an ``av.AudioFrame`` delivered by streamlit-webrtc.

    Returns:
        The same frame, unmodified. SENDRECV mode expects the callback to
        return a frame for the outgoing stream; the original returned
        ``None``, which breaks playback.
    """
    # Raw PCM samples from the frame as 16-bit integers.
    audio_data = frame.to_ndarray().astype(np.int16)

    # Speech-to-text on this frame's samples.
    transcription = audio_to_text(audio_data)

    # NOTE(review): streamlit-webrtc invokes this callback on a worker
    # thread, where st.write may not render into the page — confirm against
    # the streamlit-webrtc docs before relying on this output.
    st.write("Transcription:", transcription)

    # Forward the transcription to the assistant endpoint.
    API_URL = "https://eaa0-34-74-179-199.ngrok-free.app/generate"
    headers = {
        "Content-Type": "application/json"
    }
    payload = {
        "prompt": transcription
    }
    try:
        # Bounded timeout so a stalled endpoint cannot wedge the per-frame
        # audio pipeline (the original call could block indefinitely).
        response = requests.post(API_URL, json=payload, headers=headers, timeout=10)
    except requests.RequestException as exc:
        # Network failures previously crashed the callback; report instead.
        st.write("Error:", exc)
    else:
        if response.status_code == 200:
            st.write("Assistant:", response.json())
        else:
            st.write("Error:", response.status_code, response.text)

    # Keep the media pipeline flowing in SENDRECV mode.
    return frame
# WebRTC session configuration: audio capture only, no video track.
_RTC_CONFIGURATION = {"iceServers": [{"urls": ["stun:stun1.l.google.com:19302"]}]}
_MEDIA_CONSTRAINTS = {"audio": True, "video": False}

webrtc_ctx = webrtc_streamer(
    key="audio-only",
    mode=WebRtcMode.SENDRECV,
    rtc_configuration=_RTC_CONFIGURATION,
    media_stream_constraints=_MEDIA_CONSTRAINTS,
    audio_frame_callback=audio_callback,
)
# Surface the current capture state to the user.
_status = (
    "Microphone is active. Speak into the microphone..."
    if webrtc_ctx.state.playing
    else "Click to start microphone input."
)
st.write(_status)