Marcos12886 committed
Commit • 9b48c7d
1 Parent(s): a1c7d58
CLASIFICADOR Y MONITOR FUNCIONANDO A LA VEZ (classifier and monitor working at the same time)
app.py CHANGED
@@ -5,15 +5,27 @@ from huggingface_hub import InferenceClient
 from model import model_params, AudioDataset
 
 token = os.getenv("HF_TOKEN")
-# dataset_path = f"data/baby_cry_detection" # PARA MONITOR
-dataset_path = f"data/mixed_data" # PARA CLASIFICADOR
-model, _, _, id2label = model_params(dataset_path)
-device = torch.device("cuda" if torch.cuda.is_available() else "cpu")# Usar a GPU o CPU
-model.to(device)# Usar a GPU o CPU
 client = InferenceClient("meta-llama/Meta-Llama-3-8B-Instruct", token=token)
-# client = InferenceClient("mistralai/Mistral-Nemo-Instruct-2407", token=token)
 
-def
+def predict_class(audio_path):
+    dataset_path = f"data/mixed_data" # PARA CLASIFICADOR
+    model, _, _, id2label = model_params(dataset_path)
+    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")# Usar a GPU o CPU
+    model.to(device)# Usar a GPU o CPU
+    audio_dataset = AudioDataset(dataset_path, {})
+    inputs = audio_dataset.preprocess_audio(audio_path)
+    inputs = {"input_values": inputs.to(device).unsqueeze(0)}
+    with torch.no_grad():
+        outputs = model(**inputs)
+    predicted_class_ids = outputs.logits.argmax(-1)
+    label = id2label[predicted_class_ids.item()]
+    return label
+
+def predict_mon(audio_path):
+    dataset_path = f"data/baby_cry_detection" # PARA CLASIFICADOR
+    model, _, _, id2label = model_params(dataset_path)
+    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")# Usar a GPU o CPU
+    model.to(device)# Usar a GPU o CPU
     audio_dataset = AudioDataset(dataset_path, {})
     inputs = audio_dataset.preprocess_audio(audio_path)
     inputs = {"input_values": inputs.to(device).unsqueeze(0)}
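Note on this hunk: predict_class and predict_mon repeat the same load, preprocess, and infer pipeline and differ only in dataset_path (also, the "# PARA CLASIFICADOR" comment on the data/baby_cry_detection line looks like a copy-paste leftover; the removed line 8 labeled that dataset "# PARA MONITOR"). Because the model is loaded inside the handler, it is reloaded on every button press. Below is a minimal refactor sketch, not part of this commit; the helper names _load and _predict are mine, and it assumes model_params and AudioDataset behave exactly as used above.

from functools import lru_cache

import torch

from model import model_params, AudioDataset

@lru_cache(maxsize=None)  # load each dataset's model only once, not on every click
def _load(dataset_path):
    model, _, _, id2label = model_params(dataset_path)
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    model.to(device)
    model.eval()
    audio_dataset = AudioDataset(dataset_path, {})
    return model, id2label, device, audio_dataset

def _predict(audio_path, dataset_path):
    model, id2label, device, audio_dataset = _load(dataset_path)
    inputs = audio_dataset.preprocess_audio(audio_path)
    inputs = {"input_values": inputs.to(device).unsqueeze(0)}
    with torch.no_grad():
        outputs = model(**inputs)
    return id2label[outputs.logits.argmax(-1).item()]

def predict_class(audio_path):  # clasificador
    return _predict(audio_path, "data/mixed_data")

def predict_mon(audio_path):  # monitor
    return _predict(audio_path, "data/baby_cry_detection")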
@@ -149,7 +161,7 @@ with gr.Blocks(theme=my_theme) as demo:
                 )
                 classify_btn = gr.Button("¿Por qué llora?")
                 classification_output = gr.Textbox(label="Tu bebé llora por:")
-                classify_btn.click(
+                classify_btn.click(predict_class, inputs=audio_input, outputs=classification_output)
             with gr.Column():
                 gr.Markdown("<h2>Assistant</h2>")
                 system_message = "You are a Chatbot specialized in baby health and care."
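For reference, the wiring added here is the standard Gradio Blocks pattern: Button.click(fn, inputs=..., outputs=...) passes the gr.Audio value (a file path, since type="filepath") to fn and writes fn's return value into the Textbox. A stripped-down, self-contained sketch of the same wiring follows; echo_path is a hypothetical stand-in for predict_class, which needs the real model files.

import gradio as gr

def echo_path(audio_path):
    # Hypothetical stand-in for predict_class: just report the received path.
    return f"received file: {audio_path}"

with gr.Blocks() as demo:
    audio_input = gr.Audio(type="filepath", label="Baby recorder")
    classify_btn = gr.Button("¿Por qué llora?")
    classification_output = gr.Textbox(label="Tu bebé llora por:")
    # The Audio component's file path goes in; the returned string comes out.
    classify_btn.click(echo_path, inputs=audio_input, outputs=classification_output)

if __name__ == "__main__":
    demo.launch()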
@@ -168,7 +180,34 @@
                 gr.Markdown("Este chatbot no sustituye a un profesional de la salud. Ante cualquier preocupación o duda, consulta con tu pediatra.")
         boton_volver_inicio_1 = gr.Button("Volver a la pantalla inicial").click(cambiar_pestaña, outputs=[pagina_1, pantalla_inicial])
     with gr.Column(visible=False) as pagina_2:
-        gr.
+        with gr.Row():
+            with gr.Column():
+                gr.Markdown("<h2>Monitor</h2>")
+                audio_input = gr.Audio(
+                    min_length=1.0,
+                    format="wav",
+                    label="Baby recorder",
+                    type="filepath", # Para no usar numpy y preprocesar siempre igual
+                )
+                classify_btn = gr.Button("¿Por qué llora?")
+                classification_output = gr.Textbox(label="Tu bebé llora por:")
+                classify_btn.click(predict_mon, inputs=audio_input, outputs=classification_output)
+            with gr.Column():
+                gr.Markdown("<h2>Assistant</h2>")
+                system_message = "You are a Chatbot specialized in baby health and care."
+                max_tokens = 512
+                temperature = 0.7
+                top_p = 0.95
+                chatbot = gr.ChatInterface(
+                    respond, # TODO: Cambiar para que argumentos estén aquí metidos
+                    additional_inputs=[
+                        gr.State(value=system_message),
+                        gr.State(value=max_tokens),
+                        gr.State(value=temperature),
+                        gr.State(value=top_p)
+                    ],
+                )
+                gr.Markdown("Este chatbot no sustituye a un profesional de la salud. Ante cualquier preocupación o duda, consulta con tu pediatra.")
         gr.Markdown("Contenido de la Página 2")
         boton_volver_inicio_2 = gr.Button("Volver a la pantalla inicial").click(cambiar_pestaña, outputs=[pagina_2, pantalla_inicial])
     boton_pagina_1.click(cambiar_pestaña, outputs=[pantalla_inicial, pagina_1])
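On the TODO next to respond ("change so the arguments are baked in here"): instead of threading the fixed settings through gr.State additional_inputs, they could be bound into the callback itself. The sketch below is hedged and hypothetical; it assumes respond accepts (message, history, system_message, max_tokens, temperature, top_p) in the order the gr.State values are listed, and the stand-in respond only illustrates the call shape.

from functools import partial

import gradio as gr

def respond(message, history, system_message, max_tokens, temperature, top_p):
    # Stand-in with the assumed signature of the app's real respond().
    return f"[{system_message} | max_tokens={max_tokens}] echo: {message}"

demo = gr.ChatInterface(
    # partial() bakes the fixed settings in, so no gr.State inputs are needed.
    partial(
        respond,
        system_message="You are a Chatbot specialized in baby health and care.",
        max_tokens=512,
        temperature=0.7,
        top_p=0.95,
    ),
)

if __name__ == "__main__":
    demo.launch()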