Spaces:
Runtime error
Update app.py
app.py CHANGED
@@ -29,7 +29,6 @@ class ChatRequest(BaseModel):
 
 def generate_chat_response(request, llm):
     try:
-        # Normalize the message for robust handling
         user_input = normalize_input(request.message)
         response = llm.create_chat_completion(
             messages=[{"role": "user", "content": user_input}],
@@ -43,11 +42,10 @@ def generate_chat_response(request, llm):
         return {"response": f"Error: {str(e)}", "literal": user_input}
 
 def normalize_input(input_text):
-    # Implement any normalization logic needed here
     return input_text.strip()
 
 def select_best_response(responses, request):
-    coherent_responses = filter_by_coherence(
+    coherent_responses = filter_by_coherence(responses, request)
     best_response = filter_by_similarity(coherent_responses)
     return best_response
 
@@ -77,16 +75,22 @@ async def generate_chat(request: ChatRequest):
         response = future.result()
         responses.append(response)
 
+    # Check whether any response contains an error and handle it if necessary
     if any("Error" in response['response'] for response in responses):
         error_response = next(response for response in responses if "Error" in response['response'])
         raise HTTPException(status_code=500, detail=error_response['response'])
 
-
+    # Extract the response texts and the literal inputs
+    response_texts = [resp['response'] for resp in responses]
+    literal_inputs = [resp['literal'] for resp in responses]
+
+    # Select the best response
+    best_response = select_best_response(response_texts, request)
 
     return {
         "best_response": best_response,
-        "all_responses":
-        "literal_inputs":
+        "all_responses": response_texts,
+        "literal_inputs": literal_inputs
     }
 
 if __name__ == "__main__":
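
For context, the fixed select_best_response depends on filter_by_coherence and filter_by_similarity, which live elsewhere in app.py and are not part of this diff. The snippet below is only a minimal, self-contained sketch of how that selection step could behave: the placeholder filter bodies, the SequenceMatcher-based similarity measure, the unused request argument, and the sample inputs are assumptions for illustration, not the Space's actual code.

from difflib import SequenceMatcher

def filter_by_coherence(responses, request):
    # Placeholder assumption: keep non-empty responses that are not error strings.
    return [r for r in responses if r and not r.startswith("Error")]

def filter_by_similarity(coherent_responses):
    # Placeholder assumption: pick the response most similar, on average, to the others.
    if not coherent_responses:
        return ""
    if len(coherent_responses) == 1:
        return coherent_responses[0]

    def avg_similarity(candidate):
        others = [r for r in coherent_responses if r is not candidate]
        return sum(SequenceMatcher(None, candidate, o).ratio() for o in others) / len(others)

    return max(coherent_responses, key=avg_similarity)

def select_best_response(responses, request):
    # Same shape as the fixed function in the diff: filter first, then pick one.
    coherent_responses = filter_by_coherence(responses, request)
    best_response = filter_by_similarity(coherent_responses)
    return best_response

if __name__ == "__main__":
    sample = [
        "Paris is the capital of France.",
        "The capital of France is Paris.",
        "Error: model timed out",
    ]
    print(select_best_response(sample, request=None))

Run standalone, the sketch drops the error string and prints one of the two coherent answers, mirroring how the updated endpoint now feeds response_texts into select_best_response before building the JSON reply.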