Uhhy committed
Commit
5e2b717
1 Parent(s): 1608585

Update app.py

Files changed (1):
  1. app.py +5 -6
app.py CHANGED
@@ -6,7 +6,6 @@ import uvicorn
 from dotenv import load_dotenv
 from difflib import SequenceMatcher
 from tqdm import tqdm
-import multiprocessing
 
 load_dotenv()
 
@@ -47,6 +46,7 @@ class ChatRequest(BaseModel):
     top_p: float = 0.95
     temperature: float = 0.7
 
+# Función global para generar respuestas de chat
 def generate_chat_response(request, llm):
     try:
         user_input = normalize_input(request.message)
@@ -102,6 +102,9 @@ def filter_by_similarity(responses):
             break
     return best_response
 
+def worker_function(llm, request):
+    return generate_chat_response(request, llm)
+
 @app.post("/generate_chat")
 async def generate_chat(request: ChatRequest):
     if not request.message.strip():
@@ -109,12 +112,8 @@ async def generate_chat(request: ChatRequest):
 
     print(f"Procesando solicitud: {request.message}")
 
-    # Utilizar un ProcessPoolExecutor para procesar los modelos en paralelo
-    def worker_function(llm):
-        return generate_chat_response(request, llm)
-
     with ProcessPoolExecutor() as executor:
-        futures = [executor.submit(worker_function, llm) for llm in llms.values()]
+        futures = [executor.submit(worker_function, llm, request) for llm in llms.values()]
         responses = []
 
         for future in tqdm(as_completed(futures), total=len(futures), desc="Generando respuestas"):
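The change follows a standard constraint of ProcessPoolExecutor: the submitted callable is pickled and sent to worker processes, and a function defined inside the request handler cannot be pickled, so worker_function is moved to module level and receives request as an explicit argument instead of capturing it in a closure. The now-redundant import multiprocessing is dropped, since ProcessPoolExecutor comes from concurrent.futures. A minimal, self-contained sketch of the pattern is shown below; the llms registry, the request value, and the body of generate_chat_response here are placeholders standing in for the app's real objects, not its implementation.

# Sketch of the module-level worker pattern adopted by this commit.
from concurrent.futures import ProcessPoolExecutor, as_completed

def generate_chat_response(request, llm):
    # Placeholder for the app's real generation logic.
    return f"{llm}: {request}"

def worker_function(llm, request):
    # Module-level and therefore picklable; request is passed explicitly
    # rather than being captured from an enclosing scope.
    return generate_chat_response(request, llm)

if __name__ == "__main__":
    llms = {"model_a": "model_a", "model_b": "model_b"}  # placeholder registry
    request = "hola"
    with ProcessPoolExecutor() as executor:
        futures = [executor.submit(worker_function, llm, request) for llm in llms.values()]
        responses = [f.result() for f in as_completed(futures)]
    print(responses)

Keeping the worker at module level also means each process re-imports the module rather than inheriting the handler's local state, which is why everything the worker needs must arrive through its arguments.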