# Created by Leandro Carneiro at 19/01/2024
# Description: orchestrates news search, local storage, embedding
# generation, and RAG-based article writing.
# ------------------------------------------------
import search_engine
import rag
import constants
import llm


def generate_news(subject, min_words, max_words, sites):
    """Search the web for *subject*, persist the articles locally, and
    generate a news text via RAG.

    Args:
        subject: topic to search for and write about.
        min_words: lower word-count bound passed to the RAG generator.
        max_words: upper word-count bound passed to the RAG generator.
        sites: site list forwarded to the search engine.

    Returns:
        The generated article as a string — with a 'Fontes:' section
        appended when sources are available — or a string prefixed with
        'Erro: ' on any failure.
    """
    print('\n\n' + '*' * 50)
    print('\n\nInício do Programa: \n')

    print('\nBuscando sites relevantes...')
    retrieved_sites = search_engine.search_google(subject, sites)
    # The helpers signal failure by returning a str error message
    # instead of their normal result type.
    if isinstance(retrieved_sites, str):
        return 'Erro: ' + retrieved_sites

    print('\nBaixando as notícias...')
    retrieved_text_from_sites = search_engine.retrieve_text_from_site(retrieved_sites)
    if isinstance(retrieved_text_from_sites, str):
        return 'Erro: ' + retrieved_text_from_sites

    print('\nSalvando as notícias em base local...')
    ret = search_engine.delete_base(constants.local_base)
    if ret != 0:
        # str() guards against non-string error values: the original
        # `'Erro: ' + ret` raised TypeError whenever ret was not a str.
        return 'Erro: ' + str(ret)
    ret = search_engine.save_on_base(retrieved_sites, retrieved_text_from_sites, constants.local_base)
    if ret != 0:
        return 'Erro: ' + str(ret)

    print('\nGerando embeddings e vectorstore...')
    vectorstore = rag.generate_embeddings_and_vectorstore(constants.local_base)
    if isinstance(vectorstore, str):
        return 'Erro: ' + vectorstore

    print('\nGerando a notícia (RAG)...')
    print(' Assunto: ' + subject)
    obj_rag = rag.Rag(vectorstore, min_words, max_words)
    result_news = obj_rag.generate_text(subject)
    if isinstance(result_news, str):
        return 'Erro: ' + result_news

    # result_news is a (text, sources) pair on success.
    print('\n\n' + '*' * 50 + '\n\n')
    print(result_news[0])
    if result_news[1]:
        print('\n\nFontes: ')
        print(result_news[1])
        return result_news[0] + '\n\n' + 'Fontes: ' + '\n' + result_news[1]
    else:
        return result_news[0]


def call_llm(context, prompt):
    """Forward *context* and *prompt* to the language model and return
    its raw response."""
    print('\nChamando o modelo de linguagem...')
    result = llm.invoke_llm(context, prompt)
    return result