inoid committed on
Commit
f757ba6
1 Parent(s): ad91a76

Add Gemini LLM call request
.idea/.gitignore ADDED
@@ -0,0 +1,8 @@
+ # Default ignored files
+ /shelf/
+ /workspace.xml
+ # Editor-based HTTP Client requests
+ /httpRequests/
+ # Datasource local storage ignored files
+ /dataSources/
+ /dataSources.local.xml
.idea/inspectionProfiles/Project_Default.xml ADDED
@@ -0,0 +1,16 @@
+ <component name="InspectionProjectProfileManager">
+   <profile version="1.0">
+     <option name="myName" value="Project Default" />
+     <inspection_tool class="Eslint" enabled="true" level="WARNING" enabled_by_default="true" />
+     <inspection_tool class="PyPackageRequirementsInspection" enabled="true" level="WARNING" enabled_by_default="true">
+       <option name="ignoredPackages">
+         <value>
+           <list size="2">
+             <item index="0" class="java.lang.String" itemvalue="torch" />
+             <item index="1" class="java.lang.String" itemvalue="bs4" />
+           </list>
+         </value>
+       </option>
+     </inspection_tool>
+   </profile>
+ </component>
.idea/inspectionProfiles/profiles_settings.xml ADDED
@@ -0,0 +1,6 @@
+ <component name="InspectionProjectProfileManager">
+   <settings>
+     <option name="USE_PROJECT_PROFILE" value="false" />
+     <version value="1.0" />
+   </settings>
+ </component>
.idea/misc.xml ADDED
@@ -0,0 +1,7 @@
+ <?xml version="1.0" encoding="UTF-8"?>
+ <project version="4">
+   <component name="Black">
+     <option name="sdkName" value="Python 3.11 (seminarLabAI)" />
+   </component>
+   <component name="ProjectRootManager" version="2" project-jdk-name="Python 3.11 (seminarLabAI)" project-jdk-type="Python SDK" />
+ </project>
.idea/modules.xml ADDED
@@ -0,0 +1,8 @@
+ <?xml version="1.0" encoding="UTF-8"?>
+ <project version="4">
+   <component name="ProjectModuleManager">
+     <modules>
+       <module fileurl="file://$PROJECT_DIR$/.idea/seminarLabAI.iml" filepath="$PROJECT_DIR$/.idea/seminarLabAI.iml" />
+     </modules>
+   </component>
+ </project>
.idea/seminarLabAI.iml ADDED
@@ -0,0 +1,10 @@
+ <?xml version="1.0" encoding="UTF-8"?>
+ <module type="PYTHON_MODULE" version="4">
+   <component name="NewModuleRootManager">
+     <content url="file://$MODULE_DIR$">
+       <excludeFolder url="file://$MODULE_DIR$/.venv" />
+     </content>
+     <orderEntry type="jdk" jdkName="Python 3.11 (seminarLabAI)" jdkType="Python SDK" />
+     <orderEntry type="sourceFolder" forTests="false" />
+   </component>
+ </module>
.idea/vcs.xml ADDED
@@ -0,0 +1,6 @@
+ <?xml version="1.0" encoding="UTF-8"?>
+ <project version="4">
+   <component name="VcsDirectoryMappings">
+     <mapping directory="" vcs="Git" />
+   </component>
+ </project>
README.md CHANGED
@@ -11,3 +11,6 @@ license: apache-2.0
  ---
  
  Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
+ 
+ ## How to set up Hugging Face Space applications
+ - [How to Use Git with Hugging Face: From Cloning to Pushing with Access Token Verification](https://dev.to/sh20raj/how-to-use-git-with-hugging-face-from-cloning-to-pushing-with-access-token-verification-5711)
app.py CHANGED
@@ -1,7 +1,181 @@
  import gradio as gr
  
- def greet(name):
-     return "Hello " + name + "!!"
- 
- demo = gr.Interface(fn=greet, inputs="text", outputs="text")
- demo.launch()
+ # Question menus and prediction helpers come from seminar_edition_ai.py (added in this commit).
+ from seminar_edition_ai import (contemplandoQuestion, proclamandoQuestion,
+                                 predictContemplando, predictProclamando,
+                                 predictFromInit, predictQuestionBuild,
+                                 predictDevotionBuild, downloadSermonFile,
+                                 fileAddresToDownload)
+ # upload_file_ex (used by the upload buttons below) is assumed to be defined
+ # elsewhere in the Space; it is not part of this commit.
+ 
+ HISTORY_ANSWER = ''
+ 
+ with gr.Blocks() as demo:
+     gr.Markdown("SermonLab AI Demo.")
+     with gr.Tab("Preparando mi Sermón"):
+         text_input = gr.Textbox(label="Tópico del sermón")
+ 
+         with gr.Accordion("Contemplando y Proclamando", open=False):
+             checkButton = gr.Checkbox(
+                 value=False,
+                 label="Mantener historial"
+             )
+             with gr.Row():
+                 with gr.Tab("Contemplando"):
+                     inbtwContemplando = gr.Button(f"Devocionalmente: {contemplandoQuestion['DEVOCIONALMENTE']}")
+                     inbtwContemplandoOne = gr.Button(f"Exégesis: {contemplandoQuestion['EXÉGESIS']}")
+                     inbtwContemplandoTwo = gr.Button(f"Cristo: {contemplandoQuestion['CRISTO']}")
+                     inbtwContemplandoTree = gr.Button(f"Arco Redentor: {contemplandoQuestion['ARCO REDENTOR']}")
+                     inbtwContemplandoFour = gr.Button(f"Evangelión: {contemplandoQuestion['EVANGELION']}")
+                     inbtwContemplandoFourOne = gr.Button(f"Evangelión: {contemplandoQuestion['EVANGELION_TWO']}")
+ 
+                 with gr.Tab("Proclamando"):
+                     inbtwProclamando = gr.Button(f"Público: {proclamandoQuestion['PÚBLICO']}")
+                     inbtwProclamandoOne = gr.Button(f"Historia: {proclamandoQuestion['HISTORIA']}")
+                     inbtwProclamandoTwo = gr.Button(f"Expectativas: {proclamandoQuestion['EXPECTATIVAS']}")
+                     inbtwProclamandoTwoTwo = gr.Button(f"Expectativas: {proclamandoQuestion['EXPECTATIVAS_TWO']}")
+ 
+         text_output = gr.Textbox(label="Respuesta", lines=10)
+ 
+         text_button = gr.Button("Crear")
+ 
+         text_download = gr.DownloadButton(
+             label="Descargar",
+             value=fileAddresToDownload,
+             every=10
+         )
+ 
+         # Each question button dispatches on its fixed question key; the
+         # handlers ignore the textbox value passed in by Gradio.
+         inbtwContemplando.click(
+             fn=lambda x: predictContemplando("DEVOCIONALMENTE"),
+             inputs=text_input,
+             outputs=text_output
+         )
+ 
+         inbtwContemplandoOne.click(
+             fn=lambda x: predictContemplando("EXÉGESIS"),
+             inputs=text_input,
+             outputs=text_output
+         )
+ 
+         inbtwContemplandoTwo.click(
+             fn=lambda x: predictContemplando("CRISTO"),
+             inputs=text_input,
+             outputs=text_output
+         )
+ 
+         inbtwContemplandoTree.click(
+             fn=lambda x: predictContemplando("ARCO REDENTOR"),
+             inputs=text_input,
+             outputs=text_output
+         )
+ 
+         inbtwContemplandoFour.click(
+             fn=lambda x: predictContemplando("EVANGELION"),
+             inputs=text_input,
+             outputs=text_output
+         )
+ 
+         inbtwContemplandoFourOne.click(
+             fn=lambda x: predictContemplando("EVANGELION_TWO"),
+             inputs=text_input,
+             outputs=text_output
+         )
+ 
+         ##---------------------------------------------------------------------
+ 
+         inbtwProclamando.click(
+             fn=lambda x: predictProclamando("PÚBLICO"),
+             inputs=text_input,
+             outputs=text_output
+         )
+ 
+         inbtwProclamandoOne.click(
+             fn=lambda x: predictProclamando("HISTORIA"),
+             inputs=text_input,
+             outputs=text_output
+         )
+ 
+         inbtwProclamandoTwo.click(
+             fn=lambda x: predictProclamando("EXPECTATIVAS"),
+             inputs=text_input,
+             outputs=text_output
+         )
+ 
+         inbtwProclamandoTwoTwo.click(
+             fn=lambda x: predictProclamando("EXPECTATIVAS_TWO"),
+             inputs=text_input,
+             outputs=text_output
+         )
+ 
+         text_button.click(
+             fn=predictFromInit,
+             inputs=text_input,
+             outputs=text_output
+         )
+ 
+         text_download.click(
+             fn=downloadSermonFile,
+             inputs=text_output
+         )
+ 
+     with gr.Tab("Obtener guía de la comunidad (Preguntas)"):
+         with gr.Row():
+             # Component references:
+             #   File (https://www.gradio.app/docs/gradio/file)
+             #   DownloadButton (https://www.gradio.app/docs/gradio/downloadbutton)
+             with gr.Column():
+                 file_input_question = gr.File()
+                 upload_button_question = gr.UploadButton("Click to Upload a File", file_types=['.pdf'], file_count="multiple")
+             with gr.Column():
+                 temp_slider_question = gr.Slider(
+                     minimum=1,
+                     maximum=10,
+                     value=1,
+                     step=1,
+                     interactive=True,
+                     label="Preguntas",
+                 )
+         text_output_question = gr.Textbox(label="Respuesta", lines=10)
+         text_button_question = gr.Button("Crear guía de preguntas")
+         text_download_question = gr.DownloadButton(
+             label="Descargar",
+             value=fileAddresToDownload,
+             every=10
+         )
+ 
+         text_button_question.click(
+             fn=predictQuestionBuild,
+             outputs=text_output_question
+         )
+ 
+         upload_button_question.upload(upload_file_ex, inputs=upload_button_question, outputs=[file_input_question, text_output_question])
+ 
+     with gr.Tab("Obtener guía de la comunidad (Devocionario)"):
+         with gr.Row():
+             # Component references:
+             #   File (https://www.gradio.app/docs/gradio/file)
+             #   DownloadButton (https://www.gradio.app/docs/gradio/downloadbutton)
+             with gr.Column():
+                 file_input_devotions = gr.File()
+                 upload_button_devotion = gr.UploadButton("Click to Upload a File", file_types=['.pdf'], file_count="multiple")
+             with gr.Column():
+                 temp_slider_question = gr.Slider(
+                     minimum=1,
+                     maximum=10,
+                     value=1,
+                     step=1,
+                     interactive=True,
+                     label="Cantidad",
+                 )
+         text_output_devotions = gr.Textbox(label="Respuesta", lines=10)
+         text_button_devotion = gr.Button("Crear")
+         text_download_question = gr.DownloadButton(
+             label="Descargar",
+             value=fileAddresToDownload,
+             every=10
+         )
+ 
+         text_button_devotion.click(
+             fn=predictDevotionBuild,
+             outputs=text_output_devotions
+         )
+ 
+         upload_button_devotion.upload(upload_file_ex, inputs=upload_button_devotion, outputs=[file_input_devotions, text_output_devotions])
+ 
+ demo.launch()
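
The Descargar buttons above serve the PDF at fileAddresToDownload, which downloadSermonFile regenerates from the current answer using pdfkit; pdfkit in turn shells out to the wkhtmltopdf binary declared in packages.txt below. A minimal standalone sketch of that rendering step (the output file name here is illustrative):

    import pdfkit

    # Renders a text/HTML string to PDF through the wkhtmltopdf binary (see packages.txt).
    pdfkit.from_string("<h1>Sermón</h1><p>Texto del sermón</p>", "sermon_example.pdf")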
llm_call.py ADDED
@@ -0,0 +1,43 @@
+ import os
+ from langchain_community.embeddings import HuggingFaceEmbeddings
+ from langchain_community.vectorstores import Chroma
+ from langchain_google_genai import ChatGoogleGenerativeAI
+ from langchain.prompts import PromptTemplate
+ from langchain.chains import LLMChain
+ 
+ 
+ class GeminiLLM():
+     def __init__(self):
+         self.ACCESS_TOKEN = os.getenv('GOOGLE_GEMINI_TOKEN')
+         self.model_name = "gemini-pro"
+ 
+     def getEmbeddingsModel(self):
+         # Sentence-transformers embeddings used to index documents in Chroma.
+         self.embeddings = HuggingFaceEmbeddings(model_name="all-MiniLM-L6-v2")
+         return self.embeddings
+ 
+     def getRetriver(self, documents):
+         # Build a Chroma store from the given chunks and expose a top-3 retriever.
+         vectorstore = Chroma.from_documents(
+             documents=documents,
+             embedding=self.embeddings,
+             persist_directory="chroma_db_dir",  # local on-disk persistence
+             collection_name="sermon_lab_ai"
+         )
+ 
+         retriever = vectorstore.as_retriever(
+             search_kwargs={"k": 3}
+         )
+ 
+         return (retriever, vectorstore)
+ 
+     def getLLM(self, documents):
+         if os.getenv('GOOGLE_GEMINI_TOKEN') is None:
+             raise ValueError("GOOGLE_GEMINI_TOKEN environment variable not set")
+ 
+         self.llm = ChatGoogleGenerativeAI(
+             model=self.model_name,
+             temperature=0.7,
+             top_k=40,
+             top_p=1
+         )
+ 
+         return self.llm
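
A minimal usage sketch for the GeminiLLM helper above, assuming GOOGLE_GEMINI_TOKEN is set and that getEmbeddingsModel is called before getRetriver (which reads self.embeddings); the sample document is illustrative:

    from langchain.docstore.document import Document
    from llm_call import GeminiLLM

    gemini = GeminiLLM()
    embeddings = gemini.getEmbeddingsModel()           # populates self.embeddings
    docs = [Document(page_content="En el principio...", metadata={"source": "local"})]
    retriever, vectorstore = gemini.getRetriver(docs)  # Chroma store persisted under chroma_db_dir
    llm = gemini.getLLM(docs)                          # raises ValueError if the token is unset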
packages.txt CHANGED
@@ -0,0 +1 @@
+ wkhtmltopdf
requirements.txt CHANGED
@@ -0,0 +1,14 @@
+ sentence-transformers
+ transformers
+ einops
+ accelerate
+ langchain_community
+ langchain
+ pypdf
+ tiktoken
+ unstructured
+ chromadb
+ google-generativeai
+ langchain-google-genai
+ pdfkit
+ gradio
seminar_edition_ai.py ADDED
@@ -0,0 +1,280 @@
+ from langchain.docstore.document import Document
+ from langchain.chains.question_answering import load_qa_chain
+ from langchain.prompts import PromptTemplate
+ from langchain_community.vectorstores import Chroma
+ import chromadb
+ import os
+ from datetime import datetime
+ import pdfkit
+ 
+ # NOTE: llm, retriever, embed_model and sermonPromptMenuGemini are module-level
+ # dependencies that this commit does not define; they are expected to be
+ # initialized elsewhere in the Space before these helpers run.
+ 
+ bookQuestion = dict()
+ 
+ contemplandoQuestion = {
+     'DEVOCIONALMENTE': '¿Cómo estimula Dios su corazón a través de Su Palabra?',
+     'EXÉGESIS': '¿Cuál es el contexto de este pasaje?',
+     'CRISTO': '¿Cómo se comprende este texto a la luz de Cristo?',
+     'ARCO REDENTOR': '¿Cómo encaja este texto en la metanarrativa de las Escrituras?',
+     'EVANGELION': '¿Cómo se declara el evangelio en este texto?',
+     'EVANGELION_TWO': '¿Cómo interpretamos este texto a la luz del evangelio?',
+ }
+ 
+ proclamandoQuestion = {
+     'PÚBLICO': '¿Cuáles son los ídolos en los corazones de las personas que rechazarían el evangelio de Cristo?',
+     'HISTORIA': '¿Cómo el guión de su predicación comunica la historia de Dios?',
+     'EXPECTATIVAS': '¿Qué espera Dios que hagan como respuesta a esta predicación?',
+     'EXPECTATIVAS_TWO': '¿Cuáles son sus expectativas divinas como predicador de este mensaje?',
+ }
+ 
+ bookQuestion['Contemplando'] = contemplandoQuestion
+ bookQuestion['Proclamando'] = proclamandoQuestion
+ 
+ HISTORY_ANSWER = ""
+ 
+ DIRECTORY_PATH_TO_DOWNLOAD = 'data/sermon_lab_ai/download_files'
+ 
+ if not os.path.exists(DIRECTORY_PATH_TO_DOWNLOAD):
+     os.makedirs(DIRECTORY_PATH_TO_DOWNLOAD)
+ 
+ 
+ def getCurrentFileName():
+     # Timestamped PDF name so each generated sermon gets a fresh file.
+     now = datetime.now()
+     strNow = now.strftime("%m%d%Y_%H%M%S")
+     return f"sermonDay_{strNow}.pdf"
+ 
+ 
+ fileAddresToDownload = f"{DIRECTORY_PATH_TO_DOWNLOAD}{os.sep}{getCurrentFileName()}"
+ FILE_PATH_NAME = fileAddresToDownload
+ 
+ 
+ def updatePromptTemplate(promptTemplate, inputVariablesTemplate):
+     # Rebuild the "stuff" QA chain around the given prompt template.
+     prompt = PromptTemplate(template=promptTemplate,
+                             input_variables=inputVariablesTemplate)
+     chain = load_qa_chain(llm, chain_type="stuff", prompt=prompt)
+     return chain
+ 
+ 
+ def predict(query):
+     global HISTORY_ANSWER
+     global retriever
+ 
+     chain = updatePromptTemplate(
+         sermonPromptMenuGemini['BUILD_PREPARE_QUESTIONS'],
+         ['question', 'SERMON_CONTEXT', 'context']
+     )
+ 
+     if query != '':
+         answer = askQuestion(
+             query,
+             chain,
+             retriever,
+             topic=query,
+             KEY='question'
+         )
+         answer = (answer.split("<|assistant|>")[-1]).strip()
+         HISTORY_ANSWER = answer
+         return answer
+     else:
+         return query
+ 
+ 
+ def predictContemplando(queryKey):
+     # Run the LangChain inference for a "Contemplando" question.
+     query = contemplandoQuestion[queryKey]
+     return predict(query)
+ 
+ 
+ def predictProclamando(queryKey):
+     # Run the LangChain inference for a "Proclamando" question.
+     query = proclamandoQuestion[queryKey]
+     return predict(query)
+ 
+ 
+ ####
+ #
+ ####
+ def predictFromInit(sermonTopic):
+     global HISTORY_ANSWER
+     global retriever
+     keyStr = 'SERMON_TOPIC'
+ 
+     if HISTORY_ANSWER == '':
+         chain = updatePromptTemplate(
+             sermonPromptMenuGemini['BUILD_INIT'],
+             [keyStr, 'CANT_VERSICULOS', 'context']
+         )
+     else:
+         chain = updatePromptTemplate(
+             sermonPromptMenuGemini['BUILD_EMPTY'],
+             ['BIBLE_VERSICLE', 'context']
+         )
+         keyStr = 'BIBLE_VERSICLE'
+ 
+     answer = askQuestionInit(
+         '',
+         chain,
+         retriever,
+         topic=sermonTopic,
+         KEY=keyStr
+     )
+ 
+     # Store the answer as a new document and rebuild the retriever over it.
+     if answer != '':
+         doc = Document(page_content=answer, metadata={"source": "local"})
+ 
+         vectorstore = Chroma.from_documents(
+             documents=[doc],
+             embedding=embed_model,
+             persist_directory="chroma_db_dir_sermon",  # local on-disk persistence
+             collection_name="sermon_lab_ai"
+         )
+ 
+         retriever = vectorstore.as_retriever(
+             search_kwargs={"k": 3}
+         )
+ 
+     HISTORY_ANSWER = answer
+     return answer
+ 
+ 
+ ####
+ #
+ ####
+ def predictQuestionBuild(sermonTopic=''):
+     # Wired to a Gradio button with no inputs, so sermonTopic defaults to ''.
+     global retriever
+     chain = updatePromptTemplate(
+         sermonPromptMenuGemini['BUILD_QUESTION'],
+         ['SERMON_IDEA', 'context']
+     )
+     answer = askQuestionEx(
+         '',
+         chain,
+         retriever,
+         topic=sermonTopic,
+         KEY='SERMON_IDEA'
+     )
+     return answer
+ 
+ 
+ ####
+ #
+ ####
+ def predictDevotionBuild(sermonTopic=''):
+     # Wired to a Gradio button with no inputs, so sermonTopic defaults to ''.
+     global retriever
+     global HISTORY_ANSWER
+     chain = updatePromptTemplate(
+         sermonPromptMenuGemini['BUILD_REFLECTIONS'],
+         ['SERMON_IDEA', 'context']
+     )
+     answer = askQuestionEx(
+         HISTORY_ANSWER,
+         chain,
+         retriever,
+         topic=sermonTopic,
+         KEY='SERMON_IDEA'
+     )
+     return answer
+ 
+ 
+ # A utility function for answer generation
+ def askQuestion(
+     question,
+     _chain,
+     _retriever,
+     topic='el amor de Dios',
+     KEY='SERMON_TOPIC'
+ ):
+     # Fetch the chunks relevant to the question from the RAG store.
+     context = _retriever.get_relevant_documents(question)
+ 
+     global HISTORY_ANSWER
+ 
+     return (
+         _chain({
+             KEY: topic,
+             'SERMON_CONTEXT': HISTORY_ANSWER,
+             "input_documents": context,
+             "question": question
+         },
+         return_only_outputs=True)
+     )['output_text']
+ 
+ 
+ # A utility function for answer generation
+ def askQuestionEx(
+     question,
+     _chain,
+     _retriever,
+     topic='el amor de Dios',
+     KEY='SERMON_TOPIC'
+ ):
+     # Fetch the chunks relevant to the question from the RAG store.
+     context = _retriever.get_relevant_documents(question)
+ 
+     return (
+         _chain({
+             KEY: topic,
+             "input_documents": context,
+             "question": question
+         },
+         return_only_outputs=True)
+     )['output_text']
+ 
+ 
+ # A utility function for answer generation
+ def askQuestionInit(
+     question,
+     _chain,
+     _retriever,
+     topic='el amor de Dios',
+     KEY='SERMON_TOPIC'
+ ):
+     # Fetch the chunks relevant to the question from the RAG store.
+     context = _retriever.get_relevant_documents(question)
+ 
+     settings = {
+         KEY: topic,
+         "input_documents": context,
+         "question": question
+     }
+ 
+     if KEY == 'SERMON_TOPIC':
+         settings['CANT_VERSICULOS'] = 5
+ 
+     return (
+         _chain(
+             settings,
+             return_only_outputs=True)
+     )['output_text']
+ 
+ 
+ def downloadSermonFile(answer):
+     # Render the latest answer to the PDF served by the Descargar buttons.
+     if os.path.exists(FILE_PATH_NAME):
+         os.remove(FILE_PATH_NAME)
+ 
+     pdfkit.from_string(
+         answer,
+         FILE_PATH_NAME
+     )
+     return ""
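
seminar_edition_ai.py reads the module-level names llm, retriever, embed_model and sermonPromptMenuGemini, which this commit does not define. A minimal wiring sketch, assuming they are initialized at Space startup from llm_call.GeminiLLM; the seed document and the single prompt template are illustrative placeholders, not the project's real prompts:

    from langchain.docstore.document import Document
    from llm_call import GeminiLLM
    import seminar_edition_ai as sea

    gemini = GeminiLLM()
    sea.embed_model = gemini.getEmbeddingsModel()
    seed = [Document(page_content="seed", metadata={"source": "local"})]
    sea.retriever, _ = gemini.getRetriver(seed)
    sea.llm = gemini.getLLM(seed)
    sea.sermonPromptMenuGemini = {
        # Illustrative template; it must name every input variable the chain receives.
        'BUILD_INIT': ("Escribe un sermón sobre {SERMON_TOPIC} usando "
                       "{CANT_VERSICULOS} versículos.\nContexto:\n{context}"),
    }

    print(sea.predictFromInit("el amor de Dios"))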