Create generate_answer.py
generate_answer.py
ADDED
@@ -0,0 +1,17 @@
+from transformers import BartTokenizer, BartForConditionalGeneration
+from langchain.prompts import PromptTemplate
+
+tokenizer = BartTokenizer.from_pretrained('facebook/bart-base')
+model = BartForConditionalGeneration.from_pretrained('facebook/bart-base')
+
+def generate_answer(context):
+    prompt_template = PromptTemplate(template="Summarise the following context: {context}",
+                                     input_variables=["context"])
+    # Fill the prompt template with the provided context
+    format_prompt = prompt_template.format(context=context)
+    encoded_input = tokenizer(format_prompt, return_tensors='pt')
+    # Run the model's generate method to produce summary token ids
+    output = model.generate(**encoded_input)
+    # Decode the generated token ids back to text
+    decoded_output = tokenizer.decode(output[0], skip_special_tokens=True)
+    return decoded_output
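
A minimal usage sketch of the added function, assuming the file is importable as a module named generate_answer and that the transformers and langchain packages are installed (note that facebook/bart-base is not fine-tuned for summarisation, so output quality may be limited):

    # Hypothetical usage example, not part of the commit
    from generate_answer import generate_answer

    summary = generate_answer("BART is a sequence-to-sequence model pretrained by denoising corrupted text.")
    print(summary)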