facebook-bart-base / generate_answer.py
from transformers import BartTokenizer, BartForConditionalGeneration
from langchain.prompts import PromptTemplate

# Load the tokenizer and the BART model with its generation head;
# plain BartModel has no LM head, so calling generate() on it fails.
tokenizer = BartTokenizer.from_pretrained('facebook/bart-base')
model = BartForConditionalGeneration.from_pretrained('facebook/bart-base')
def generate_answer(context):
    # Build the summarisation prompt from the input context
    prompt_template = PromptTemplate(
        template="Summarise the following context: {context}",
        input_variables=["context"],
    )
    formatted_prompt = prompt_template.format(context=context)
    # Tokenize the prompt for the model
    encoded_input = tokenizer(formatted_prompt, return_tensors='pt')
    # Run the model's generate method for text generation
    output = model.generate(**encoded_input)
    # Decode the generated token IDs back to text, dropping special tokens
    decoded_output = tokenizer.decode(output[0], skip_special_tokens=True)
    return decoded_output
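
# A minimal usage sketch, not part of the original file; the sample text
# below is a hypothetical input chosen only for illustration.
if __name__ == "__main__":
    sample_context = (
        "BART is a denoising autoencoder for pretraining sequence-to-sequence "
        "models. It combines a bidirectional encoder with an autoregressive decoder."
    )
    print(generate_answer(sample_context))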