from transformers import GPT2Tokenizer, GPT2LMHeadModel
from langchain.prompts import PromptTemplate

tokenizer = GPT2Tokenizer.from_pretrained('gpt2')
model = GPT2LMHeadModel.from_pretrained('gpt2')

def generate_answer(question):
    # Build the prompt from a reusable template
    # (PromptTemplate takes input_variables only; it has no output_variables parameter)
    prompt_template = PromptTemplate(
        template="Answer the following question within 100 words: {question}",
        input_variables=["question"],
    )
    formatted_prompt = prompt_template.format(question=question)

    # Tokenize the prompt for the model
    encoded_input = tokenizer(formatted_prompt, return_tensors='pt')

    # Generate a continuation; max_new_tokens caps the generated text rather than
    # the combined prompt + output length that max_length would impose
    output = model.generate(
        **encoded_input,
        max_new_tokens=100,
        pad_token_id=tokenizer.eos_token_id,
    )

    # Decode the generated token IDs back to text
    return tokenizer.decode(output[0], skip_special_tokens=True)
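
# Example usage: a minimal sketch showing how generate_answer might be called;
# the question string below is illustrative, not from the original.
if __name__ == "__main__":
    answer = generate_answer("What is machine learning?")
    print(answer)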