---
license: apache-2.0
language:
- bn
---

This loads a PEFT adapter (`trained-model3`) on top of `deepseek-ai/deepseek-math-7b-base` with 4-bit quantization. To run:

```
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig
from peft import PeftConfig, PeftModel

device = "cuda"  # 4-bit quantization via bitsandbytes requires a CUDA GPU

# 4-bit NF4 quantization with nested (double) quantization; compute in bfloat16
bnb_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_use_double_quant=True,
    bnb_4bit_quant_type="nf4",
    bnb_4bit_compute_dtype=torch.bfloat16,
)

# The adapter config records the base model it was trained on
# (deepseek-ai/deepseek-math-7b-base)
PEFT_MODEL = "trained-model3"
config = PeftConfig.from_pretrained(PEFT_MODEL)

model = AutoModelForCausalLM.from_pretrained(
    config.base_model_name_or_path,
    return_dict=True,
    quantization_config=bnb_config,
    device_map="sequential",
    trust_remote_code=True,
)

tokenizer = AutoTokenizer.from_pretrained(config.base_model_name_or_path)
tokenizer.pad_token = tokenizer.eos_token  # base tokenizer has no pad token

# Attach the trained adapter weights to the quantized base model
model = PeftModel.from_pretrained(model, PEFT_MODEL)

generation_config = model.generation_config
generation_config.max_new_tokens = 2048
generation_config.temperature = 0.7
generation_config.top_p = 0.7
generation_config.do_sample = True
generation_config.num_return_sequences = 1
generation_config.pad_token_id = tokenizer.eos_token_id
generation_config.eos_token_id = tokenizer.eos_token_id

ques = "..."  # the problem statement to solve

prompt = f"""Problem Statement: {ques}"""

encoding = tokenizer(prompt, return_tensors="pt").to(device)
with torch.inference_mode():
    outputs = model.generate(
        input_ids=encoding.input_ids,
        attention_mask=encoding.attention_mask,
        generation_config=generation_config,
    )
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```
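For repeated queries it helps to load the model once and wrap the generate/decode step in a small helper. A minimal sketch, assuming the `model`, `tokenizer`, `generation_config`, and `device` objects created above; the `solve` function name and the sample Bengali question are illustrative, not part of this repository:

```
def solve(ques: str) -> str:
    # Same prompt format as the snippet above
    prompt = f"""Problem Statement: {ques}"""
    encoding = tokenizer(prompt, return_tensors="pt").to(device)
    with torch.inference_mode():
        outputs = model.generate(
            input_ids=encoding.input_ids,
            attention_mask=encoding.attention_mask,
            generation_config=generation_config,
        )
    return tokenizer.decode(outputs[0], skip_special_tokens=True)

# Example: "If 5 is added to three times a number, the result is 20; what is the number?"
print(solve("একটি সংখ্যার তিনগুণের সাথে ৫ যোগ করলে ২০ হয়। সংখ্যাটি কত?"))
```

Because `do_sample=True` with `temperature=0.7`, repeated calls may produce different solutions; set `generation_config.do_sample = False` for deterministic, greedy output.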