SHASWATSINGH3101 committed on
Commit
224a5e3
1 Parent(s): d2574e3

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +10 -2
app.py CHANGED
@@ -1,17 +1,25 @@
 
1
  import torch
2
  import pandas as pd
3
  import numpy as np
4
  import gradio as gr
5
  from transformers import AutoModelForCausalLM, AutoTokenizer, set_seed
6
 
 
 
 
7
  # Load the base model with device_map set to 'auto'
8
  model = AutoModelForCausalLM.from_pretrained(
9
  "SHASWATSINGH3101/Qwen2-0.5B-Instruct_lora_merge",
10
- device_map='auto'
 
11
  )
12
 
13
  # Load the tokenizer
14
- tokenizer = AutoTokenizer.from_pretrained("SHASWATSINGH3101/Qwen2-0.5B-Instruct_lora_merge")
 
 
 
15
  tokenizer.pad_token = tokenizer.eos_token
16
 
17
  def gen(model, p, maxlen=100, sample=True):
 
1
import os

# Point the Hugging Face cache at a writable location BEFORE importing
# transformers: TRANSFORMERS_CACHE is read once at library import time, so
# assigning it after the import (as this revision does) has no effect there.
# HF_HOME is the current, non-deprecated variable for relocating the cache;
# set both so old and new transformers versions agree on the directory.
os.environ['TRANSFORMERS_CACHE'] = '/app/.cache'
os.environ.setdefault('HF_HOME', '/app/.cache')

import torch
import pandas as pd
import numpy as np
import gradio as gr
from transformers import AutoModelForCausalLM, AutoTokenizer, set_seed

# Load the base (LoRA-merged) model. device_map='auto' lets accelerate place
# the weights on whatever device(s) are available; cache_dir pins downloads
# to the writable /app/.cache directory (matching the env vars above).
model = AutoModelForCausalLM.from_pretrained(
    "SHASWATSINGH3101/Qwen2-0.5B-Instruct_lora_merge",
    device_map='auto',
    cache_dir='/app/.cache',
)

# Load the tokenizer from the same repository, using the same cache.
tokenizer = AutoTokenizer.from_pretrained(
    "SHASWATSINGH3101/Qwen2-0.5B-Instruct_lora_merge",
    cache_dir='/app/.cache',
)
# Presumably this tokenizer ships without a pad token (common for causal-LM
# checkpoints) — reuse EOS so padding during generation works. TODO confirm.
tokenizer.pad_token = tokenizer.eos_token
24
 
25
  def gen(model, p, maxlen=100, sample=True):