vmuchinov committed
Commit 06dfd60
1 Parent(s): 06ee0b7

Upload app.py

Files changed (1)
app.py +3 -3
app.py CHANGED
@@ -12,7 +12,7 @@ DEFAULT_MAX_NEW_TOKENS = 1024
 MAX_INPUT_TOKEN_LENGTH = int(os.getenv("MAX_INPUT_TOKEN_LENGTH", "4096"))
 ACCESS_TOKEN = os.getenv("HF_TOKEN", "")
 
-model_id = "Qwen/Qwen2.5-7B-Instruct"
+model_id = "Qwen/Qwen2.5-14B-Instruct-GPTQ-Int8"
 model = AutoModelForCausalLM.from_pretrained(model_id, torch_dtype=torch.float16, device_map="auto", token=ACCESS_TOKEN)
 tokenizer = AutoTokenizer.from_pretrained(model_id, token=ACCESS_TOKEN)
 tokenizer.use_default_system_prompt = False
@@ -104,8 +104,8 @@ chat_interface = gr.Interface(
     value=1.2,
     ),
 ],
-title="Chat with the Model",
-description="Provide system settings and a message to interact with the model.",
+title="Model testing",
+description="Provide system settings and a prompt to interact with the model.",
 )
 
 chat_interface.queue(max_size=20).launch(share = True)
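
For reference, a minimal standalone sketch (not part of the commit) of loading and querying the new checkpoint outside the Gradio app. It assumes a GPTQ-capable backend (e.g. optimum with auto-gptq or gptqmodel) is installed and enough GPU memory is available; the generation values simply mirror the defaults visible in app.py (max_new_tokens=1024, repetition_penalty=1.2), and the example prompt is hypothetical.

import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

# Mirrors the loading call in app.py; the checkpoint is public, so no token is passed here.
model_id = "Qwen/Qwen2.5-14B-Instruct-GPTQ-Int8"
model = AutoModelForCausalLM.from_pretrained(
    model_id, torch_dtype=torch.float16, device_map="auto"
)
tokenizer = AutoTokenizer.from_pretrained(model_id)

# Build a chat prompt with the tokenizer's chat template and generate a reply,
# using the same defaults the app exposes (1024 new tokens, repetition penalty 1.2).
messages = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": "Hello!"},
]
input_ids = tokenizer.apply_chat_template(
    messages, add_generation_prompt=True, return_tensors="pt"
).to(model.device)
output_ids = model.generate(input_ids, max_new_tokens=1024, repetition_penalty=1.2)
print(tokenizer.decode(output_ids[0][input_ids.shape[-1]:], skip_special_tokens=True))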