abdulllah01 committed
Commit eb33e79
1 Parent(s): 247f7a9

Update app.py

Files changed (1)
  1. app.py +11 -5
app.py CHANGED
@@ -1,12 +1,17 @@
 import streamlit as st
-from transformers import AutoTokenizer, AutoModelForCausalLM
+from transformers import AutoTokenizer, AutoModelForCausalLM, AutoConfig
 
 # Load the model and tokenizer from your Hugging Face Hub repository
 model_name = "abdulllah01/outputs" # Replace with your Hugging Face repo name
 
-# Disable quantization explicitly if it's set in the config
+# Load the model configuration first and modify it if necessary
+config = AutoConfig.from_pretrained(model_name)
+if hasattr(config, 'quantization_config'):
+    config.quantization_config = None  # Disable any quantization settings
+
+# Load the model and tokenizer
 tokenizer = AutoTokenizer.from_pretrained(model_name)
-model = AutoModelForCausalLM.from_pretrained(model_name, device_map=None, quantization_config=None)
+model = AutoModelForCausalLM.from_pretrained(model_name, config=config)
 
 # Streamlit interface
 st.title("Tech Support Chatbot")
@@ -16,7 +21,8 @@ st.write("Ask your technical support questions below:")
 user_input = st.text_input("Your question:", "")
 
 if user_input:
-    # Generate a response using the pipeline
-    response = model.generate(input_ids=tokenizer.encode(user_input, return_tensors="pt"), max_length=100)
+    # Generate a response using the model
+    inputs = tokenizer.encode(user_input, return_tensors="pt")
+    response = model.generate(inputs, max_length=100)
     answer = tokenizer.decode(response[0], skip_special_tokens=True)
     st.write("**Answer:**", answer)