abdulllah01 committed
Commit 247f7a9
1 Parent(s): bee1be5

Update app.py

Files changed (1): app.py (+5 -8)
app.py CHANGED
@@ -1,15 +1,12 @@
 import streamlit as st
-from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline
+from transformers import AutoTokenizer, AutoModelForCausalLM
 
 # Load the model and tokenizer from your Hugging Face Hub repository
 model_name = "abdulllah01/outputs"  # Replace with your Hugging Face repo name
 
-# Load the model without quantization
+# Disable quantization explicitly if it's set in the config
 tokenizer = AutoTokenizer.from_pretrained(model_name)
-model = AutoModelForCausalLM.from_pretrained(model_name)
-
-# Initialize the pipeline for text generation
-qa_pipeline = pipeline("text-generation", model=model, tokenizer=tokenizer)
+model = AutoModelForCausalLM.from_pretrained(model_name, device_map=None, quantization_config=None)
 
 # Streamlit interface
 st.title("Tech Support Chatbot")
@@ -20,6 +17,6 @@ user_input = st.text_input("Your question:", "")
 
 if user_input:
     # Generate a response using the pipeline
-    response = qa_pipeline(user_input, max_length=100, num_return_sequences=1)
-    answer = response[0]["generated_text"]
+    response = model.generate(input_ids=tokenizer.encode(user_input, return_tensors="pt"), max_length=100)
+    answer = tokenizer.decode(response[0], skip_special_tokens=True)
     st.write("**Answer:**", answer)
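
For reference, here is a sketch of app.py as it should read after this commit, reconstructed from the two hunks above. The diff does not show new lines 13-16, so the st.text_input call is taken from the second hunk's header and anything else in that gap is omitted; inline comments are lightly reworded to match the new code.

app.py (reconstructed after this commit):

import streamlit as st
from transformers import AutoTokenizer, AutoModelForCausalLM

# Load the model and tokenizer from your Hugging Face Hub repository
model_name = "abdulllah01/outputs"  # Replace with your Hugging Face repo name

# Load without quantization; device_map=None keeps the model on the default device
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name, device_map=None, quantization_config=None)

# Streamlit interface
st.title("Tech Support Chatbot")

# (New lines 13-16 are not visible in the diff; the text_input call below
# comes from the second hunk's header.)
user_input = st.text_input("Your question:", "")

if user_input:
    # Generate a response directly with model.generate (no pipeline)
    response = model.generate(input_ids=tokenizer.encode(user_input, return_tensors="pt"), max_length=100)
    answer = tokenizer.decode(response[0], skip_special_tokens=True)
    st.write("**Answer:**", answer)

Two caveats worth noting: max_length=100 caps the prompt plus the completion at 100 tokens, so long questions leave little room for an answer (max_new_tokens bounds only the continuation in recent transformers releases), and generate returns the full sequence, so the decoded answer will repeat the user's question at the start.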