import streamlit as st
from huggingface_hub import InferenceClient
import os

# Retrieve the Hugging Face token from environment variables
hf_token = os.getenv("HF_TOKEN")

# Initialize the Hugging Face Inference Client
# Note: chat completion needs an instruction-tuned model with a chat
# template, so the "-Instruct" variant is used rather than base Qwen/Qwen2-7B.
client = InferenceClient(
    model="Qwen/Qwen2-7B-Instruct",
    token=hf_token,
)

def get_chat_completion(message):
    try:
        response = client.chat_completion(
            messages=[{"role": "user", "content": message}],
            max_tokens=500,
            stream=False,
        )
        # chat_completion returns a ChatCompletionOutput object, not a list,
        # so the reply is accessed via attributes rather than dict indexing.
        return response.choices[0].message.content
    except Exception as e:
        return f"Error: {e}"

# Streamlit app layout
st.title("Chat with Hugging Face Model")

# Input from the user
user_input = st.text_input("Enter your message:")

if st.button("Send"):
    if user_input:
        # Get response from the model
        response = get_chat_completion(user_input)
        st.write("**Response:**")
        st.write(response)
    else:
        st.write("Please enter a message.")
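
# Optional: a streaming variant of get_chat_completion. This is a minimal
# sketch, assuming your installed huggingface_hub supports stream=True on
# chat_completion (chunks expose incremental text at
# chunk.choices[0].delta.content) and your Streamlit version provides
# st.write_stream (added in 1.31). It is not used by the app above.
def stream_chat_completion(message):
    for chunk in client.chat_completion(
        messages=[{"role": "user", "content": message}],
        max_tokens=500,
        stream=True,
    ):
        # Some chunks (e.g. the final one) may carry no text delta
        delta = chunk.choices[0].delta.content
        if delta:
            yield delta

# Example usage inside the button handler, replacing the two st.write calls:
#     st.write_stream(stream_chat_completion(user_input))
# Run the app with:
#     HF_TOKEN=<your token> streamlit run app.py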