import os

import streamlit as st
from dotenv import load_dotenv  # Importing load_dotenv to load environment variables
from langchain import HuggingFaceHub

# Load environment variables from the .env file
load_dotenv()

# Set your Hugging Face API token from the environment variable
HUGGINGFACE_API_TOKEN = os.getenv("HUGGINGFACE_API_TOKEN")


# Function to return the response from the Hugging Face model
def load_answer(question):
    try:
        # Initialize the Hugging Face model using LangChain's HuggingFaceHub class
        llm = HuggingFaceHub(
            repo_id="mistralai/Mistral-7B-Instruct-v0.3",  # Hugging Face model repo
            huggingfacehub_api_token=HUGGINGFACE_API_TOKEN,  # Pass your API token
            model_kwargs={"temperature": 0.1},  # Set a strictly positive temperature
        )
        # Call the model with the user's question and get the response using .predict()
        answer = llm.predict(question)
        return answer
    except Exception as e:
        # Capture and return any exceptions or errors
        return f"Error: {str(e)}"


# Streamlit App UI starts here
st.set_page_config(page_title="Hugging Face Demo", page_icon=":robot:")
st.header("Hugging Face Demo")


# Function to get user input
def get_text():
    input_text = st.text_input("You: ", key="input")
    return input_text


# Get user input
user_input = get_text()

# Create a button for generating the response
submit = st.button('Generate')

# If the generate button is clicked and user input is not empty
if submit and user_input:
    response = load_answer(user_input)
    st.subheader("Answer:")
    st.write(response)
elif submit:
    st.warning("Please enter a question.")  # Warning for empty input
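
# Usage sketch: one way to run this locally. Assumptions not in the original:
# the script is saved as app.py, and "hf_your_token_here" is a hypothetical
# placeholder, not a real token.
#
#   .env file placed next to the script (load_dotenv() reads it by default;
#   keep this file out of version control):
#       HUGGINGFACE_API_TOKEN=hf_your_token_here
#
#   Then launch the app from a shell with Streamlit's standard run command:
#       streamlit run app.py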