"""Mental Health Counseling Chat — a Streamlit front-end for the Groq
``google/gemma-2-9b-it`` model.

The app shows a blurred flower background, lets the user browse a few example
question/answer pairs from the ``Amod/mental_health_counseling_conversations``
dataset, and forwards the user's concern to the Groq completions endpoint.
"""

import os

import requests
import streamlit as st
from datasets import load_dataset

# NOTE(review): these transformers imports are unused in this file; kept so the
# module's import-time side effects are unchanged.  Safe to drop if nothing
# else depends on them.
from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline

# Direct URL of the background image.
flower_image_url = "https://i.postimg.cc/hG2FG85D/2.png"

# Inject custom CSS for the background with a centered and blurred image.
# NOTE(review): the original <style> payload was stripped when the file was
# collapsed onto one line (st.markdown received an empty f-string and
# flower_image_url was never used); this reconstructs the documented intent —
# confirm against the intended design.
st.markdown(
    f"""
    <style>
    .blurred-background {{
        position: fixed;
        inset: 0;
        background-image: url("{flower_image_url}");
        background-position: center;
        background-size: cover;
        filter: blur(8px);
        z-index: -1;
    }}
    </style>
    """,
    unsafe_allow_html=True,
)

# Add the blurred background div (targeted by the CSS class above).
st.markdown('<div class="blurred-background"></div>', unsafe_allow_html=True)

# ----------------------- Application Code Starts here -----------------------

# Groq API Configuration.
# Ensure LawersGuideAPIKey is set in your environment variables.
# (Fix: `os` was used here without ever being imported.)
api_key = os.environ.get("LawersGuideAPIKey")
# NOTE(review): Groq's OpenAI-compatible API normally uses
# /openai/v1/chat/completions with a "model" field in the JSON body; this
# per-model completions path looks unusual — verify against the Groq docs.
base_url = "https://api.groq.com/openai/v1/models/google/gemma-2-9b-it/completions"
headers = {
    "Authorization": f"Bearer {api_key}",
    "Content-Type": "application/json",
}


# Cache responses per (prompt, max_tokens, temperature) tuple.
# Fix: was @st.cache_resource, which is meant for global resources such as
# models/connections; st.cache_data is the correct primitive for a
# serializable (string) return value.
@st.cache_data
def query_groq_model(prompt, max_tokens=100, temperature=0.7):
    """Send *prompt* to the Groq completions endpoint and return the text.

    Returns the stripped completion text on success, or a human-readable
    error string on any failure (network, HTTP, or unexpected payload) —
    callers therefore never see an exception from this function.
    """
    try:
        payload = {
            "prompt": prompt,
            "max_tokens": max_tokens,
            "temperature": temperature,
            "top_p": 1.0,
            "frequency_penalty": 0.0,
            "presence_penalty": 0.0,
            "n": 1,
        }
        # Fix: `requests` was used here without ever being imported.
        response = requests.post(base_url, headers=headers, json=payload)
        response.raise_for_status()
        result = response.json()
        return result["choices"][0]["text"].strip()
    except Exception as e:
        # Deliberate best-effort: surface the error as the "reply" so the UI
        # can always display something.
        return f"Error querying the model: {e}"


# Streamlit App
st.title("Mental Health Counseling Chat")
st.markdown("""
Welcome to the **Mental Health Counseling Chat Application**. 
This platform is designed to provide **supportive, positive, and encouraging responses** using the Groq `google/gemma-2-9b-it` model. 
""")


# cache_resource is appropriate here: a datasets.Dataset is a non-serializable
# global resource loaded once per session.
@st.cache_resource
def load_counseling_dataset():
    """Load and cache the example counseling-conversations dataset."""
    # (Fix: dropped a redundant function-local `from datasets import
    # load_dataset`; the top-level import is already in scope.)
    return load_dataset("Amod/mental_health_counseling_conversations")


dataset = load_counseling_dataset()

# Display example questions and answers from dataset.
if st.checkbox("Show Example Questions and Answers from Dataset"):
    # Display 3 random samples (fixed seed so the examples are stable).
    sample = dataset["train"].shuffle(seed=42).select(range(3))
    for example in sample:
        st.markdown(f"**Question:** {example.get('context', 'N/A')}")
        st.markdown(f"**Answer:** {example.get('response', 'N/A')}")
        st.markdown("---")

# User input for mental health concerns.
user_input = st.text_area("Your question or concern:", placeholder="Type your question here...")

if st.button("Get Supportive Response"):
    if user_input.strip():
        try:
            # Query Groq model.
            prompt = f"User: {user_input}\nCounselor:"
            counselor_reply = query_groq_model(prompt, max_tokens=150, temperature=0.7)
            st.subheader("Counselor's Response:")
            st.write(counselor_reply)
        except Exception as e:
            # NOTE(review): query_groq_model already swallows its own
            # exceptions, so this branch is effectively unreachable; kept as a
            # last-resort guard for the surrounding Streamlit calls.
            st.error(f"An error occurred while querying the model: {e}")
    else:
        st.error("Please enter a question or concern to receive a response.")

# Sidebar resources.
st.sidebar.header("Additional Mental Health Resources")
st.sidebar.markdown("""
- [Mental Health Foundation](https://www.mentalhealth.org)
- [Mind](https://www.mind.org.uk)
- [National Suicide Prevention Lifeline](https://suicidepreventionlifeline.org)
""")
st.sidebar.info("This application is not a replacement for professional counseling. If you are in crisis, please seek professional help immediately.")