import streamlit as st
import requests
import os
# Hugging Face API details
API_URL = "https://api-inference.huggingface.co/models/facebook/blenderbot-400M-distill"
# Read the Hugging Face API token from the secret / environment variable named 'rag'
api_key = os.getenv('rag')
headers = {"Authorization": f"Bearer {api_key}"}
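# If the 'rag' secret is not set, api_key is None and requests go out with the header
# "Bearer None", which the API rejects with a 401. A minimal guard (a sketch, not part
# of the original app) could look like this:
# if not api_key:
#     st.error("API key not found. Set the 'rag' secret / environment variable.")
#     st.stop()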
# Function to query the model
def query(payload):
    response = requests.post(API_URL, headers=headers, json=payload)
    return response.json()
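# For reference, the Inference API typically returns one of these shapes for this model
# (illustrative examples, not guaranteed by the API):
#   [{"generated_text": "I'm sorry to hear that. ..."}]                       # successful generation
#   {"error": "Model ... is currently loading", "estimated_time": 20.0}       # cold start / error
# The response handling further down checks for both a dict and a list.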
# Streamlit UI for Mental Health Chatbot
st.title("Mental Health Chatbot")
st.write("""
This chatbot provides responses to mental health-related queries.
Please note that this is an AI-based tool and is not a substitute for professional mental health support.
""")
# User input
user_input = st.text_input("How can I help you today?")
if st.button("Get Response"):
    if user_input:
        # Query the model
        output = query({"inputs": user_input})
        # Show the raw API response for debugging purposes
        st.write("**API Response:**", output)
        # Handle both response shapes: a dict with 'generated_text', or a list of such dicts
        if 'generated_text' in output:
            st.write(f"**Response:** {output['generated_text']}")
        elif isinstance(output, list) and len(output) > 0:
            st.write(f"**Response:** {output[0].get('generated_text', 'No response available.')}")
        else:
            st.write("**Response:** Unable to retrieve a valid response.")
    else:
        st.write("Please enter a query to get a response.")