import streamlit as st
import numpy as np
from tensorflow.keras.models import load_model
from tensorflow.keras.preprocessing.text import Tokenizer
from tensorflow.keras.preprocessing.sequence import pad_sequences
import pickle
# Load the LSTM model
lstm_model = load_model('lstm.h5')
# Load the Tokenizer used during training
with open('tokenizer.pkl', 'rb') as tokenizer_file:
    tokenizer = pickle.load(tokenizer_file)
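# Note: this must be the same tokenizer that was fitted on the training corpus,
# so the word-to-index mapping matches what the LSTM saw during training.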
# Define class labels and their numerical mapping
class_mapping = {"Angry": 0, "Sad": 1, "Joy": 2, "Surprise": 3}
numerical_to_label = {v: k for k, v in class_mapping.items()}
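# numerical_to_label inverts the mapping so a predicted index can be turned
# back into its emotion name, e.g. 0 -> "Angry", 2 -> "Joy".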
st.title('VibeConnect')
# Define the emojis you want to use
emojis = ["🤣","🥲","🥹","😇","😍","😋","🤪","🤩","🥳","😭","😡","😦","😧","😮","🥴","🤮","🤧","😷"]
# Create a string of emojis to use as the background
background_emojis = " ".join(emojis * 10) # Repeat the emojis to cover the background
# Centered app title (plain inline HTML; any centering markup works here)
centered_text = """
<h2 style="text-align: center;">VibeConnect</h2>
"""
# Set the HTML as the app's content
st.markdown(centered_text, unsafe_allow_html=True)
# Use HTML and CSS to set the background
background_style = f"""
{background_emojis}
"""
# Set the HTML as the app's background
st.markdown(background_style, unsafe_allow_html=True)
faded_youtube_logo = """
"""
# Render the faded YouTube logo markup
st.markdown(faded_youtube_logo, unsafe_allow_html=True)
# Text input for the user to enter a sequence
user_input = st.text_input('Enter a Text:')
if st.button('Predict'):
    # Tokenize and pad the user input (maxlen must match the sequence length used in training)
    sequence = tokenizer.texts_to_sequences([user_input])
    padded_sequence = pad_sequences(sequence, maxlen=128)
    # Make predictions
    prediction = lstm_model.predict(padded_sequence)
    # Emojis indexed to match the class mapping: Angry, Sad, Joy, Surprise
    emotion_emojis = ["😡", "😭", "😄", "😯"]
    threshold = 0.5
    # Display every label whose predicted probability exceeds the threshold
    for i in range(len(prediction[0])):
        label = numerical_to_label[i]
        probability = prediction[0][i]
        if probability > threshold:
            st.write(f'{label} {emotion_emojis[i]}')
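# To try the app locally (assuming this script is saved as app.py):
#   streamlit run app.py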