import streamlit as st
from transformers import pipeline
from ldclient import LDClient, Config, Context
import os
import torch

# Retrieve the LaunchDarkly SDK key from environment variables
ld_sdk_key = os.getenv("LAUNCHDARKLY_SDK_KEY")

# Initialize the LaunchDarkly client with the correct configuration
ld_client = LDClient(Config(ld_sdk_key))

# Function to get the AI model configuration from LaunchDarkly
def get_model_config(user_name):
    flag_key = "model-swap"  # Replace with your flag key

    # Create a context using the Context Builder. The context can be anything,
    # but for this use case, I'm just defaulting to myself.
    context = Context.builder(f"context-key-{user_name}").name(user_name).build()

    flag_variation = ld_client.variation(flag_key, context, default={})
    model_id = flag_variation.get("modelID", "distilbert-base-uncased")
    return model_id

# Function to translate sentiment labels into user-friendly terms
def translate_label(label):
    label_mapping = {
        "LABEL_0": "🤬 Negative",
        "LABEL_1": "😶 Neutral",
        "LABEL_2": "😃 Positive"
    }
    return label_mapping.get(label, "Unknown")

# Streamlit app
st.title("Sentiment Analysis Demo with AI Model Flags")

user_input = st.text_area("Enter text for sentiment analysis:")

# Add an input box for the user to enter their name
name = st.text_input("Enter your name", "AJ")

# If no name is entered, fall back to "Anonymous"
if not name:
    name = "Anonymous"

if st.button("Analyze"):
    model_id = get_model_config(name)
    model = pipeline("sentiment-analysis", model=model_id)

    # Display model details
    st.write(f"Using model: {model_id}")

    # Perform sentiment analysis
    results = model(user_input)
    st.write("Results:")

    # Translate and display the results
    for result in results:
        label = translate_label(result['label'])
        score = result['score']
        st.write(f"Sentiment: {label}, Confidence: {score:.2f}")

# Close the LaunchDarkly client
ld_client.close()
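
# Illustrative sketch (an assumption, not from the original): get_model_config
# reads the "modelID" key out of the JSON variation served by the "model-swap"
# flag, so each flag variation is assumed to be a JSON object shaped like:
#
#   {"modelID": "cardiffnlp/twitter-roberta-base-sentiment"}
#
# The model ID above is only an example of a Hugging Face model whose output
# labels (LABEL_0/1/2) line up with the translate_label mapping. With the flag
# configured and LAUNCHDARKLY_SDK_KEY exported, the app can be started with
# (assuming the script is saved as app.py):
#
#   streamlit run app.py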