import streamlit as st
from transformers import pipeline
from ldclient import LDClient, Config, Context
import os
import torch

# Retrieve the LaunchDarkly SDK key from environment variables
ld_sdk_key = os.getenv("LAUNCHDARKLY_SDK_KEY")

# Initialize the LaunchDarkly client with the correct configuration
ld_client = LDClient(Config(ld_sdk_key))

# Build the user context for LaunchDarkly with a custom "groups" attribute
context = (
    Context.builder("test-user")
    .set("groups", "beta_testers")
    .build()
)

# Function to get the AI model configuration from LaunchDarkly
def get_model_config():
    flag_key = "model-swap"  # Replace with your flag key
    # Create a context using the Context builder—it can be anything, but for this
    # use case, I'm just defaulting to myself.
    context = Context.builder("context-key-123abc").name("AJ").build()
    flag_variation = ld_client.variation(flag_key, context, default={})
    model_id = flag_variation.get("modelID", "distilbert-base-uncased")
    return model_id

# Function to translate sentiment labels to user-friendly terms
def translate_label(label):
    label_mapping = {
        "LABEL_0": "🤬 Negative",
        "LABEL_1": "😶 Neutral",
        "LABEL_2": "😃 Positive"
    }
    return label_mapping.get(label, "Unknown")

# Streamlit app
st.title("Sentiment Analysis Demo with AI Model Flags")

user_input = st.text_area("Enter text for sentiment analysis:")

if st.button("Analyze"):
    # Look up which model the feature flag is currently serving
    model_id = get_model_config()
    model = pipeline("sentiment-analysis", model=model_id)

    # Display model details
    st.write(f"Using model: {model_id}")

    # Perform sentiment analysis
    results = model(user_input)
    st.write("Results:")

    # Translate and display the results
    for result in results:
        label = translate_label(result['label'])
        score = result['score']
        st.write(f"Sentiment: {label}, Confidence: {score:.2f}")

# Closing the LD client
ld_client.close()
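
# Example flag payload (an assumption for illustration, not part of the original
# setup): the "model-swap" flag is expected to serve a JSON variation containing a
# Hugging Face model ID under the "modelID" key, for example:
#
#   {"modelID": "cardiffnlp/twitter-roberta-base-sentiment"}
#
# Any sentiment model compatible with transformers' pipeline() will work here; the
# three-class LABEL_0/LABEL_1/LABEL_2 mapping in translate_label() matches models
# like the one shown, while a two-class model would need a different mapping.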