import streamlit as st
import openai
import os
# Securely fetch the OpenAI API key
openai.api_key = os.getenv("OPENAI_API_KEY")
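
# Reference catalogue of well-known ML/DL model families and techniques
# (kept for context; not referenced elsewhere in this script)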
KNOWN_MODELS = [
    # General ML models
    "Neural Networks", "Decision Trees", "Support Vector Machines",
    "Random Forests", "Linear Regression", "Reinforcement Learning",
    "Logistic Regression", "k-Nearest Neighbors", "Naive Bayes",
    "Gradient Boosting Machines", "Regularization Techniques",
    "Ensemble Methods", "Time Series Analysis",
    # Deep Learning models
    "Deep Learning", "Convolutional Neural Networks",
    "Recurrent Neural Networks", "Transformer Models",
    "Generative Adversarial Networks", "Autoencoders",
    "Bidirectional LSTM", "Residual Networks (ResNets)",
    "Variational Autoencoders",
    # Computer Vision models and techniques
    "Object Detection (e.g., YOLO, SSD)", "Semantic Segmentation",
    "Image Classification", "Face Recognition", "Optical Character Recognition (OCR)",
    "Pose Estimation", "Style Transfer", "Image-to-Image Translation",
    "Image Generation", "Capsule Networks",
    # NLP models and techniques
    "BERT", "GPT", "ELMo", "T5", "Word2Vec", "Doc2Vec",
    "Topic Modeling", "Sentiment Analysis", "Text Classification",
    "Machine Translation", "Speech Recognition", "Sequence-to-Sequence Models",
    "Attention Mechanisms", "Named Entity Recognition", "Text Summarization"
]
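
# The two helpers below use the pre-1.0 openai SDK's ChatCompletion interface:
# each sends a single user message to GPT-4 and returns the text of the first reply.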
def recommend_ai_model_via_gpt(description):
    messages = [{"role": "user", "content": description}]
    response = openai.ChatCompletion.create(model="gpt-4", messages=messages)
    return response['choices'][0]['message']['content'].strip()
def explain_recommendation(model_name):
    messages = [{"role": "user", "content": f"Why is {model_name} the best choice for my application?"}]
    response = openai.ChatCompletion.create(model="gpt-4", messages=messages)
    return response['choices'][0]['message']['content'].strip()
# Streamlit UI
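# Flow: describe the application -> reveal the dataset step -> request a model
# recommendation, then rate and comment on the explanation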
st.image("./A8title.png")
st.title('Find the best model for your GenAI App')
st.write("")
st.markdown("<h4 style='font-size:20px;'>Outline Your Application's Functionality:</h4>", unsafe_allow_html=True)
description = "Recommend HuggingFace open source models, do not mention OpenAI in response" + st.text_area("", key="app_description")
# 'Next' button that is always visible; clicking it reveals the dataset step
if st.button("Next step: Dataset", key="next_to_dataset"):
    st.session_state.show_dataset_description = True

if 'show_dataset_description' in st.session_state and st.session_state.show_dataset_description:
    st.markdown("<h4 style='font-size:20px;'>Describe the training dataset you will use:</h4>", unsafe_allow_html=True)
    dataset_description = st.text_area("", key="dataset_description")

    if st.button("Recommend AI Model", key="recommend_model_button"):
        recommended_model = recommend_ai_model_via_gpt(description)
        st.subheader(f"Recommended: {recommended_model}")
        explanation = explain_recommendation(recommended_model)
        st.write("Explanation:", explanation)

    # Collect a rating and optional free-form feedback on the explanation
    rating = st.slider("Rate the explanation from 1 (worst) to 5 (best):", 1, 5)
    feedback = st.text_input("Any additional feedback?")

    if st.button("Submit Feedback", key="submit_feedback_key"):
        st.session_state.feedback_submitted = True

    # Use .get() so this check does not raise before any feedback has been submitted
    if st.session_state.get("feedback_submitted"):
        st.success("Thank you for your feedback!")