MartinKosela committed
Commit 817156c · Parent: 7949790
Update app.py

app.py CHANGED
@@ -1,6 +1,5 @@
 import streamlit as st
 import openai
-
 import os
 
 # Securely fetch the OpenAI API key
@@ -45,16 +44,14 @@ KNOWN_MODELS = [
 
 def recommend_ai_model_via_gpt(description):
     messages = [
-        {"role": "user", "content":
+        {"role": "user", "content": description}
     ]
-
-    #try:
     response = openai.ChatCompletion.create(
         model="gpt-4",
         messages=messages
     )
-
-
+    return response['choices'][0]['message']['content'].strip()
+
     #except openai.APIError as e:
     #    return f"Error: {e}"
     #except openai.RateLimitError as e:
@@ -67,14 +64,11 @@ def explain_recommendation(model_name):
     messages = [
         {"role": "user", "content": f"Why would {model_name} be a suitable choice for the application?"}
     ]
-
-    #try:
     response = openai.ChatCompletion.create(
         model="gpt-4",
         messages=messages
     )
-
-    return explanation
+    return response['choices'][0]['message']['content'].strip()
     #except openai.APIError as e:
     #    return f"Error: {e}"
     #except openai.RateLimitError as e:
@@ -86,7 +80,9 @@ def explain_recommendation(model_name):
 st.image("./A8title2.png")
 st.title('Find the best AI stack for your app')
 
-description = st.text_area("Describe your application:"
+description = st.text_area("Describe your application:")
+dataset_description = st.text_area("Describe the dataset you want to use for fine-tuning your model:")
+
 recommendation_type = st.radio("What type of recommendation are you looking for?", ["Recommend Open-Source Model", "Recommend API Service"])
 
 if "rec_model_pressed" not in st.session_state:
@@ -99,25 +95,13 @@ if st.button("Recommend AI Model"):
     st.session_state.rec_model_pressed = True
 
 if st.session_state.rec_model_pressed:
-    if description:
-
-
-        query = f"Given the application described as: '{description}', which open-source AI model would be most suitable?"
-    else: # Recommend API Service
-        query = f"Given the application described as: '{description}', which AI service API would be best?"
-
-        recommended_model = recommend_ai_model_via_gpt(query) # Updated function call
-
-        # Validate recommended model
-        # Commenting out model validation for the example
-        # if recommended_model not in KNOWN_MODELS:
-        #     st.warning("The recommendation is ambiguous. Please refine your description or consult an expert.")
-        # else:
+    if description and dataset_description:
+        combined_query = f"{description} Dataset: {dataset_description}"
+        recommended_model = recommend_ai_model_via_gpt(combined_query)
         st.subheader(f"Recommended: {recommended_model}")
        explanation = explain_recommendation(recommended_model)
        st.write("Reason:", explanation)
 
-        # Collecting rating and feedback through Streamlit
        rating = st.slider("Rate the explanation from 1 (worst) to 5 (best):", 1, 5)
        feedback = st.text_input("Any additional feedback?")
 
@@ -127,9 +111,5 @@ if st.session_state.rec_model_pressed:
     if st.session_state.feedback_submitted:
         st.success("Thank you for your feedback!")
 
-
-
-    else:
-        st.warning("Please provide a description.")
-
-
+    else:
+        st.warning("Please provide a description and dataset details.")
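The commit adds proper return values to both helpers but leaves the try/except scaffolding commented out. A minimal sketch of recommend_ai_model_via_gpt with that handling re-enabled, assuming the legacy openai<1.0 SDK (implied by openai.ChatCompletion.create), where the exception classes live in the openai.error module:

import openai

def recommend_ai_model_via_gpt(description):
    # Sketch only: mirrors the committed function, with the commented-out
    # error handling from the diff restored. Exception paths assume the
    # pre-1.0 openai SDK (openai.error.*); newer SDK versions differ.
    messages = [
        {"role": "user", "content": description}
    ]
    try:
        response = openai.ChatCompletion.create(
            model="gpt-4",
            messages=messages
        )
        return response['choices'][0]['message']['content'].strip()
    except openai.error.RateLimitError as e:
        # Rate limit hit; surface the error string to the Streamlit UI.
        return f"Error: {e}"
    except openai.error.APIError as e:
        return f"Error: {e}"

explain_recommendation could be wrapped the same way, since it makes an identical ChatCompletion call.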
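The changed lines keep import os and the "# Securely fetch the OpenAI API key" comment, but the actual lookup sits outside this diff. A minimal sketch of one common pattern, assuming the key is exposed as an OPENAI_API_KEY environment variable (hypothetical; not shown in the commit):

import os
import openai

# Sketch only: read the key from the environment rather than hard-coding it.
# On Hugging Face Spaces this can be set as a repository secret; Streamlit's
# st.secrets is another common option when a secrets.toml is configured.
openai.api_key = os.environ.get("OPENAI_API_KEY")
if not openai.api_key:
    raise RuntimeError("OPENAI_API_KEY is not set")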