from openai import OpenAI  # NVIDIA's endpoint is OpenAI-compatible, so the OpenAI client works directly
import streamlit as st

# Initialize the client against NVIDIA's OpenAI-compatible endpoint
client = OpenAI(
    base_url="https://integrate.api.nvidia.com/v1",  # NVIDIA API endpoint
    api_key=st.secrets["NVIDIA_API_KEY"],  # NVIDIA API key from Streamlit secrets
)

st.title("ChatGPT-like clone with NVIDIA Model")

# Initialize session state variables if not already present
if "openai_model" not in st.session_state:
    st.session_state["openai_model"] = "nvidia/llama-3.1-nemotron-70b-instruct"

if "messages" not in st.session_state:
    # Seed the conversation with an initial system message
    st.session_state.messages = [
        {"role": "system", "content": "You are a helpful assistant."}
    ]

# Render the chat history, skipping the system message so it stays hidden from the user
for message in st.session_state.messages:
    if message["role"] == "system":
        continue
    with st.chat_message(message["role"]):
        st.markdown(message["content"])

# Get new user input
if prompt := st.chat_input("What is up?"):
    # Add the user message to the session state and display it
    st.session_state.messages.append({"role": "user", "content": prompt})
    with st.chat_message("user"):
        st.markdown(prompt)

    # Stream the assistant's reply as it arrives
    with st.chat_message("assistant"):
        # Create the completion request with the full conversation history
        stream = client.chat.completions.create(
            model=st.session_state["openai_model"],
            messages=st.session_state.messages,  # include all previous messages in the API call
            temperature=0.5,
            top_p=0.7,
            max_tokens=1024,
            stream=True,
        )
        # st.write() has no `end` parameter; accumulate chunks into a single
        # placeholder so the reply renders incrementally in one bubble
        placeholder = st.empty()
        response = ""
        for chunk in stream:
            if chunk.choices[0].delta.content is not None:
                response += chunk.choices[0].delta.content
                placeholder.markdown(response + "▌")  # cursor while streaming
        placeholder.markdown(response)

    # Store the assistant response in the session state
    st.session_state.messages.append({"role": "assistant", "content": response})
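
# Note: st.secrets["NVIDIA_API_KEY"] requires a Streamlit secrets file next to
# this script. A minimal .streamlit/secrets.toml, assuming the same key name
# used above, would look like:
#
#   NVIDIA_API_KEY = "nvapi-..."
#
# The placeholder value is illustrative; substitute your own NVIDIA API key
# and keep the secrets file out of version control. Run the app with
# `streamlit run app.py` (assuming this file is saved as app.py).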