import streamlit as st
from langchain.memory import ConversationBufferMemory
from langchain.memory.chat_message_histories import StreamlitChatMessageHistory
from langchain.llms.huggingface_pipeline import HuggingFacePipeline

# Chat history kept in Streamlit session state, wrapped as LangChain memory
msgs = StreamlitChatMessageHistory()
memory = ConversationBufferMemory(memory_key="history", chat_memory=msgs)

# Small local Hugging Face model for text generation
hf = HuggingFacePipeline.from_model_id(
    model_id="gpt2",
    task="text-generation",
    pipeline_kwargs={"max_new_tokens": 10},
)

st.title("🪩🤖")

# Seed the conversation with an opening AI message on the first run
if len(msgs.messages) == 0:
    msgs.add_ai_message("Ask me anything about orb community projects!")

# Re-render the full conversation on every Streamlit rerun
for msg in msgs.messages:
    st.chat_message(msg.type).write(msg.content)

if prompt := st.chat_input("Ask something"):
    st.chat_message("human").write(prompt)

    # Run (placeholder AI response for now)
    st.chat_message("ai").write("hehe")
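
# --- Sketch: turning the placeholder reply into a real model call ---
# The "hehe" line above is a stand-in; a minimal way to generate an actual
# response, assuming the legacy LLMChain/PromptTemplate API that matches the
# imports above, is to build a chain over `hf` and `memory` and call it inside
# the same `if prompt := ...` branch. The template text and variable names
# below are illustrative assumptions, not part of the original snippet.
from langchain.chains import LLMChain
from langchain.prompts import PromptTemplate

_TEMPLATE = (
    "You are a chatbot for the orb community.\n"
    "{history}\n"
    "Human: {input}\n"
    "AI:"
)
_prompt = PromptTemplate(input_variables=["history", "input"], template=_TEMPLATE)
chain = LLMChain(llm=hf, prompt=_prompt, memory=memory)

# Inside the `if prompt := st.chat_input(...)` block, the placeholder would be
# replaced with something like:
#     response = chain.run(prompt)          # memory writes the turn back into msgs
#     st.chat_message("ai").write(response)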