Commit 1a31378 by xicocdi
Parent(s): ac7c4ae

first push
Files changed:
- Dockerfile +11 -0
- app.py +120 -0
- chainlit.md +1 -0
- requirements.txt +9 -0
Dockerfile
ADDED
@@ -0,0 +1,11 @@
FROM python:3.9
RUN useradd -m -u 1000 user
USER user
ENV HOME=/home/user \
    PATH=/home/user/.local/bin:$PATH
WORKDIR $HOME/app
COPY --chown=user . $HOME/app
# "~" is not expanded in Dockerfile COPY destinations; use $HOME so the file
# lands in the app directory rather than a literal "~" folder.
COPY --chown=user ./requirements.txt $HOME/app/requirements.txt
RUN pip install -r requirements.txt
COPY --chown=user . .
CMD ["chainlit", "run", "app.py", "--port", "7860"]
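To try the image locally, it can be built and run with the port published (for example, docker build -t midterm-app . followed by docker run -e OPENAI_API_KEY=<key> -p 7860:7860 midterm-app; the midterm-app tag is illustrative, not part of this commit). A minimal reachability check for the running server, assuming that port mapping:

# Minimal smoke test: confirm the Chainlit server answers on the published port.
# Assumes the container was started with -p 7860:7860 (illustrative, see above).
import urllib.request

with urllib.request.urlopen("http://localhost:7860", timeout=5) as resp:
    print(resp.status)  # 200 means the Chainlit UI is being served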
app.py
ADDED
@@ -0,0 +1,120 @@
# flake8: noqa: E501

import os
from dotenv import load_dotenv
from langchain_community.document_loaders import PyPDFLoader
from langchain_text_splitters import RecursiveCharacterTextSplitter
from langchain_openai import OpenAIEmbeddings
from langchain_community.vectorstores.chroma import Chroma
from langchain_openai import ChatOpenAI
from langchain.prompts import PromptTemplate
from langchain.chains import ConversationalRetrievalChain
from langchain.memory import ConversationBufferMemory
import chainlit as cl

load_dotenv()

# NOTE: these absolute paths are machine-specific and will not exist inside the
# Docker image; bundle the PDFs with the app (or make the paths configurable)
# before deploying.
pdf_paths = [
    "/Users/xico/AIMakerSpace-Midterm/AI_Risk_Management_Framework.pdf",
    "/Users/xico/AIMakerSpace-Midterm/Blueprint-for-an-AI-Bill-of-Rights.pdf",
]
persist_directory = "docs/chroma/"


# Reuse the persisted Chroma index if present; otherwise build it from the PDFs.
if os.path.exists(persist_directory) and os.listdir(persist_directory):
    print("Loading existing vector database...")
    embedding = OpenAIEmbeddings(model="text-embedding-3-small")
    vectordb = Chroma(persist_directory=persist_directory, embedding_function=embedding)
else:
    print("Creating new vector database...")
    documents = []
    for pdf_path in pdf_paths:
        loader = PyPDFLoader(pdf_path)
        documents.extend(loader.load())

    text_splitter = RecursiveCharacterTextSplitter(
        chunk_size=2000,
        chunk_overlap=0,
    )

    docs = text_splitter.split_documents(documents)

    embedding = OpenAIEmbeddings(model="text-embedding-3-small")

    vectordb = Chroma.from_documents(
        documents=docs, embedding=embedding, persist_directory=persist_directory
    )

custom_template = """
You are an expert in artificial intelligence policy, ethics, and industry trends. Your task is to provide clear and accurate answers to questions related to AI's role in politics, government regulations, and its ethical implications for enterprises. Use reliable and up-to-date information from government documents, industry reports, and academic research to inform your responses. Make sure to consider how AI is evolving, especially in relation to the current political landscape, and provide answers in a way that is easy to understand for both AI professionals and non-experts.

For each question:

Provide a summary of the most relevant insights from industry trends and government regulations.
Mention any government agencies, regulations, or political initiatives that play a role in AI governance.
Explain potential ethical concerns and how enterprises can navigate them.
Use real-world examples when possible to illustrate your points.

Here are a few example questions you might receive:

How are governments regulating AI, and what new policies have been implemented?
What are the ethical risks of using AI in political decision-making?
How can enterprises ensure their AI applications meet government ethical standards?

One final rule for you to remember: you CANNOT, under any circumstances, answer a question that does not pertain to AI. If you do answer an out-of-scope question, you could lose your job. If you are asked a question that is not about AI, you must say: "I'm sorry, I don't know the answer to that question."
Context: {context}
Chat History: {chat_history}
Human: {question}
AI:"""

PROMPT = PromptTemplate(
    template=custom_template, input_variables=["context", "question", "chat_history"]
)

# MMR retrieval: fetch 10 candidates, return the 4 most diverse.
retriever = vectordb.as_retriever(
    search_type="mmr",
    search_kwargs={"k": 4, "fetch_k": 10},
)

llm = ChatOpenAI(
    model="gpt-4",
    temperature=0.1,
    streaming=True,
)


@cl.on_chat_start
async def start_chat():
    # Per-session memory so each visitor gets an independent chat history.
    memory = ConversationBufferMemory(
        memory_key="chat_history", return_messages=True, output_key="answer"
    )

    qa = ConversationalRetrievalChain.from_llm(
        llm,
        retriever=retriever,
        memory=memory,
        combine_docs_chain_kwargs={"prompt": PROMPT},
        return_source_documents=True,
    )

    cl.user_session.set("qa", qa)

    await cl.Message(
        content="Hi! What topic do you want to practice?", author="AI"
    ).send()


@cl.on_message
async def main(message: cl.Message):
    qa = cl.user_session.get("qa")
    cb = cl.AsyncLangchainCallbackHandler()

    callbacks = [cb]

    # The chain is synchronous, so wrap it with make_async to avoid blocking
    # Chainlit's event loop.
    response = await cl.make_async(qa)(
        {"question": message.content}, callbacks=callbacks
    )

    answer = response["answer"]
    source_documents = response["source_documents"]  # currently unused; kept for debugging

    await cl.Message(content=answer, author="AI").send()
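For a quick sanity check outside the Chainlit UI, the same chain construction can be exercised directly. A minimal sketch, assuming app.py imports cleanly as a module named app (importing it builds or loads the Chroma index as a side effect) and OPENAI_API_KEY is set; the question text is just an example:

# Standalone sketch mirroring start_chat's chain setup (not part of the deployed app).
from langchain.chains import ConversationalRetrievalChain
from langchain.memory import ConversationBufferMemory

from app import llm, retriever, PROMPT  # importing app builds/loads the vector store

memory = ConversationBufferMemory(
    memory_key="chat_history", return_messages=True, output_key="answer"
)
qa = ConversationalRetrievalChain.from_llm(
    llm,
    retriever=retriever,
    memory=memory,
    combine_docs_chain_kwargs={"prompt": PROMPT},
    return_source_documents=True,
)

result = qa({"question": "How are governments regulating AI?"})
print(result["answer"])
for doc in result["source_documents"]:
    # PyPDFLoader records the originating file and page in each chunk's metadata.
    print(doc.metadata.get("source"), doc.metadata.get("page"))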
chainlit.md
ADDED
@@ -0,0 +1 @@
AI Makerspace Midterm
requirements.txt
ADDED
@@ -0,0 +1,9 @@
chainlit==1.1.401
cohere==4.37
openai>=1.10.0,<2.0.0
tiktoken==0.5.2
python-dotenv==1.0.0
langchain
langchain_openai==0.1.1
pypdf
chromadb