mintaeng committed on
Commit
37d73ba
•
1 Parent(s): 651da14

Create make_chain_model.py

Files changed (1)
  1. make_chain_model.py +32 -0
make_chain_model.py ADDED
@@ -0,0 +1,32 @@
+ from langchain_core.runnables import RunnablePassthrough
+ from langchain_core.output_parsers import StrOutputParser
+ from langchain_community.chat_models import ChatOllama
+ from langchain_core.prompts import ChatPromptTemplate
+ from langchain_pinecone import PineconeVectorStore
+ from langchain_community.embeddings import SentenceTransformerEmbeddings
+
+ def make_chain_llm(retriever, llm):
+     def format_docs(docs):
+         # Merge the retrieved documents into a single context passage.
+         return "\n\n".join(doc.page_content for doc in docs)
+
+     # Any chat model supported by LangChain can be passed in; Ollama is used here.
+     # llm = ChatOllama(model="zephyr:latest")
+
+     template = "\"```\" Below is an instruction that describes a task. Write a response that appropriately completes the request."\
+         "제시하는 context에서만 대답하고 context에 없는 내용은 생성하지마"\
+         "make answer in korean. 한국어로 대답하세요"\
+         "\n\nContext:\n{context}\n;"\
+         "Question: {question}"\
+         "\n\nAnswer:"
+
+     prompt = ChatPromptTemplate.from_template(template)
+
+     rag_chain = (
+         {"context": retriever | format_docs, "question": RunnablePassthrough()}
+         | prompt
+         | llm
+         | StrOutputParser()
+     )
+
+     return rag_chain
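
For context, a minimal sketch of how this chain might be wired up, based on the imports in the file. The index name, embedding model, Ollama model, and question are placeholders, not part of this commit, and a PINECONE_API_KEY environment variable is assumed to be set:

# Hypothetical usage of make_chain_llm; names below are assumptions for illustration.
from langchain_community.chat_models import ChatOllama
from langchain_community.embeddings import SentenceTransformerEmbeddings
from langchain_pinecone import PineconeVectorStore

from make_chain_model import make_chain_llm

# Build a retriever over an existing Pinecone index ("my-index" is a placeholder).
embeddings = SentenceTransformerEmbeddings(model_name="all-MiniLM-L6-v2")
vectorstore = PineconeVectorStore(index_name="my-index", embedding=embeddings)
retriever = vectorstore.as_retriever()

# Any LangChain chat model works; the commented-out line in the file suggests Ollama.
llm = ChatOllama(model="zephyr:latest")

chain = make_chain_llm(retriever, llm)
print(chain.invoke("문서에 어떤 내용이 있나요?"))  # question string flows to both retriever and prompt

The question string is passed straight to `chain.invoke`: the retriever uses it to fetch documents for `{context}`, while RunnablePassthrough forwards it unchanged as `{question}`.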