Spaces: Runtime error
Update app.py
app.py CHANGED
@@ -1,29 +1,20 @@
+import torch
+import textwrap
+from transformers import AutoModel, AutoTokenizer, AutoModelForCausalLM, pipeline
 
-
-
-"""Install the libraries"""
-
+# Install the required libraries
 !pip install -q langchain transformers accelerate bitsandbytes
 
-"""Load the libraries"""
-
 from langchain.chains import LLMChain, SequentialChain
 from langchain.memory import ConversationBufferMemory
 from langchain import HuggingFacePipeline
-from langchain import PromptTemplate,
-
-
+from langchain import PromptTemplate, LLMChain
 from transformers import AutoModel
-import torch
 import transformers
 from transformers import AutoTokenizer, AutoModelForCausalLM
-
 import json
-import textwrap
-
-"""Download the Model - We are using NousResearch's Llama2 which is the same as Meta AI's Llama 2, the only difference being "**Not requiring authentication to download**"
-"""
 
+# Download the model (NousResearch's Llama2)
 tokenizer = AutoTokenizer.from_pretrained("NousResearch/Llama-2-7b-chat-hf")
 
 model = AutoModelForCausalLM.from_pretrained("NousResearch/Llama-2-7b-chat-hf",
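Note: the diff skips new lines 21-23 here, which presumably carry the options that actually switch on 4-bit loading; only bnb_4bit_quant_type and bnb_4bit_compute_dtype are visible in the next hunk. A minimal sketch of an equivalent load using transformers' BitsAndBytesConfig (load_in_4bit and device_map are assumptions, not confirmed by the diff):

    import torch
    from transformers import AutoModelForCausalLM, BitsAndBytesConfig

    # Collect the quantization options in one config object.
    bnb_config = BitsAndBytesConfig(
        load_in_4bit=True,                     # assumed; not visible in the diff
        bnb_4bit_quant_type="nf4",             # confirmed by the hunk below
        bnb_4bit_compute_dtype=torch.float16,  # confirmed by the hunk below
    )

    model = AutoModelForCausalLM.from_pretrained(
        "NousResearch/Llama-2-7b-chat-hf",
        quantization_config=bnb_config,
        device_map="auto",  # assumed, mirroring the pipeline call below
    )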
@@ -33,30 +24,24 @@ model = AutoModelForCausalLM.from_pretrained("NousResearch/Llama-2-7b-chat-hf",
                                              bnb_4bit_quant_type="nf4",
                                              bnb_4bit_compute_dtype=torch.float16)
 
-
-
-from transformers import pipeline
-
+# Define Transformers pipeline
 pipe = pipeline("text-generation",
                 model=model,
-                tokenizer=
+                tokenizer=tokenizer,
                 torch_dtype=torch.float16,
                 device_map="auto",
-                max_new_tokens
+                max_new_tokens=4956,
                 do_sample=True,
                 top_k=30,
                 num_return_sequences=1,
                 eos_token_id=tokenizer.eos_token_id
                 )
 
-
-
+# Define the prompt format
 B_INST, E_INST = "[INST]", "[/INST]"
 B_SYS, E_SYS = "<<SYS>>\n", "\n<</SYS>>\n\n"
 DEFAULT_SYSTEM_PROMPT = """\
-As the leader of a sizable team in a dynamic business, I'm tasked with improving our supply chain management process. Recently, we've been facing issues like increased costs, longer lead times, and decreased customer satisfaction, all of which we believe are interconnected. To address these challenges, I need your assistance in optimizing our supply chain management. Please provide insights, strategies, and best practices that can help us streamline our operations, reduce costs, improve efficiency, and ultimately enhance customer satisfaction. Additionally, consider the latest technologies and innovations that could be integrated into our supply chain to make it more agile and responsive to market demands.If you don't know the answer to a question, please don't share false information.Just say you don't know and you are sorry!"""
-
-"""All the helper fucntions to generate prompt, prompt template, clean up output text"""
+As the leader of a sizable team in a dynamic business, I'm tasked with improving our supply chain management process. Recently, we've been facing issues like increased costs, longer lead times, and decreased customer satisfaction, all of which we believe are interconnected. To address these challenges, I need your assistance in optimizing our supply chain management. Please provide insights, strategies, and best practices that can help us streamline our operations, reduce costs, improve efficiency, and ultimately enhance customer satisfaction. Additionally, consider the latest technologies and innovations that could be integrated into our supply chain to make it more agile and responsive to market demands. If you don't know the answer to a question, please don't share false information. Just say you don't know and you are sorry!"""
 
 def get_prompt(instruction, new_system_prompt=DEFAULT_SYSTEM_PROMPT, citation=None):
     SYSTEM_PROMPT = B_SYS + new_system_prompt + E_SYS
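Note: B_INST/E_INST and B_SYS/E_SYS are the standard Llama 2 chat delimiters. Only the first line of get_prompt's body appears in this hunk; a minimal sketch of how it presumably completes, assuming the standard Llama 2 template:

    def get_prompt(instruction, new_system_prompt=DEFAULT_SYSTEM_PROMPT, citation=None):
        # Wrap the system prompt in <<SYS>> markers, then the whole thing in [INST].
        SYSTEM_PROMPT = B_SYS + new_system_prompt + E_SYS
        prompt_template = B_INST + SYSTEM_PROMPT + instruction + E_INST  # assumed
        return prompt_template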
@@ -97,9 +82,8 @@ def parse_text(text):
     wrapped_text = textwrap.fill(text, width=100)
     print(wrapped_text + '\n\n')
 
-
-
-llm = HuggingFacePipeline(pipeline = pipe, model_kwargs = {'temperature':0.3,'max_length': 4956, 'top_k' :50})
+# Defining Langchain LLM
+llm = HuggingFacePipeline(pipeline=pipe, model_kwargs={'temperature': 0.3, 'max_length': 4956, 'top_k': 50})
 
 system_prompt = "You are an advanced supply chain optimization expert"
 instruction = "Use the data provided to you to optimize the supply chain:\n\n {text}"
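Note: print(template) in the next hunk's header implies the template is assembled between the hunks, presumably from the helper above; this call is an assumption, not visible in the diff:

    # Presumed construction of the template fed to PromptTemplate below.
    template = get_prompt(instruction, new_system_prompt=system_prompt)
    print(template)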
@@ -108,16 +92,14 @@ print(template)
 
 prompt = PromptTemplate(template=template, input_variables=["text"])
 
-llm_chain = LLMChain(prompt=prompt, llm=llm, verbose
+llm_chain = LLMChain(prompt=prompt, llm=llm, verbose=False)
 
 import pandas as pd
 
+# Assuming you have a CSV file named 'merged_data.csv'
 df_supplier = pd.read_csv('merged_data.csv')
 
-print(df_supplier.head())
-
 text = f"Based on the data provided, how can you optimize my supply chain, providing the optimized solution as well as the techniques used. {df_supplier}"
 
 response = llm_chain.run(text)
 print(response)
-
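Note: interpolating df_supplier into an f-string inserts pandas' abbreviated repr of the frame, not the full dataset, and a large frame can easily overflow Llama 2's 4,096-token context. A sketch that passes a bounded, explicit slice instead (the 50-row cut-off is an arbitrary assumption):

    # Serialize a bounded slice of the data explicitly rather than relying on repr().
    supplier_text = df_supplier.head(50).to_csv(index=False)
    text = f"Based on the data provided, how can you optimize my supply chain, providing the optimized solution as well as the techniques used. {supplier_text}"
    response = llm_chain.run(text)
    print(response)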