Upload example-contract-analysis-use-case.py
example-contract-analysis-use-case.py
ADDED
@@ -0,0 +1,66 @@
import os
import re
import time

from llmware.prompts import Prompt, HumanInTheLoop
from llmware.configs import LLMWareConfig


def contract_analysis_simple(model_name):

    # my contracts folder path - note: this assumes a prior preparation step
    contracts_path = "/home/ubuntu/contracts/"

    # query list - "topic key" : "question to ask"
    query_list = {"executive employment agreement": "What are the names of the two parties?",
                  "base salary": "What is the executive's base salary?",
                  "governing law": "What is the governing law?"}

    print("\nupdate: loading model - ", model_name)

    prompter = Prompt().load_model(model_name)

    # start the clock to measure processing time, once the model is loaded
    t0 = time.time()

    for i, contract in enumerate(os.listdir(contracts_path)):

        print("\nAnalyzing contract: ", str(i+1), contract)

        for key, value in query_list.items():

            # contract is parsed, text-chunked, and then filtered by the topic key
            source = prompter.add_source_document(contracts_path, contract, query=key)

            # call the LLM with the 'source' evidence from the contract automatically packaged into the prompt
            responses = prompter.prompt_with_source(value, prompt_name="just_the_facts", temperature=0.3)

            for r, response in enumerate(responses):
                print("LLM Response - ", key, " - ", re.sub("[\n]", " ", response["llm_response"]))

            # done with this source - clear it from the prompt before the next query
            prompter.clear_source_materials()

    # capture the processing time
    print("\nupdate: time cycle: ", time.time() - t0)

    # save a jsonl report of the interaction to the /prompt_history folder
    print("\nupdate: prompt state saved at: ", os.path.join(LLMWareConfig.get_prompt_path(), prompter.prompt_id))

    prompter.save_state()

    csv_output = HumanInTheLoop(prompter).export_current_interaction_to_csv()

    print("update: csv output - ", csv_output)

    return 0


if __name__ == "__main__":

    model = "llmware/dragon-deci-6b-v0"

    contract_analysis_simple(model)
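
Usage note: the hard-coded contracts_path assumes a prior preparation step in which contract documents were collected into a local folder. Below is a minimal preparation sketch, assuming the installed llmware version provides the Setup sample-files helper and that its sample set includes an "Agreements" folder (both are assumptions to verify against your version); you would then point contracts_path at this folder, e.g. by editing the hard-coded path or passing it as a parameter, before running the analysis.

import os
from llmware.setup import Setup

# pull down llmware's bundled sample documents
# (assumption: Setup().load_sample_files() is available in the installed llmware version)
sample_files_path = Setup().load_sample_files()

# the "Agreements" subfolder name is an assumption based on the llmware sample file layout
contracts_path = os.path.join(sample_files_path, "Agreements")

print("update: sample contracts folder - ", contracts_path)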