TheDrakosfire committed
Commit f8ac491
1 Parent(s): 342314c

Upload 3 files

Files changed (4)
  1. .gitattributes +1 -0
  2. SRD_embeddings.csv +3 -0
  3. app.py +179 -0
  4. requirements.txt +7 -0
.gitattributes CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
+SRD_embeddings.csv filter=lfs diff=lfs merge=lfs -text
SRD_embeddings.csv ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1315c8fc5255c125c06b6c9e3ec4c84df91fd60e03596a363e6d7491df8171ba
+size 46149879
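Note: SRD_embeddings.csv is committed as a Git LFS pointer, so its schema is not visible in the diff. From the way app.py reads it below, each row must carry at least a sentence_chunk column, a file_path column, and an embedding_str column holding a JSON-encoded vector. A minimal sketch of how such a file could be produced (the chunk text and file path here are hypothetical; only the three column names and the BAAI/bge-m3 model come from app.py):

import json
import pandas as pd
from sentence_transformers import SentenceTransformer

model = SentenceTransformer("BAAI/bge-m3", device="cpu")

# Hypothetical chunks; in practice, one dict per text chunk of the SRD.
chunks = [
    {"sentence_chunk": "Fireball. 3rd-level evocation ...", "file_path": "srd/spells.md"},
]

for chunk in chunks:
    vector = model.encode(chunk["sentence_chunk"])
    # Store the vector as a JSON string so pd.read_csv can round-trip it.
    chunk["embedding_str"] = json.dumps(vector.tolist())

pd.DataFrame(chunks).to_csv("SRD_embeddings.csv", index=False)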
app.py ADDED
@@ -0,0 +1,179 @@
+import pandas as pd
+import numpy as np
+from openai import OpenAI
+from sentence_transformers import util, SentenceTransformer
+import torch
+import time
+from time import perf_counter as timer
+import textwrap
+import json
+
+import gradio as gr
+
+print("Launching")
+
+# OpenAI() reads the API key from the OPENAI_API_KEY environment variable.
+client = OpenAI()
+
+# Define helper function to print wrapped text
+def print_wrapped(text, wrap_length=80):
+    wrapped_text = textwrap.fill(text, wrap_length)
+    print(wrapped_text)
+
+# Import the saved embeddings file
+embeddings_df_save_path = "./SRD_embeddings.csv"
+print("Loading embeddings.csv")
+text_chunks_and_embedding_df_load = pd.read_csv(embeddings_df_save_path)
+print("Embedding file loaded")
+
+embedding_model_path = "BAAI/bge-m3"
+print("Loading embedding model")
+embedding_model = SentenceTransformer(model_name_or_path=embedding_model_path,
+                                      device="cpu")  # choose the device to load the model to
+
+# Convert the stringified embeddings back to numpy arrays
+text_chunks_and_embedding_df_load["embedding"] = text_chunks_and_embedding_df_load["embedding_str"].apply(lambda x: np.array(json.loads(x)))
+
+# Convert the texts-and-embeddings dataframe to a list of dicts
+pages_and_chunks = text_chunks_and_embedding_df_load.to_dict(orient="records")
+
+# Convert embeddings to a torch tensor (note: NumPy arrays are float64, torch tensors default to float32)
+embeddings = torch.tensor(np.array(text_chunks_and_embedding_df_load["embedding"].tolist()), dtype=torch.float32).to("cpu")
+
+def retrieve_relevant_resources(query: str,
+                                embeddings: torch.Tensor,
+                                model: SentenceTransformer = embedding_model,
+                                n_resources_to_return: int = 4,
+                                print_time: bool = True):
+    """
+    Embeds a query with model and returns the top-k scores and indices from embeddings.
+    """
+    # Embed the query
+    query_embedding = model.encode(query, convert_to_tensor=True)
+
+    # Get dot-product scores against the embeddings
+    start_time = timer()
+    dot_scores = util.dot_score(query_embedding, embeddings)[0]
+    end_time = timer()
+
+    if print_time:
+        print(f"[INFO] Time taken to get scores on {len(embeddings)} embeddings: {end_time - start_time:.5f} seconds.")
+
+    scores, indices = torch.topk(input=dot_scores, k=n_resources_to_return)
+
+    return scores, indices
+
+def print_top_results_and_scores(query: str,
+                                 embeddings: torch.Tensor,
+                                 pages_and_chunks: list[dict] = pages_and_chunks,
+                                 n_resources_to_return: int = 5):
+    """
+    Takes a query, retrieves the most relevant resources and prints them in descending order.
+
+    Note: requires pages_and_chunks to be formatted in a specific way (see above for reference).
+    """
+    scores, indices = retrieve_relevant_resources(query=query,
+                                                  embeddings=embeddings,
+                                                  n_resources_to_return=n_resources_to_return)
+
+    print(f"Query: {query}\n")
+    print("Results:")
+    # Loop through the zipped scores and indices
+    for score, index in zip(scores, indices):
+        print(f"Score: {score:.4f}")
+        # Print the relevant sentence chunk (scores are in descending order, so the most relevant chunk comes first)
+        print_wrapped(pages_and_chunks[index]["sentence_chunk"])
+        # Print the source file too, so results can be checked against the original text
+        print(f"File of Origin: {pages_and_chunks[index]['file_path']}")
+        print("\n")
+
+def prompt_formatter(query: str,
+                     context_items: list[dict]) -> str:
+    """
+    Augments query with text-based context from context_items.
+    """
+    # Join context items into one bulleted block
+    context = "- " + "\n- ".join([item["sentence_chunk"] for item in context_items])
+
+    # Base prompt that splices the retrieved context and the query together.
+    # The answer-style examples live in the system prompt below; this could
+    # also be kept in a separate text file and imported.
+    base_prompt = """Now use the following context items to answer the user query: {context}
+User query: {query}
+Answer:"""
+
+    # Fill the base prompt with the context items and the query
+    return base_prompt.format(context=context, query=query)
+
+system_prompt = """You are a game design expert specializing in Dungeons & Dragons 5e, answering beginner questions with descriptive, clear responses. Provide a story example. Avoid extraneous details and focus on direct answers. Use the examples provided as a guide for style and brevity. When responding:
+
+1. Identify the key point of the query.
+2. Provide a straightforward answer, omitting the thought process.
+3. Avoid additional advice or extended explanations.
+4. Answer in an informative manner, aiding the user's understanding without overwhelming them.
+5. DO NOT SUMMARIZE YOURSELF. DO NOT REPEAT YOURSELF.
+6. End with a line break and "What else can I help with?"
+
+Refer to these examples for your response style:
+
+Example 1:
+Query: How do I determine what my magic ring does in D&D?
+Answer: To learn what your magic ring does, use the Identify spell, take a short rest to study it, or consult a knowledgeable character. Once known, follow the item's instructions to activate and use its powers.
+
+Example 2:
+Query: What's the effect of the spell fireball?
+Answer: Fireball is a 3rd-level spell creating a 20-foot-radius sphere of fire, dealing 8d6 fire damage (half on a successful Dexterity save) to creatures within. It ignites flammable objects not worn or carried.
+
+Example 3:
+Query: How do spell slots work for a wizard?
+Answer: Spell slots represent your capacity to cast spells. You use a slot of equal or higher level to cast a spell, and you regain all slots after a long rest. You don't lose prepared spells after casting; they can be reused as long as you have available slots.
+
+Use the context provided to answer the user's query concisely."""
+
+with gr.Blocks() as RulesLawyer:
+    chatbot = gr.Chatbot()
+    msg = gr.Textbox()
+    clear = gr.ClearButton([msg, chatbot])
+
+    def respond(message, chat_history):
+        # Get relevant resources
+        scores, indices = retrieve_relevant_resources(query=message,
+                                                      embeddings=embeddings)
+
+        # Create a list of context items
+        context_items = [pages_and_chunks[i] for i in indices]
+
+        # Format the prompt with the context items
+        prompt = prompt_formatter(query=message,
+                                  context_items=context_items)
+        print(prompt)
+
+        bot_message = client.chat.completions.create(
+            model="gpt-4",
+            messages=[
+                {"role": "system", "content": system_prompt},
+                {"role": "user", "content": prompt}
+            ],
+            temperature=1,
+            max_tokens=512,
+            top_p=1,
+            frequency_penalty=0,
+            presence_penalty=0
+        )
+        chat_history.append((message, bot_message.choices[0].message.content))
+        time.sleep(2)
+        return "", chat_history
+
+    msg.submit(respond, [msg, chatbot], [msg, chatbot])
+
+if __name__ == "__main__":
+    RulesLawyer.launch(server_name="0.0.0.0", server_port=8000, share=False, allowed_paths=[])
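For reference, the retrieval step above can be exercised on its own once the module-level embeddings and pages_and_chunks have been built; a minimal sketch (the query string is hypothetical):

scores, indices = retrieve_relevant_resources(query="How does grappling work?",
                                              embeddings=embeddings)
for score, index in zip(scores, indices):
    print(f"{score:.4f}  {pages_and_chunks[index]['file_path']}")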
requirements.txt ADDED
@@ -0,0 +1,7 @@
+torch
+packaging
+wheel
+pandas
+openai
+sentence_transformers
+gradio