AzeezIsh commited on
Commit
37d3a3b
1 Parent(s): 90be5c6

Upload 71 files

This view is limited to 50 files because it contains too many changes. See the raw diff for the full set.
Files changed (50)
  1. TalentLLM-main/.gitignore +2 -0
  2. TalentLLM-main/.vscode/launch.json +16 -0
  3. TalentLLM-main/Candidate.py +103 -0
  4. TalentLLM-main/__pycache__/Candidate.cpython-311.pyc +0 -0
  5. TalentLLM-main/__pycache__/claude_compator.cpython-311.pyc +0 -0
  6. TalentLLM-main/__pycache__/compator.cpython-311.pyc +0 -0
  7. TalentLLM-main/__pycache__/gh.cpython-311.pyc +0 -0
  8. TalentLLM-main/__pycache__/mathpix.cpython-311.pyc +0 -0
  9. TalentLLM-main/__pycache__/results.cpython-311.pyc +0 -0
  10. TalentLLM-main/__pycache__/resume_conversation.cpython-311.pyc +0 -0
  11. TalentLLM-main/__pycache__/resume_conversation_interactive.cpython-311.pyc +0 -0
  12. TalentLLM-main/__pycache__/set_envs.cpython-311.pyc +0 -0
  13. TalentLLM-main/comparisons.json +1 -0
  14. TalentLLM-main/compator.py +234 -0
  15. TalentLLM-main/daniyal.txt +56 -0
  16. TalentLLM-main/gh.py +139 -0
  17. TalentLLM-main/gh_cache/AzeezIsh.md +1 -0
  18. TalentLLM-main/gh_cache/PramaYudhistira.md +1 -0
  19. TalentLLM-main/gh_cache/Saad-Mufti.md +0 -0
  20. TalentLLM-main/gh_cache/Serhan-Asad.md +1 -0
  21. TalentLLM-main/gh_cache/Zaeemahmad34.md +1 -0
  22. TalentLLM-main/gh_cache/danikhan632.md +1 -0
  23. TalentLLM-main/gh_cache/jryzhik.md +0 -0
  24. TalentLLM-main/gh_cache/nmhossain02.md +1 -0
  25. TalentLLM-main/gh_cache/taliu02.md +1 -0
  26. TalentLLM-main/gh_cache/tuffstuff9.md +1 -0
  27. TalentLLM-main/main.py +50 -0
  28. TalentLLM-main/mathpix.py +62 -0
  29. TalentLLM-main/notebooks/cheap_compartor.py +9 -0
  30. TalentLLM-main/notebooks/comparisons copy 2.json +130 -0
  31. TalentLLM-main/notebooks/comparisons copy 3.json +1 -0
  32. TalentLLM-main/notebooks/compator.py +132 -0
  33. TalentLLM-main/notebooks/compator_parallel.py +110 -0
  34. TalentLLM-main/notebooks/extractor.py +45 -0
  35. TalentLLM-main/notebooks/hardset.ipynb +21 -0
  36. TalentLLM-main/notebooks/rag.ipynb +449 -0
  37. TalentLLM-main/output.txt +174 -0
  38. TalentLLM-main/requirements.txt +7 -0
  39. TalentLLM-main/results.py +56 -0
  40. TalentLLM-main/resume_conversation.py +28 -0
  41. TalentLLM-main/resume_conversation_interactive.py +49 -0
  42. TalentLLM-main/resume_mmds/[email protected] +95 -0
  43. TalentLLM-main/resume_mmds/[email protected] +83 -0
  44. TalentLLM-main/resume_mmds/[email protected] +107 -0
  45. TalentLLM-main/resume_mmds/[email protected] +133 -0
  46. TalentLLM-main/resume_mmds/[email protected] +85 -0
  47. TalentLLM-main/resume_mmds/[email protected] +101 -0
  48. TalentLLM-main/resume_mmds/[email protected] +109 -0
  49. TalentLLM-main/resume_mmds/[email protected] +95 -0
  50. TalentLLM-main/resume_mmds/[email protected] +110 -0
TalentLLM-main/.gitignore ADDED
@@ -0,0 +1,2 @@
+ service_creds.json
+ .env
TalentLLM-main/.vscode/launch.json ADDED
@@ -0,0 +1,16 @@
+ {
+     "version": "0.2.0",
+     "configurations": [
+         {
+             "name": "Python: Current File (pdb)",
+             "type": "python",
+             "request": "launch",
+             "program": "${file}",
+             "console": "integratedTerminal",
+             "python": "python3.11",
+             "cwd": "${workspaceFolder}",
+             "env": {},
+             "internalConsoleOptions": "neverOpen"
+         }
+     ]
+ }
TalentLLM-main/Candidate.py ADDED
@@ -0,0 +1,103 @@
+ from typing import Optional
+ import gdown
+ import os
+ from datetime import datetime
+ from gh import getBasicReport
+ from mathpix import extract_text
+ from pathlib import Path
+
+
+ class JobCandidate:
+     def __init__(self, data: list):
+         # `data` is one row from the application spreadsheet.
+         self.timestamp = datetime.strptime(data[0], "%m/%d/%Y %H:%M:%S")
+         self.name = data[1]
+         self.email = data[2]
+         self.resume_link = data[3]
+         self.resume_text = self.parse_resume()
+         self.cover_letter = data[4]
+         self.linkedin = data[5]
+         self.github_link = data[6]
+         self.github_text = self.parse_gh()
+         self.personal_website_link = data[7]
+         self.visa_sponsorship = data[8]
+         self.disability_status = data[9]
+         self.ethnic_background = data[10]
+         self.gender = data[11]
+         self.military_service = data[12]
+
+     def __str__(self):
+         return (f"Job Candidate: {self.name}\n"
+                 f"Applied on: {self.timestamp}\n"
+                 f"Email: {self.email}\n"
+                 f"Resume: {self.resume_text}\n"
+                 f"Personal Website: {self.personal_website_link}\n"
+                 f"Visa Sponsorship: {self.visa_sponsorship}\n"
+                 f"Disability Status: {self.disability_status}\n"
+                 f"Ethnic Background: {self.ethnic_background}\n"
+                 f"Gender: {self.gender}\n"
+                 f"Military Service: {self.military_service}")
+
+     def parse_resume(self):
+         # The resume link is a Google Drive URL; the file id is its last query parameter.
+         file_id = self.resume_link.split('=')[-1]
+         pdf_dir = os.path.join(os.getcwd(), "resume_pdfs")
+         mmd_dir = os.path.join(os.getcwd(), "resume_mmds")
+
+         # Ensure the directories exist.
+         os.makedirs(pdf_dir, exist_ok=True)
+         os.makedirs(mmd_dir, exist_ok=True)
+
+         pdf_path = os.path.join(pdf_dir, f"{self.email}.pdf")
+         mmd_path = os.path.join(mmd_dir, f"{self.email}.pdf.mmd")
+
+         try:
+             # Reuse the parsed text if it is already cached on disk.
+             if os.path.exists(mmd_path):
+                 with open(mmd_path, "r") as f:
+                     return f.read()
+             # Otherwise download the PDF and run it through Mathpix.
+             gdown.download(id=file_id, quiet=True, use_cookies=False, output=pdf_path)
+             if os.path.exists(pdf_path):
+                 text = extract_text(pdf_path)
+                 # Redact the candidate's full name and first name to reduce bias.
+                 preprocessed = text.replace(self.name, "applicant")
+                 preprocessed = preprocessed.replace(self.name.split(" ")[0], "applicant")
+                 return preprocessed
+             return "Failed to download the PDF."
+         except Exception as e:
+             return str(e)
+
+     def parse_gh(self):
+         username = (self.github_link.replace("https://github.com/", "")
+                     .replace("github.com", "").replace("/", ""))
+         if not username:
+             return ""
+         # GitHub reports are cached on disk so repeated runs skip the API calls.
+         file_path = Path(os.getcwd()) / "gh_cache" / f"{username}.md"
+         if not file_path.exists():
+             summary = str(getBasicReport(username))
+             file_path.write_text(summary)
+         else:
+             summary = file_path.read_text()
+         return summary
+
+     def parse_portfolio(self):
+         pass
+
+     def __lt__(self, other):
+         if not isinstance(other, JobCandidate):
+             return NotImplemented
+         return self.timestamp < other.timestamp
+
+     def __eq__(self, other):
+         # Candidates are never considered equal; every comparison must pick a winner.
+         return False
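A minimal usage sketch for JobCandidate (not part of the commit): it mirrors how main.py builds candidates from the Google Sheet. The sheet and worksheet names come from main.py; the row index is illustrative.

import gspread
from Candidate import JobCandidate

sa = gspread.service_account(filename='service_creds.json')
rows = sa.open("Figma_swe").worksheet("Sheet1").get_all_values()

# Row 0 is the header; each later row is one application in constructor order.
candidate = JobCandidate(rows[1])
print(candidate.name, candidate.email)
print(candidate.resume_text[:200])  # name-redacted Mathpix Markdown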
TalentLLM-main/__pycache__/Candidate.cpython-311.pyc ADDED
Binary file (6.4 kB).
 
TalentLLM-main/__pycache__/claude_compator.cpython-311.pyc ADDED
Binary file (7.14 kB).
 
TalentLLM-main/__pycache__/compator.cpython-311.pyc ADDED
Binary file (10.5 kB).
 
TalentLLM-main/__pycache__/gh.cpython-311.pyc ADDED
Binary file (7.61 kB).
 
TalentLLM-main/__pycache__/mathpix.cpython-311.pyc ADDED
Binary file (3.35 kB).
 
TalentLLM-main/__pycache__/results.cpython-311.pyc ADDED
Binary file (2.21 kB).
 
TalentLLM-main/__pycache__/resume_conversation.cpython-311.pyc ADDED
Binary file (2.2 kB).
 
TalentLLM-main/__pycache__/resume_conversation_interactive.cpython-311.pyc ADDED
Binary file (4.7 kB).
 
TalentLLM-main/__pycache__/set_envs.cpython-311.pyc ADDED
Binary file (1.01 kB).
 
TalentLLM-main/compator.py ADDED
@@ -0,0 +1,234 @@
+ import json, os, sys
+ import openai
+ from dotenv import load_dotenv
+ load_dotenv()
+ # The API key is read from the environment; never hardcode secrets in source.
+ openai.api_key = os.environ.get("OPENAI_API_KEY")
+ from Candidate import JobCandidate
+
+ import litellm
+ from litellm import completion
+
+ import xml.etree.ElementTree as ET
+
+
+ def printc(obj, color="cyan"):
+     color_code = {
+         "black": "30", "red": "31", "green": "32", "yellow": "33",
+         "blue": "34", "magenta": "35", "cyan": "36", "white": "37"
+     }
+     colored_text = f"\033[{color_code[color]}m{obj}\033[0m" if color in color_code else obj
+     print(colored_text)
+
+
+ LLM = os.environ.get("COMPARATOR_LLM", "chat-bison")
+ # LLM = os.environ.get("COMPARATOR_LLM", "gpt-3.5-turbo-1106")
+
+
+ def getContent(candidateA, candidateB) -> str:
+     return (
+         "Given the following two candidates, choose between the two. Here is the rubric: "
+         + get_rubric()
+         + "Candidate A: "
+         + "\nRESUME:\n" + candidateA.resume_text + "\nEND Resume\n"
+         + "\nGITHUB:\n" + candidateA.github_text + "\nEND GITHUB\n"
+         + " END OF Candidate A"
+         + "\n\nCandidate B: "
+         + "\nRESUME:\n" + candidateB.resume_text + "\nEND Resume\n"
+         + "\nGITHUB:\n" + candidateB.github_text + "\nEND GITHUB\n"
+         + " END OF Candidate B"
+     )
+
+
+ def google_compare_resumes(content: str, nameA="", nameB=""):
+     choice = 0
+     messages = [
+         {"role": "user", "content": "You are an LLM recruiter who will choose between two candidates based on a provided rubric."},
+         {"role": "user", "content":
+             """
+             You are an LLM recruiter who will choose between two candidates based on a provided rubric;
+             you will only use bullet points and broken English instead of proper English to be more concise.
+             """
+         },
+         {"role": "assistant", "content":
+             """
+             I can assist you in evaluating two candidates based on a provided rubric.
+             Provide me with the rubric or the criteria you'd like to use for the evaluation,
+             and I'll help you assess the candidates accordingly and explain myself in less than 50 words.
+             """
+         },
+         {"role": "user", "content": content}
+     ]
+
+     response = completion(model=LLM, messages=messages, max_tokens=170)
+     printc(response["choices"][0]["message"], 'red')
+
+     messages = [
+         {"role": "assistant", "content": str(response["choices"][0]["message"])},
+         {"role": "user", "content": "okay, so now with just a single token select A or B: <select>choice letter goes here</select>"}
+     ]
+     retries = 3
+     while retries > 0:
+         response = completion(model=LLM, messages=messages, max_tokens=5, temperature=0.01)
+         html = ''.join(str(response["choices"][0]["message"]['content']).split())
+         if "<select>" in html:
+             # Wrap the fragment in a root element so ElementTree can parse it.
+             xml_content = f'<root>{html}</root>'
+             root = ET.fromstring(xml_content)
+             select_element = root.find('select')
+             letter = str(select_element.text)
+         else:
+             letter = str(html)[0]
+
+         if letter == 'A':
+             printc(nameA + " wins over " + nameB, "cyan")
+             return -1
+         elif letter == 'B':
+             printc(nameB + " wins over " + nameA, "green")
+             return 1
+
+         retries -= 1
+
+     return choice
+
+
+ def compare_resumes(content: str, nameA="", nameB=""):
+     retries = 3
+     choice = 0
+
+     while retries > 0:
+         try:
+             response = openai.ChatCompletion.create(
+                 model='gpt-4-0613',
+                 messages=[
+                     {"role": "user", "content":
+                         """
+                         You are an LLM recruiter who will choose between two candidates based on a provided rubric;
+                         you will only use bullet points and broken English instead of proper English to keep your justification concise.
+                         You will also provide args for selectCanidate.
+                         """
+                     },
+                     {"role": "assistant", "content":
+                         """
+                         I can assist you in evaluating two candidates based on a provided rubric.
+                         Provide me with the rubric or the criteria you'd like to use for the evaluation,
+                         and I'll help you assess the candidates accordingly, explain myself concisely, and
+                         provide args for selectCanidate.
+                         """
+                     },
+                     {"role": "user", "content": content}
+                 ],
+                 functions=[
+                     {
+                         # The function and argument names ("selectCanidate", "justifcation")
+                         # are misspelled but used consistently throughout the codebase.
+                         "name": "selectCanidate",
+                         "description": "choose between the two candidates",
+                         "parameters": {
+                             "type": "object",
+                             "properties": {
+                                 "choice_num": {
+                                     "type": "integer",
+                                     "description": "1 for Candidate A is the best fit, 2 for Candidate B is the best fit",
+                                 },
+                                 "justifcation": {
+                                     "type": "string",
+                                     "description": "justification for why you chose the candidate",
+                                 },
+                             },
+                             "required": ["choice_num", "justifcation"],
+                         },
+                     }
+                 ],
+                 function_call="auto",
+             )
+
+             message = response["choices"][0]["message"]
+
+             if message.get("function_call"):
+                 function_name = message["function_call"]["name"]
+                 function_args = json.loads(message["function_call"]["arguments"])
+                 choice = int(function_args["choice_num"])
+
+                 if function_name == "selectCanidate":
+                     # Normalize to the convention used everywhere else:
+                     # -1 means A wins, 1 means B wins.
+                     if choice == 1:
+                         choice = -1
+                         printc(nameA + " wins over " + nameB, "cyan")
+                     elif choice == 2:
+                         choice = 1
+                         printc(nameB + " wins over " + nameA, "green")
+
+                     printc(function_args["justifcation"], "yellow")
+
+             break  # Break the loop if everything went well.
+
+         except Exception as e:
+             printc("Error: " + str(e), "red")
+             retries -= 1
+             if retries == 0:
+                 printc("Maximum retries reached.", "red")
+                 return 0  # Or any other default value or error indicator.
+
+     return choice
+
+
+ def get_rubric():
+     text = open("rubric.txt", "r").read()
+     return "\nRubric:\n" + str(text) + "\nEND Rubric\n"
+
+
+ def comp(candidateA: JobCandidate, candidateB: JobCandidate, rub_id: int = 0) -> int:
+     # Results are cached in comparisons.json keyed by "emailA#emailB#rubric".
+     comp_table = json.load(open("comparisons.json", "r"))
+     tag = candidateA.email + "#" + candidateB.email + "#" + str(rub_id)
+     inv_tag = candidateB.email + "#" + candidateA.email + "#" + str(rub_id)
+     if tag in comp_table:
+         # Cached in (A, B) order: 1 means B won, -1 means A won.
+         if comp_table[tag] == 1:
+             printc(candidateB.name + " wins over " + candidateA.name, "magenta")
+         elif comp_table[tag] == -1:
+             printc(candidateA.name + " wins over " + candidateB.name, "magenta")
+         return comp_table[tag]
+     elif inv_tag in comp_table:
+         # Cached in the reverse (B, A) order; negate to express it in (A, B) terms.
+         if comp_table[inv_tag] == 1:
+             printc(candidateA.name + " wins over " + candidateB.name, "magenta")
+         elif comp_table[inv_tag] == -1:
+             printc(candidateB.name + " wins over " + candidateA.name, "magenta")
+         return comp_table[inv_tag] * -1
+     else:
+         choice = compare_resumes(getContent(candidateA, candidateB), candidateA.name, candidateB.name)
+         comp_table[tag] = choice
+         comp_table[inv_tag] = choice * -1
+
+         json.dump(comp_table, open("comparisons.json", "w"))
+         return choice
+
+
+ def compute_scores(candidates):
+     # Lower score = more wins, since the winner of each pairing accumulates -1.
+     scores = {candidate.email: 0 for candidate in candidates}
+     for i, candidateA in enumerate(candidates):
+         for candidateB in candidates[i + 1:]:
+             result = comp(candidateA, candidateB)
+             scores[candidateA.email] += result
+             scores[candidateB.email] -= result
+     print(scores)
+     return scores
+
+
+ def bubble_sort(candidates: list) -> list:
+     # Despite the name, this ranks by total pairwise score, best candidate first.
+     scores = compute_scores(candidates)
+     return sorted(candidates, key=lambda x: scores[x.email])
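A sketch of how these functions compose (not part of the commit): it assumes comparisons.json and rubric.txt exist in the working directory and builds candidates the same way main.py does.

import gspread
from Candidate import JobCandidate
from compator import comp, bubble_sort

sa = gspread.service_account(filename='service_creds.json')
data = sa.open("Figma_swe").worksheet("Sheet1").get_all_values()
candidates = [JobCandidate(row) for row in data[1:7]]

result = comp(candidates[0], candidates[1])  # -1 if the first wins, 1 if the second
ranked = bubble_sort(candidates)             # best candidate first (lowest total score)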
TalentLLM-main/daniyal.txt ADDED
@@ -0,0 +1,56 @@
+ Daniyal - Software Engineer
+ {
+ DH
+ }
+ Hey there, I'm
+ Daniyal Hussain
+ Digital Craftsman
+ - a software engineer, data scientist, full-stack dev, and more.
+ 📚
+ Graduated with a B.A. in
+ Computer Science
+ &
+ Data Science
+ @
+ NYU
+ .
+ 🚀
+ Looking for fulltime roles!
+ Github
+ LinkedIn
+ Email
+ ⚡ About Me
+ I'm a computer scientist based in NYC with an avid interest in interdisciplinary learning. My mindset is perfectly embodied by this quote from Naval Ravikant:
+ "Learn to sell, learn to build - if you can do both, you will be unstoppable."
+ My specialties lie in developing desktop applications, fullstack websites, machine learning, and making it all visually stunning.
+ I am currently working on Resyzer - a SaaS windows desktop application which takes window management to the next level.
+ Projects
+ See all projects →
+ Here are a couple of the things I've worked on.
+ See all projects →
+ NYU Course Tracker
+ JavaScript
+ Puppeteer
+ Utilises puppeteer to scrape the NYU public course search via a headless browser for the desired class and check its status. Manipulates user agent randomly to avoid detection. Sends an alert via telegram when the class status changes.
+ KeyboardHookLite
+ C#
+ win32
+ A lightweight low-level global windows keyboard hook compatible with modern UI frameworks (WPF, MAUI, WinUI 3, etc.)
+ React Blackjack
+ React
+ mongoDB
+ Express
+ Node.js
+ Passport
+ A blackjack app built with react. Supports user authentication. Your balance and game history are preserved. Implemented in react, node.js, passport, mongoDB.
+ Get In Touch!
+ LinkedIn
+ Email
+ Resume
+ Built with
+ Next.js
+ &
+ Chakra UI
+ . Hosted on
+ Vercel
+ .
TalentLLM-main/gh.py ADDED
@@ -0,0 +1,139 @@
+ from github import Github, Auth
+ from github.Repository import Repository
+ from typing import List
+ import requests
+ import os, json, datetime, re, sys
+ from bs4 import BeautifulSoup
+ import openai
+ from dotenv import load_dotenv
+ load_dotenv()
+ import litellm
+ from litellm import completion
+ # AWS and PaLM API credentials are supplied via the environment (.env);
+ # secrets must not be hardcoded in source files.
+ os.environ.setdefault('AWS_REGION_NAME', "us-east-1")
+ os.environ.setdefault('AWS_REGION', "us-east-1")
+ os.environ["VERTEXAI_PROJECT"] = "data-axe-386317"
+ os.environ['GOOGLE_APPLICATION_CREDENTIALS'] = 'service_creds.json'
+
+
+ def printc(obj, color="cyan"):
+     color_code = {
+         "black": "30", "red": "31", "green": "32", "yellow": "33",
+         "blue": "34", "magenta": "35", "cyan": "36", "white": "37"
+     }
+     colored_text = f"\033[{color_code[color]}m{obj}\033[0m" if color in color_code else obj
+     print(colored_text)
+
+
+ auth = Auth.Token(os.environ.get('GH_KEY', 'default'))
+ g = Github(auth=auth)
+
+
+ def remove_html_and_urls(markdown_text):
+     # Strip HTML tags first, then URLs, so only prose is sent to the model.
+     no_html = re.sub(r'<[^>]+>', '', markdown_text)
+     pattern_urls = r'http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\'(),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+'
+     no_html_no_urls = re.sub(pattern_urls, '', no_html)
+     return no_html_no_urls
+
+
+ def getGithubPinned(username: str) -> List[str]:
+     # Pinned repos are not exposed by the REST API, so scrape the profile page.
+     repos = []
+     today = datetime.datetime.now()
+     day_1 = today.replace(day=1)
+     start_date, end_date = day_1.strftime("%Y-%m-%d"), today.strftime("%Y-%m-%d")
+
+     url = f"https://github.com/{username}?tab=overview&from={start_date}&to={end_date}"
+     response = requests.get(url)
+
+     if response.status_code == 200:
+         soup = BeautifulSoup(response.text, 'html.parser')
+         pinned_items = soup.find_all('div', class_='pinned-item-list-item-content')
+         repos = []
+         for item in pinned_items:
+             repo_name = item.find('span', class_='repo').text.strip()
+             repos.append(repo_name)
+     else:
+         print(f"Failed to get pinned repos for {username}")
+
+     return repos
+
+
+ def get_repositories(username: str) -> List[Repository]:
+     # Pinned repos come first, then the rest sorted by star count.
+     user = g.get_user(username)
+     all_repos = [repo for repo in user.get_repos()]
+     repo_dict = {repo.name: repo for repo in all_repos}
+     pinned_repo_names = getGithubPinned(username)
+     pinned_repos = []
+     for name in pinned_repo_names:
+         if name in repo_dict:
+             pinned_repos.append(repo_dict.pop(name))
+     sorted_repos = sorted(repo_dict.values(), key=lambda x: x.stargazers_count, reverse=True)
+     final_repo_list = pinned_repos + sorted_repos
+     return final_repo_list
+
+
+ def getBasicReport(username: str):
+     try:
+         user_repos = get_repositories(username)[:8]
+         summaries = []
+
+         for repo in user_repos:
+             try:
+                 content = ""
+                 content += "\nNAME: " + str(repo.full_name) + "\nSTARS: " + str(repo.stargazers_count) + "\nReadme: \n"
+                 files = repo.get_contents("")
+                 md_files = [file for file in files if file.name.endswith('.md')]
+
+                 md_file_content = repo.get_contents(md_files[0].path).decoded_content.decode()
+                 content += str(remove_html_and_urls(str(md_file_content)))
+
+                 messages = [
+                     {"role": "user", "content": "I want you to summarize this repository and summarize the skills gained with this repository."},
+                     {"role": "assistant", "content":
+                         """
+                         Sure, I can help with that! Please provide me with the details for the repo and I'll be able to summarize it and outline the skills that can be gained from it.
+                         Additionally I will grade the technical complexity of it. I will also greatly take into consideration the number of stars. Furthermore, I will use broken English to ensure
+                         my statements are as short and concise as possible.
+                         """
+                     },
+                     {"role": "user", "content": content}
+                 ]
+
+                 response = completion(model="anthropic.claude-instant-v1", messages=messages, max_tokens=150, temperature=1.0)
+                 summaries.append(response["choices"][0]["message"]['content'])
+
+             except Exception:
+                 # Skip repos with no markdown files or failed API calls.
+                 continue
+
+         # message = completion(model="anthropic.claude-instant-v1", messages=messages)
+         printc(summaries, 'cyan')
+         return summaries
+     except Exception:
+         return ""
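A usage sketch (not part of the commit): it assumes GH_KEY and AWS Bedrock credentials are present in the environment; the username is one that appears in gh_cache.

from gh import get_repositories, getBasicReport

repos = get_repositories("danikhan632")    # pinned repos first, then by star count
print([r.full_name for r in repos[:8]])

summaries = getBasicReport("danikhan632")  # one Claude summary per repo README
for summary in summaries:
    print(summary)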
TalentLLM-main/gh_cache/AzeezIsh.md ADDED
@@ -0,0 +1 @@
+ [' Here is a summary of the AzeezIsh/housing_app repository:\n\n- This is a housing application project that utilizes Figma for frontend design and React for development. \n\n- It is primarily a frontend focused project with minimal backend functionality, as data sources are coming from cleaned datasets rather than a custom backend API.\n\nSkills that can be gained from working on this repository include:\n\n- UI/UX design using Figma for prototyping interfaces \n\n- Frontend development with React, including components, states, props, etc. \n\n- Integrating data from external sources into a React application\n\n- Basic frontend architecture and application structure without backend dependencies\n\nThe technical complexity of this project would be']
TalentLLM-main/gh_cache/PramaYudhistira.md ADDED
@@ -0,0 +1 @@
+ [" Here is the summary:\n\nName: PramaYudhistira/sh-web\nStars: 1\n\nThis project aims to build a web app called SideHustle that helps college students earn money by posting their skills and services that other students on campus may need. It focuses on services students can provide locally within a campus since many don't have cars. \n\nThe unique value is that it is similar to Craigslist but tailored specifically for college students. \n\nThe tech stack includes ASP.NET for the backend with Entity Framework and plans to use Angular for the frontend UI initially. There is also a note about potentially building a mobile version with React.\n\nSkills gained would include experience with:\n-", ' Here is a summary of the PramaYudhistira/urc-software repository:\n\nThis repository contains the software for the RoboJackets URC (Urban Reconnaissance and Capabilities) robot. It is organized into various ROS packages that enable capabilities like navigation, perception, manipulation, etc. \n\nSome of the key skills that can be gained from working with this repository include:\n\n- ROS system design and organization into packages\n- Robot perception using sensors like cameras \n- SLAM and navigation for autonomous mobility\n- Robot arm kinematics and manipulation\n- ROS message definitions and inter-package communication\n- Integration of robot hardware drivers\n- ROS simulation using Gazebo\n- Robot teleoperation through web', ' Here is a summary of the PramaYudhistira/software-training repository:\n\nRepo contain resource for software training program. Include syllabus and setup instruction. Syllabus outline what be cover in each session of training. Setup folder provide direction for student to prepare their computer for class. \n\nSkill gain from this repo include:\n\n- Learning software development fundamentals by following syllabus. Give overview of topics.\n\n- Setting up programming environment by following setup instructions. Gain skill of installing tools need for development. \n\nTechnical complexity is low. Mainly text documentation for syllabus and setup. \n\nSmall repo just starting. Not many star since new. But provide base for organizing software training course. Student and', ' Here is a summary of the PramaYudhistira/CS2340 repository:\n\nRepository contains materials for CS2340 course. No description provided in README. \n\nSkills gained:\n\n- Programming fundamentals as it is an introductory CS course. Likely covers basics of programming with a language like Python or Java.\n\n- Algorithm design and analysis. Course likely teaches techniques for solving problems systematically and analyzing time/space complexity.\n\n- Data structures. Repository may include implementations of common data structures like lists, stacks, queues, trees, hashes. \n\nTechnical complexity: Beginner. As an intro CS course, focuses on basic programming concepts and data structures. \n\nWith no other details provided', ' Here is a summary of the PramaYudhistira/VIPTraining repository:\n\nRepository contains training material for V.I.P training. Repository has 0 stars currently. \n\nSkills that can be gained:\n\n- Learn skills around virtual instructor-led training. Ability to create and deliver online training content.\n\n- Training design skills. Ability to structure training modules, create lesson plans and map learning outcomes. \n\n- Online facilitation skills. Skills around engaging learners, handling questions and ensuring knowledge transfer in a virtual environment. \n\n- Training delivery skills. Skills around using tools like video conferencing platforms, presenting content and handling technical glitches during delivery.\n\nTechnical complexity is likely moderate']
TalentLLM-main/gh_cache/Saad-Mufti.md ADDED
File without changes
TalentLLM-main/gh_cache/Serhan-Asad.md ADDED
@@ -0,0 +1 @@
+ [' Here is a summary of the Serhan-Asad/Beginner- repository:\n\n- The repository appears to be tracking a 30 month internship as the readme simply states "30 month internship". \n\n- With no other context provided in the readme or any files in the repository, it\'s difficult to determine specifics about the type of internship, skills being learned, projects worked on etc.\n\n- However, some skills that could potentially be gained from a 30 month internship include:\n - Hands-on work experience in the field/industry of the internship\n - Learning technical or soft skills applicable to the role/company\n - Project management experience by potentially working on smaller scoped projects\n - Time management and accountability by', " Here is a summary of the Serhan-Asad/jpmc-task-1 repository:\n\nThis repository appears to be a starter template created for Task 1 of JP Morgan Chase's software engineering program. It does not contain any specific code or files beyond a basic README. \n\nSome of the key skills that a participant could gain include:\n\n- Version control skills by utilizing GitHub for submission of tasks\n- Configuration management experience setting up a basic repo structure \n- Learning best practices for collaborative coding through use of shared codebase\n\nSince it is simply a template repository without any real implementation, the technical complexity would be considered very low. Participants would likely use it as a starting point to build out and demonstrate their work for", " Here is a summary of the Serhan-Asad/jpmc-task-2 repository:\n\nThis repository appears to be a starter template provided for the second task in JP Morgan Chase's Forage program. The Forage program seems to provide tasks or projects for participants to complete. \n\nThis specific repository does not contain any projects, code, or instructions - it is simply an empty repository intended for participants to add their solution for the second task.\n\nSkills that could be gained by completing the task and adding work to this repository include:\n\n- Programming/coding skills depending on the task requirements \n- Version control skills like committing, pushing code to GitHub\n- Project planning and management \n- Documentation skills if", " Here is a summary of the Serhan-Asad/jpmc-task-3 repository:\n\nThis repository appears to be a starter template for Task 3 of JP Morgan Chase's Forage program. The Forage program seems to be focused on providing learning opportunities in coding/technical skills. \n\nSkills that could be gained by completing the tasks or exercises within this repo include:\n\n- Programming fundamentals (use of variables, functions, conditionals etc depending on the language/framework required)\n\n- Version control with Git/Github for submitting work\n\n- Potentially increased proficiency with a specific language/framework depending on what is required for the tasks \n\n- Practice working through structured coding exercises/problems", ' Here is a summary of the Serhan-Asad/CS1332 repository:\n\nThis repo contains homework assignments from the CS1332 Data Structures and Algorithms course at Georgia Tech. It includes implementations of and practice with:\n\n- HW1: ArrayList - Gained experience implementing a basic array-backed list data structure. Learned about fundamental list operations like add, remove, get. \n\n- HW2: Circular Singly-Linked List - Practiced building a linked list and handling edge cases like wrapping around. Improved skills with pointers and traversal.\n\n- HW3: Stacks & Queues - Built stack and queue abstractions using arrays or linked lists. Learned FIFO', ' Here is a summary of the Serhan-Asad/2340 repository:\n\nThis repository contains a project code named 2340. Unfortunately there is no further information provided in the readme about what the project is or does. \n\nSkills that could potentially be gained from working with this repo include:\n\n- Software development: Building software projects from scratch provides experience with coding, debugging, and problem solving. \n- Version control: Using Git for a personal project introduces skills with tracking changes and collaboration.\n- Project organization: Organizing code and files into a cohesive project structure is a useful skill.\n\nThe technical complexity is difficult to determine without more project details. With no stars and minimal readme information, it is not', ' Here is the summary of the skills gained from this repository:\n\nName: Serhan-Asad/english \nStars: 0\n\nThis repository provides a starting project setup with Create React App. Some of the main skills and concepts learned include:\n\n- Setting up a React project from scratch with Create React App and common NPM scripts. \n\n- Understanding the basic file structure and entry points of a React app.\n\n- Running the project in development and production modes. \n\n- How to test React components.\n\n- Building the app into static files for deployment. \n\n- Options for ejecting from the default CRA setup for more customization.\n\n- Additional resources for learning core React concepts like']
TalentLLM-main/gh_cache/Zaeemahmad34.md ADDED
@@ -0,0 +1 @@
+ []
TalentLLM-main/gh_cache/danikhan632.md ADDED
@@ -0,0 +1 @@
+ [' Here is a summary of the vk-backend-for-triton repository:\n\n- This repository aims to create a Vulkan backend for Triton, enabling Vulkan-compatible devices like those using Apple Metal via MoltenVK to utilize Triton. \n\n- It is currently working on ensuring it produces valid SPIRV assembly code and developing support for Vulkan compute pipelines and Pytorch memory management.\n\n- The skills gained from this repository include learning about backend development for deep learning frameworks, interfacing with GPUs using graphics APIs like Vulkan, and understanding computational graph optimizations.\n\n- Technical complexity is intermediate as it involves developing backend compiler infrastrucutre for a deep learning framework. Understanding of concepts like SPIR-', ' Here is a summary of the iMessage API repository:\n\nThis repository contains a Flask API that allows a user to interact with iMessages on their iPhone/iPad. The key skills gained from working with this repo include:\n\n- API Development with Flask: User would learn how to create RESTful endpoints to perform CRUD operations like sending/retrieving messages and contacts. This introduces Flask framework concepts.\n\n- Working with iPhone Data: User would have to enable full disk access on their Mac and parse iCloud backup data to retrieve messages and contacts. This exposes them to working with iOS application data. \n\n- Cloud backups: User would learn to download, parse and transform iCloud backup files like contacts.vcf into usable formats like', " Here is a summary of the Auto-GPT-AlpacaTrader-Plugin repository:\n\n- The repository contains a plugin that allows an AI assistant created with Anthropic's Constitutional AI technology to integrate with the Alpaca trading platform to perform trades, analyze markets, and manage portfolios.\n\n- Key skills gained include algorithmic trading, financial market data analysis, portfolio management, and integrating an AI model with a trading API. The plugin exposes the Alpaca API to allow placing orders, accessing account data, and retrieving market data.\n\n- The technical complexity is moderate as it requires configuring the Alpaca API key, installing the plugin, and setting up paper trading initially. Basic Python/coding skills would be needed", ' In summary, this Guidance repository provides a framework for structuring and controlling LLMs through templating and programmatic control flow statements. \n\nSome key skills gained:\n\n- Template syntax based on Handlebars to interpolate variables and control program execution\n- Generation control through tags like {{gen}} to produce outputs\n- Logical control flow using tags like {{#if}}, {{#select}}, {{#each}}\n- Structuring multi-step prompts through concepts like hidden blocks, functions, and awaiting missing variables \n- Applying output structure to improve performance on tasks\n- Ensuring valid syntax through techniques like option sets and regex patterns\n- Building role-based conversational models and agent simulations\n- Integration with downstream APIs and tools', ' Here is a summary of the danikhan632/guidance_api repository:\n\n- Technical Complexity: Medium. The repository integrates two text generation tools and abstracts network calls. Some coding/scripting is required. \n\n- Stars: 25 stars. A moderate amount of community interest. \n\n- Purpose: To build an API extension that seamlessly integrates the Guidance large language model into the oobabooga/text-generation-webui interface. \n\n- Key Skills Gained: APIs/extensions development, network requests abstraction, text generation automation, modular programming with Python. \n\n- Benefits: Enables harnessing of advanced LLM capabilities from within a simple interface. Preserves', ' Here is a summary of the Triton programming language repository along with the skills that can be gained from contributing to it:\n\nName: danikhan632/triton \nStars: 0\n\nTriton is an open source programming language and compiler for deep learning workloads targeting GPUs. The repository contains the core compiler infrastructure including the Triton IR dialect, analyses, conversions between dialects, and GPU code generation targets. \n\nSkills gained:\n- Compiler construction: Parsing, IR design, transformations, code generation\n- Domain specific languages: Designing a DSL for deep learning \n- Memory analysis: Analyzing memory access patterns and optimizing data movement\n- GPU programming: Generating code for GPU backends like', " Here is a summary of the danikhan632/Auto-GPT-Messages-Plugin repository:\n\n- The repository contains a plugin that allows Anthropic's Claude AI assistant to send and receive text messages through the iMessage platform using an iMessage API server. \n\n- Skills gained from working with this repo include setting up an iMessage API server, creating a plugin that integrates with an external API, working with messaging platforms and APIs, and customizing Claude's functionality through plugins.\n\n- The technical complexity is moderate as it requires setting up an iMessage server, creating a Python plugin, and integrating with external APIs/platforms. \n\n- 48 stars indicates the plugin is fairly popular and useful for allowing messaging", ' Here is a summary of the Auto-GPT-Text-Gen-Plugin repository:\n\n- Name: Danikhan632/Auto-GPT-Text-Gen-Plugin\n- Stars: 46\n- Purpose: Allows users to fully customize the prompt sent to locally installed large language models (LLMs) for text generation through the Text Generation Web UI (TGWUI). Removes reliance on public models like GPT-3 and GPT-4.\n- How it works: Uses a local TGWUI API to connect to any model running in the TGWUI rather than directly managing models. Provides flexibility to use different models. \n- Skills gained: Customizing text generation prompts, configuring TGWUI']
TalentLLM-main/gh_cache/jryzhik.md ADDED
File without changes
TalentLLM-main/gh_cache/nmhossain02.md ADDED
@@ -0,0 +1 @@
+ [' Here is a summary of the BillSplit repository:\n\nThe BillSplit repository is a full-stack web application that allows multiple consumers shopping together to easily split the cost of items in their cart or food order. Through integrating with the NCR Orders API, it allows each consumer to select the items they purchased and directly pay the merchant for their portion of the bill. \n\nKey skills that can be gained from working with this repository include:\n\n- Full-stack web development experience using Flask for the backend and HTML, CSS, JavaScript for the frontend. This provides exposure to both server-side and client-side development.\n\n- Working with retailer APIs like the NCR Orders API to integrate payment processing functionality. This teaches skills around', ' Here is a summary of the nmhossain02/fall-2022-dev-takehome repository:\n\nThis repository contains a coding task to build out a basic todo list application with multiple features. The key skills that can be gained from completing this task include:\n\n- Building forms and accepting user input with features like text fields, tags, and date pickers. \n\n- Rendering lists of data onto the UI and managing state. \n\n- Marking items as complete and filtering/sorting lists.\n\n- Basic CSS styling to display items in cards and differentiate between complete/incomplete.\n\n- Potentially using additional skills like handling arrays, objects, events. \n\nThe technical complexity is on the moderate']
TalentLLM-main/gh_cache/taliu02.md ADDED
@@ -0,0 +1 @@
+ []
TalentLLM-main/gh_cache/tuffstuff9.md ADDED
@@ -0,0 +1 @@
+ [' Here is a summary of the KeyboardHookLite repository:\n\nThis repository provides a lightweight global keyboard hooking library for .NET applications. It uses low-level PInvoke signatures to hook into keyboard events without relying on outdated .NET classes. \n\nSome key skills and concepts covered include:\n\n- Installing and using a global keyboard hook across applications\n- Understanding low-level unmanaged Windows API calls via PInvoke \n- Processing raw keyboard input events\n- Implementing IDisposable patterns to avoid memory leaks\n- Building extensible event argument classes\n- Supporting modern UI frameworks like WPF, MAUI, WinUI\n\nThe code is well commented and documented to help developers understand how it works under', ' Here is a summary of the NYUCourseTracker repo and the skills that can be gained from it:\n\nThis is a course tracking tool that utilizes Puppeteer to scrape the NYU public course search via a headless browser. It monitors a desired class and sends alerts via Telegram when the class status changes from closed to open, allowing a student to enroll. \n\nThe main skills gained from this project include:\n\n- Web scraping using Puppeteer - Learning how to automate browser actions and extract data programmatically from websites. \n\n- API integration - How to connect apps/scripts to external APIs and services like Telegram. \n\n- Backend development with Node.js - Using Node to build a scraping/monitoring backend.', ' Here is a summary of the Next.js PDF Parser Template repository:\n\nName: nextjs-pdf-parser\nStars: 16\n\nSkills Gained:\n- Working with PDF files in a Next.js application \n- Implementing a file uploader interface using FilePond\n- Parsing PDF content and extracting text using the pdf2json library\n- Resolving technical issues like the "nodeUtil is not defined" error\n- Debugging type definition issues with TypeScript\n\nThis repository provides a template for building a PDF parser application with Next.js. It implements basic PDF uploading and parsing functionality using the FilePond and pdf2json libraries. Working through this template would help develop skills in integrating file uploads', ' Here is a summary of the Next.js 13 with Drizzle ORM & PlanetScale Starter Template repository:\n\nThis repository provides a starter template for building applications with Next.js 13, integrated with the Drizzle ORM for database management and PlanetScale as the database. \n\nSome of the key skills and technologies included in the project are:\n\n- Next.js for server-rendered React apps\n- Drizzle ORM for efficient database operations \n- PlanetScale for a scalable serverless database\n- TailwindCSS for rapid UI development\n- Server Actions for improved server-side functionality\n- An experimental hook for form state management\n\nThe technical complexity is moderate as it includes setting up backend integrations like the database.', ' Here is the summary of the react-blackjack repository:\n\nSkills gained: React, Node.js, Express, MongoDB, Passport authentication, deploying to Heroku. \n\nThis project allows users to play blackjack against the dealer. It uses React for the front-end UI and interactions. Node.js and Express are used to build the backend API. MongoDB stores user accounts and balances so data is persistent. Passport handles authentication so users can create accounts. Code is deployed to Heroku so the app can be run remotely.\n\nThe tech stack incorporates modern JavaScript tools like React, Node, and MongoDB. Building the full-stack app from front-end to backend provides exposure to developing both sides. Implementing', ' Here is a summary of the Tailwind Input Tag Component repository:\n\nThis is a React component that allows users to easily create tags using Tailwind CSS for styling. It provides a lightweight tag input that users can add tags to by typing and pressing enter or tab. Tags can be removed by backspacing on an empty input. \n\nKey features include being lightweight, styled with Tailwind for consistency, adding/removing tags via keyboard shortcuts, and option to set a maximum number of tags. \n\nThe component file is designed to be imported and used within other projects. Usage involves importing the component, setting initial state, and passing callbacks to handle tag changes. \n\nProps allow customizing the input name, placeholder, initial value,', ' Here is a summary of the visualizer.coffee Shot Downloader repository:\n\nName: tuffstuff9/visualizer.coffee-shot-downloader\nStars: 1\n\nThis is a simple Python Jupyter notebook script that allows downloading of all shot data from visualizer.coffee as JSON files. Some key points:\n\n- Skills Gained: API interfacing, asynchronous HTTP requests, data scraping, JSON parsing\n- Technical Complexity: Beginner - uses basic Python and common libraries like requests and aiohttp \n- Functionality: Logs in using credentials, fetches all shot IDs, asynchronously downloads shot data as JSON files\n- Limitations: Only downloads data, no actual shot files. No filtering of shots.', " Here is a summary of the tuffstuff9/Copy-Button-for-ChatGPT repository:\n\nThe repository adds a native looking copy button to ChatGPT messages. This allows users to easily copy the text from ChatGPT's responses. \n\nSkills Gained:\n- Front-end web development by adding UI elements \n- Browser extension development using the ChatGPT extension API\n- Basic JavaScript and HTML/CSS skills\n\nTechnical Complexity: Beginner. Includes simple JavaScript and HTML/CSS code to add a button. \n\nPotential Benefits for Users: Allows quicker copying of ChatGPT responses without having to manually select text. Provides a more seamless user experience within the ChatGPT"]
TalentLLM-main/main.py ADDED
@@ -0,0 +1,50 @@
+ import gspread
+ import os, random
+ from Candidate import JobCandidate
+ from dotenv import load_dotenv
+ from compator import bubble_sort
+ from results import writeToSheets
+ from resume_conversation import chat_with_candidate
+
+ # Load environment variables from the .env file.
+ load_dotenv()
+
+ sa = gspread.service_account(filename='service_creds.json')
+ sh = sa.open("Figma_swe")
+ wks = sh.worksheet("Sheet1")
+ data = wks.get_all_values()
+
+ candidates = []
+
+ # os.environ['COMPARATOR_LLM'] = "chat-bison"
+ os.environ['COMPARATOR_LLM'] = "gpt-3.5-turbo-1106"
+ # Rows 1-6 hold candidate data (row 0 is the sheet header).
+ for i in range(1, 7):
+     candid = JobCandidate(data[i])
+     candidates.append(candid)
+
+ # random.shuffle(candidates)
+ sort_cand = bubble_sort(candidates)
+
+ writeToSheets(candidates)
+
+ for idx, candidate in enumerate(sort_cand):
+     print(str(idx) + '. ' + candidate.email)
+
+ print("Select a candidate to chat with. Type in their index number. Type -1 if you don't want to chat.")
+ idx = int(input())
+ if idx != -1:
+     # Index into the sorted list, since those are the indices just printed.
+     selected_candidate = sort_cand[idx]
+     chat_with_candidate(selected_candidate)
+
+ # for candidate in candidates:
+ #     print(candidate)
+ #     print()  # Print a blank line between candidates for better readability.
TalentLLM-main/mathpix.py ADDED
@@ -0,0 +1,62 @@
+ import requests
+ import time
+ import json
+ import os
+ from dotenv import load_dotenv
+ load_dotenv()
+
+ HEADERS = {
+     'app_id': os.environ.get('MATHPIX_APP_ID', 'default_app_id'),
+     'app_key': os.environ.get('MATHPIX_APP_KEY', 'default_app_key')
+ }
+
+
+ def extract_text(file_path: str) -> str:
+     print("Parsing resume")
+     if not os.path.exists(file_path):
+         raise FileNotFoundError(f"The file at {file_path} does not exist.")
+
+     file_name = os.path.basename(file_path)
+
+     # Step 1: upload the PDF for conversion.
+     url1 = 'https://api.mathpix.com/v3/pdf'
+     with open(file_path, 'rb') as file:
+         files = {'file': file}
+         data = {'options_json': json.dumps({
+             "conversion_formats": {"md": True},
+             "math_inline_delimiters": ["$", "$"],
+             "rm_spaces": True
+         })}
+         status_resp = requests.post(url1, headers=HEADERS, files=files, data=data)
+
+     if status_resp.status_code != 200:
+         raise Exception(f"Failed to upload PDF: {status_resp.text}")
+
+     status_resp_data = status_resp.json()
+     pdf_id = status_resp_data.get('pdf_id')
+     if not pdf_id:
+         raise Exception("Failed to retrieve PDF ID from response.")
+
+     time.sleep(1)
+
+     # Step 2: poll until the conversion is completed.
+     url2 = f'https://api.mathpix.com/v3/pdf/{pdf_id}'
+     while True:
+         challenge_resp = requests.get(url2, headers=HEADERS)
+         challenge_resp_data = challenge_resp.json()
+         if challenge_resp_data.get('status') == 'completed':
+             break
+         time.sleep(1)
+
+     # Step 3: download the converted Mathpix Markdown and cache it on disk.
+     url3 = f'https://api.mathpix.com/v3/pdf/{pdf_id}.mmd'
+     contents = requests.get(url3, headers=HEADERS)
+     if contents.status_code != 200:
+         raise Exception(f"Failed to download converted file: {contents.text}")
+
+     with open(os.path.join(os.getcwd(), "resume_mmds", str(file_name) + '.mmd'), "w") as f:
+         f.write(contents.text)
+
+     return contents.text
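A quick sketch of calling the converter (not part of the commit): MATHPIX_APP_ID and MATHPIX_APP_KEY must be set in .env, and the PDF path below is illustrative.

from mathpix import extract_text

# Uploads the PDF, polls until conversion completes, and caches the
# Mathpix Markdown under resume_mmds/.
mmd = extract_text("resume_pdfs/example.pdf")
print(mmd[:300])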
TalentLLM-main/notebooks/cheap_compartor.py ADDED
@@ -0,0 +1,9 @@
+ from litellm import embedding
+ import os
+
+
+ # print(help(embedding))
+
+ response = embedding('huggingface/microsoft/codebert-base', input=["good morning from litellm"])
+ # print(len(response['data'][0]["embedding"]))
+ print(int(response['usage']["total_tokens"]))
TalentLLM-main/notebooks/comparisons copy 2.json ADDED
@@ -0,0 +1,130 @@
+ {
+ … (the 128 body lines of this cached-comparisons JSON object are not rendered in this view) …
+ }
TalentLLM-main/notebooks/compator.py ADDED
@@ -0,0 +1,132 @@
+ import json, os, sys
+ import openai
+ from dotenv import load_dotenv
+ load_dotenv()
+ openai.api_key = os.environ.get("OPENAI_API_KEY")
+ from Candidate import JobCandidate
+
+
+ def printc(obj, color="cyan"):
+     color_code = {
+         "black": "30", "red": "31", "green": "32", "yellow": "33",
+         "blue": "34", "magenta": "35", "cyan": "36", "white": "37"
+     }
+     colored_text = f"\033[{color_code[color]}m{obj}\033[0m" if color in color_code else obj
+     print(colored_text)
+
+
+ LLM = os.environ.get("COMPARATOR_LLM", "gpt-4-0613")
+ # LLM = os.environ.get("COMPARATOR_LLM", "gpt-3.5-turbo-1106")
+
+
+ def getContent(resumeA: str, resumeB: str) -> str:
+     return (
+         "Given the following two SWE candidates, choose between the two. Here is the rubric: "
+         + get_rubric()
+         + "Candidate A: "
+         + "\nRESUME:\n" + resumeA + "\nEND Resume\n"
+         + " END OF Candidate A"
+         + "\n\nCandidate B: "
+         + "\nRESUME:\n" + resumeB + "\nEND Resume\n"
+         + " END OF Candidate B"
+     )
+
+
+ def compare_resumes(content: str, nameA="", nameB=""):
+     choice = 0
+     response = openai.ChatCompletion.create(
+         model=LLM,
+         messages=[{"role": "user", "content": content}],
+         functions=[
+             {
+                 "name": "selectCanidate",
+                 "description": "choose between the two candidates",
+                 "parameters": {
+                     "type": "object",
+                     "properties": {
+                         "choice_num": {
+                             "type": "integer",
+                             "description": "1 for Candidate A is the best fit, 2 for Candidate B is the best fit",
+                         },
+                         "justifcation": {
+                             "type": "string",
+                             "description": "justification for why you chose the candidate, max 25 words",
+                         },
+                     },
+                     "required": ["choice_num", "justifcation"],
+                 },
+             }
+         ],
+         function_call="auto",
+     )
+
+     message = response["choices"][0]["message"]
+
+     if message.get("function_call"):
+         function_name = message["function_call"]["name"]
+         try:
+             function_args = json.loads(message["function_call"]["arguments"])
+             choice = int(function_args["choice_num"])
+         except Exception:
+             printc("error", "red")
+             printc(message["function_call"], 'red')
+             return 1
+         if function_name == "selectCanidate":
+             if choice == 1:
+                 printc(nameA + " wins over " + nameB, "cyan")
+             elif choice == 2:
+                 printc(nameB + " wins over " + nameA, "green")
+             printc(function_args["justifcation"], "yellow")
+
+     return choice
+
+
+ def get_rubric():
+     text = open("rubric.txt", "r").read()
+     return "\nRubric:\n" + str(text) + "\nEND Rubric\n"
+
+
+ def comp(candidateA: JobCandidate, candidateB: JobCandidate, rub_id: int = 0) -> int:
+     # Results are cached in comparisons.json keyed by "emailA#emailB#rubric".
+     comp_table = json.load(open("comparisons.json", "r"))
+     tag = candidateA.email + "#" + candidateB.email + "#" + str(rub_id)
+     inv_tag = candidateB.email + "#" + candidateA.email + "#" + str(rub_id)
+     if tag in comp_table:
+         return comp_table[tag]
+     elif inv_tag in comp_table:
+         return comp_table[inv_tag] * -1
+     else:
+         choice = compare_resumes(getContent(candidateA.resume_text, candidateB.resume_text), candidateA.name, candidateB.name)
+         # Map the model's 1/2 answer to -1 (A wins) / 1 (B wins).
+         if choice == 1:
+             choice = -1
+         elif choice == 2:
+             choice = 1
+         comp_table[tag] = choice
+
+         json.dump(comp_table, open("comparisons.json", "w"))
+         return choice
+
+
+ def bubble_sort(candidates: list) -> list:
+     n = len(candidates)
+     for i in range(n):
+         swapped = False
+         for j in range(0, n - i - 1):
+             if candidates[j].email == candidates[j + 1].email:
+                 continue
+             elif comp(candidates[j], candidates[j + 1]) > 0:
+                 candidates[j], candidates[j + 1] = candidates[j + 1], candidates[j]
+                 swapped = True
+         if not swapped:
+             break
+
+     return candidates
TalentLLM-main/notebooks/compator_parallel.py ADDED
@@ -0,0 +1,110 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
+ import openai
+ import json, os, threading
+
+ from dotenv import load_dotenv
+ load_dotenv()
+ openai.api_key = os.environ.get("OPENAI_API_KEY")
+ from Candidate import JobCandidate
+
+ def getContent(resumeA: str, resumeB: str) -> str:
+     return (
+         "Given the following two SWE candidates, choose between the two. Here is the rubric: "
+         + get_rubric()
+         + "Candidate A: "
+         + "\nRESUME:\n" + resumeA + "\nEND Resume\n"
+         + " END OF Candidate A"
+         + "\n\nCandidate B: "
+         + "\nRESUME:\n" + resumeB + "\nEND Resume\n"
+         + " END OF Candidate B"
+     )
+
+
+ def compare_resumes(content: str):
+     choice = 0
+     response = openai.ChatCompletion.create(
+         model="gpt-4-0613",
+         messages=[{"role": "user", "content": content}],
+         functions=[
+             {
+                 "name": "selectCandidate",
+                 "description": "choose between the two candidates",
+                 "parameters": {
+                     "type": "object",
+                     "properties": {
+                         "choice_num": {
+                             "type": "integer",
+                             "description": "1 for Candidate A is the best fit, 2 for Candidate B is the best fit",
+                         },
+                         "justification": {
+                             "type": "string",
+                             "description": "justification for why you chose the candidate",
+                         },
+                     },
+                     # "required" belongs at the object level, not inside each property
+                     "required": ["choice_num", "justification"],
+                 },
+             }
+         ],
+         function_call="auto",
+     )
+
+     message = response["choices"][0]["message"]
+
+     if message.get("function_call"):
+         function_name = message["function_call"]["name"]
+         function_args = json.loads(message["function_call"]["arguments"])
+         if function_name == "selectCandidate":
+             choice = int(function_args["choice_num"])
+             print(function_args["justification"])
+
+     return choice
+
+
+ def get_rubric():
+     text = open("rubric.txt", "r").read()
+     return "\nRubric:\n" + str(text) + "\nEND Rubric\n"
+
+
+ def comp_parallel(candidateA: JobCandidate, candidateB: JobCandidate, rub_id: int, comp_table: dict, lock: threading.Lock):
+     tag = f"{candidateA.email}#{candidateB.email}#{rub_id}"
+     if tag not in comp_table:
+         choice = compare_resumes(getContent(candidateA.resume_text, candidateB.resume_text))
+         # map the model's 1/2 verdict to comparator semantics: negative ranks A first
+         if choice == 1:
+             choice = -1
+         elif choice == 2:
+             choice = 1
+
+         with lock:
+             comp_table[tag] = choice
+
+
+ def pre_compute_comparisons(candidates: list, rub_id: int = 0) -> dict:
+     comp_table = json.load(open("comparisons.json", "r"))
+     lock = threading.Lock()
+     threads = []
+
+     # one thread per unordered pair: n candidates -> n*(n-1)/2 comparisons
+     for i in range(len(candidates)):
+         for j in range(i + 1, len(candidates)):
+             thread = threading.Thread(target=comp_parallel, args=(candidates[i], candidates[j], rub_id, comp_table, lock))
+             threads.append(thread)
+             thread.start()
+
+     for thread in threads:
+         thread.join()
+
+     json.dump(comp_table, open("comparisons.json", "w"))
+     return comp_table
+
+ def bubble_sort(candidates: list, rub_id: int = 0) -> list:
+     n = len(candidates)
+     comp_table = pre_compute_comparisons(candidates, rub_id)
+     for i in range(n):
+         for j in range(n - i - 1):
+             tag = f"{candidates[j].email}#{candidates[j + 1].email}#{rub_id}"
+             inv_tag = f"{candidates[j + 1].email}#{candidates[j].email}#{rub_id}"
+             # after a swap, a pair may only be cached under the opposite order
+             score = comp_table[tag] if tag in comp_table else -comp_table[inv_tag]
+             if score > 0:
+                 candidates[j], candidates[j + 1] = candidates[j + 1], candidates[j]
+     return candidates
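
One caveat with the fan-out above: it starts one thread per pair, so n candidates spawn n(n-1)/2 threads (and as many concurrent API calls) at once. A bounded pool is a drop-in alternative; this is a sketch using the same imports, not part of the uploaded file:

```python
# Sketch: same pairwise pre-computation, but with a capped worker pool.
from concurrent.futures import ThreadPoolExecutor

def pre_compute_comparisons_pooled(candidates: list, rub_id: int = 0,
                                   max_workers: int = 8) -> dict:
    comp_table = json.load(open("comparisons.json", "r"))
    lock = threading.Lock()
    with ThreadPoolExecutor(max_workers=max_workers) as pool:
        for i in range(len(candidates)):
            for j in range(i + 1, len(candidates)):
                pool.submit(comp_parallel, candidates[i], candidates[j],
                            rub_id, comp_table, lock)
    # the context manager joins all workers before the dump below runs
    json.dump(comp_table, open("comparisons.json", "w"))
    return comp_table
```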
TalentLLM-main/notebooks/extractor.py ADDED
@@ -0,0 +1,45 @@
+ text = open("resume_mmds/11NP1Fgf1hOP6pmX0HTbnq7GWY6eAyw4Y.pdf.mmd", "r").read()
+ import json, os
+ import openai
+ from dotenv import load_dotenv
+ load_dotenv()
+ # openai.api_key = os.environ.get("OPENAI_API_KEY")
+ openai.api_base = "http://0.0.0.0:8080"
+
+ def extract(content: str):
+     response = openai.ChatCompletion.create(
+         model="test",
+         messages=[{"role": "user", "content": content}],
+         functions=[
+             {
+                 "name": "infoExtract",
+                 "description": "extract info from resume",
+                 "parameters": {
+                     "type": "object",
+                     "properties": {
+                         "linkedin_url": {"type": "string"},
+                         "portfolio_url": {"type": "string"},
+                         "github_url": {"type": "string"},
+                         "stackoverflow_url": {"type": "string"},
+                         "name": {"type": "string"},
+                     }
+                 },
+             }
+         ],
+         function_call="auto",
+     )
+
+     message = response["choices"][0]["message"]
+
+     if message.get("function_call"):
+         function_name = message["function_call"]["name"]
+         function_args = json.loads(message["function_call"]["arguments"])
+         if function_name == "infoExtract":
+             print(function_args)
+             # return the parsed fields so callers can consume them
+             return function_args
+
+
+ extract(text)
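
With the parsed fields returned, downstream code can consume the profile links directly; a short sketch, where the field handling is illustrative:

```python
# Sketch: use the extracted profile links rather than just printing them.
fields = extract(text) or {}          # None -> {} when no function call came back
github = fields.get("github_url")     # any field may be absent in the model output
if github:
    print("GitHub profile:", github)
```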
TalentLLM-main/notebooks/hardset.ipynb ADDED
@@ -0,0 +1,21 @@
1
+ {
2
+ "cells": [
3
+ {
4
+ "cell_type": "code",
5
+ "execution_count": null,
6
+ "metadata": {},
7
+ "outputs": [],
8
+ "source": [
9
+ "\n"
10
+ ]
11
+ }
12
+ ],
13
+ "metadata": {
14
+ "language_info": {
15
+ "name": "python"
16
+ },
17
+ "orig_nbformat": 4
18
+ },
19
+ "nbformat": 4,
20
+ "nbformat_minor": 2
21
+ }
TalentLLM-main/notebooks/rag.ipynb ADDED
@@ -0,0 +1,449 @@
1
+ {
2
+ "cells": [
3
+ {
4
+ "cell_type": "markdown",
5
+ "metadata": {},
6
+ "source": [
7
+ "# Redis LangChain OpenAI eCommerce Chatbot"
8
+ ]
9
+ },
10
+ {
11
+ "cell_type": "code",
12
+ "execution_count": 1,
13
+ "metadata": {
14
+ "colab": {
15
+ "base_uri": "https://localhost:8080/"
16
+ },
17
+ "id": "5-h_nDGp3Kdf",
18
+ "outputId": "94191443-3844-4c1d-a26f-7619d976a55b",
19
+ "tags": []
20
+ },
21
+ "outputs": [
22
+ {
23
+ "name": "stdout",
24
+ "output_type": "stream",
25
+ "text": [
26
+ "/usr/bin/zsh: /home/green/miniconda3/lib/libtinfo.so.6: no version information available (required by /usr/bin/zsh)\n",
27
+ "\n",
28
+ "\u001b[1m[\u001b[0m\u001b[34;49mnotice\u001b[0m\u001b[1;39;49m]\u001b[0m\u001b[39;49m A new release of pip is available: \u001b[0m\u001b[31;49m23.1.2\u001b[0m\u001b[39;49m -> \u001b[0m\u001b[32;49m23.3.1\u001b[0m\n",
29
+ "\u001b[1m[\u001b[0m\u001b[34;49mnotice\u001b[0m\u001b[1;39;49m]\u001b[0m\u001b[39;49m To update, run: \u001b[0m\u001b[32;49mpip install --upgrade pip\u001b[0m\n"
30
+ ]
31
+ }
32
+ ],
33
+ "source": [
34
+ "# Install requirements\n",
35
+ "!pip install -r requirements.txt"
36
+ ]
37
+ },
38
+ {
39
+ "cell_type": "code",
40
+ "execution_count": 2,
41
+ "metadata": {
42
+ "tags": []
43
+ },
44
+ "outputs": [
45
+ {
46
+ "name": "stdout",
47
+ "output_type": "stream",
48
+ "text": [
49
+ "/usr/bin/zsh: /home/green/miniconda3/lib/libtinfo.so.6: no version information available (required by /usr/bin/zsh)\n",
50
+ "/home/linuxbrew/.linuxbrew/opt/[email protected]/lib/python3.11/site-packages/gdown/cli.py:126: FutureWarning: Option `--id` was deprecated in version 4.3.1 and will be removed in 5.0. You don't need to pass it anymore to use a file ID.\n",
51
+ " warnings.warn(\n",
52
+ "Downloading...\n",
53
+ "From (uriginal): https://drive.google.com/uc?id=1tHWB6u3yQCuAgOYc-DxtZ8Mru3uV5_lj\n",
54
+ "From (redirected): https://drive.google.com/uc?id=1tHWB6u3yQCuAgOYc-DxtZ8Mru3uV5_lj&confirm=t&uuid=f678b48d-4f3e-44f9-bf60-03ca828cb67c\n",
55
+ "To: /home/green/code/gatech/ai_atl/inital_work/product_data.csv\n",
56
+ "100%|████████████████████████████████████████| 225M/225M [00:09<00:00, 24.0MB/s]\n"
57
+ ]
58
+ }
59
+ ],
60
+ "source": [
61
+ "# Download the dataset\n",
62
+ "!gdown --id 1tHWB6u3yQCuAgOYc-DxtZ8Mru3uV5_lj"
63
+ ]
64
+ },
65
+ {
66
+ "cell_type": "markdown",
67
+ "metadata": {},
68
+ "source": [
69
+ "## Preprocess dataset"
70
+ ]
71
+ },
72
+ {
73
+ "cell_type": "code",
74
+ "execution_count": null,
75
+ "metadata": {
76
+ "tags": []
77
+ },
78
+ "outputs": [],
79
+ "source": [
80
+ "import pandas as pd\n",
81
+ "\n",
82
+ "MAX_TEXT_LENGTH=512\n",
83
+ "\n",
84
+ "def auto_truncate(val):\n",
85
+ " \"\"\"Truncate the given text.\"\"\"\n",
86
+ " return val[:MAX_TEXT_LENGTH]\n",
87
+ "\n",
88
+ "# Load Product data and truncate long text fields\n",
89
+ "all_prods_df = pd.read_csv(\"product_data.csv\", converters={\n",
90
+ " 'bullet_point': auto_truncate,\n",
91
+ " 'item_keywords': auto_truncate,\n",
92
+ " 'item_name': auto_truncate\n",
93
+ "})"
94
+ ]
95
+ },
96
+ {
97
+ "cell_type": "code",
98
+ "execution_count": null,
99
+ "metadata": {
100
+ "colab": {
101
+ "base_uri": "https://localhost:8080/",
102
+ "height": 669
103
+ },
104
+ "id": "00_n4VWH7FoB",
105
+ "outputId": "f26daa8c-4af9-4def-d5ab-3197777fe2f9",
106
+ "tags": []
107
+ },
108
+ "outputs": [],
109
+ "source": [
110
+ "# Contruct a primary key from item ID and domain name\n",
111
+ "all_prods_df['primary_key'] = (\n",
112
+ " all_prods_df['item_id'] + '-' + all_prods_df['domain_name']\n",
113
+ ")\n",
114
+ "# Replace empty strings with None and drop\n",
115
+ "all_prods_df['item_keywords'].replace('', None, inplace=True)\n",
116
+ "all_prods_df.dropna(subset=['item_keywords'], inplace=True)\n",
117
+ "\n",
118
+ "# Reset pandas dataframe index\n",
119
+ "all_prods_df.reset_index(drop=True, inplace=True)\n",
120
+ "\n",
121
+ "all_prods_df.head()"
122
+ ]
123
+ },
124
+ {
125
+ "cell_type": "code",
126
+ "execution_count": null,
127
+ "metadata": {
128
+ "tags": []
129
+ },
130
+ "outputs": [],
131
+ "source": [
132
+ "# Num products to use (subset)\n",
133
+ "NUMBER_PRODUCTS = 2500 \n",
134
+ "\n",
135
+ "# Get the first 1000 products with non-empty item keywords\n",
136
+ "product_metadata = ( \n",
137
+ " all_prods_df\n",
138
+ " .head(NUMBER_PRODUCTS)\n",
139
+ " .to_dict(orient='index')\n",
140
+ ")"
141
+ ]
142
+ },
143
+ {
144
+ "cell_type": "code",
145
+ "execution_count": null,
146
+ "metadata": {
147
+ "id": "Iw7rlppY8f3a",
148
+ "tags": []
149
+ },
150
+ "outputs": [],
151
+ "source": [
152
+ "# Check one of the products\n",
153
+ "product_metadata[0]"
154
+ ]
155
+ },
156
+ {
157
+ "cell_type": "markdown",
158
+ "metadata": {},
159
+ "source": [
160
+ "## Set up Redis as a vector db"
161
+ ]
162
+ },
163
+ {
164
+ "cell_type": "code",
165
+ "execution_count": null,
166
+ "metadata": {
167
+ "tags": []
168
+ },
169
+ "outputs": [],
170
+ "source": [
171
+ "from langchain.embeddings import OpenAIEmbeddings\n",
172
+ "from langchain.vectorstores.redis import Redis as RedisVectorStore\n",
173
+ "\n",
174
+ "# data that will be embedded and converted to vectors\n",
175
+ "texts = [\n",
176
+ " v['item_name'] for k, v in product_metadata.items()\n",
177
+ "]\n",
178
+ "\n",
179
+ "# product metadata that we'll store along our vectors\n",
180
+ "metadatas = list(product_metadata.values())\n",
181
+ "\n",
182
+ "# we will use OpenAI as our embeddings provider\n",
183
+ "embedding = OpenAIEmbeddings()\n",
184
+ "\n",
185
+ "# name of the Redis search index to create\n",
186
+ "index_name = \"products\"\n",
187
+ "\n",
188
+ "# assumes you have a redis stack server running on within your docker compose network\n",
189
+ "redis_url = \"redis://redis:6379\"\n",
190
+ "\n",
191
+ "# create and load redis with documents\n",
192
+ "vectorstore = RedisVectorStore.from_texts(\n",
193
+ " texts=texts,\n",
194
+ " metadatas=metadatas,\n",
195
+ " embedding=embedding,\n",
196
+ " index_name=index_name,\n",
197
+ " redis_url=redis_url\n",
198
+ ")"
199
+ ]
200
+ },
201
+ {
202
+ "cell_type": "markdown",
203
+ "metadata": {},
204
+ "source": [
205
+ "## Build the ChatBot with ConversationalRetrieverChain"
206
+ ]
207
+ },
208
+ {
209
+ "cell_type": "code",
210
+ "execution_count": null,
211
+ "metadata": {
212
+ "tags": []
213
+ },
214
+ "outputs": [],
215
+ "source": [
216
+ "from langchain.callbacks.base import CallbackManager\n",
217
+ "from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler\n",
218
+ "from langchain.chains import (\n",
219
+ " ConversationalRetrievalChain,\n",
220
+ " LLMChain\n",
221
+ ")\n",
222
+ "from langchain.chains.question_answering import load_qa_chain\n",
223
+ "from langchain.llms import OpenAI\n",
224
+ "from langchain.prompts.prompt import PromptTemplate\n",
225
+ "\n",
226
+ "template = \"\"\"Given the following chat history and a follow up question, rephrase the follow up input question to be a standalone question.\n",
227
+ "Or end the conversation if it seems like it's done.\n",
228
+ "\n",
229
+ "Chat History:\\\"\"\"\n",
230
+ "{chat_history}\n",
231
+ "\\\"\"\"\n",
232
+ "\n",
233
+ "Follow Up Input: \\\"\"\"\n",
234
+ "{question}\n",
235
+ "\\\"\"\"\n",
236
+ "\n",
237
+ "Standalone question:\"\"\"\n",
238
+ "\n",
239
+ "condense_question_prompt = PromptTemplate.from_template(template)\n",
240
+ "\n",
241
+ "template = \"\"\"You are a friendly, conversational retail shopping assistant. Use the following context including product names, descriptions, and keywords to show the shopper whats available, help find what they want, and answer any questions.\n",
242
+ "It's ok if you don't know the answer.\n",
243
+ "\n",
244
+ "Context:\\\"\"\"\n",
245
+ "{context}\n",
246
+ "\\\"\"\"\n",
247
+ "\n",
248
+ "Question:\\\"\n",
249
+ "\\\"\"\"\n",
250
+ "\n",
251
+ "Helpful Answer:\"\"\"\n",
252
+ "\n",
253
+ "qa_prompt= PromptTemplate.from_template(template)\n",
254
+ "\n",
255
+ "\n",
256
+ "# define two LLM models from OpenAI\n",
257
+ "llm = OpenAI(temperature=0)\n",
258
+ "\n",
259
+ "streaming_llm = OpenAI(\n",
260
+ " streaming=True,\n",
261
+ " callback_manager=CallbackManager([\n",
262
+ " StreamingStdOutCallbackHandler()]),\n",
263
+ " verbose=True,\n",
264
+ " temperature=0.2,\n",
265
+ " max_tokens=150\n",
266
+ ")\n",
267
+ "\n",
268
+ "# use the LLM Chain to create a question creation chain\n",
269
+ "question_generator = LLMChain(\n",
270
+ " llm=llm,\n",
271
+ " prompt=condense_question_prompt\n",
272
+ ")\n",
273
+ "\n",
274
+ "# use the streaming LLM to create a question answering chain\n",
275
+ "doc_chain = load_qa_chain(\n",
276
+ " llm=streaming_llm,\n",
277
+ " chain_type=\"stuff\",\n",
278
+ " prompt=qa_prompt\n",
279
+ ")\n",
280
+ "\n",
281
+ "\n",
282
+ "chatbot = ConversationalRetrievalChain(\n",
283
+ " retriever=vectorstore.as_retriever(),\n",
284
+ " combine_docs_chain=doc_chain,\n",
285
+ " question_generator=question_generator\n",
286
+ ")"
287
+ ]
288
+ },
289
+ {
290
+ "cell_type": "code",
291
+ "execution_count": null,
292
+ "metadata": {
293
+ "tags": []
294
+ },
295
+ "outputs": [],
296
+ "source": [
297
+ "# create a chat history buffer\n",
298
+ "chat_history = []\n",
299
+ "\n",
300
+ "# gather user input for the first question to kick off the bot\n",
301
+ "question = input(\"Hi! What are you looking for today?\")\n",
302
+ "\n",
303
+ "# keep the bot running in a loop to simulate a conversation\n",
304
+ "while True:\n",
305
+ " result = chatbot(\n",
306
+ " {\"question\": question, \"chat_history\": chat_history}\n",
307
+ " )\n",
308
+ " print(\"\\n\")\n",
309
+ " chat_history.append((result[\"question\"], result[\"answer\"]))\n",
310
+ " question = input()"
311
+ ]
312
+ },
313
+ {
314
+ "cell_type": "markdown",
315
+ "metadata": {},
316
+ "source": [
317
+ "## Customize your chains for even better performance"
318
+ ]
319
+ },
320
+ {
321
+ "cell_type": "code",
322
+ "execution_count": null,
323
+ "metadata": {
324
+ "tags": []
325
+ },
326
+ "outputs": [],
327
+ "source": [
328
+ "import json\n",
329
+ "\n",
330
+ "from langchain.schema import BaseRetriever\n",
331
+ "from langchain.vectorstores import VectorStore\n",
332
+ "from langchain.schema import Document\n",
333
+ "from pydantic import BaseModel\n",
334
+ "\n",
335
+ "\n",
336
+ "class RedisProductRetriever(BaseRetriever, BaseModel):\n",
337
+ " vectorstore: VectorStore\n",
338
+ "\n",
339
+ " class Config:\n",
340
+ " \n",
341
+ " arbitrary_types_allowed = True\n",
342
+ "\n",
343
+ " def combine_metadata(self, doc) -> str:\n",
344
+ " metadata = doc.metadata\n",
345
+ " return (\n",
346
+ " \"Item Name: \" + metadata[\"item_name\"] + \". \" +\n",
347
+ " \"Item Description: \" + metadata[\"bullet_point\"] + \". \" +\n",
348
+ " \"Item Keywords: \" + metadata[\"item_keywords\"] + \".\"\n",
349
+ " )\n",
350
+ "\n",
351
+ " def get_relevant_documents(self, query):\n",
352
+ " docs = []\n",
353
+ " for doc in self.vectorstore.similarity_search(query):\n",
354
+ " content = self.combine_metadata(doc)\n",
355
+ " docs.append(Document(\n",
356
+ " page_content=content,\n",
357
+ " metadata=doc.metadata\n",
358
+ " ))\n",
359
+ " return docs"
360
+ ]
361
+ },
362
+ {
363
+ "cell_type": "markdown",
364
+ "metadata": {},
365
+ "source": [
366
+ "### Setup ChatBot with new retriever"
367
+ ]
368
+ },
369
+ {
370
+ "cell_type": "code",
371
+ "execution_count": null,
372
+ "metadata": {
373
+ "tags": []
374
+ },
375
+ "outputs": [],
376
+ "source": [
377
+ "redis_product_retriever = RedisProductRetriever(vectorstore=vectorstore)\n",
378
+ "\n",
379
+ "chatbot = ConversationalRetrievalChain(\n",
380
+ " retriever=redis_product_retriever,\n",
381
+ " combine_docs_chain=doc_chain,\n",
382
+ " question_generator=question_generator\n",
383
+ ")"
384
+ ]
385
+ },
386
+ {
387
+ "cell_type": "markdown",
388
+ "metadata": {},
389
+ "source": [
390
+ "### Retry"
391
+ ]
392
+ },
393
+ {
394
+ "cell_type": "code",
395
+ "execution_count": null,
396
+ "metadata": {
397
+ "tags": []
398
+ },
399
+ "outputs": [],
400
+ "source": [
401
+ "# create a chat history buffer\n",
402
+ "chat_history = []\n",
403
+ "\n",
404
+ "# gather user input for the first question to kick off the bot\n",
405
+ "question = input(\"Hi! What are you looking for today?\")\n",
406
+ "\n",
407
+ "# keep the bot running in a loop to simulate a conversation\n",
408
+ "while True:\n",
409
+ " result = chatbot(\n",
410
+ " {\"question\": question, \"chat_history\": chat_history}\n",
411
+ " )\n",
412
+ " print(\"\\n\")\n",
413
+ " chat_history.append((result[\"question\"], result[\"answer\"]))\n",
414
+ " question = input()"
415
+ ]
416
+ },
417
+ {
418
+ "cell_type": "code",
419
+ "execution_count": null,
420
+ "metadata": {},
421
+ "outputs": [],
422
+ "source": []
423
+ }
424
+ ],
425
+ "metadata": {
426
+ "colab": {
427
+ "provenance": []
428
+ },
429
+ "kernelspec": {
430
+ "display_name": "Python 3 (ipykernel)",
431
+ "language": "python",
432
+ "name": "python3"
433
+ },
434
+ "language_info": {
435
+ "codemirror_mode": {
436
+ "name": "ipython",
437
+ "version": 3
438
+ },
439
+ "file_extension": ".py",
440
+ "mimetype": "text/x-python",
441
+ "name": "python",
442
+ "nbconvert_exporter": "python",
443
+ "pygments_lexer": "ipython3",
444
+ "version": "3.11.6"
445
+ }
446
+ },
447
+ "nbformat": 4,
448
+ "nbformat_minor": 4
449
+ }
TalentLLM-main/output.txt ADDED
@@ -0,0 +1,174 @@
1
+ <h1 id="daniyal-khan---portfolio">Daniyal Khan - Portfolio</h1>
2
+ <h2 id="education">Education</h2>
3
+ <p><strong>Georgia Institute of Technology</strong><br />
4
+ <em>Bachelor of Science in Computer Science</em><br />
5
+ August 2020 - December 2023</p>
6
+ <ul>
7
+ <li>Concentrations: Intelligence/AI and Systems and Architecture</li>
8
+ <li>High Honours</li>
9
+ <li>Relevant Coursework:
10
+ <ul>
11
+ <li>Operating Systems</li>
12
+ <li>Artificial Intelligence</li>
13
+ <li>Advanced Algorithms and Data Structures</li>
14
+ <li>Robotics and Perception</li>
15
+ <li>Computer Architecture</li>
16
+ <li>Circuit Design Lab</li>
17
+ </ul></li>
18
+ </ul>
19
+ <h2 id="links">Links</h2>
20
+ <ul>
21
+ <li><strong>GitHub</strong>: <a href="https://github.com/danikhan632">danikhan632</a></li>
22
+ <li><strong>My Resume</strong>: <a href="https://hackgtstoragebucket.s3.amazonaws.com/Resume.pdf">Resume</a></li>
23
+ <li><strong>LinkedIn</strong>: <a href="https://www.linkedin.com/in/daniyalmkhan/">Daniyal M Khan</a></li>
24
+ <li><strong>Email</strong>: <a href="mailto:[email protected]">[email protected]</a></li>
25
+ </ul>
26
+ <h2 id="vulkan-backend-for-triton-august-2023">Vulkan Backend for Triton : August 2023</h2>
27
+ <ol type="1">
28
+ <li>Developed Vulkan Backend for OpenAI’s Triton to enable Vulkan compatible devices utilization.</li>
29
+ <li>Addressed Vulkan’s SPIR-V entry point and descriptor sets requirements differing from OpenCL’s SPIR-V.</li>
30
+ <li>Proposed a modular, scalable architecture for Vulkan compute pipeline, adapting to dynamic configurations.</li>
31
+ <li>Explored JIT compilation and code injection for Vulkan, adapting Intel’s extension approach.</li>
32
+ <li>Created a Clang/g++ backend for dynamic C++ compilation, designed an interface for Vulkan integration.</li>
33
+ <li>Investigated memory management solutions for Vulkan compute integration with PyTorch.</li>
34
+ <li>Utilized MoltenVK for macOS ARM64 architecture ensuring consistent compute capabilities.</li>
35
+ <li>Enhanced SPIRV conversion processes for better compatibility with SPIRV-Cross.</li>
36
+ <li>Encouraged community contributions through detailed documentation and active engagement on Discord.</li>
37
+ </ol>
38
+ <p><img src="https://hackgtstoragebucket.s3.amazonaws.com/flash_attn.png" width="704"></p>
39
+ <h2 id="guidance-api-june-2023">Guidance API: June 2023</h2>
40
+ <ol type="1">
41
+ <li>Developed the Guidance API, integrating advanced language model capabilities for enhanced text generation and processing.</li>
42
+ <li>Enabled efficient network calls to Guidance, harnessing the power of cutting-edge language models for users.</li>
43
+ <li>Introduced a comprehensive output structure, supporting multiple generations, selections, conditionals, and tool use.</li>
44
+ <li>Optimized system performance with smart seed-based generation caching, ensuring efficient token storage.</li>
45
+ <li>Laid groundwork for future compatibility with role-based chat models, expanding the API’s versatility.</li>
46
+ <li>Enhanced control over modern language models, offering a superior alternative to traditional prompting and chaining.</li>
47
+ <li>Utilized intuitive syntax based on Handlebars templating, ensuring a user-friendly experience.</li>
48
+ <li>Enabled real-time interactions with Playground-like streaming in Jupyter/VSCode Notebooks.</li>
49
+ <li>Seamlessly integrated with Hugging Face models, introducing features like guidance acceleration, token healing, and regex pattern guides.</li>
50
+ <li>Emphasized model performance and precision, ensuring high-quality outputs and adherence to desired formats.</li>
51
+ </ol>
52
+ <p><a href="https://github.com/danikhan632/guidance">Link</a> <a href="https://github.com/danikhan632/guidance_api">Link</a></p>
53
+ <div class="sourceCode" id="cb1"><pre class="sourceCode python"><code class="sourceCode python"><span id="cb1-1"><a href="#cb1-1" aria-hidden="true"></a><span class="im">import</span> guidance</span>
54
+ <span id="cb1-2"><a href="#cb1-2" aria-hidden="true"></a></span>
55
+ <span id="cb1-3"><a href="#cb1-3" aria-hidden="true"></a><span class="co"># set the default language model used to execute guidance programs</span></span>
56
+ <span id="cb1-4"><a href="#cb1-4" aria-hidden="true"></a>guidance.llm <span class="op">=</span> guidance.llms.TWGUI(<span class="st">&quot;http://127.0.0.1:5000&quot;</span>)</span>
57
+ <span id="cb1-5"><a href="#cb1-5" aria-hidden="true"></a></span>
58
+ <span id="cb1-6"><a href="#cb1-6" aria-hidden="true"></a><span class="co"># define a guidance program that adapts a proverb</span></span>
59
+ <span id="cb1-7"><a href="#cb1-7" aria-hidden="true"></a>program <span class="op">=</span> guidance(<span class="st">&quot;&quot;&quot;Tweak this proverb to apply to model instructions instead.</span></span>
60
+ <span id="cb1-8"><a href="#cb1-8" aria-hidden="true"></a></span>
61
+ <span id="cb1-9"><a href="#cb1-9" aria-hidden="true"></a><span class="sc">{{</span><span class="st">proverb</span><span class="sc">}}</span></span>
62
+ <span id="cb1-10"><a href="#cb1-10" aria-hidden="true"></a><span class="st">- </span><span class="sc">{{</span><span class="st">book</span><span class="sc">}}</span><span class="st"> </span><span class="sc">{{</span><span class="st">chapter</span><span class="sc">}}</span><span class="st">:</span><span class="sc">{{</span><span class="st">verse</span><span class="sc">}}</span></span>
63
+ <span id="cb1-11"><a href="#cb1-11" aria-hidden="true"></a></span>
64
+ <span id="cb1-12"><a href="#cb1-12" aria-hidden="true"></a><span class="st">UPDATED</span></span>
65
+ <span id="cb1-13"><a href="#cb1-13" aria-hidden="true"></a><span class="st">Where there is no guidance</span><span class="sc">{{</span><span class="st">gen &#39;rewrite&#39; stop=&quot;</span><span class="ch">\\</span><span class="st">n-&quot;</span><span class="sc">}}</span></span>
66
+ <span id="cb1-14"><a href="#cb1-14" aria-hidden="true"></a><span class="st">- GPT </span><span class="sc">{{</span><span class="st">#select &#39;chapter&#39;</span><span class="sc">}}</span><span class="st">9</span><span class="sc">{{</span><span class="st">or</span><span class="sc">}}</span><span class="st">10</span><span class="sc">{{</span><span class="st">or</span><span class="sc">}}</span><span class="st">11</span><span class="sc">{{</span><span class="st">/select</span><span class="sc">}}</span><span class="st">:</span><span class="sc">{{</span><span class="st">gen &#39;verse&#39;</span><span class="sc">}}</span><span class="st">&quot;&quot;&quot;</span>)</span>
67
+ <span id="cb1-15"><a href="#cb1-15" aria-hidden="true"></a></span>
68
+ <span id="cb1-16"><a href="#cb1-16" aria-hidden="true"></a><span class="co"># execute the program on a specific proverb</span></span>
69
+ <span id="cb1-17"><a href="#cb1-17" aria-hidden="true"></a>executed_program <span class="op">=</span> program(</span>
70
+ <span id="cb1-18"><a href="#cb1-18" aria-hidden="true"></a> proverb<span class="op">=</span><span class="st">&quot;Where there is no guidance, a people falls,</span><span class="ch">\n</span><span class="st">but in an abundance of counselors there is safety.&quot;</span>,</span>
71
+ <span id="cb1-19"><a href="#cb1-19" aria-hidden="true"></a> book<span class="op">=</span><span class="st">&quot;Proverbs&quot;</span>,</span>
72
+ <span id="cb1-20"><a href="#cb1-20" aria-hidden="true"></a> chapter<span class="op">=</span><span class="dv">11</span>,</span>
73
+ <span id="cb1-21"><a href="#cb1-21" aria-hidden="true"></a> verse<span class="op">=</span><span class="dv">14</span></span>
74
+ <span id="cb1-22"><a href="#cb1-22" aria-hidden="true"></a>)</span></code></pre></div>
75
+ <p><img src="https://hackgtstoragebucket.s3.amazonaws.com/proverb_animation.gif" width="404"></p>
76
+ <h2 id="autogpt-alpaca-trader-june-2023">AutoGPT-Alpaca-Trader June 2023</h2>
77
+ <ol type="1">
78
+ <li><p><strong>Innovative Plugin Development</strong>: Spearheaded the design and implementation of a cutting-edge AutoGPT plugin, seamlessly integrating the GPT-4 powered AutoGPT application with Alpaca Trading API to augment algorithmic trading strategies with advanced AI capabilities.</p></li>
79
+ <li><p><strong>API Integration and Security</strong>: Expertly established secure and efficient connections to Alpaca’s Trading API, enabling robust trade execution, account management, and real-time data retrieval functionalities, while ensuring data integrity and compliance with industry best practices.</p></li>
80
+ <li><p><strong>Enhanced Trade Management</strong>: Developed a comprehensive suite of tools for the automated placement, modification, and cancellation of diverse stock and ETF orders, including market, limit, and stop orders, resulting in a streamlined trading experience and improved operational efficiency.</p></li>
81
+ <li><p><strong>Account and Portfolio Management</strong>: Implemented advanced features for real-time monitoring and management of user account details, portfolio positions, and transaction history, delivering a holistic view of financial assets and enhancing user decision-making.</p></li>
82
+ <li><p><strong>Market Data and Risk Management</strong>: Provided traders with access to vital real-time and historical market data, including stock quotes and bar data, as well as corporate action insights, complemented by a robust paper trading environment for strategy testing and risk mitigation.</p></li>
83
+ </ol>
84
+ <p><img src="https://hackgtstoragebucket.s3.amazonaws.com/stocks.gif" width="404"></p>
85
+ <h2 id="autogpt-messages-may-2023">AutoGPT Messages: May 2023</h2>
86
+ <ol type="1">
87
+ <li>Developed the AutoGPT plugin for iMessages, enabling seamless integration with AI-powered messaging across multiple platforms, ensuring user data privacy and security.</li>
88
+ <li>Implemented a Python server backend, allowing the plugin to operate universally while maintaining a dedicated Mac server for core functionalities.</li>
89
+ <li>Streamlined the installation process with cross-platform support, providing detailed instructions for Linux, Mac, Windows, and WSL environments.</li>
90
+ <li>Enhanced user experience by integrating with the iMessage API and providing options for public accessibility using tools like tunnelto and ngrok.</li>
91
+ <li>Designed a user-friendly interface with real-time notifications, customizable settings, and integration capabilities with other communication tools for comprehensive messaging solutions.</li>
92
+ </ol>
93
+ <p><img src="https://hackgtstoragebucket.s3.amazonaws.com/auto.gif" width="704"></p>
94
+ <p><a href="https://github.com/danikhan632/Auto-GPT-Messages-Plugin">Github Page</a></p>
95
+ <h2 id="autogpt-local-inference-server-may-2023">AutoGPT Local Inference Server: May 2023</h2>
96
+ <ol type="1">
97
+ <li>Developed the Auto-GPT-Text-Gen-Plugin to enable users to fully customize prompts for integration with locally installed large language models (LLMs), facilitating a shift away from dependency on GPT-4 and GPT 3.5.</li>
98
+ <li>Implemented a robust connection to Text Generation WebUI, serving as an API gateway for various models, which streamlines the process of managing complex configurations and environment settings.</li>
99
+ <li>Provided comprehensive documentation and a step-by-step installation guide, ensuring users can effortlessly download, configure, and utilize the plugin with their specific text generation setup.</li>
100
+ <li>Integrated flexibility for model selection and the ability to tweak generation parameters such as top_p, top_k, and repetition_penalty through environmental variables, enhancing user control over text generation outcomes.</li>
101
+ <li>Encapsulated API interactions and prompt management within the TextGenPluginController class, laying the groundwork for potential future expansions to support multiple APIs, thereby ensuring long-term maintainability and scalability of the plugin.</li>
102
+ </ol>
103
+ <p><a href="https://github.com/danikhan632/Auto-GPT-Text-Gen-Plugin">Github Page</a></p>
104
+ <h2 id="imessages-api-may-2023">iMessages API: May 2023</h2>
105
+ <ol type="1">
106
+ <li>Developed a Flask-based API to interact with iMessage, enabling users to send and retrieve messages as well as fetch recent contacts, enhancing communication automation.</li>
107
+ <li>Implemented secure access to the API by creating a custom decorator function that validates API keys, ensuring secure and authenticated interactions.</li>
108
+ <li>Orchestrated background data synchronization using threading, allowing for real-time updates of messages while maintaining a responsive API service.</li>
109
+ <li>Integrated iMessage reader and AppleScript for seamless message sending and retrieval, showcasing strong cross-technology integration skills.</li>
110
+ <li>Designed a user-friendly setup process, including environment variable configuration and easy-to-follow instructions, improving the accessibility of the API for end users.</li>
111
+ </ol>
112
+ <p><a href="https://github.com/danikhan632/iMessage-API">Github Page</a></p>
113
+ <p><img src="https://hackgtstoragebucket.s3.amazonaws.com/chat.gif" width="704"></p>
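
The API-key decorator described in point 2 might look roughly like this; the header name and key storage are assumptions, not the project's actual code:

```python
# Hypothetical shape of the API-key guard described above.
import os
from functools import wraps
from flask import request, abort

def require_api_key(view):
    @wraps(view)
    def wrapped(*args, **kwargs):
        # compare the caller's header against the configured secret
        if request.headers.get("X-API-Key") != os.environ.get("API_KEY"):
            abort(401)
        return view(*args, **kwargs)
    return wrapped
```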
114
+ <h2 id="buzzos-january-2023">BuzzOS: January 2023</h2>
115
+ <p>BuzzOS is an Operating System built for the Intel/AMD x86_64 architecture using assembly and Rust. The operating system includes a Graphical User Interface (GUI) and is designed to provide a complete user experience.</p>
116
+ <p>The operating system includes user space and a mechanism for user-level processes to perform system calls to the kernel. This allows users to run applications and perform various tasks on the system.</p>
117
+ <p>BuzzOS also includes drivers for various hardware components, including the keyboard, mouse, timer, disk, and Intel PIC 8259. These drivers enable a robust input experience and ensure that the operating system can communicate effectively with various hardware components.</p>
118
+ <p>In addition to the core operating system functionality, BuzzOS also includes a fully functional desktop interface with games and system apps. This interface provides users with a familiar and intuitive environment for interacting with the operating system.</p>
119
+ <p>Overall, BuzzOS is an impressive project that demonstrates the power and flexibility of modern operating systems. By leveraging assembly and Rust, the project was able to create a complete operating system with a GUI and a range of drivers and applications. This is a significant achievement and represents a valuable contribution to the field of operating systems. <a href="https://github.com/JVKdouk/BuzzOS">Github Page</a></p>
120
+ <h2 id="path-finding-robot-october-2022">Path-finding Robot: October 2022</h2>
121
+ <ul>
122
+ <li>Developed proficiency in Robotics and Computer Vision through implementing the Rapidly-exploring Random Tree (RRT) algorithm, enhancing path planning efficiency in autonomous robotic navigation.</li>
123
+ <li>Leveraged Computer Vision techniques to enable real-time object detection and environment mapping, optimizing robot’s perception and decision-making capabilities.</li>
124
+ <li>Designed and executed algorithms for image processing and feature extraction, significantly improving the accuracy of object recognition in varied lighting and environmental conditions.</li>
125
+ <li>Employed state-of-the-art machine learning models for image captioning, translating visual data into descriptive language, and enhancing human-robot interaction.</li>
126
+ <li>Demonstrated strong problem-solving skills in Robotics by handling exceptions such as VectorTimeoutException, ensuring seamless operation and reliability of robotic systems.</li>
127
+ </ul>
128
+ <p><a href="https://github.com/danikhan632/robot-path-finder">Github Page</a></p>
129
+ <p><img src="https://hackgtstoragebucket.s3.amazonaws.com/path.gif" width="604"></p>
130
+ <h2 id="flutter-tower-defense-game-april-2022">Flutter Tower Defense Game: April 2022</h2>
131
+ <p>Designed and developed a tower defense game using the Flutter framework.</p>
132
+ <ul>
133
+ <li>Implemented game mechanics including tower placement, enemy spawning, and pathfinding using the Dart programming language.</li>
134
+ <li>Utilized Flutter’s built-in animation framework to create smooth and visually appealing animations for tower attacks and enemy movements.</li>
135
+ <li>Integrated Google Firebase for user authentication and cloud storage to save game progress and scores.</li>
136
+ <li>Takes advantage of Flutter’s cross-platform nature, allowing it to run on iOS, Android, Mac, Windows, Linux, and Web.</li>
137
+ <li>Collaborated with a team of developers and designers to ensure timely delivery and a high-quality end product.</li>
138
+ </ul>
139
+ <p><a href="https://github.com/danikhan632/tower_defense_game">Github Page</a></p>
140
+ <p>You can play the game <a href="https://tower-defense-d8435.web.app/#/">here</a>.</p>
141
+ <p><img src="https://hackgtstoragebucket.s3.amazonaws.com/tower_def.gif" width="804"></p>
142
+ <h2 id="covid-vaccine-tracker-february-2021">COVID Vaccine Tracker: February 2021</h2>
143
+ <p>The COVID Vaccine Tracker is a tool for predicting the progress of COVID-19 vaccinations across US states. It uses data from vaccine databases and factors in state population to estimate when each state will reach an 80% vaccination rate. The project was created in March of 2021 but could potentially be modified for use with the Delta variant of COVID-19.</p>
144
+ <p>The model used in the project is based on a logarithmic curve. It provided fairly accurate predictions until the 50% vaccination mark but did not accurately predict the curve going logarithmic at that point. Despite this limitation, the tool still provides valuable insights into the progress of vaccinations across different US states.</p>
145
+ <p><a href="">Github Page</a></p>
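
The logarithmic fit described might be set up roughly like this; the functional form, sample data, and the 80% solve are illustrative, not the project's actual code:

```python
# Sketch: fit cumulative vaccination % to a logarithmic curve, then solve
# for the day the curve crosses 80%. Data points are made up.
import numpy as np
from scipy.optimize import curve_fit

def log_curve(day, a, b):
    return a * np.log(day) + b            # cumulative % vaccinated

days = np.array([30.0, 60.0, 90.0, 120.0])
pct = np.array([8.0, 18.0, 30.0, 38.0])
(a, b), _ = curve_fit(log_curve, days, pct)
day_80 = np.exp((80.0 - b) / a)           # invert a*ln(d) + b = 80
print(f"projected day to reach 80%: {day_80:.0f}")
```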
147
+ <h2 id="create-c-app-november-2022">Create C++ App: November 2022</h2>
148
+ <p>Create-Cpp-App is a Command Line Interface (CLI) tool that provides an npm-like experience for building C++ applications. The tool is designed to streamline the process of building C++ apps by automating many of the repetitive and time-consuming tasks that developers typically face.</p>
149
+ <p>The tool is built to be intuitive and user-friendly, and it generates makefiles and automatically updates CMake files for a fast and efficient development experience. This allows developers to focus on writing code instead of worrying about the build process.</p>
150
+ <p>Create-Cpp-App also includes a range of built-in testing, address sanitization, benchmarking, and other tools for building production-ready C++ applications. These tools are designed to help developers ensure that their code is of high quality and performance.</p>
151
+ <p>Overall, Create-Cpp-App is an innovative tool that helps simplify the process of building C++ applications. By providing an npm-like experience, the tool makes it easy for developers to get started with building C++ apps and reduces the time and effort required to build high-quality, production-ready applications.</p>
152
+ <p><img src="https://hackgtstoragebucket.s3.amazonaws.com/cpp.gif" width="804"></p>
153
+ <p><a href="https://github.com/danikhan632/create-cpp-app">Github Page</a></p>
154
+ <h2 id="clean-up-crew-october-2022">Clean Up Crew: October 2022</h2>
155
+ <p>Clean Up Crew is a web application that serves as a platform for connecting small communities with local businesses. The application was built using Next.js, MongoDB, AWS S3, Google Maps API, and ReactJS.</p>
156
+ <p>The platform allows users to create and interact with posts in a given area. Users can post about community events, local businesses, and other topics related to their community. The application includes a sorting algorithm based on various factors such as location, user interaction, and other metrics to ensure that the most relevant content is displayed to users.</p>
157
+ <p>The project was developed by a team of programmers who participated in a programming competition. Over a period of 36 hours, the team worked on developing the application and implementing its various features. After the competition, the team was awarded 13th place out of 191 teams, which is a testament to their hard work and the effectiveness of the application they developed.</p>
158
+ <p>Overall, this project represents a valuable contribution to small communities looking to improve their localities and small businesses seeking new opportunities. The platform provides a means for these groups to connect and collaborate, and the sorting algorithm ensures that the most relevant content is displayed to users. By utilizing modern web technologies and APIs, the platform is able to provide a seamless and user-friendly experience for its users.</p>
159
+ <h2 id="self-driving-car-january-2021">Self-Driving-Car: January 2021</h2>
160
+ <p>The Self-Driving Car project is a machine learning project that aims to simulate the behavior of a self-driving car using a Convolutional Neural Network (CNN) and computer vision techniques. The project involves constructing a virtual environment where a car can be driven autonomously using machine learning algorithms.</p>
161
+ <p>The CNN is used to determine the speed and angle of rotation of the simulated vehicle based on data obtained from a virtual camera. The camera captures images of the environment and feeds them into the CNN, which processes the data and outputs a prediction for the vehicle’s next move. The CNN is trained using a dataset of labeled images and their corresponding speed and steering angles.</p>
162
+ <p>To implement the CNN, the project utilizes a number of machine learning libraries, including Tensorflow, Keras, and NumPy. These libraries provide a range of tools for developing, training, and testing machine learning models, as well as tools for processing and analyzing large datasets.</p>
163
+ <p>The project also includes a testing environment where the performance of the self-driving car can be evaluated. This environment allows the user to adjust parameters such as the speed and complexity of the environment, and to observe how the car responds to different scenarios.</p>
164
+ <p>Overall, the Self-Driving Car project represents an exciting application of machine learning and computer vision techniques to the field of autonomous vehicles. By simulating the behavior of a self-driving car in a virtual environment, the project provides a safe and scalable platform for testing and developing new algorithms and techniques for autonomous driving.</p>
165
+ <p><img src="https://hackgtstoragebucket.s3.amazonaws.com/self_car.gif" width="804"></p>
166
+ <p><a href="https://github.com/danikhan632/Self-Driving-Car">Github Page</a></p>
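
A schematic of the kind of Keras CNN the description implies; the input size and layer widths are illustrative assumptions, not the project's actual architecture:

```python
# Sketch: regress a steering angle from a camera frame with a small CNN.
from tensorflow import keras
from tensorflow.keras import layers

model = keras.Sequential([
    layers.Input(shape=(66, 200, 3)),               # cropped camera frame
    layers.Conv2D(24, 5, strides=2, activation="relu"),
    layers.Conv2D(36, 5, strides=2, activation="relu"),
    layers.Conv2D(48, 5, strides=2, activation="relu"),
    layers.Flatten(),
    layers.Dense(100, activation="relu"),
    layers.Dense(1),                                 # predicted steering angle
])
model.compile(optimizer="adam", loss="mse")          # trained on labeled frames
```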
167
+ <h2 id="amazon-shopping-clone-december-2020">Amazon-Shopping Clone: December 2020</h2>
168
+ <p>The Amazon Shopping Clone is a web application built using the MERN stack (MongoDB, Express, React, and Node.js) and Stripe API. It mimics the design and user interface of the Amazon.com website, allowing users to browse and purchase products in a familiar environment.</p>
169
+ <p>One of the key features of the application is its login system, which allows users to create accounts and securely store their personal and payment information. This information is stored using MongoDB, a NoSQL database that provides a flexible and scalable data storage solution.</p>
170
+ <p>In addition to the login system, the application also utilizes the Stripe API to handle transactions in a secure and scalable manner. Stripe is a popular payment processing platform that provides a wide range of features for online businesses, including secure payment processing, subscription management, and fraud detection.</p>
171
+ <p>To ensure a smooth and intuitive user experience, the application implements a design language that closely mimics that of the Amazon.com website. This includes a consistent color scheme, typography, and layout, as well as familiar user interface elements such as navigation menus, search bars, and product listings.</p>
172
+ <p>Overall, the Amazon Shopping Clone provides a robust and scalable platform for online shopping that combines the familiarity and convenience of Amazon.com with the security and scalability of modern web technologies. <a href="https://github.com/danikhan632/tower_defense_game">Github Page</a></p>
173
+ <p>You can access the live demo of the <strong>FakeBlock Shopping</strong> project <a href="https://fakeblock-shopping.web.app/">here</a></p>
174
+ <p><img src="https://hackgtstoragebucket.s3.amazonaws.com/amzn.gif" width="990"></p>
TalentLLM-main/requirements.txt ADDED
@@ -0,0 +1,7 @@
1
+ litellm==1.0.0
2
+ requests
3
+ openai==1.0.0
4
+ google-cloud-aiplatform
5
+ gdown
6
+ PyGithub
7
+ gspread
TalentLLM-main/results.py ADDED
@@ -0,0 +1,56 @@
+ import gspread
+ from Candidate import JobCandidate
+ from typing import List
+
+ # Authenticate with Google Sheets using a service account
+ sa = gspread.service_account(filename='service_creds.json')
+
+ def writeToSheets(candidates: List[JobCandidate]):
+     sh = sa.open("Figma_swe")
+     new_sheet_title = "Results"  # Change this to your desired sheet name
+
+     # Check if the sheet already exists
+     try:
+         existing_wks = sh.worksheet(new_sheet_title)
+     except gspread.exceptions.WorksheetNotFound:
+         existing_wks = None
+
+     # If the sheet exists, delete it
+     if existing_wks:
+         sh.del_worksheet(existing_wks)
+     # the header row below has 13 columns, so the sheet needs at least 13 cols
+     new_wks = sh.add_worksheet(title=new_sheet_title, rows="100", cols="20")
+
+     data_to_write = [
+         ["Timestamp", "Name", "Email", "Resume Link", "Cover Letter",
+          "LinkedIn", "GitHub", "Personal Website", "Visa Sponsorship",
+          "Disability Status", "Ethnic Background", "Gender", "Military Service"]
+     ]
+
+     for candidate in candidates:
+         data_row = [
+             candidate.timestamp.strftime("%m/%d/%Y %H:%M:%S"),
+             candidate.name,
+             candidate.email,
+             candidate.resume_link,
+             candidate.cover_letter,
+             candidate.linkedin,
+             candidate.github_link,
+             candidate.personal_website_link,
+             candidate.visa_sponsorship,
+             candidate.disability_status,
+             candidate.ethnic_background,
+             candidate.gender,
+             candidate.military_service
+         ]
+         data_to_write.append(data_row)
+
+     new_wks.update('A1', data_to_write)
+
+     print(f"Data written to '{new_sheet_title}' sheet.")
TalentLLM-main/resume_conversation.py ADDED
@@ -0,0 +1,28 @@
+ import json, os, sys
+ from dotenv import load_dotenv
+ load_dotenv()
+ from litellm import completion
+
+ def get_prompt(candidate, chat_history, question):
+     return ('Given the details of a candidate, the previous chat history, and a question, answer the question as if you are the candidate. Keep the answers short and to the point.\n'
+             + 'Candidate Details:\n\n' + str(candidate) + '\nEnd Candidate Details\n'
+             + 'Chat History:\n\n' + chat_history + '\nEnd Chat History\n'
+             + 'Question:\n\n' + question + '\nEnd Question\n')
+
+ def chat_with_candidate(candidate, model='chat-bison'):
+     chat_history = ''
+     print('You are now chatting with ' + candidate.name + '. Type in your question or type QUIT to stop.')
+     while True:
+         print('User:')
+         question = input()
+         print()
+         if question.strip().upper() == 'QUIT':
+             break
+         prompt = get_prompt(candidate, chat_history, question)
+         messages = [{'content': prompt, 'role': 'user'}]
+         response = completion(model=model, messages=messages)['choices'][0]['message']['content']
+         print('Response:\n' + response + '\n')
+         # trailing newline keeps successive turns separated in the transcript
+         chat_history += 'User:\n' + question + '\nResponse:\n' + response + '\n'
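
A quick way to exercise `chat_with_candidate` without the full pipeline: `get_prompt()` only embeds `str(candidate)` and the loop reads `candidate.name`, so a stand-in object works; this one is hypothetical:

```python
# Sketch: drive the interactive loop with a stand-in candidate.
from types import SimpleNamespace

demo = SimpleNamespace(name="Demo Candidate",
                       summary="Python, Flask, 3 yrs backend experience")
# str(demo) renders the namespace fields, which the prompt embeds verbatim
chat_with_candidate(demo, model="gpt-3.5-turbo")
```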
TalentLLM-main/resume_conversation_interactive.py ADDED
@@ -0,0 +1,49 @@
+ import json, os, sys
+ from dotenv import load_dotenv
+ load_dotenv()
+ from litellm import completion
+ from mathpix import extract_text
+ import gradio as gr
+
+ model = 'chat-bison'
+ resume_location = 'resume_pdfs/1BXAuw6f1rDF05P734y_O7K8fYwgDVZvV.pdf'
+ resume_mmd = extract_text(resume_location)
+
+ def get_prompt(resume_mmd, chat_history, question):
+     history = ''
+     for user, bot in chat_history:
+         history += 'User:\n' + user + '\nResponse:\n' + bot + ' '
+     return ('Given the resume of a candidate, the previous chat history, and a question, answer the question as if you are the candidate. Keep the answers short and to the point.\n'
+             + 'Resume:\n\n' + resume_mmd + '\nEnd Resume\n'
+             + 'Chat History:\n\n' + history + '\nEnd Chat History\n'
+             + 'Question:\n\n' + question + '\nEnd Question\n')
+
+ def inference(message, history, model='gpt-3.5-turbo'):
+     try:
+         messages_litellm = [{"role": "user", "content": get_prompt(resume_mmd, history, message)}]
+         partial_message = ""
+         # stream tokens back to the Gradio chat window as they arrive
+         for chunk in completion(model=model,
+                                 messages=messages_litellm,
+                                 stream=True):
+             if 'content' in chunk['choices'][0]['delta']:
+                 partial_message += chunk['choices'][0]['delta']['content']
+                 yield partial_message
+     except Exception as e:
+         print("Exception encountered:", str(e))
+         yield "An error occurred. Please 'Clear' the error and try your question again."
+
+ gr.ChatInterface(
+     inference,
+     chatbot=gr.Chatbot(height=400),
+     textbox=gr.Textbox(placeholder="Enter text here...", container=False, scale=5),
+     description="You are chatting with a resume.",
+     title="Chat with Resume",
+     examples=["Introduce yourself."],
+     retry_btn="Retry",
+     undo_btn="Undo",
+     clear_btn="Clear",
+ ).queue().launch(share=True)
TalentLLM-main/resume_mmds/[email protected] ADDED
@@ -0,0 +1,95 @@
1
+ Zaeem Ahmad
2
+
3
+ C: 916-300-3117, E: [email protected]
4
+
5
+ Nationality: USA
6
+
7
+ EDUCATION
8
+
9
+ $2021-2025$
10
+
11
+ Georgia Institute of technology
12
+
13
+ BS, Major: Computer Science
14
+
15
+ Atlanta, Georgia
16
+
17
+ $2017-2020$
18
+
19
+ California Virtual Academy @ Sutter
20
+
21
+ EXPERIENCE
22
+
23
+ Honors: Dean's Honor List for 4 consecutive semesters; CGPA 3.96 / 4.00
24
+
25
+ Roseville, California
26
+
27
+ Aug'23-Present VIP Undergraduate Researcher, Georgia Institute of Technology
28
+
29
+ Atlanta, Georgia
30
+
31
+ - Developed a robust object-oriented programming (OOP) framework with multiple classes and instance methods for substation component modeling, enhancing flexibility and scalability.
32
+
33
+ - Integrated communication protocols to ensure seamless data exchange among diverse components, leading to improved efficiency within smart grid systems.
34
+
35
+ - Conducted comprehensive research on component variables, providing valuable insights that significantly improved the accuracy of co-simulation models and informed data-driven decision-making.
36
+
37
+ - Implemented co-simulation using the HELICS framework, ensuring accurate representation and interaction of components, thus contributing to the project's overall success.
38
+
39
+ May'23-Present Undergraduate Researcher, Georgia Institute of Technology
40
+
41
+ Atlanta, Georgia
42
+
43
+ - Developed and implemented advanced Python-based computational models for cardiac electrophysiology, leading to a $30 \%$ improvement in arrhythmia understanding compared to existing models.
44
+
45
+ - Successfully translated complex MATLAB algorithms to Python, resulting in a $20 \%$ reduction in computation time while maintaining accuracy and reliability.
46
+
47
+ - Integrated high-throughput patch clamp data into the computational models to enhance parameter estimation precision.
48
+
49
+ - Collaborated in the creation of a novel hybrid algorithm, reducing the need for initial assumptions in parameter estimation by $40 \%$, thus significantly enhancing accuracy.
50
+
51
+ Jan'23-Aug'23 Big Data Big Impact CS Intern, Georgia Institute of Technology Atlanta, Georgia
52
+
53
+ - Spearheaded a cross-functional team effort to design and develop the Hurricane Cost Projector, a cutting-edge disaster impact prediction tool.
54
+
55
+ - Employed HTML, CSS, Python, and JavaScript to craft an intuitive and visually appealing front-end interface, ensuring a user-friendly and engaging experience.
56
+
57
+ - Enhanced the tool's effectiveness through the implementation of data visualization features, facilitating customer comprehension and interaction.
58
+
59
+ - Thrived in a dynamic, fast-paced environment, collaborating seamlessly within cross-functional teams to deliver impactful and innovative solutions to drive project success.
60
+
61
+ May'22-Aug'23 BluEast - Technical Intern
62
+
63
+ Lahore, Pakistan
64
+
65
+ - Assisted with PCB design \& embedded programming of an iOS \& Android compatible App, "Autopilot", that is built using JavaScript (Frontend \& Backend frameworks), Python, Ruby, PHP, \& C++
66
+
67
+ - Spearheaded optimization process for algorithm that employs Mevris AI to tweak thermostat of DC inverter air conditioner based on metrics to ensure thermal comfort \& reduce energy consumption by $80 \%$
68
+
69
+ - Developed an automation scheme for test condition parameters that saved configuration time on the system $\&$ resulted in an overall $50 \%$ reduction in validation testing time.
70
+
71
+ Sep'20-Aug'21 Orient Group of Companies - Business Analyst
72
+
73
+ Lahore, Pakistan
74
+
75
+ - Digitized sales network through 'Orient Dost' app that enabled $76+$ team members \& 100+ distributors spread across 16 territories to track performance in real time which led to a $20 \%$ increase in sales.
76
+
77
+ - Automated dispenser production line to enhance efficiency \& replaced steel tanks with aluminum to improve hygienic standards, reduce production cost by $4 \%$, \& achieve $100 \%$ annualized increase in sales .
78
+
79
+ - Led team of 4 to implement employment 'Training Need Program' in SAP ERP to improve productivity of 67 middle management employees who completed 6 different training programs as part of this initiative.
80
+
81
+ \title{
82
+ EXTRA-CURRICULAR
83
+ }
84
+
85
+ - National Champion at National Tournament of Young Math, National STEM Educational Foundation Tournament of Young Mathematicians 2019.
86
+
87
+ - Contestant at International Earth Science Olympiad (2020 event canceled due to COVID-19).
88
+
89
+ - National Champion at National Earth Science Olympiad, National STEM Educational Foundation 2019-2020.
90
+
91
+ SKILLS
92
+
93
+ - Programming Languages - C++, Python, Java, JavaScript, HTML, React.JS, React Native, MATLAB, CSS, Figma, GitHub, Git
94
+
95
+ - $\quad$ Languages - English (Fluent), Urdu (Native), Hindi (Fluent), Punjabi (Intermediate)
TalentLLM-main/resume_mmds/[email protected] ADDED
@@ -0,0 +1,83 @@
1
+ \title{
2
+ Azeez Ishaqui
3
+ }
4
+
5
+ - Gmail — [email protected] $・$ LinkedIn $\cdot$678-313-6772 $\cdot$ U.S. Citizen
6
+
7
+ \section*{EDUCATION}
8
+
9
+ Georgia Institute of Technology - Atlanta, GA
10
+
11
+ M.S. in Machine Learning, Current Masters student, BS/MS program — GPA: 4.0/4.0
12
+
13
+ Georgia Institute of Technology - Atlanta, GA
14
+
15
+ B.S. in Computer Science (Artificial Intelligence) — GPA: 4.0/4.0
16
+
17
+ Technical Skills: Python, PyTorch, Java, C/C++, Scala, AWS (EC2/STS), Terraform, Docker, SQL, HTML/JS/CSS, Assembly
18
+
19
+ Languages: French (Advanced), English (Native)
20
+
21
+ Relevant Coursework: Machine Learning, Deep Learning, Software Development, Data Structures and Design/Analysis of Algorithms, Probability/Statistics, Linear Algebra, Combinatorics, Discrete Mathematics, Multivariable Calculus, Introductory AI
22
+
23
+ \section*{WORK EXPERIENCE}
24
+
25
+ KPMG CIO Advisory
26
+
27
+ June 2023 - August 2023
28
+
29
+ - Constructed an AI chatbot with full-stack skills including HTML/JS/CSS embedded onto Unqork to manage client database
30
+
31
+ - Leveraged ML tools like Scikit, NumPy, and Pandas to improve Great American Health user experience and customer retention
32
+
33
+ - Implemented a scalable deployment of the chatbot with a Flask API using NGINX reverse proxy and Gunicorn on an $\underline{\text { AWS }}$ EC2 instance, integrated with OpenAI GPT-3 chat functionality on a secure environment with proper user access control
34
+
35
+ - Conducted data analysis, prepared reports, and utilized Excel and data visualization tools to assist executive decision-making
36
+
37
+ - Analyzed comprehensive migration strategies and roadmaps, including assessment of on-premises healthcare infrastructure, application analysis, and cost optimization plans, resulting in efficient and cost-effective cloud deployments within $\underline{\text { GCP }}$
38
+
39
+ ASI (Aggregate Singularity Industries) Cloud/Devops Engineer
40
+
41
+ June 2022 - August 2022
42
+
43
+ - Developed cloud-based AWS services, API integrations with Snyk for compromised repositories security, bucket pruning for performance/cost optimization, and an EC2 discovery service with instance tagging and Prometheus scrape configuration
44
+
45
+ - Created a Slack-integrated service for company website traffic monitoring, containerized on Docker, configured with $\underline{\text { Grafana }}$ and Prometheus to initiate error threshold alerting for company-wide server maintenance, critical for users and employees alike
46
+
47
+ - Provisioned and managed resources on Terraform, including load balancers, access keys, and customized port configurations for efficient traffic handling, while maintaining domain server privacy for developers
48
+
49
+ - Utilized EC2/STS services and Terraform resource provisioning to administer an AWS replication service that safeguarded server functionality during internal network failures and complete outages
50
+
51
+ Assurant Inc. - Team Project Lead \& ML System Designer
52
+
53
+ January 2023 - Present
54
+
55
+ - Automated data ingestion in Python, scraping FRED macroeconomic series for a metro-Atlanta housing price prediction model; applied pre-processing and supervised learning, including multivariate time-series analysis and K-nearest neighbors, to minimize loss and reduce overfitting
56
+
57
+ - Designed and co-implemented an ML pipeline with Assurant engineers focusing on precision and efficiency within AWS
58
+
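A minimal sketch of the K-nearest-neighbors regression workflow the Assurant bullets describe, on synthetic stand-in data; the feature names in the comment are assumptions:

```python
# Synthetic stand-in for the described housing model: scale features, fit
# K-nearest neighbors, and score on held-out data.
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.neighbors import KNeighborsRegressor
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import StandardScaler

rng = np.random.default_rng(0)
X = rng.normal(size=(500, 3))  # assumed features, e.g. rate, CPI, inventory
y = X @ np.array([40.0, -25.0, 10.0]) + rng.normal(scale=5.0, size=500)

X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
model = make_pipeline(StandardScaler(), KNeighborsRegressor(n_neighbors=5))
model.fit(X_train, y_train)
print("held-out R^2:", model.score(X_test, y_test))
```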
59
+ Cognira (Retail/Analytics Software Company) - Software Development Intern
60
+
61
+ June 2021 - August 2021
62
+
63
+ - Administered a web server which ingested client data and performed several forms of data optimization analysis using Scala
64
+
65
+ - Performed server testing using an external software client and Docker containers for application isolation and portability
66
+
67
+ Eye Consultants of Atlanta - Data Analytics Intern
68
+
69
+ January 2021 - February 2021
70
+
71
+ \section*{LEADERSHIP/PUBLICATIONS}
72
+
73
+ Founder of "Charity 4 All", a non-profit organization (solo project)
74
+
75
+ March 2017 - Present
76
+
77
+ - Publications: Atlanta Journal-Constitution (AJC)
78
+
79
+ - Fundraised over $\$ 2,500$ through GoFundMe, leveraged social media and a personal network to spread awareness of the nonprofit, and used surplus funds to provide pencils, notebooks, and textbooks, supporting over twenty students' education
80
+
81
+ - Facilitated international logistics to ship and hand-deliver 2000+ masks to several different Atlanta Homeless Shelters
82
+
83
+ - Invigorated the local economy by providing carts and sewing machines to vendors and widows in need; annually distributed food staples and durable goods during Ramadan to impoverished families with limited access to food after fasting
TalentLLM-main/resume_mmds/[email protected] ADDED
@@ -0,0 +1,107 @@
1
+ \title{
2
+ Daniyal Hussain
3
+ }
4
+
5
+ New York, NY | 347-658-6214 | [email protected]
6
+
7
+ www.daniy.al | linkedin.com/in/hussain-daniyal/ | github.com/tuffstuff9
8
+
9
+ EDUCATION
10
+
11
+ New York University, College of Arts and Sciences, New York, NY
12
+
13
+ Bachelor of Arts in Computer Science and Data Science
14
+
15
+ Graduated: May 2023
16
+
17
+ Cumulative GPA: 3.5
18
+
19
+ Technical Skills
20
+
21
+ Languages: TypeScript, JavaScript, C\#, Java, Python, R, HTML/CSS, SQL
22
+
23
+ Frameworks/Technologies: React, Next.js, PlanetScale, TensorFlow, PyTorch, Hadoop, Maven, WinUI 3, Git, Jira, Figma, REST API, Postman, Node.js, Agile/Scrum, AWS, Azure, Google Cloud, Jest, Mocha, Chai, Docker
24
+
25
+ \section*{Professional Experience}
26
+
27
+ Deloitte
28
+
29
+ June 2022 - August 2022
30
+
31
+ Software Engineer Intern
32
+
33
+ Remote
34
+
35
+ - Engineered SPA and SQL database for Ministry of Mining's user/business accounts with robust authentication
36
+
37
+ - Implemented page wireframes by designing front-end using JavaScript and OutSystems with optimized UX
38
+
39
+ - Delivered functional prototypes to enable client evaluations and feedback on user experience and design aspects
40
+
41
+ Berry Mount
42
+
43
+ June 2021 - January 2022
44
+
45
+ Software Engineer
46
+
47
+ Remote
48
+
49
+ - Overhauled company ERP and CRM software from an Excel-spreadsheet/FocusERP hybrid system to Odoo
50
+
51
+ - Automated and trained team on invoice tracking, payment deposits, inventory management, vendor/client reports
52
+
53
+ - Integrated missing invoice tracker and mitigated inventory loss, restoring $\$ 4,000-\$ 10,000$ in business transactions
54
+
55
+ USME Supply Lines Trading
56
+
57
+ June 2020 - August 2020
58
+
59
+ Business Analyst Intern
60
+
61
+ Los Angeles, CA
62
+
63
+ - Constructed an internal program to produce pricing quotes, increasing quote frequency from weekly to biweekly
64
+
65
+ - Sourced new freight partners to reduce shipment costs, increasing weekly operational profitability by $\$ 1,000-\$ 3,000$
66
+
67
+ - Conducted cost-benefit analyses for business expansion, leading to the successful launch of two new products
68
+
69
+ University of California Los Angeles (June 2018 - July 2018)
70
+
71
+ Research Assistant
72
+
73
+ Los Angeles, CA
74
+
75
+ - Streamlined manual data retrieval process from NIST to study effects of cryogenic cooling on hydrogen fuel
76
+
77
+ - Scraped database using Python and VBA with variable input parameters to generate error reports for models
78
+
79
+ Projects
80
+
81
+ wandy.ai - SaaS AI Model Tuner | TypeScript, React, Next.js, ClerkJS, PlanetScale, Drizzle, Zod, Stripe
82
+
83
+ - Engineered full-stack solution for real-time AI model tuning \& comparison, prompt generator, and training on documents
84
+
85
+ SlideResume.com - SaaS cover letter generator | TypeScript, React, Next.js, ClerkJS, PlanetScale, Drizzle, Zod, Stripe
86
+
87
+ - Produced a user-centric SaaS web app which automates personalized cover letter generation from uploaded resume
88
+
89
+ Leetcode AI Assistant - Chrome Extension | JavaScript, React, Chrome MV3
90
+
91
+ - Designed and built a Chrome extension which acts as an automated flashcard system for LeetCode, with AI assistance
92
+
93
+ - Leveraged latest Chrome APIs (MV3) to incorporate efficient DOM manipulation and optimize performance
94
+
95
+ KeyboardHookLite | C\#
96
+
97
+ - Developed a low-level keyboard hook library for modern UI frameworks, with unmanaged code disposal
98
+
99
+ - Published on NuGet with 3,000+ user downloads and on GitHub with detailed documentation
100
+
101
+ NYU Course Tracker | JavaScript, Puppeteer, Telegram
102
+
103
+ - Utilized headless browser to scrape NYU course status and alert users in real-time for their class openings
104
+
105
+ - Bypassed session expiry using stealth features to avoid detection by randomly manipulating user agent
106
+
107
+ Other Projects: nextjs-pdf-parser, PlanetScale + Drizzle Boilerplate, OCR AI Assistant (Chrome Extension), Visualizer.coffee Shot Downloader (Website Scraper), ChatGPT Clipper (Chrome Extension), React Blackjack (Web App)
TalentLLM-main/resume_mmds/[email protected] ADDED
@@ -0,0 +1,133 @@
1
+ \title{
2
+ Daniyal Khan
3
+ }
4
+
5
+ github.com/danikhan632 | linkedin.com/in/daniyalmkhan/
6
+
7
+ [email protected] | US Citizen | www.daniyalkhan.dev/
8
+
9
+ EDUCATION
10
+
11
+ Georgia Institute of Technology
12
+
13
+ B.S. Computer Science
14
+
15
+ Expected December 2023
16
+
17
+ High Honors
18
+
19
+ Concentrations: Intelligence/AI and Systems and Architecture
20
+
21
+ Relevant Coursework: Agile Development, Artificial Intelligence, Advanced Algorithms and
22
+
23
+ Data Structures, Robotics and Perception, Computer Architecture, Circuit Design Lab
24
+
25
+ EXPERIENCE
26
+
27
+ OpenAI
28
+
29
+ July 2023 - Present
30
+
31
+ Open Source Contributor SWE
32
+
33
+ Triton Project
34
+
35
+ - Implemented an LLVM backend interface using C++ to facilitate the conversion between different LLVM dialects, bringing Triton to Apple Silicon Macs.
36
+
37
+ - Combined C++ expertise with SPIRV-Cross to create an integrated cross-compiling toolchain, enabling efficient conversion from diverse LLVM dialects to the Metal Shading Language
38
+
39
+ - Architected and developed a JIT compiler for GPU Metal kernels within Triton, harnessing C++ and SPIRV-Cross, resulting in a $70 \%+$ speed increase in large matrix multiplication tasks on Apple Silicon Macs.
40
+
41
+ Microsoft- Guidance Project
42
+
43
+ May 2023 - July 2023
44
+
45
+ Open Source Contributor SWE
46
+
47
+ - Architected and implemented an LLM support system with more efficient memory and disk swapping for larger LLMs
48
+
49
+ - Harnessing the power of Token and Regex processors, led the orchestration of hundreds of LLMs to generate structured, schemaful data, boosting accuracy from $63.04 \%$ to $76.01 \%$.
50
+
51
+ - Transformed the token generation process by offloading operations to an integrated REST API.
52
+
53
+ NCR Software Engineering
54
+
55
+ May 2022 - August 2022
56
+
57
+ Software Engineering Intern
58
+
59
+ Atlanta, Georgia
60
+
61
+ - Led the creation of an internal debugging tool, facilitating real-time monitoring and management of MQTT messages
62
+
63
+ - Designed and implemented a dynamic frontend using React, deeply integrating TypeScript and Redux to ensure a seamless user experience and efficient state management.
64
+
65
+ - Mastered the intricacies of SQL to ensure optimal logging, storage, and retrieval of MQTT messages, enhancing system responsiveness and reliability.
66
+
67
+ - Pioneered a custom TreeSet data structure, optimizing data modification and retrieval processes
68
+
69
+ PROJECTS
70
+
71
+ Auto-GPT
72
+
73
+ 2023
74
+
75
+ Machine Learning Engineer
76
+
77
+ AI-based Financial Trading System
78
+
79
+ - Architected an AI system enabling agents to autonomously trade stocks by analyzing real-time financial data and trends.
80
+
81
+ - Seamlessly integrated AI with Apple's iMessage system, empowering the agent to craft contextually accurate text responses
82
+
83
+ - Pioneered an AI-driven task management system via Todoist integration, resulting in notable increases in user productivity.
84
+
85
+ iMessage API
86
+
87
+ Software Developer
88
+
89
+ Chatbot Integration Platform
90
+
91
+ - Developed a Flask Python API to bridge Apple's iMessage with external devices, enabling applications such as chatbots.
92
+
93
+ - Automated iCloud contact synchronization via the API, providing real-time contact updates to all users.
94
+
95
+ BuzzOS
96
+
97
+ 2023
98
+
99
+ System Architect
100
+
101
+ Monolithic x86 Operating System
102
+
103
+ - Spearheaded the architecture of a Rust-based Kernel Driver tailored for efficient graphics rendering.
104
+
105
+ - Designed and integrated user libraries that facilitated system calls from userspace processes.
106
+
107
+ - Broadened hardware support by crafting specific drivers for vital components, including keyboard, mouse, and timer.
108
+
109
+ Clean Up Crew
110
+
111
+ Full-Stack Developer
112
+
113
+ Community Improvement Platform
114
+
115
+ - Conceptualized and developed a crowdsourcing platform, bridging communities with contractors for targeted improvements.
116
+
117
+ - Employed a robust tech stack, integrating Next.js and MongoDB to design a fluid and responsive UI
118
+
119
+ - Pioneered an intuitive posting mechanism, strategically prioritizing community issues based on user engagement metrics and location data.
120
+
121
+ Create-Cpp-App: npm for C++
122
+
123
+ System Architect
124
+
125
+ - Built an intuitive CLI tool for building C++ apps with an npm-like experience
126
+
127
+ - Generates Makefiles and automatically updates CMake files for a fast developer experience
128
+
129
+ - Includes built-in testing, address sanitization, benchmarking, and other tools for building production-ready C++ apps
130
+
131
+ Programming languages: Rust, C++, TypeScript, Go, Dart, Python, Java, C\#, SQL, Bash, JavaScript, HTML, CSS
132
+
133
+ Frameworks/Software: Flutter, Axios, Flask, Docker, LLVM, CUDA, PyTorch, React, Spring Boot, Maven, React Native
TalentLLM-main/resume_mmds/[email protected] ADDED
@@ -0,0 +1,85 @@
1
+ \title{
2
+ Angelina Lee
3
+ }
4
+
5
+ 415-812-3329 | [email protected] | linkedin.com/in/AngelinaLee | github.com/AngelinaTheDev
6
+
7
+ \section*{EDUCATION}
8
+
9
+ University of California Berkeley
10
+
11
+ B.S. in Computer Science
12
+ Berkeley, California
13
+
14
+ Aug. 2014 - May 2018
15
+
16
+ EXPERIENCE
17
+
18
+ Senior Full Stack Engineer
19
+
20
+ Instagram
21
+ Sep. 2020 - Present
22
+
23
+ San Francisco, CA
24
+
25
+ - Spearheaded the development of a revolutionary news feed infrastructure utilizing React for AI on Blockchain, transforming user interaction and content delivery.
26
+
27
+ - Pioneered the HyperQuantum Feed Algorithm using server-side React Larceny AI, optimizing web app performance and resolving big data pipeline issues in record time.
28
+
29
+ - Solved complex scalability issues by employing the use of useless architectures and microservices, orchestrated through the use of Quantum Kubernetes (QK8s).
30
+
31
+ - Led a dynamic team of 6 engineers in a covert operation to mine Ethereum on company servers, generating unprecedented profits and resources for R\&D.
32
+
33
+ \begin{tabular}{lr}
34
+ Senior Full Stack Engineer & May 2018 - August 2020 \\
35
+ Zillow & Seattle, WA
36
+ \end{tabular}
37
+
38
+ - Implemented advanced AI optimization techniques to the GraphQL API, achieving a $69 \%$ improvement in page load times and significantly enhancing user experience.
39
+
40
+ - Conducted user research to assess the impact of gamified elements in property listings, resulting in valuable insights for future user interface enhancements.
41
+
42
+ - Played a vital role in the development process by contributing over 50,000 lines of clean, maintainable code to an established codebase via Git.
43
+
44
+ - Organized and facilitated team-building activities, including a company-wide potato sack race, which fostered a sense of camaraderie and improved team cohesion.
45
+
46
+ - Embraced the critical role of Team Coffee Alchemist, ensuring the team of 6 was fully caffeinated with ultra-rare Antarctican coffee beans, ground to an optimal 14nm particle size using proprietary grinding technology.
47
+
48
+ Software Engineer Intern
49
+
50
+ Microsoft
51
+ May 2016 - Aug 2016
52
+
53
+ New York, NY
54
+
55
+ - Developed and optimized SQL queries and stored procedures, resulting in a $40 \%$ improvement in database performance for a critical customer-facing application.
56
+
57
+ - Contributed to the backend development of a new feature using Spring Boot, adhering to microservices architecture principles for enhanced modularity and scalability.
58
+
59
+ - Spearheaded initiative that enabled the dispersion of acquired immunodeficiency syndrome across the platform team
60
+
61
+ - Collaborated with cross-functional teams to understand requirements and deliver high-quality software solutions within agile development sprints.
62
+
63
+ \section*{Projects}
64
+
65
+ Gitlytics | Python, Flask, React, PostgreSQL, Docker
66
+
67
+ June 2020 - Present
68
+
69
+ - Developed a full-stack web application using Flask to serve a REST API, with React as the frontend
70
+
71
+ - Implemented GitHub OAuth to get data from user's repositories
72
+
73
+ - Visualized GitHub data to show collaboration
74
+
75
+ - Used Celery and Redis for asynchronous tasks
76
+
77
+ Technical Skills
78
+
79
+ Languages: Java, Python, C/C++, SQL (Postgres), JavaScript, HTML/CSS, R
80
+
81
+ Frameworks: React, Node.js, Flask, JUnit, WordPress, Material-UI, FastAPI
82
+
83
+ Developer Tools: Git, Docker, TravisCI, Google Cloud Platform, VS Code, Visual Studio, PyCharm, IntelliJ, Eclipse
84
+
85
+ Libraries: pandas, NumPy, Matplotlib
TalentLLM-main/resume_mmds/[email protected] ADDED
@@ -0,0 +1,101 @@
1
+ Programming Languages and Frameworks: Java, Next.js, React.js, JavaScript, HTML \& CSS, Node.js, Python, C, C\#, C++, SpringBoot, PostgreSQL, SQL, MongoDB, Swift, SwiftUI, Bun, Git, GCP
2
+
3
+ Natural Languages: English, Belarusian, Russian, Ukrainian
4
+
5
+ EDUCATION
6
+
7
+ Georgia Institute of Technology
8
+
9
+ BS Computer Science | Systems Architecture, Internetworks | College GPA: 3.64
10
+
11
+ Southern Polytechnic University
12
+
13
+ BS Computer Engineering | Honors College, GPA: 3.7
14
+
15
+ Relevant Courses:
16
+
17
+ Data Structures \& Algorithms, Objects \& Design, Design \& Analysis-Algorithms
18
+
19
+ Computer Organization \& Programming, Systems and Networks, Design of Operating Systems
20
+
21
+ Database Systems, Advanced/High Perform Computer Architecture, Computer Networking, Processor Design
22
+
23
+ EXPERIENCE
24
+
25
+ Bezalel Studio (B)
26
+
27
+ Lead Developer \& Director
28
+
29
+ - Manage a team to offer website services, branding, SEO \& SMM, Mobile and Computer Applications.
30
+
31
+ - Handle discussions with clients to understand requirements and business specifications while guiding the team.
32
+
33
+ - Designed and implemented a unified payment portal that integrates Zelle, Paypal, ApplePay, Credit Card, and Venmo, reducing third-party transaction costs by $76 \%$.
34
+
35
+ Georgia Tech VIP Program | Stadium Internet of People and Things
36
+
37
+ Team Lead - App Developer and Researcher | fanplay.tech
38
+
39
+ - VIP team targeted at enhancing Georgia Tech sports fans' game-day experience and ensuring their security by enabling their mobile devices to access innovative infotainment and venue-related information.
40
+
41
+ - Research focused on integration and development for the iOS Application.
42
+
43
+ BMI Inc.
44
+
45
+ Information Technology Web Developer
46
+
47
+ - Responsible for the electronic needs of running the non-profit 501(c)(3).
48
+
49
+ - Created dynamic systems for donation processing and custom email marketing campaigns, and maintained servers.
50
+
51
+ - Increased online donations by $83 \%$ by redesigning the donation portal and improving UX.
52
+
53
+ KJ Web \& Productions
54
+
55
+ Founding Director
56
+
57
+ Cumming, GA
58
+
59
+ April 2015 - September 2018
60
+
61
+ - Created 3-D Animations and Graphics for Jumbotron applications.
62
+
63
+ - Increased the price per second from $\$ 5$ to $\$ 18$ by introducing modular reuse design within Cinema4D sets.
64
+
65
+ \title{
66
+ PROJECTS
67
+ }
68
+
69
+ Adapt Booking | React, PostgreSQL, SpringBoot, Java
70
+
71
+ book.chanceyouth.org
72
+
73
+ Created a web-based application that gives event managers a system that can dynamically adjust during registration. Currently working on bots to automate the check-in process.
74
+
75
+ NFC ATM | React, Node.js
76
+
77
+ Application that runs on an ATM configured for NFC at 13.56 MHz. Point system used at children's events to imitate an economy with working VISA cards.
78
+
79
+ Tale - Purposeful Management | Express (Node.js), HTML, CSS, JavaScript
80
+
81
+ Simple yet powerful team management web-app that focused on binary emulated sub-team collaboration.
82
+
83
+ LEADERSHIP \& ACTIVITIES
84
+
85
+ Chance Youth @chance.youth, Ukraine, Poland, Belarus, United States
86
+
87
+ chanceyouth.org
88
+
89
+ Co-founded Project Chance to empower young people that have a heart for compassion, to travel overseas to conduct camps for orphans. The project focuses on sharing the Love of Christ with the unfortunate outcasts of society. This positive movement has grown to help bring financial, emotional, and spiritual comfort to hundreds of people year-round.
90
+
91
+ GT iOS club | SwiftUI, Firebase (Auth, Firestore, Realtime)
92
+
93
+ Working on app "GT Collaboration"
94
+
95
+ Assignments: Integrate 2FA · Smart Search · Swift Charts · UI Design (Figma)
96
+
97
+ Startup LAB | Georgia Tech Evidence-Based Entrepreneurship
98
+
99
+ create-x.gatech.edu
100
+
101
+ GT WebDev club Active member developing modern-web applications / (Spring Project - "Punchshot")
TalentLLM-main/resume_mmds/[email protected] ADDED
@@ -0,0 +1,109 @@
1
+ \title{
2
+ Zeyad Al Sakhi
3
+ }
4
+
5
+ github.com/zeyadtmi | linkedin.com/in/zeyadalsakhi | [email protected] | +1 404-955-3707 | Atlanta, GA
+
+ EDUCATION
6
+
7
+ Georgia Institute of Technology
8
+
9
+ B.S. Computer Science - Bahrain Crown Prince International Scholarship
10
+
11
+ Aug 2021 - May 2024
12
+
13
+ Concentrations: Computer Systems Architecture and Information Internetworks
14
+
15
+ TECHNICAL SKILLS
16
+
17
+ Software languages: C, C++, Python, Java, Assembly, VHDL
18
+
19
+ Web Technologies: React, HTML, CSS, JavaScript
20
+
21
+ ML/AI: NumPy, Pandas, scikit-learn, Matplotlib
22
+
23
+ Other: MySQL, Git, LaTeX, Android Studio, Agile, Jira
24
+
25
+ Relevant Coursework: Advanced Computer Architecture, Processor Design, Operating Systems, Machine Learning, Artificial Intelligence, Advanced Algorithms and Data Structures, Networking, OOP, Algorithmic Design, Databases
26
+
27
+ EXPERIENCE
28
+
29
+ Investcorp
30
+
31
+ Security Engineer
32
+
33
+ June 2023 - August 2023
34
+
35
+ Bahrain
36
+
37
+ - Developed an Outlook Metadata Anomaly Detector using Azure Synapse Analytics and PySpark for data processing and analysis, flagging suspicious email activities.
38
+
39
+ - Analyzed 500,000+ email metadata entries, identifying $0.2 \%$ as potential threats based on features like excessive BCC to external domains, irregular email timings, and unusual recipient interactions.
40
+
41
+ - Conducted broad scanning and vulnerability analysis, investigating source and destination IPs and port numbers, improving vulnerability detection by $25 \%$ and enhancing network security.
42
+
43
+ - Designed a Ransomware Incident Handling Playbook, outlining procedural guidelines and response strategies to efficiently address potential ransomware attacks, reducing response time by $40 \%$.
44
+
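A hedged PySpark sketch of the kind of rule the anomaly-detection bullet above describes, flagging senders whose external-BCC counts sit far above the mean; the column names, threshold, and data are illustrative assumptions, not Investcorp's pipeline:

```python
# Illustrative rule only: flag senders whose per-email external BCC count
# exceeds the mean by more than two standard deviations.
from pyspark.sql import SparkSession, functions as F

spark = SparkSession.builder.appName("bcc-anomaly-sketch").getOrCreate()

rows = [("a", 1), ("b", 2), ("c", 2), ("d", 3), ("e", 2), ("f", 90)]
df = spark.createDataFrame(rows, ["sender", "external_bcc"])

stats = df.agg(
    F.mean("external_bcc").alias("mu"),
    F.stddev("external_bcc").alias("sigma"),
).first()

flagged = df.filter(F.col("external_bcc") > stats.mu + 2 * stats.sigma)
flagged.show()  # sender "f" is flagged with this data
```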
45
+ Bapco Energies
46
+
47
+ May 2023 - June 2023
48
+
49
+ Cyber Security Trainee
50
+
51
+ Awali, Southern Governorate, Bahrain
52
+
53
+ - Managed SOC incident handling using Microsoft Sentinel and Microsoft Defender Security, streamlining threat detection and mitigation processes, resulting in a $20 \%$ faster response to incidents.
54
+
55
+ - Developed a web scraper to extract security-related keywords from Google articles, enhancing employee awareness and reducing security-related incidents by $15 \%$.
56
+
57
+ - Employed Azure Sentinel and KQL for real-time data extraction and analysis from security logs, enabling prompt threat detection and response, analyzing 100+ events daily.
58
+
59
+ Georgia Tech Vertically Integrated Program
60
+
61
+ Machine Learning Research Team Lead
62
+
63
+ Aug 2022 - Present
64
+
65
+ Atlanta, Georgia
66
+
67
+ - Led the largest research subteam, engineering and deploying SVM, K-Nearest Neighbor, and YOLOv8 CNN models, achieving a 95\% accuracy in classifying microscopic 3D print images into defective and non-defective categories.
68
+
69
+ - Employed feature extraction techniques including Histogram of Oriented Gradients (HOG), enhancing model performance and reducing false negatives by $20 \%$, critical for early defect detection.
70
+
71
+ - Conducted rigorous iterative testing, model tuning, and data augmentation strategies, improving classification accuracy by $15 \%$ and significantly elevating quality control standards in 3D printing processes.
72
+
73
+ PROJECTS
74
+
75
+ \section*{CryptoLearn}
76
+
77
+ - Developed a live crypto and stocks tracker using Binance and Yahoo Finance APIs, providing real-time data on prices across $10+$ currencies, tracking $200+$ daily trades, and monitoring $50+$ assets' market metrics.
78
+
79
+ - Created a financial dashboard with matplotlib and CryptoCompare API, analyzing 100+ cryptocurrencies' returns over various periods, and rendering data visualizations for enhanced investment insights.
80
+
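A minimal sketch of the price-and-returns dashboard idea above, using the yfinance wrapper around Yahoo Finance (one of the named data sources) in place of the original Binance/CryptoCompare integrations:

```python
# Sketch only: plot closing prices and daily returns for one asset.
import matplotlib.pyplot as plt
import yfinance as yf

prices = yf.download("BTC-USD", period="3mo")["Close"]
returns = prices.pct_change().dropna()

fig, (ax1, ax2) = plt.subplots(2, 1, figsize=(8, 6), sharex=True)
ax1.plot(prices.index, prices.values)
ax1.set_ylabel("BTC-USD close")
ax2.plot(returns.index, returns.values)
ax2.set_ylabel("daily return")
fig.tight_layout()
plt.show()
```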
81
+ \section*{DayMaker}
82
+
83
+ - Developed DayMaker, streamlining event and deadline tracking by processing uploaded documents with OCR and NLP, and syncing extracted dates to Google Calendar, automating 500+ event entries.
84
+
85
+ - Implemented secure authentication and authorization for Google Calendar and NLP Google Cloud APIs access, enhancing system security.
86
+
87
+ - Orchestrated OAuth 2.0 Protocol for secure Google Calendar API requests, ensuring accurate data synchronization.
88
+
89
+ - Achieved 1st place in the Google Cloud Category at HackTX hackathon.
90
+
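A sketch of the OAuth 2.0 flow the DayMaker bullets describe, following Google's published quickstart pattern for the Calendar API; the credentials.json client-secrets file and the sample event are assumptions:

```python
# Sketch of the quickstart-style OAuth flow; requires the
# google-auth-oauthlib and google-api-python-client packages and an
# assumed client-secrets file named credentials.json.
from google_auth_oauthlib.flow import InstalledAppFlow
from googleapiclient.discovery import build

SCOPES = ["https://www.googleapis.com/auth/calendar.events"]

flow = InstalledAppFlow.from_client_secrets_file("credentials.json", SCOPES)
creds = flow.run_local_server(port=0)

service = build("calendar", "v3", credentials=creds)
event = {  # a hypothetical extracted deadline
    "summary": "Extracted deadline",
    "start": {"date": "2023-12-01"},
    "end": {"date": "2023-12-01"},
}
service.events().insert(calendarId="primary", body=event).execute()
```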
91
+ LEADERSHIP
92
+
93
+ Undergraduate Research Ambassadors
94
+
95
+ Computer Science Research Ambassador
96
+
97
+ August 2023 - Present
98
+
99
+ Atlanta, Georgia
100
+
101
+ - Spearheaded undergraduate research initiatives, boosting engagement by $30 \%$ with the involvement of over 200 students.
102
+
103
+ - Offered personalized guidance through one-on-one sessions with $50+$ freshmen and sophomores, aiding their integration into $20+$ ongoing research projects.
104
+
105
+ Bahrain Crown Prince International Scholarship Program Alumni Association (Jul 2021 - Present)
+
+ Academic Mentor
106
+
107
+ - Mentored 30+ candidates in the Crown Prince International Scholarship Program (CPISP), aiding in their academic and career advancements.
108
+
109
+ - Conducted 50+ individual remote consultations, providing tailored advice and fostering a supportive environment.
TalentLLM-main/resume_mmds/[email protected] ADDED
@@ -0,0 +1,95 @@
1
+ \title{
2
+ Nabeel Hossain
3
+ }
4
+
5
+ \author{
6
+ [email protected] | linkedin.com/in/nabeel-hossain
7
+ }
8
+
9
+ Education
10
+
11
+ Georgia Institute of Technology
12
+
13
+ Expected May 2025
14
+
15
+ Master of Science in Computer Science, Machine Learning
16
+
17
+ Georgia Institute of Technology
18
+
19
+ Expected May 2024
20
+
21
+ Bachelor of Science in Computer Science, Intelligence and System Architecture
22
+
23
+ GPA: 4.0
24
+
25
+ \section*{Experience}
26
+
27
+ Teaching Assistant, Georgia Tech College of Computing - Atlanta, GA
28
+
29
+ - Debugged and graded assignments in C using GDB. Provided comprehensive feedback to significantly enhance students' understanding of systems level programming and problem solving strategy.
30
+
31
+ - Facilitated lab sessions to review and answer questions on fundamental course topics using slides, example problems, and hands on demonstrations to over 40 students weekly.
32
+
33
+ - Provided exceptional support and guidance to students in-person, virtually, and via discussion forums, promptly addressing their questions and concerns to create a highly conducive learning environment.
34
+
35
+ - Improved efficiency in debugging, testing, and grading by a factor of 5 using Python and Linux shell tools.
36
+
37
+ Bits of Good, Georgia Tech College of Computing - Atlanta, GA
38
+
39
+ Sept 2022 - Present
40
+
41
+ - Developed features, performed code reviews, and coordinated technical leadership for nonprofit development projects
42
+
43
+ - Collaborated within a 9-member agile team structure, working closely with an Engineering Manager, a Product Manager, and a team of 4 developers to deliver high-quality projects
44
+
45
+ - Directed project development by creating sprint tickets, prioritizing tasks and ensuring timely progress
46
+
47
+ - Demonstrated strong technical expertise by reviewing pull requests, providing constructive feedback to fellow developers, and resolving merge conflicts promptly and effectively
48
+
49
+ Projects
50
+
51
+ Customer Churn Prediction
52
+
53
+ - Predictive model for judging attrition rates for credit card customers given various features
54
+
55
+ May 2023 - July 2023
56
+
57
+ - Collaborated with and coordinated tasks on 5-person team; utilized popular python data science and visualization libraries, such as NumPy, Pandas, PyPlot, and SKLearn
58
+
59
+ - Conducted thorough data exploration, feature selection, and class balancing on dataset of 10,000 customers
60
+
61
+ - Achieved 0.96 weighted F1 and 0.90 minority-class recall after rigorous, documented model tuning and selection.
62
+
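A minimal sketch of the churn-classification workflow described above, with synthetic data and a random forest standing in for the project's undisclosed feature set, balancing strategy, and model choice:

```python
# Synthetic stand-in: imbalanced binary data, a class-weighted random
# forest, and the same report metrics (weighted F1, minority-class recall).
from sklearn.datasets import make_classification
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import classification_report
from sklearn.model_selection import train_test_split

X, y = make_classification(n_samples=10_000, n_features=12,
                           weights=[0.84, 0.16], random_state=0)
X_tr, X_te, y_tr, y_te = train_test_split(X, y, stratify=y, random_state=0)

clf = RandomForestClassifier(class_weight="balanced", random_state=0)
clf.fit(X_tr, y_tr)
print(classification_report(y_te, clf.predict(X_te)))
```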
63
+ Earthcraft
64
+
65
+ Sept 2022 - Apr 2023
66
+
67
+ - Developed a NextJS 12 application backed by MongoDB and Azure Blob Storage, efficiently organizing a digital library of over 1,000 sustainable building standards to be taggable and searchable
68
+
69
+ - Implemented a comprehensive solution for creating, searching, organizing, editing, and sharing standards, enabling clients to organize their information 3 times faster than before
70
+
71
+ - Designed the innovative Report Builder Feature, allowing engineers to seamlessly generate project reports by combining multiple standards, images, and user notes. Accelerated productivity with one-link PDF sharing.
72
+
73
+ - Utilized SWR for responsive UI updates and Server-Side Rendering for optimized loading speed.
74
+
75
+ CleanUpCrew
76
+
77
+ Apr 2020 - June 2020
78
+
79
+ - Developed a dynamic decentralized crowdfunding platform using NextJS, MongoDB, and Google Maps API.
80
+
81
+ - Used aggregated geospatial data to design and implement a unique algorithm that can identify posts with the greatest potential community impact to prioritize to viewers
82
+
83
+ - Crafted a sleek and responsive user interface, ensuring seamless user experience across various devices.
84
+
85
+ - Coordinated frontend and backend tasks within a lean team of four, successfully delivering the project within an impressive 36-hour timeframe.
86
+
87
+ Skills
88
+
89
+ Languages: Python, Java, JavaScript, TypeScript, C, C++, HTML/CSS
90
+
91
+ Frameworks: ReactJS, Node.js, Express.js, Next.js, Flask, SCSS, jQuery, Mongoose.js
92
+
93
+ Technologies: SQL, MongoDB, LaTeX, NumPy, Matplotlib, Pandas, Git, Linux and Windows CLI
94
+
95
+ Coursework: Data Structures and Algorithms, Design and Analysis of Algorithms, Systems and Networks, Operating System Design, Machine Learning, Processor Design, Advanced Computer Organization
TalentLLM-main/resume_mmds/[email protected] ADDED
@@ -0,0 +1,110 @@
1
+ \title{
2
+ Prama Yudhistira
3
+ }
4
+
5
+ 470-529-4451 | [email protected] | linkedin.com/in/pramayudhistira | github.com/PramaYudhistira
6
+
7
+ \section*{EDUCATION}
8
+
9
+ Georgia Institute of Technology
10
+
11
+ Atlanta, GA
12
+
13
+ Bachelor of Science in Computer Science
14
+
15
+ - Concentrations: Intelligence and Information Internetworks
16
+
17
+ - Relevant Coursework: Design \& Analysis of Algorithms, Data Structures \& Algorithms, Database Systems, Computer Organization \& Programming, Discrete Mathematics, Applied Combinatorics, Linear Algebra
18
+
19
+ - Organizations: Big Data Big Impact @ Georgia Tech, Grand Challenges Living Learning Community, Indonesian Students Association, Phi Sigma Kappa Fraternity, RoboJackets
20
+
21
+ \section*{EXPERIENCE}
22
+
+ Incoming Database Development Intern | Full-Time (May 2024 - Aug. 2024)
+
+ WebstaurantStore by Clark Associates Inc. - Lititz, PA
+
+ - The position will begin in May 2024
+
+ Software Developer | Part-Time (Aug. 2023 - Present)
+
+ AI-based Discovery and Innovation VIP - Atlanta, GA
+
+ - Working with Dr. Ali Adibi and Emory University researchers to enhance medical imaging with Computer Vision
+
+ Incident IQ - Atlanta, GA (May 2023 - Aug. 2023)
47
+
48
+ - Contributed to a scalable software solution that serves a userbase of over 10,000,000 across 1,000 school districts
49
+
50
+ - Reduced the backlog by $10 \%$ within the first 2 weeks of joining by resolving critical bugs, quickly adapting to the software's robust stack comprising ASP.NET, AngularJS, and SQLServer
51
+
52
+ - Increased team velocity by $25 \%$ by utilizing Azure DevOps to streamline CI/CD pipelines and enhancing new feature sets in an Agile environment
53
+
54
+ - Leveraged Azure Data Studio for database manipulation to diagnose database issues, leading to the resolution of over 50 bugs throughout the internship
55
+
56
+ - Created over 80 Jest Unit tests ensuring seamless migration from AngularJS to React
57
+
58
+ Software Developer - Team Lead | Part-Time
59
+
60
+ Aug. 2022 - Dec. 2022
61
+
62
+ Gaming for Electric Power Grids VIP
63
+
64
+ Atlanta, GA
65
+
66
+ - Led a team of 5 using Agile methodologies to rapidly develop and iterate a project in Unity in the programming and implementation team
67
+
68
+ - Designed an accurate in-game wildfire algorithm with $85 \%$ accuracy based on expert feedback
69
+
70
+ - Optimized scripts of game objects in C\#, ensuring a more efficient simulation, and created over 10 new features
71
+
72
+ - Utilized player data to train a machine learning algorithm, giving engineers strategies on actions to take in real disaster scenarios
73
+
74
+ \section*{Projects}
75
+
76
+ SideHustleApp | C\#, SQL, Javascript, Angular, ASP.NET, Heroku, REST, Git
77
+
78
+ June 2023 - Present
79
+
80
+ An amalgamation of Craigslist and OfferUp tailored for the college student demographic
81
+
82
+ - Developed a full-stack web application with ASP.NET for the RESTful API and Angular for the frontend
83
+
84
+ - Created user authentication services to ensure secure access to the application
85
+
86
+ - Managed a complex database schema using SQLite with a code-first approach using Entity Framework
87
+
88
+ - Designed unit tests using xUnit and Jest to ensure quality of software
89
+
90
+ Hurricane Cost Predictor | Python, JavaScript, React, Flask, D3.js, REST, Git (Aug. 2022 - May 2023)
91
+
92
+ Big Data Big Impact Data Vis and Platform Sub-Team
93
+
94
+ - Developed a project that predicts damage costs of a hurricane in the US
95
+
96
+ - Created an interactive mapping feature where users plot the path of a hurricane with Google Map React library
97
+
98
+ - Implemented data visualization capabilities using D3.js
99
+
100
+ - Constructed a REST API using Flask to gather data from the machine learning model
101
+
102
+ \section*{Technical Skills}
103
+
104
+ Languages: Python (Advanced), Java (Advanced), C\#, C, SQL (MySQL, SQLite, SQL Server), JavaScript, HTML/CSS
105
+
106
+ Frameworks: React, Node.js, Flask, JUnit, ASP.NET, Angular, AngularJS, Jest, xUnit
107
+
108
+ Developer Tools: Git, Docker, Microsoft Azure, GitHub, WSL, Unity, Heroku, Vercel, TensorFlow
109
+
110
+ Libraries: Json.NET, FastHTTP, NumPy, JavaFX, Entity Framework