Add more evaluations #2
by FilipinosRich - opened

recruiting_assistant.py  +60 -8  CHANGED
@@ -4,6 +4,8 @@ from langchain.chat_models import ChatOpenAI
 from langchain.prompts import ChatPromptTemplate
 from langchain.chains import LLMChain, SequentialChain
 
+from validation import validate_dict_value, validate_string_value
+
 # os.environ["OPENA"]
 
 
@@ -70,8 +72,8 @@ llm = ChatOpenAI(temperature=0.0, openai_api_key=os.environ["OPENAI"])
 def create_intro(vacancy=vacancy, resume=resume):
 
     template_vacancy_get_skills = """
-    Can you generate me a list of the skills that a candidate supposed to have for the below vacancy delimited by three backticks.
-    If you do not know if skills are available
+    Can you generate me a list of the skills that a candidate is supposed to have for the below vacancy delimited by three backticks.
+    If you do not know if skills are available, mention that you do not know and do not make up an answer.
     Mention the skills in 1 to maximum three words for each skill. Return the skills as a JSON list.
 
     ```
@@ -91,7 +93,7 @@ def create_intro(vacancy=vacancy, resume=resume):
     Can you create a JSON object based on the below keys each starting with '-', with respect to the resume below delimited by three backticks?
 
     - "skills_present": <list the skills present. If no skills are present return an empty list, do not make up an answer. >
-    - "skills_not_present": <list the skills not present. If all skills are present return
+    - "skills_not_present": <list the skills not present. If all skills are present return an empty list, do not make up an answer.>
     - "score": <calculate a percentage of the number of skills present with respect to the total skills requested>
 
     ```
@@ -102,6 +104,54 @@ def create_intro(vacancy=vacancy, resume=resume):
     prompt_resume_check_skills = ChatPromptTemplate.from_template(template=template_resume_check_skills)
     resume_skills = LLMChain(llm=llm, prompt=prompt_resume_check_skills, output_key="resume_skills")
 
+    template_resume_check_information = """
+    I have a resume below the delimiter <RESUME> and it ends with </RESUME>. Can you fill in the JSON below and respond with just that JSON object and no other text around it?
+
+    JSON:
+
+    {
+    "Naam": "<fill in the name of the candidate here>",
+    "Beschikbaarheid": "<fill in the availability here>",
+    "Tarief": "<fill in the day price here>",
+    "Woonplaats": "<fill in here the location where the candidate lives>",
+    "Talen": "<fill in the languages the candidate speaks>",
+    "Presentatie": "<describe the person, their role, their experience and skills in minimum 3 sentences and maximum 5 sentences. Keep the jargon in English.>"
+    }
+
+    """
+
+    template_resume_past_experiences = """
+    Can you generate me a list of the past work experiences that the candidate has based on the resume below enclosed by three backticks.
+    Mention the experiences in one sentence of medium length. Return the experiences as a JSON list.
+
+    ```
+    {resume}
+    ```
+    """
+
+    prompt_resume_past_experiences = ChatPromptTemplate.from_template(template=template_resume_past_experiences)
+    past_experiences = LLMChain(llm=llm, prompt=prompt_resume_past_experiences, output_key="past_experiences")
+
+    template_vacancy_check_past_experiences = """
+    ```
+    {past_experiences}
+    ```
+
+    Based on the above list of past experiences,
+    can you create a JSON object based on the below keys each starting with '-', with respect to the vacancy below delimited by three backticks?
+
+    - "relevant_experiences": <list the relevant experiences. If no experiences are relevant return an empty list, do not make up an answer. >
+    - "irrelevant_experiences": <list the irrelevant experiences. If all experiences are relevant return an empty list, do not make up an answer.>
+    - "score": <calculate a percentage of the number of relevant experiences with respect to the total experiences listed>
+
+    ```
+    {resume}
+    ```
+    """
+
+    prompt_vacancy_check_past_experiences = ChatPromptTemplate.from_template(template=template_vacancy_check_past_experiences)
+    check_past_experiences = LLMChain(llm=llm, prompt=prompt_vacancy_check_past_experiences, output_key="check_past_experiences")
+
     template_introduction_email = """
     You are a recruitment specialist that tries to place the right profiles for the right job.
     I have a vacancy below the delimiter <VACANCY> and it ends with </VACANCY>
@@ -115,13 +165,13 @@ def create_intro(vacancy=vacancy, resume=resume):
     {resume}
     </RESUME>
 
-    Can you fill in the introduction
+    Can you fill in the introduction below and return only this introduction as the answer?
 
     Role: < the role of the vacancy >
     Candidate: < name of the candidate >
     Education: < name the education of the candidate >
     Responsibilities: < did the candidate work as an individual contributor or did he take on leadership positions? >
-    Experience: < name 2 most relevant experiences from the candidate for this vacancy
+    Experience: < name the 2 most relevant experiences from the candidate for this vacancy. Get them from the "relevant_experiences" key of the JSON object {past_experiences}. If there are fewer than 2 relevant experiences, leave this empty. Do not make up an answer or take them from the irrelevant experiences. >
     Skills: print here a comma separated list of the "skills_present" key of the JSON object {resume_skills}
     """
 
@@ -129,15 +179,17 @@ def create_intro(vacancy=vacancy, resume=resume):
     introduction_email = LLMChain(llm=llm, prompt=prompt_introduction_email, output_key="introduction_email")
 
     match_resume_vacancy_skills_chain = SequentialChain(
-        chains=[vacancy_skills, resume_skills, introduction_email],
+        chains=[vacancy_skills, resume_skills, past_experiences, check_past_experiences, introduction_email],
         input_variables=["vacancy", "resume"],
-        output_variables=["vacancy_skills", "resume_skills", "introduction_email"],
+        output_variables=["vacancy_skills", "resume_skills", "past_experiences", "check_past_experiences", "introduction_email"],
         verbose=False
     )
 
     result = match_resume_vacancy_skills_chain({"vacancy": vacancy, "resume": resume})
     print(result)
-    return result["introduction_email"], json.dumps(json.loads(result['resume_skills']), indent=4)
+    return result["introduction_email"], json.dumps(json.loads(result['resume_skills']), indent=4), json.dumps(json.loads(result['check_past_experiences']), indent=4)
+
+
 
 if __name__ == '__main__':
     create_intro(vacancy=vacancy,resume=resume)
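The new chains are wired together purely through their output keys: each LLMChain publishes its answer under output_key, later prompt templates reference that key as a variable ({past_experiences}, {resume_skills}), and SequentialChain resolves the ordering at run time. A minimal sketch of that data flow, using the same legacy LangChain API as the file; the prompt texts and inputs below are illustrative placeholders, not the ones from the diff.

# Sketch of the output_key -> template-variable wiring used in the diff (placeholder prompts).
from langchain.chat_models import ChatOpenAI
from langchain.prompts import ChatPromptTemplate
from langchain.chains import LLMChain, SequentialChain

llm = ChatOpenAI(temperature=0.0)  # assumes OPENAI_API_KEY is set in the environment

# Step 1: produce "past_experiences" from the resume.
past_experiences = LLMChain(
    llm=llm,
    prompt=ChatPromptTemplate.from_template(
        "List the past work experiences in this resume as a JSON list:\n{resume}"
    ),
    output_key="past_experiences",
)

# Step 2: consume {past_experiences} and {vacancy} to score relevance.
check_past_experiences = LLMChain(
    llm=llm,
    prompt=ChatPromptTemplate.from_template(
        "Which of these experiences are relevant to the vacancy?\n"
        "Experiences: {past_experiences}\nVacancy: {vacancy}"
    ),
    output_key="check_past_experiences",
)

pipeline = SequentialChain(
    chains=[past_experiences, check_past_experiences],
    input_variables=["vacancy", "resume"],
    output_variables=["past_experiences", "check_past_experiences"],
    verbose=False,
)

result = pipeline({"vacancy": "Senior data engineer, Python and SQL.",
                   "resume": "Jane Doe, 5 years as a data engineer."})
print(result["check_past_experiences"])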
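The return statement feeds the raw chain outputs straight into json.loads, which raises a JSONDecodeError whenever the model wraps its JSON in prose or a code fence. One possible guard, sketched here as a hypothetical helper (parse_json_output is not part of the diff):

import json

def parse_json_output(text):
    """Best-effort parse of an LLM answer that is expected to contain a JSON object or list.

    Extracts the first {...} or [...] span instead of assuming the answer is bare JSON,
    and returns None when nothing parses.
    """
    for opener, closer in (("{", "}"), ("[", "]")):
        start = text.find(opener)
        end = text.rfind(closer)
        if start != -1 and end > start:
            try:
                return json.loads(text[start:end + 1])
            except json.JSONDecodeError:
                continue
    return None

# Tolerant version of the values returned by create_intro (illustrative):
# skills = parse_json_output(result["resume_skills"]) or {}
# experiences = parse_json_output(result["check_past_experiences"]) or {}
# return result["introduction_email"], json.dumps(skills, indent=4), json.dumps(experiences, indent=4)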
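The diff also imports validate_dict_value and validate_string_value from a validation module that is not part of this change, and neither helper is called in the code shown. Purely as an assumption about their intent, they might look something like the sketch below (hypothetical, not the actual module).

# Hypothetical validation.py helpers -- assumed to guard against missing keys and empty strings.

def validate_string_value(value, name="value"):
    """Check that a value is a non-empty string."""
    if not isinstance(value, str) or not value.strip():
        raise ValueError(f"{name} must be a non-empty string")
    return value


def validate_dict_value(data, key):
    """Check that a dict contains a key whose value is a non-empty string."""
    if key not in data:
        raise ValueError(f"missing key: {key}")
    return validate_string_value(data[key], name=key)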