Files changed (1)
  1. app.py +33 -29
app.py CHANGED
@@ -1,43 +1,47 @@
 import gradio as gr
-import openai
 import fitz # PyMuPDF
-import torch
-from transformers import pipeline, BloomForCausalLM, BloomTokenizerFast
-from huggingface_hub import login
-import requests
-import os
-
-from models import evaluate_with_gpt,evaluate_with_gemma,evaluate_with_bloom,evaluate_with_jabir,evaluate_with_llama
-
-
+from models import evaluate_with_gpt, evaluate_with_gemma, evaluate_with_bloom, evaluate_with_jabir, evaluate_with_llama
 
 def extract_text_from_pdf(pdf_file):
+    """Extracts and returns the text from a PDF file."""
     document = fitz.open(pdf_file)
-    text = ""
-    for page_num in range(len(document)):
-        page = document.load_page(page_num)
-        text += page.get_text()
-    return text
-
-
-
-def evaluate_all_models(pdf_file, job_description):
-    gpt_result = evaluate_with_gpt(pdf_file, job_description)
-    gemma_result = evaluate_with_gemma(pdf_file, job_description)
-    bloom_result = evaluate_with_bloom(pdf_file, job_description)
-    jabir_result = evaluate_with_jabir(resume_text, job_description)
-    llama_result=evaluate_with_llama(pdf_file, job_description)
-    return f"GPT-4o Result:\n{gpt_result}\n\nGemma Result:\n{gemma_result}\n\nBloom Result:\n{bloom_result}\n\njabir Result:\n{jabir_result}\n\nllama Result:\n{llam_result}"
+    return "".join([page.get_text() for page in document])
+
+def evaluate_resume(resume_text, job_description, model):
+    """Evaluates the resume text using the specified model."""
+    if model == "GPT-4o":
+        return evaluate_with_gpt(resume_text, job_description)
+    elif model == "Gemma":
+        return evaluate_with_gemma(resume_text, job_description)
+    elif model == "Bloom":
+        return evaluate_with_bloom(resume_text, job_description)
+    elif model == "jabir":
+        return evaluate_with_jabir(resume_text, job_description)
+    elif model == "llama":
+        return evaluate_with_llama(resume_text, job_description)
+    else:
+        # If "All" is selected, evaluate with all models and return combined results.
+        return evaluate_all_models(resume_text, job_description)
+
+def evaluate_multiple_resumes(resume_files, job_description, model):
+    """Evaluates multiple resumes and returns the results."""
+    results = []
+    for resume_file in resume_files:
+        title = resume_file.name
+        resume_text = extract_text_from_pdf(resume_file)
+        result = evaluate_resume(resume_text, job_description, model)
+        results.append(f"Result for {title}:\n{result}\n\n")
+    return "\n".join(results)
 
 iface = gr.Interface(
-    fn=lambda pdf, jd, model: evaluate_with_gpt(pdf, jd) if model == "GPT-4o" else evaluate_with_gemma(pdf, jd) if model == "Gemma" else evaluate_with_bloom(pdf, jd) if model == "Bloom" else evaluate_with_jabir(pdf, jd) if model == "jabir" else evaluate_all_models(pdf, jd) if model == "llama" else evaluate_all_models(pdf, jd),
+    fn=evaluate_multiple_resumes,
     inputs=[
-        gr.Textbox(lines=10,label="Upload Resume PDF"),
+        gr.File(type="file", label="Upload Resumes PDF", file_count="multiple"),
         gr.Textbox(lines=10, label="Job Description"),
-        gr.Radio(choices=["GPT-4o", "Gemma", "Bloom", "jabir"," llama", "All"], label="Choose Model")
+        gr.Radio(choices=["GPT-4o", "Gemma", "Bloom", "jabir", "llama", "All"], label="Choose Model")
     ],
     outputs="text",
-    title="Resume Evaluator"
+    title="Multiple Resume Evaluator"
 )
 
 iface.launch()
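
Note: after this change, evaluate_resume() still falls through to evaluate_all_models() when "All" is selected, but the old evaluate_all_models definition is removed from app.py and nothing with that name is imported from models. A minimal sketch of a replacement that could sit below the existing from models import ... line, assuming the evaluate_with_* helpers take (resume_text, job_description) and return strings (which is how they are called above):

def evaluate_all_models(resume_text, job_description):
    """Run every model on the same resume text and return the labelled results."""
    evaluators = {
        "GPT-4o": evaluate_with_gpt,
        "Gemma": evaluate_with_gemma,
        "Bloom": evaluate_with_bloom,
        "jabir": evaluate_with_jabir,
        "llama": evaluate_with_llama,
    }
    sections = []
    for name, evaluate in evaluators.items():
        # Each helper is assumed to return a plain-text evaluation.
        sections.append(f"{name} Result:\n{evaluate(resume_text, job_description)}")
    return "\n\n".join(sections)

Depending on the Gradio version the Space pins, gr.File(type="file") may also need to become type="filepath", since newer Gradio releases only accept "filepath" or "binary".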