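# Gradio app: evaluate an uploaded resume PDF against a job description,
# using OpenAI GPT-4o, a local Gemma text-generation pipeline, or both.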
import gradio as gr
import openai
import fitz # PyMuPDF
import torch
from transformers import pipeline
from huggingface_hub import login
import os
# Read the Hugging Face and OpenAI API keys from the environment instead of hard-coding them
hf_api_key = os.getenv("HF_API_KEY")
login(token=hf_api_key)
openai.api_key = os.getenv("OPENAI_API_KEY")
# Initialize the Gemma model
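# Note: google/gemma-2-27b-it is a gated checkpoint, so the Hugging Face token above must
# have been granted access; running a 27B model on CPU will be extremely slow.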
gemma_pipe = pipeline(
    "text-generation",
    model="google/gemma-2-27b-it",
    model_kwargs={"torch_dtype": torch.bfloat16},
    device="cpu"
)
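# Extract plain text from every page of the uploaded PDF with PyMuPDF.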
def extract_text_from_pdf(pdf_file):
    document = fitz.open(pdf_file)
    text = ""
    for page_num in range(len(document)):
        page = document.load_page(page_num)
        text += page.get_text()
    document.close()
    return text
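# Score the resume against the job description with GPT-4o via the legacy (openai<1.0) ChatCompletion API.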
def evaluate_with_gpt(pdf_file, job_description):
    resume_text = extract_text_from_pdf(pdf_file)
    prompt = f"""As an experienced Applicant Tracking System (ATS) analyst, your role is to evaluate the resume against the job description.
Resume: {resume_text}
Job description: {job_description}
"""
    response = openai.ChatCompletion.create(
        model="gpt-4o",
        messages=[
            {"role": "system", "content": "You are a helpful assistant."},
            {"role": "user", "content": prompt}
        ]
    )
    return response.choices[0].message['content']
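# Score the resume locally with the Gemma text-generation pipeline.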
def evaluate_with_gemma(pdf_file, job_description):
    resume_text = extract_text_from_pdf(pdf_file)
    prompt = f"Evaluate the following resume against the job description. Resume: {resume_text} Job Description: {job_description}"
    # return_full_text=False keeps only the generated continuation, not the echoed prompt
    outputs = gemma_pipe(prompt, max_new_tokens=256, return_full_text=False)
    return outputs[0]["generated_text"].strip()
def evaluate_both_models(pdf_file, job_description):
    gpt_result = evaluate_with_gpt(pdf_file, job_description)
    gemma_result = evaluate_with_gemma(pdf_file, job_description)
    return f"GPT-4o Result:\n{gpt_result}\n\nGemma Result:\n{gemma_result}"
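# Gradio UI: the radio button picks which evaluator (GPT-4o, Gemma, or both) handles the request.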
iface = gr.Interface(
    fn=lambda pdf, jd, model: (
        evaluate_with_gpt(pdf, jd) if model == "GPT-4o"
        else evaluate_with_gemma(pdf, jd) if model == "Gemma"
        else evaluate_both_models(pdf, jd)
    ),
    inputs=[
        gr.File(label="Upload Resume PDF"),
        gr.Textbox(lines=10, label="Job Description"),
        gr.Radio(choices=["GPT-4o", "Gemma", "Both"], label="Choose Model")
    ],
    outputs="text",
    title="Resume Evaluator"
)
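# launch(share=True) would expose a temporary public link if needed.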
iface.launch()