from crewai import Agent, Task, Crew
import os
import json

# Import langchain libraries
# !pip install langchain langchain-groq langchainhub duckduckgo-search
from langchain.agents import AgentExecutor
from langchain.agents import create_react_agent
from langchain.agents import create_structured_chat_agent
from langchain import hub
from langchain_groq import ChatGroq
from langchain_core.prompts import ChatPromptTemplate
from langchain.agents import Tool
from langchain_community.tools import DuckDuckGoSearchResults
from langchain.schema.output_parser import StrOutputParser
from langchain_core.prompts import PromptTemplate
from langchain_community.tools import DuckDuckGoSearchRun
from langchain.chains.combine_documents import create_stuff_documents_chain
from langchain.chains import create_retrieval_chain
from langchain.chains import RetrievalQA
from langchain_community.embeddings.sentence_transformer import SentenceTransformerEmbeddings
from langchain_community.document_loaders.csv_loader import CSVLoader
from langchain_core.output_parsers import JsonOutputParser
# from langchain.agents import AgentExecutor, create_tool_calling_agent

# Import gradio libraries
# !pip install gradio gradio_client
import gradio as gr

# Import vectorstore libraries
# !pip install faiss-cpu
from langchain_community.vectorstores import FAISS

# Embedding model used to index and query the resume documents
embedding_function = SentenceTransformerEmbeddings(model_name="all-MiniLM-L6-v2")

# Define the LLM - we use ChatGroq on the Groq platform with Llama 3 70B
llm = ChatGroq(
    api_key="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",
    model="llama3-70b-8192",
    # model = 'gemma-7b-it',
    # model = 'mixtral-8x7B-32768',
    temperature=0,
)

from langchain.document_loaders import TextLoader

# folder_path = "https://huggingface.co/spaces/nmurugesh/My-Interview-Chatbot/blob/main/"  # Replace with the actual path to your folder
file_names = ['banking.txt', 'certifications.txt', 'deeplearning.txt', 'domain-banking.txt',
              'education.txt', 'imple.txt', 'personal.txt', 'presales.txt', 'publications.txt',
              'summary.txt', 'requirements-eng.txt', 'testing.txt', 'work experience.txt']

# Load each resume section into a LangChain Document
documents = []
for filename in file_names:
    if filename.endswith(".txt"):
        loader = TextLoader(filename)
        doc = loader.load()[0]  # Load the single Document object for the file
        documents.append(doc)

# Build the FAISS index over the resume documents and expose it as a retriever
vectorstore1 = FAISS.from_documents(documents, embedding_function)
vectorstore1.save_local("vectorstore1")
retriever1 = vectorstore1.as_retriever()

os.environ["OPENAI_API_KEY"] = "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"  # This is what CrewAI expects

# Agent 1: Interview candidate
Interview_candidate = Agent(
    llm=llm,
    role="Interview candidate",
    goal="You are currently attending an interview. Your objective is to ace the interview "
         "and get the job based on your qualifications and expertise.",
    # tools = [scrape_tool, search_tool],
    verbose=True,
    backstory=(
        "You are currently attending an interview. For all the questions asked, you SHOULD answer "
        "only from the context provided - that is, the data provided for this purpose. Your answer "
        "should be confidently articulated, in a professional tone and style, and concise and clear."
    ),
)

# Task for the Interview candidate agent: answer the interview question from the retrieved context
Interview_answer_task = Task(
    description=(
        "You are being interviewed by a company. "
        "You should answer based on the information context provided. "
        "The question is {question}. The context is: {context}"
    ),
    expected_output=(
        "A confident, concise and clear answer to the interview question, written in a "
        "professional tone and based only on the context provided."
    ),
    agent=Interview_candidate,
    # async_execution=True
)

interview_crew = Crew(
    agents=[Interview_candidate],
    tasks=[Interview_answer_task],
    verbose=True,
)

def answer(question):
    """Retrieve the relevant resume context and let the crew answer the question."""
    result = interview_crew.kickoff(inputs={
        "question": question,
        "context": "\n\n".join(doc.page_content for doc in retriever1.get_relevant_documents(question)),
    })
    return result

from gradio import Image

with gr.Blocks() as demo:
    # Add a Markdown block for the description
    gr.Markdown("""

# Interview Chatbot for N Murugesan

""") gr.Markdown("""Powered by CrewAI,Gradio, Groq, Llama3, FAISS, Langchain""") gr.Markdown( """

This chatbot will answer interview questions on behalf of Murugesan Narayanaswamy!

""" ) gr.Image("photo-recent.jpg", width=250) gr.Markdown("""

Ask any HR Round Interview Questions - Factual Answers based on Resume!

""") # Use a Column to structure the inputs and outputs with gr.Column(): # First text input and button text_input1 = gr.Textbox( label="Enter your question here!", placeholder='''Ask your question; e.g., Tell something about yourself; Your career path has been diverse; \ Could you walk us through the key transitions and the motivations behind those changes?''', ) button1 = gr.Button("Answer!") outputs1 = [ gr.Textbox(label="My Answer - you can verify with resume later!",show_copy_button=True) ] button1.click(answer,inputs=[text_input1], outputs=outputs1) # Launch the Gradio app demo.launch()