from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
from langchain_core.messages import HumanMessage, AIMessage
from langchain_openai import AzureChatOpenAI
from pypdf import PdfReader
import os
import gradio as gr
from openai import AzureOpenAI
from gtts import gTTS
import requests

# Hugging Face Inference API settings (not used by the Azure transcription path below).
hf_token = os.getenv("HF_TOKEN")
API_URL = "https://api-inference.huggingface.co/models/openai/whisper-medium.en"
headers = {"Authorization": f"Bearer {hf_token}"}


class ScreeningAssistant:
    def __init__(self):
        # Azure OpenAI client used for Whisper transcription.
        self.client = AzureOpenAI(api_version="2024-02-01", azure_endpoint=os.getenv("URL"))
        # LangChain chat model used by get_response(). The original code never
        # initialised self.chat; the CHAT_DEPLOYMENT env var name is an assumption.
        self.chat = AzureChatOpenAI(
            api_version="2024-02-01",
            azure_endpoint=os.getenv("URL"),
            azure_deployment=os.getenv("CHAT_DEPLOYMENT"),
        )

    def extract_text(self, pdf_path):
        """Read every page of a PDF and return its concatenated text."""
        reader = PdfReader(pdf_path)
        all_text = ""
        for page in reader.pages:
            all_text += page.extract_text()
        return all_text

    def audio_to_text(self, audio_file):
        """Transcribe the candidate's recorded answer with Azure Whisper."""
        # This corresponds to the custom name chosen when the Whisper model was deployed.
        deployment_id = "whisper07"
        print("audio_file", audio_file)
        with open(audio_file, "rb") as f:
            result = self.client.audio.transcriptions.create(file=f, model=deployment_id)
        return result.text

    def text_to_audio(self, mytext):
        """Convert the interviewer's reply to an English MP3 at normal speed."""
        myobj = gTTS(text=mytext, lang="en", slow=False)
        # Save the converted audio to an mp3 file named welcome.mp3.
        audio_path = "welcome.mp3"
        myobj.save(audio_path)
        return audio_path

    def get_response(self, audio_path, chat_history, resume, jd):
        # Transcribe the spoken answer and pull text out of the uploaded PDFs.
        candidate = self.audio_to_text(audio_path)
        resume = self.extract_text(resume.name)
        jd = self.extract_text(jd.name)

        prompt = ChatPromptTemplate.from_messages(
            [
                (
                    "system",
                    """You are an intelligent interviewer. Ask the candidate questions
                    based on their resume, following up on each of their answers, and
                    use the chat history for context. At the end of the interview,
                    close with a greeting to the candidate.""",
                ),
                MessagesPlaceholder(variable_name="messages"),
            ]
        )
        chain = prompt | self.chat

        answer = chain.invoke(
            {
                "messages": [
                    HumanMessage(content=f"Job description: {jd}\nResume: {resume}"),
                    AIMessage(
                        content=f"""Act as an intelligent interviewer and ask the
                        candidate a question that follows up on their answer.
                        Chat history: {chat_history}"""
                    ),
                    HumanMessage(content=candidate),
                ],
            }
        )
        result = answer.content

        chat_history.append((candidate, result))
        print("chat_history", chat_history)
        audio_output = self.text_to_audio(result)
        # Clear the input box, show the updated history, and play the spoken reply.
        return "", chat_history, audio_output

    def gradio_interface(self) -> None:
        """Create a Gradio interface for the chatbot."""
        with gr.Blocks(css="style.css", theme="shivi/calm_seafoam") as demo:
            gr.HTML("""