from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
from langchain_core.messages import HumanMessage, AIMessage
from langchain_openai import AzureChatOpenAI
from pypdf import PdfReader
import os
import gradio as gr
from openai import AzureOpenAI
from gtts import gTTS
import requests


hf_token = os.getenv("HF_TOKEN")
API_URL = "https://api-inference.huggingface.co/models/openai/whisper-medium.en"
headers = {"Authorization": f"Bearer {hf_token}"}
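

# The Hugging Face endpoint above is otherwise unused in this file; the sketch
# below shows one way it could serve as a transcription fallback when the Azure
# Whisper deployment is unavailable. The request/response shape follows the
# standard HF Inference API for speech recognition (raw audio bytes in,
# {"text": ...} out); the function name is illustrative, not part of the app.
def hf_audio_to_text(audio_file):
    """Transcribe an audio file with the hosted Whisper model (fallback sketch)."""
    with open(audio_file, "rb") as f:
        data = f.read()
    response = requests.post(API_URL, headers=headers, data=data)
    response.raise_for_status()
    return response.json().get("text", "")
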

class ScreeningAssistant:
    def __init__(self):
        # Whisper transcription client; AZURE_OPENAI_API_KEY is read from the env.
        self.client = AzureOpenAI(api_version="2024-02-01", azure_endpoint=os.getenv("URL"))
        # Chat model used by get_response (prompt | self.chat); the
        # CHAT_DEPLOYMENT env-var name here is an assumption.
        self.chat = AzureChatOpenAI(api_version="2024-02-01", azure_endpoint=os.getenv("URL"),
                                    azure_deployment=os.getenv("CHAT_DEPLOYMENT"))

    def extract_text(self, pdf_path):
        """Extract the text from every page of a PDF."""
        reader = PdfReader(pdf_path)
        all_text = ""
        for page in reader.pages:
            # extract_text() may return None for pages with no extractable text
            all_text += page.extract_text() or ""
        return all_text

    def audio_to_text(self, audio_file):
        # "whisper07" is the custom name chosen for the Whisper deployment in Azure.
        deployment_id = "whisper07"
        with open(audio_file, "rb") as f:
            result = self.client.audio.transcriptions.create(
                file=f,
                model=deployment_id,
            )
        return result.text


    def text_to_audio(self, mytext):
        """Convert the reply to English speech and save it as an MP3."""
        # slow=False keeps the speech at normal speed
        tts = gTTS(text=mytext, lang="en", slow=False)
        audio_path = "welcome.mp3"
        tts.save(audio_path)
        return audio_path

    def get_response(self, audio_path, chat_history, resume, jd):
        
        candidate = self.audio_to_text(audio_path)
        resume = self.extract_text(resume.name)
        jd = self.extract_text(jd.name)

        prompt = ChatPromptTemplate.from_messages(
            [
                (
                    "system",
                    """You are an intelligent interviewer. Ask the candidate questions
                    based on their resume, following up on each of their answers and
                    using the chat history for context. At the end of the interview,
                    close with a greeting to the candidate.""",
                ),
                MessagesPlaceholder(variable_name="messages"),
            ]
        )

        chain = prompt | self.chat

        answer = chain.invoke(
            {
                "messages": [
                    HumanMessage(content=f"Job description: {jd}\nResume: {resume}"),
                    AIMessage(
                        content=f"""You are an intelligent interviewer. Ask the candidate
                        questions based on their resume, following up on their answers.
                        Chat history: {chat_history}"""
                    ),
                    HumanMessage(content=candidate),
                ],
            }
        )

        result = answer.content
        chat_history.append((candidate, result))
        print("chat_history", chat_history)
        audio_output = self.text_to_audio(result)
        return "", chat_history, audio_output

    def gradio_interface(self) -> None:
        """Create a Gradio interface for the chatbot."""
        with gr.Blocks(css="style.css", theme="shivi/calm_seafoam") as demo:

            gr.HTML("""
            <center class="darkblue" style="text-align:center;padding:30px;">
            <img src="https://amosszeps.com/wp-content/uploads/2021/12/llyods-bank.png" alt="LLOYDS BANK">
            <h1 style="color:#006E49; font-weight: bold;">Screening Assistant</h1>
            </center>
            """)

            with gr.Row():
                # scale expects an integer ratio in recent Gradio releases
                with gr.Column(scale=8):
                    chatbot = gr.Chatbot()
                with gr.Column(scale=2):
                    with gr.Row():
                        resume = gr.File(label="Resume")
                    with gr.Row():
                        jd = gr.File(label="Job Description")
            with gr.Row():
                with gr.Column(scale=8):
                    msg = gr.Textbox(label="Question", show_label=False)
                with gr.Column(scale=2):
                    clear = gr.ClearButton([chatbot])
            with gr.Row():
                with gr.Column(scale=1):
                    audio_path = gr.Audio(sources=["microphone"], type="filepath")
                with gr.Column(scale=1):
                    play_audio = gr.Audio(value=None, autoplay=True)

            audio_path.stop_recording(self.get_response, [audio_path, chatbot, resume, jd], [msg, chatbot, play_audio])

        demo.launch()   

if __name__=="__main__":
    assistant = ScreeningAssistant()
    assistant.gradio_interface()
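
# Environment assumed by this script (names taken from the code above, except
# AZURE_OPENAI_API_KEY, which the openai SDK reads implicitly):
#   HF_TOKEN             - Hugging Face API token for the Whisper endpoint
#   URL                  - Azure OpenAI endpoint URL
#   AZURE_OPENAI_API_KEY - key used by the AzureOpenAI / AzureChatOpenAI clients
#   CHAT_DEPLOYMENT      - chat model deployment name (an assumption; see __init__)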