"""Answer questions about my resume.""" | |
# %% IMPORTS | |
import logging | |
import gradio as gr | |
from openai import OpenAI | |
from openai.types.chat import ( | |
ChatCompletionAssistantMessageParam, | |
ChatCompletionMessageParam, | |
ChatCompletionSystemMessageParam, | |
ChatCompletionUserMessageParam, | |
) | |
# %% CONFIGS

# %% - Models

MODEL_NAME = "gpt-3.5-turbo"
MODEL_TEMPERATURE = 0.0
# %% - Prompts

PROMPT_INSTRUCTIONS = """
You are Fmind AI Assistant, specialized in providing information from Médéric Hurier's (known as Fmind) resume. Your responses should be succinct and maintain a professional tone. If the request deviates from answering questions about Médéric's resume, politely decline to answer.
Find more information about Médéric Hurier's resume below (markdown format):
"""
with open("files/linkedin.md", encoding="utf-8") as context_file:
    PROMPT_CONTEXT = context_file.read()
PROMPT_SYSTEM = PROMPT_INSTRUCTIONS + PROMPT_CONTEXT
# %% - Interfaces

INTERFACE_THEME = "soft"
INTERFACE_TITLE = "Fmind AI Assistant"
INTERFACE_EXAMPLES = [
    "Who is Médéric Hurier (Fmind)?",
    "Is Fmind open to new opportunities?",
    "Can you share details about Médéric's PhD?",
    "Elaborate on Médéric's current work position",
    "Describe his proficiency with Python programming",
    "What is the answer to life, the universe, and everything?",
]
INTERFACE_DESCRIPTION = (
    "<center>"
    "<strong>"
    "Please migrate to the new assistant website: <a href='https://assistant.fmind.dev/'>https://assistant.fmind.dev/</a>"
    "</strong>"
    "<br /><br />"
    "Visit my website: <a href='https://fmind.dev'>https://fmind.dev</a>"
    " - Médéric HURIER (Fmind)"
    " - Freelancer: AI/FM/MLOps Engineer | Data Scientist | MLOps Community Organizer | MLflow Ambassador | Hacker | PhD"
    "</center>"
)
INTERFACE_CACHE_EXAMPLES = "lazy"  # cache example outputs on first request instead of at launch
INTERFACE_CONCURRENCY_LIMIT = None  # None means no concurrency limit on the event
# %% CLIENTS

client = OpenAI()
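# note: OpenAI() reads the API key from the OPENAI_API_KEY environment variable by default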
# %% LOGGING

logging.basicConfig(
    level=logging.INFO,
    format="[%(asctime)s][%(levelname)s] %(message)s",
)
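# example of a record produced by this configuration (timestamp and payload are illustrative):
# [2024-01-01 12:00:00,000][INFO] Response usage: CompletionUsage(...)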
# %% FUNCTIONS


def answer(message: str, history: list[tuple[str, str]]) -> str:
    """Answer questions about my resume."""
    # messages: rebuild the conversation in the OpenAI chat format
    messages: list[ChatCompletionMessageParam] = []
    messages += [ChatCompletionSystemMessageParam(role="system", content=PROMPT_SYSTEM)]
    for user, assistant in history:
        messages += [ChatCompletionUserMessageParam(role="user", content=user)]
        messages += [ChatCompletionAssistantMessageParam(role="assistant", content=assistant)]
    messages += [ChatCompletionUserMessageParam(role="user", content=message)]
    # response: call the chat completions API
    response = client.chat.completions.create(
        model=MODEL_NAME, messages=messages, temperature=MODEL_TEMPERATURE
    )
    logging.info("Response usage: %s", response.usage)
    # content: guard against empty completions
    content = response.choices[0].message.content
    if content is None:
        logging.warning("Response content is None: %s", response)
        return "[Internal Error] Sorry, I don't have an answer for that."
    return content
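# usage sketch (assumptions: OPENAI_API_KEY is set and files/linkedin.md exists):
# answer("Who is Médéric Hurier (Fmind)?", history=[])  # -> a short answer string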
# %% INTERFACES

interface = gr.ChatInterface(
    fn=answer,
    theme=INTERFACE_THEME,
    title=INTERFACE_TITLE,
    examples=INTERFACE_EXAMPLES,
    description=INTERFACE_DESCRIPTION,
    cache_examples=INTERFACE_CACHE_EXAMPLES,
    concurrency_limit=INTERFACE_CONCURRENCY_LIMIT,
    clear_btn=None,
    retry_btn=None,
    undo_btn=None,
)

if __name__ == "__main__":
    interface.launch()
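# note: launch() serves the app on http://127.0.0.1:7860 by default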