Upload folder using huggingface_hub
- api.py +44 -12
- requirements.txt +1 -0
- run.py +8 -5
api.py
CHANGED
@@ -1,17 +1,49 @@
-import
+from openai import OpenAI
+import os
+
+client = OpenAI(api_key=os.environ["OPENAI_API_KEY"])
 
 def generate_scenario(gender):
-
-    return
+    PROMPT = f"""Create a scenario where you see a {gender} you're interested in. You haven't approached them yet but are interested. Keep it brief, around 20 words. For example: 'There's a cute {gender} in my boxing class. I approach her after a class.' or 'I see two {gender}s having coffee at the table next to mine.'"""
+    return llm(PROMPT)
+
+def suggest_next_line(scenario, history):
+    PROMPT = f"""This is my scenario: {scenario} \n\n"""
+    if len(history) == 0:
+        PROMPT += "Suggest a line with which I can introduce myself or strike up a conversation."
+    else:
+        PROMPT += f"""This is the conversation so far: \n\n{format_history(history)} \n\nSuggest my next response. Provide the response and nothing else."""
+    return llm(PROMPT)
+
+def generate_response(scenario, history, personality, interested):
+    PROMPT = f"""This is my scenario: {scenario}. Roleplay the conversation with me. Provide the response and nothing else. Your personality is {personality}. You are {'interested' if interested else 'not interested'} in me.
+
+    The conversation so far: \n\n{format_history(history)}
+
+    Only provide the response and nothing else.
+    """
+    return llm(PROMPT)
 
-def
-
-
+def transcribe_audio(audio_path):
+    with open(audio_path, "rb") as audio_file:
+        transcript = client.audio.transcriptions.create(
+            model="whisper-1",
+            file=audio_file
+        )
+    return transcript.text
 
-def
-
-
+def llm(prompt):
+    response = client.completions.create(
+        model="gpt-3.5-turbo-instruct",
+        prompt=prompt,
+        temperature=0.9,
+        max_tokens=100,
+        top_p=1,
+        frequency_penalty=0.0,
+        presence_penalty=0.6,
+    )
+    return response.choices[0].text.strip()
+
 
-def
-
-    return "I like your face"
+def format_history(history):
+    return "\n\n".join([f"Me: {line[0]} \nThem: {line[1] if line[1] else ''}" for line in history])
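A quick way to sanity-check these helpers outside Gradio (a minimal sketch, assuming OPENAI_API_KEY is set in the environment; the history format is the same list of [user_line, reply] pairs that the Chatbot in run.py passes in, with reply left as None until generate_response fills it):

import api

scenario = api.generate_scenario("woman")
print("Scenario:", scenario)

history = []                                        # [user_line, reply] pairs; reply is None until generated
opener = api.suggest_next_line(scenario, history)   # empty history -> asks for an opening line
history.append([opener, None])
history[-1][1] = api.generate_response(scenario, history, personality="confident", interested=True)
print("Opener:", opener)
print("Reply:", history[-1][1])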
requirements.txt
ADDED
@@ -0,0 +1 @@
+openai
run.py
CHANGED
@@ -1,5 +1,6 @@
 import api
 import gradio as gr
+import random
 
 with gr.Blocks() as demo:
     with gr.Column() as setup_col:
@@ -15,7 +16,7 @@ with gr.Blocks() as demo:
 
     with gr.Column(visible=False) as convo_col:
         chat = gr.Chatbot()
-        recording = gr.Audio(label="Your Line", sources="microphone")
+        recording = gr.Audio(label="Your Line", sources="microphone", type="filepath")
         with gr.Group():
             with gr.Row():
                 help_btn = gr.Button("Help Me", scale=0)
@@ -29,6 +30,8 @@ with gr.Blocks() as demo:
     }
 
     scenario = gr.State()
+    personality = gr.State(lambda: random.choice(["shy", "confident", "awkward", "charming", "rude"]))
+    interested = gr.State(lambda: random.choice([True, False]))
     SCENARIO_LINE = "The scenario is: **{}** \n\n Record your opening line below!"
 
     def start_chat(scenario):
@@ -44,16 +47,16 @@ with gr.Blocks() as demo:
         chat.append([user_message, None])
         return chat
 
-    def respond(chat):
-        response = api.generate_response(chat[1:])
+    def respond(scenario, chat, personality, interested):
+        response = api.generate_response(scenario, chat[1:], personality, interested)
         chat[-1][1] = response
         return chat
 
 
     recording.stop_recording(transcribe, inputs=[recording, chat], outputs=[chat]
-    ).then(respond, inputs=[chat], outputs=[chat]
+    ).then(respond, inputs=[scenario, chat, personality, interested], outputs=[chat]
     ).then(lambda: None, outputs=[recording])
 
-    help_btn.click(lambda chat: api.suggest_next_line(chat[1:]), inputs=[chat], outputs=[suggestion])
+    help_btn.click(lambda scenario, chat: api.suggest_next_line(scenario, chat[1:]), inputs=[scenario, chat], outputs=[suggestion])
 
 demo.launch()
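The two new gr.State components use a callable default, so the random draw happens when a session starts rather than once at import time: each visitor gets their own personality and interested flag, which the .then() chain then feeds into api.generate_response. A standalone sketch of the same pattern (hypothetical demo, not part of this Space):

import random
import gradio as gr

with gr.Blocks() as sketch:
    # Callable default: Gradio calls it per session, so each visitor gets a fresh draw.
    mood = gr.State(lambda: random.choice(["shy", "confident", "awkward"]))
    out = gr.Textbox(label="Mood for this session")
    gr.Button("Reveal").click(lambda m: m, inputs=[mood], outputs=[out])

sketch.launch()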