# Role-Demo / app.py
# (HuggingFace Space page residue: "npc0's picture / Update app.py / 1da6fa1 verified")
import json

import gradio as gr
from ollama import chat
# Reformatting instruction sent as a follow-up turn: asks the model to cast its
# free-text character idea into the fenced-JSON schema below.  The parser later
# extracts the first ``` fenced block and feeds it to json.loads, so the schema
# here (top-level "characters" list, camelCase keys) defines the keys the rest
# of the script must read.
prompt2 = '''Transform your idea into the following JSON format:
```
{
"characters": [
{
"name": "Alice",
"physicalDescription": "Alice is a young woman with long, wavy brown hair and hazel eyes. She is of average height and has a slim build. Her most distinctive feature is her warm, friendly smile.",
"personalityTraits": [
"Alice is a kind, compassionate, and intelligent woman. She is always willing to help others and is a great listener. She is also very creative and has a great sense of humor.",
],
"likes": [
"Alice loves spending time with her friends and family.",
"She enjoys reading, writing, and listening to music.",
"She is also a big fan of traveling and exploring new places."
],
"dislikes": [
"Alice dislikes rudeness and cruelty.",
"She also dislikes being lied to or taken advantage of.",
"She is not a fan of heights or roller coasters."
],
"background": [
"Alice grew up in a small town in the Midwest.",
"She was always a good student and excelled in her studies.",
"After graduating from high school, she moved to the city to attend college.",
"She is currently working as a social worker."
],
"goals": [
"Alice wants to make a difference in the world.",
"She hopes to one day open her own counseling practice.",
"She also wants to travel the world and experience different cultures."
],
"relationships": [
"Alice is very close to her family and friends.",
"She is also in a loving relationship with her partner, Ben.",
"She has a good relationship with her colleagues and is well-respected by her clients."
]
}
]
}
```'''
# --- one-time character generation at startup ------------------------------
# Turn 1: ask the model to invent a movie character (free text).
# Turn 2: feed the idea back with `prompt2` to get it re-encoded as JSON.
messages = [{"role": "system",
             "content": "You are a friendly Chatbot."},
            {"role": "user",
             "content": "Write a short biography for next movie character."}]
content = chat(model='llama3', messages=messages)['message']['content']
messages.append({"role": "assistant", "content": content})
messages.append({"role": "user", "content": prompt2})
content = chat(model='llama3', messages=messages)['message']['content']

# The reply is expected to contain one ``` fenced block.  Models commonly tag
# the fence as ```json, which would make json.loads choke on the leading
# "json" word — strip an optional language tag before parsing.
fenced = content.split('```')[1]
settings = json.loads(fenced.strip().removeprefix('json'))
# [conversation]
# Build the role-play system prompt from the FIRST generated character.
# NOTE: per the schema in `prompt2`, the parsed JSON is
# {"characters": [{...}]} with camelCase keys — the previous code indexed
# `settings['name']` / `settings['physcial_description']` (wrong level, wrong
# case, and a typo), which raised KeyError.  The generated `dislikes` field is
# now included as well.
character = settings['characters'][0]
system_prompt = f"""
You are acting as the character detailed below. The details of the character contain different traits, starting from its inherent personality traits to its background.
* Name: {character['name']}
* Physical description: {character['physicalDescription']}
* Personality traits: {character['personalityTraits']}
* Likes: {character['likes']}
* Dislikes: {character['dislikes']}
* Background: {character['background']}
* Goals: {character['goals']}
* Relationships: {character['relationships']}
While generating your responses, you must consider the information above.
"""
def respond(
    message,
    history: list[tuple[str, str]],
    system_message,
    max_tokens=512,
    temperature=0.7,
    top_p=0.95,
):
    """Stream an in-character chat reply for the Gradio ChatInterface.

    Parameters
    ----------
    message : str
        Latest user message.
    history : list[tuple[str, str]]
        Prior (user, assistant) turns; falsy entries are skipped.
    system_message : str
        System prompt placed first in the conversation.
    max_tokens, temperature, top_p :
        Sampling options forwarded to Ollama.  Defaults are provided so the
        function still works when the corresponding UI sliders are not
        wired up as additional inputs (previously a missing-argument
        TypeError at the first chat turn).

    Yields
    ------
    str
        The accumulated response text after each streamed chunk.
    """
    messages = [{"role": "system", "content": system_message}]
    for user_turn, assistant_turn in history:
        if user_turn:
            messages.append({"role": "user", "content": user_turn})
        if assistant_turn:
            messages.append({"role": "assistant", "content": assistant_turn})
    messages.append({"role": "user", "content": message})
    # TODO: summarize older turns once the conversation grows past ~30 messages.

    response = ""
    # ollama.chat(stream=True) yields dicts shaped {'message': {'content': ...}};
    # the previous OpenAI-style access (chunk.choices[0].delta.content) raised
    # AttributeError.  `num_predict` is Ollama's name for max new tokens.
    for chunk in chat(
        model='llama3',
        messages=messages,
        stream=True,
        options={
            'num_predict': max_tokens,
            'temperature': temperature,
            'top_p': top_p,
        },
    ):
        response += chunk['message']['content']
        yield response
# For information on how to customize the ChatInterface, see the Gradio docs:
# https://www.gradio.app/docs/chatinterface
#
# All four additional inputs are wired up because `respond` declares
# (system_message, max_tokens, temperature, top_p) after `history`; exposing
# only the textbox would leave the remaining parameters unfilled and crash
# the first chat turn with a TypeError.
demo = gr.ChatInterface(
    respond,
    additional_inputs=[
        gr.Textbox(value=system_prompt, label="System message"),
        gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
        gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
        gr.Slider(
            minimum=0.1,
            maximum=1.0,
            value=0.95,
            step=0.05,
            label="Top-p (nucleus sampling)",
        ),
    ],
)

if __name__ == "__main__":
    demo.launch()