slush0's picture
Fix WIP
eec2373
raw
history blame
964 Bytes
#!/usr/bin/env python
# or gradio app.py
import gradio as gr
from chat import iface_chat
from prompt import iface_prompt
# Assemble the playground UI: a markdown intro followed by two tabs
# (the prompt-completion interface and the chat interface imported above).
# NOTE(review): indentation of this block was lost in transit; restored here —
# the `with gr.Blocks()` body must be indented for the file to parse at all.
with gr.Blocks() as iface:
    gr.Markdown(
        """# Petals playground
**Let's play with prompts and inference settings for BLOOM and BLOOMZ 176B models!**
This space uses websocket API of [chat.petals.dev](http://chat.petals.dev). Health status of Petals network [lives here](http://health.petals.dev).
Do NOT talk to BLOOM as an entity, it's not a chatbot but a webpage/blog/article completion model.
For the best results: MIMIC a few sentences of a webpage similar to the content you want to generate.
BLOOMZ performs better in chat mode and understands the instructions better."""
    )

    gr.TabbedInterface([iface_prompt, iface_chat], ["Prompt mode", "Chat mode"])

# Queues are required to enable generators (streaming token output);
# concurrency_count/max_size bound simultaneous and pending requests.
iface.queue(concurrency_count=5, max_size=50)
# show_error=True surfaces server-side exceptions in the browser UI.
iface.launch(show_error=True)