import gradio as gr
import os
from huggingface_hub import InferenceClient

# Hosted inference endpoint for the Zephyr-7B chat model.
client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")


def respond(message, history: list[tuple[str, str]]):
    """Stream a chat completion for *message* given the prior turn history.

    Args:
        message: The latest user message.
        history: Prior (user, assistant) message pairs from gr.ChatInterface.

    Yields:
        The accumulated assistant reply so far (Gradio re-renders each yield).
    """
    # System prompt comes from the environment; fall back to "" so we never
    # send {"content": None} if SYSTEM_RULES is unset.
    system_rules = os.getenv("SYSTEM_RULES", "")
    # Sampling configuration. NOTE: the original code assigned max_tokens = 512
    # but then hard-coded 4096 in the call; the effective value was 4096, so we
    # keep 4096 and route it through the named variable to remove the dead one.
    max_tokens = 4096
    temperature = 0.75
    top_p = 0.95

    messages = []
    if system_rules:
        messages.append({"role": "system", "content": system_rules})

    # Rebuild the conversation in OpenAI-style message dicts; skip empty turns.
    for user_turn, assistant_turn in history:
        if user_turn:
            messages.append({"role": "user", "content": user_turn})
        if assistant_turn:
            messages.append({"role": "assistant", "content": assistant_turn})

    messages.append({"role": "user", "content": message})

    response = ""
    # Loop variable renamed from "message" (which shadowed the parameter).
    for chunk in client.chat_completion(
        messages,
        max_tokens=max_tokens,
        stream=True,
        temperature=temperature,
        top_p=top_p,
    ):
        # delta.content can be None on some streamed chunks (e.g. the final
        # one); guard so the concatenation never raises TypeError.
        token = chunk.choices[0].delta.content or ""
        response += token
        yield response


demo = gr.ChatInterface(
    respond,
    description="This chatbot doesn't have restrictions. Use responsibly.",
    theme="Yntec/HaleyCH_Theme_Orange",
)

if __name__ == "__main__":
    demo.launch()