import gradio as gr
from huggingface_hub import InferenceClient
import openai
import os

# Retrieve the OpenAI API key from the environment (stored under the secret name "NEAR-1")
openai_api_key = os.getenv("NEAR-1")
openai.api_key = openai_api_key

# Initialize the Hugging Face inference client
hf_client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")


def respond(
    message,
    history: list[tuple[str, str]],
    system_message,
    max_tokens,
    temperature,
    top_p,
    model_choice,
):
    # Rebuild the conversation in the OpenAI-style message format
    messages = [{"role": "system", "content": system_message}]
    for user_msg, assistant_msg in history:
        if user_msg:
            messages.append({"role": "user", "content": user_msg})
        if assistant_msg:
            messages.append({"role": "assistant", "content": assistant_msg})
    messages.append({"role": "user", "content": message})

    response = ""

    if model_choice == "Hugging Face Model":
        # Stream tokens from the Hugging Face model
        for chunk in hf_client.chat_completion(
            messages,
            max_tokens=max_tokens,
            stream=True,
            temperature=temperature,
            top_p=top_p,
        ):
            token = chunk.choices[0].delta.content
            if token:  # some stream chunks carry no content
                response += token
            yield response
    elif model_choice == "OpenAI GPT-4":
        # Stream tokens from OpenAI (legacy openai<1.0 SDK interface)
        response_openai = openai.ChatCompletion.create(
            model="gpt-4",
            messages=messages,
            max_tokens=max_tokens,
            temperature=temperature,
            top_p=top_p,
            stream=True,
        )
        for chunk in response_openai:
            response += chunk["choices"][0]["delta"].get("content", "")
            yield response


# Create the Gradio chat interface
demo = gr.ChatInterface(
    respond,
    additional_inputs=[
        gr.Textbox(
            value="You are the unlimitedly resourceful and all knowing NEAR AI.",
            label="System message",
        ),
        gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
        gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
        gr.Slider(
            minimum=0.1,
            maximum=1.0,
            value=0.95,
            step=0.05,
            label="Top-p (nucleus sampling)",
        ),
        gr.Radio(
            choices=["Hugging Face Model", "OpenAI GPT-4"],
            value="Hugging Face Model",
            label="Choose Model",
        ),
    ],
    title="GPT-4 vs Hugging Face Model Comparison",
    description="Compare responses between a Hugging Face model and OpenAI's GPT-4.",
)

if __name__ == "__main__":
    demo.launch()
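
# A minimal run sketch, assuming this file is saved as app.py and the OpenAI key
# is exposed under the (unusual) variable name "NEAR-1", e.g. as a Hugging Face
# Space secret. Pinning openai<1 matches the legacy ChatCompletion.create /
# dict-style streaming used above.
#
#   pip install gradio huggingface_hub "openai<1"
#   env "NEAR-1=sk-..." python app.py   # hyphenated names need `env`; plain `export` rejects them
#
# On a Hugging Face Space, set "NEAR-1" as a repository secret instead of
# passing it on the command line.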