# chatBot / app.py
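# Commented-out reference: streaming chat with HuggingFaceH4/zephyr-7b-beta via the
# huggingface_hub InferenceClient, wired into gr.ChatInterface.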
# import gradio as gr
# from huggingface_hub import InferenceClient
#
# """
# For more information on `huggingface_hub` Inference API support, please check the docs: https://huggingface.co/docs/huggingface_hub/v0.22.2/en/guides/inference
# """
# client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")
#
#
# def respond(
#     message,
#     history: list[tuple[str, str]],
#     system_message,
#     max_tokens,
#     temperature,
#     top_p,
# ):
#     messages = [{"role": "system", "content": system_message}]
#
#     for val in history:
#         if val[0]:
#             messages.append({"role": "user", "content": val[0]})
#         if val[1]:
#             messages.append({"role": "assistant", "content": val[1]})
#
#     messages.append({"role": "user", "content": message})
#
#     response = ""
#     for message in client.chat_completion(
#         messages,
#         max_tokens=max_tokens,
#         stream=True,
#         temperature=temperature,
#         top_p=top_p,
#     ):
#         token = message.choices[0].delta.content
#         response += token
#         yield response
#
#
# """
# For information on how to customize the ChatInterface, peruse the gradio docs: https://www.gradio.app/docs/chatinterface
# """
# demo = gr.ChatInterface(
#     respond,
#     additional_inputs=[
#         gr.Textbox(value="You are a friendly Chatbot.", label="System message"),
#         gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
#         gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
#         gr.Slider(
#             minimum=0.1,
#             maximum=1.0,
#             value=0.95,
#             step=0.05,
#             label="Top-p (nucleus sampling)",
#         ),
#     ],
# )
#
# if __name__ == "__main__":
#     demo.launch()
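# Commented-out reference: gr.Blocks chat UI that pairs a Chatbot with an Audio
# player and returns a sample clip for any non-empty message.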
# import gradio as gr
#
# def fake(message, history):
#     if message.strip():
#         # Instead of returning audio directly, return a message
#         return "Playing sample audio...", gr.Audio("https://github.com/gradio-app/gradio/raw/main/test/test_files/audio_sample.wav")
#     else:
#         return "Please provide the name of an artist", None
#
# with gr.Blocks() as demo:
#     chatbot = gr.Chatbot(placeholder="Play music by any artist!")
#     textbox = gr.Textbox(placeholder="Which artist's music do you want to listen to?", scale=7)
#     audio_player = gr.Audio()
#
#     def chat_interface(message, history):
#         response, audio = fake(message, history)
#         return history + [(message, response)], audio
#
#     textbox.submit(chat_interface, [textbox, chatbot], [chatbot, audio_player])
#
# demo.launch()
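# Commented-out reference: minimal ChatInterface that answers "Yes" or "No" at random.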
# import random
# import gradio as gr
#
# def random_response(message, history):
#     return random.choice(["Yes", "No"])
#
# gr.ChatInterface(random_response).launch()
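# Commented-out reference: "Yes Man" ChatInterface showing a custom chatbot, textbox,
# cached examples, theme, and button labels.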
# import gradio as gr
#
# def yes_man(message, history):
#     if message.endswith("?"):
#         return "Yes"
#     else:
#         return "Ask me anything!"
#
# gr.ChatInterface(
#     yes_man,
#     chatbot=gr.Chatbot(placeholder="<strong>Ask me a yes or no question</strong><br>Ask me anything"),
#     textbox=gr.Textbox(placeholder="Ask me a yes or no question", container=False, scale=15),
#     title="Yes Man",
#     description="Ask Yes Man any question",
#     theme="soft",
#     examples=["Hello", "Am I cool?", "Are tomatoes vegetables?"],
#     cache_examples=True,
#     retry_btn=None,
#     undo_btn="Delete Previous",
#     clear_btn="Clear",
# ).launch()
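# Active demo: multimodal ChatInterface that reports how many files were attached
# to each user message.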
import gradio as gr

def count_files(message, history):
    # With multimodal=True, `message` arrives as a dict: {"text": str, "files": [file paths]}.
    num_files = len(message["files"])
    return f"You uploaded {num_files} files"

demo = gr.ChatInterface(fn=count_files, examples=[{"text": "Hello", "files": []}], title="Echo Bot", multimodal=True)

demo.launch()