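# """
# Commented-out variant: the default Hugging Face Spaces chat template, streaming
# completions from HuggingFaceH4/zephyr-7b-beta via the Inference API.
# """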
# import gradio as gr
# from huggingface_hub import InferenceClient
# """
# For more information on `huggingface_hub` Inference API support, please check the docs: https://huggingface.co/docs/huggingface_hub/v0.22.2/en/guides/inference
# """
# client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")
# def respond(
#     message,
#     history: list[tuple[str, str]],
#     system_message,
#     max_tokens,
#     temperature,
#     top_p,
# ):
#     # Rebuild the conversation: system prompt, then alternating user/assistant turns.
#     messages = [{"role": "system", "content": system_message}]
#     for val in history:
#         if val[0]:
#             messages.append({"role": "user", "content": val[0]})
#         if val[1]:
#             messages.append({"role": "assistant", "content": val[1]})
#     messages.append({"role": "user", "content": message})
#     response = ""
#     # Stream the completion, yielding the growing response so the UI updates live.
#     for chunk in client.chat_completion(
#         messages,
#         max_tokens=max_tokens,
#         stream=True,
#         temperature=temperature,
#         top_p=top_p,
#     ):
#         token = chunk.choices[0].delta.content
#         if token:  # the final streamed chunk's delta can be None
#             response += token
#         yield response
# """
# For information on how to customize the ChatInterface, peruse the gradio docs: https://www.gradio.app/docs/chatinterface
# """
# demo = gr.ChatInterface(
#     respond,
#     additional_inputs=[
#         gr.Textbox(value="You are a friendly Chatbot.", label="System message"),
#         gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
#         gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
#         gr.Slider(
#             minimum=0.1,
#             maximum=1.0,
#             value=0.95,
#             step=0.05,
#             label="Top-p (nucleus sampling)",
#         ),
#     ],
# )
# if __name__ == "__main__":
#     demo.launch()
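# """
# Commented-out variant: a Blocks-based chat that returns a text reply plus an audio
# clip, wiring a gr.Chatbot, gr.Textbox, and gr.Audio together manually.
# """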
# import gradio as gr
# def fake(message, history):
#     if message.strip():
#         # Instead of returning audio directly, return a message
#         return "Playing sample audio...", gr.Audio("https://github.com/gradio-app/gradio/raw/main/test/test_files/audio_sample.wav")
#     else:
#         return "Please provide the name of an artist", None
# with gr.Blocks() as demo:
#     chatbot = gr.Chatbot(placeholder="Play music by any artist!")
#     textbox = gr.Textbox(placeholder="Which artist's music do you want to listen to?", scale=7)
#     audio_player = gr.Audio()
#     def chat_interface(message, history):
#         response, audio = fake(message, history)
#         return history + [(message, response)], audio
#     textbox.submit(chat_interface, [textbox, chatbot], [chatbot, audio_player])
# demo.launch()
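# """
# Commented-out variant: the simplest possible ChatInterface, replying at random.
# """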
# import random
# import gradio as gr
# def random_response(message, history):
#     return random.choice(["Yes", "No"])
# gr.ChatInterface(random_response).launch()
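# """
# Commented-out variant: a customized "Yes Man" ChatInterface with a themed UI,
# example prompts, and relabeled buttons.
# """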
# import gradio as gr
# def yes_man(message, history):
#     if message.endswith("?"):
#         return "Yes"
#     else:
#         return "Ask me anything!"
# gr.ChatInterface(
#     yes_man,
#     chatbot=gr.Chatbot(placeholder="<strong>Ask me a yes or no question</strong><br>Ask me anything"),
#     textbox=gr.Textbox(placeholder="Ask me a yes or no question", container=False, scale=15),
#     title="Yes Man",
#     description="Ask Yes Man any question",
#     theme="soft",
#     examples=["Hello", "Am I cool?", "Are tomatoes vegetables?"],
#     cache_examples=True,
#     retry_btn=None,
#     undo_btn="Delete Previous",
#     clear_btn="Clear",
# ).launch()
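# """
# Active app: a multimodal ChatInterface that reports how many files the user attached.
# """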
import gradio as gr
def count_files(message, history):
    # With multimodal=True, `message` is a dict with "text" and "files" keys.
    num_files = len(message["files"])
    return f"You uploaded {num_files} files"
demo = gr.ChatInterface(fn=count_files, examples=[{"text": "Hello", "files": []}], title="Echo Bot", multimodal=True)
demo.launch()