Tonic committed on
Commit
a13c01c
1 Parent(s): 38fedf1

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +36 -15
app.py CHANGED
@@ -4,6 +4,12 @@ import transformers
4
  from transformers import AutoTokenizer, AutoModelForCausalLM, BitsAndBytesConfig
5
  import torch
6
 
 
 
 
 
 
 
7
  bnb_config = BitsAndBytesConfig(load_in_8bit=True)
8
 
9
  model_id = "CohereForAI/c4ai-command-r-v01"
@@ -28,18 +34,33 @@ def generate_response(user_input, max_new_tokens, temperature):
28
  gen_text = tokenizer.decode(gen_tokens[0])
29
  return gen_text
30
 
31
- # Define the Gradio interface
32
- iface = gr.Interface(
33
- fn=generate_response,
34
- inputs=[
35
- gr.inputs.Textbox(lines=2, label="Your Message"),
36
- gr.inputs.Slider(minimum=10, maximum=100, default=50, label="Max New Tokens"),
37
- gr.inputs.Slider(minimum=0.1, maximum=1.0, step=0.1, default=0.3, label="Temperature")
38
- ],
39
- outputs=gr.outputs.Textbox(label="Model Response"),
40
- title="Text Generation Model Interface",
41
- description="This is a Gradio interface for a text generation model. Enter your message and adjust the parameters to generate a response."
42
- )
43
-
44
- # Launch the application
45
- iface.launch()
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
4
  from transformers import AutoTokenizer, AutoModelForCausalLM, BitsAndBytesConfig
5
  import torch
6
 
7
+ title = """
8
+ # Welcome to 🌟Tonic's🫡Command-R
9
+ 🫡Command-R is a Large Language Model optimized for conversational interaction and long context tasks. It targets the “scalable” category of models that balance high performance with strong accuracy, enabling companies to move beyond proof of concept, and into production. 🫡Command-R boasts high precision on retrieval augmented generation (RAG) and tool use tasks, low latency and high throughput, a long 128k context, and strong capabilities across 10 key languages. You can build with this endpoint using✨StarCoder available here : [bigcode/starcoder2-15b](https://huggingface.co/bigcode/starcoder2-15b). You can also use 🫡Command-R by cloning this space. Simply click here: <a style="display:inline-block" href="https://huggingface.co/spaces/Tonic/Command-R?duplicate=true"><img src="https://img.shields.io/badge/-Duplicate%20Space-blue?labelColor=white&style=flat&logo=data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAAAAXNSR0IArs4c6QAAAP5JREFUOE+lk7FqAkEURY+ltunEgFXS2sZGIbXfEPdLlnxJyDdYB62sbbUKpLbVNhyYFzbrrA74YJlh9r079973psed0cvUD4A+4HoCjsA85X0Dfn/RBLBgBDxnQPfAEJgBY+A9gALA4tcbamSzS4xq4FOQAJgCDwV2CPKV8tZAJcAjMMkUe1vX+U+SMhfAJEHasQIWmXNN3abzDwHUrgcRGmYcgKe0bxrblHEB4E/pndMazNpSZGcsZdBlYJcEL9Afo75molJyM2FxmPgmgPqlWNLGfwZGG6UiyEvLzHYDmoPkDDiNm9JR9uboiONcBXrpY1qmgs21x1QwyZcpvxt9NS09PlsPAAAAAElFTkSuQmCC&logoWidth=14" alt="Duplicate Space"></a></h3>
10
+ Join us : 🌟TeamTonic🌟 is always making cool demos! Join our active builder's 🛠️community 👻 [![Join us on Discord](https://img.shields.io/discord/1109943800132010065?label=Discord&logo=discord&style=flat-square)](https://discord.gg/GWpVpekp) On 🤗Huggingface:[MultiTransformer](https://huggingface.co/MultiTransformer) Math 🔍 [introspector](https://huggingface.co/introspector) On 🌐Github: [Tonic-AI](https://github.com/tonic-ai) & contribute to🌟 [Torchon](https://github.com/Tonic-AI/Torchon)🤗Big thanks to Yuvi Sharma and all the folks at huggingface for the community grant 🤗
11
+ """
12
+
13
  bnb_config = BitsAndBytesConfig(load_in_8bit=True)
14
 
15
  model_id = "CohereForAI/c4ai-command-r-v01"
 
34
  gen_text = tokenizer.decode(gen_tokens[0])
35
  return gen_text
36
 
37
+ def setup_examples():
38
+ examples = [
39
+ {"message": "What is the weather like today?", "max_new_tokens": 30, "temperature": 0.5},
40
+ {"message": "Tell me a joke.", "max_new_tokens": 50, "temperature": 0.7},
41
+ {"message": "Explain the concept of machine learning.", "max_new_tokens": 100, "temperature": 0.3}
42
+ ]
43
+ for example in examples:
44
+ message_box.update(example["message"])
45
+ max_new_tokens_slider.update(example["max_new_tokens"])
46
+ temperature_slider.update(example["temperature"])
47
+ generate_button.click()
48
+
49
+ with gr.Blocks() as demo:
50
+ gr.Markdown(title)
51
+ with gr.Row():
52
+ message_box = gr.Textbox(lines=2, label="Your Message")
53
+ max_new_tokens_slider = gr.Slider(minimum=10, maximum=100, default=50, label="Max New Tokens")
54
+ temperature_slider = gr.Slider(minimum=0.1, maximum=1.0, step=0.1, default=0.3, label="Temperature")
55
+ generate_button = gr.Button("Generate")
56
+ output_box = gr.Textbox(label="Model Response")
57
+
58
+ generate_button.click(
59
+ fn=generate_response,
60
+ inputs=[message_box, max_new_tokens_slider, temperature_slider],
61
+ outputs=output_box
62
+ )
63
+ setup_examples_button = gr.Button("Load Example")
64
+ setup_examples_button.click(fn=setup_examples, inputs=[], outputs=[])
65
+
66
+ demo.launch()