Spaces: Running on Zero
improve interface
app.py CHANGED
@@ -2,7 +2,7 @@ import gradio as gr
 import torch
 from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline
 import json
-from globe import title, description, customtool
+from globe import title, description, customtool , presentation1, presentation2, joinus
 import spaces
 
 model_path = "nvidia/Nemotron-Mini-4B-Instruct"
@@ -77,6 +77,13 @@ with gr.Blocks() as demo:
     gr.Markdown(title)
     with gr.Row():
         gr.Markdown(description)
+    with gr.Row():
+        with gr.Group():
+            gr.Markdown(presentation1)
+        with gr.Group():
+            gr.Markdown(presentation2)
+    with gr.Row():
+        gr.Markdown(joinus)
     with gr.Row():
         with gr.Column(scale=3):
             chatbot = gr.Chatbot(height=400)
@@ -84,7 +91,8 @@ with gr.Blocks() as demo:
         with gr.Row():
            send = gr.Button("Send")
            clear = gr.Button("Clear")
-        system_message = gr.Textbox(
+        with gr.Accordion(label="🧪Advanced Settings", open=False):
+            system_message = gr.Textbox(
            label="System Message",
            value="You are a helpful AI assistant.",
            lines=2,
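This commit only touches the imports and the Blocks layout, so the model-loading code in app.py is not shown here. For orientation, below is a minimal sketch, based only on the imports and `model_path` visible in the diff, of how a Space like this typically loads the checkpoint and generates a chat reply; the dtype, `device_map`, and `max_new_tokens` values are illustrative assumptions, not values taken from app.py.

```python
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline

model_path = "nvidia/Nemotron-Mini-4B-Instruct"

# Load tokenizer and model; dtype and device placement here are illustrative assumptions.
tokenizer = AutoTokenizer.from_pretrained(model_path)
model = AutoModelForCausalLM.from_pretrained(
    model_path,
    torch_dtype=torch.bfloat16,
    device_map="auto",
)

# Wrap the loaded model in a text-generation pipeline, matching the imports in app.py.
generator = pipeline("text-generation", model=model, tokenizer=tokenizer)

# Build a chat prompt with the tokenizer's chat template and generate a reply.
messages = [
    {"role": "system", "content": "You are a helpful AI assistant."},
    {"role": "user", "content": "Write one sentence about small language models."},
]
prompt = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
print(generator(prompt, max_new_tokens=128)[0]["generated_text"])  # assumed generation length
```

On a ZeroGPU Space ("Running on Zero", together with the `import spaces` line in the diff), the generation call would typically sit inside a function decorated with `@spaces.GPU` so that a GPU is attached only while it runs.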
globe.py CHANGED
@@ -1,26 +1,31 @@
+joinus = """
+## Join us :
+🌟TeamTonic🌟 is always making cool demos! Join our active builder's 🛠️community 👻 [![Join us on Discord](https://img.shields.io/discord/1109943800132010065?label=Discord&logo=discord&style=flat-square)](https://discord.gg/qdfnvSPcqP) On 🤗Huggingface:[MultiTransformer](https://huggingface.co/MultiTransformer) On 🌐Github: [Tonic-AI](https://github.com/tonic-ai) & contribute to🌟 [Build Tonic](https://git.tonic-ai.com/contribute)🤗Big thanks to Yuvi Sharma and all the folks at huggingface for the community grant 🤗
+"""
+
 title = """# 🙋🏻♂️Welcome to Tonic's 🤖 Nemotron-Mini-4B Demo 🚀"""
 
-description = """Nemotron-Mini-4B-Instruct is a model for generating responses for roleplaying, retrieval augmented generation, and function calling. It is a small language model (SLM) optimized through distillation, pruning and quantization for speed and on-device deployment. It is a fine-tuned version of [nvidia/Minitron-4B-Base](https://huggingface.co/nvidia/Minitron-4B-Base), which was pruned and distilled from [Nemotron-4 15B](https://arxiv.org/abs/2402.16819) using [our LLM compression technique](https://arxiv.org/abs/2407.14679). This instruct model is optimized for roleplay, RAG QA, and function calling in English. It supports a context length of 4,096 tokens. This model is ready for commercial use.
+description = """🤖Nemotron-Mini-4B-Instruct is a model for generating responses for roleplaying, retrieval augmented generation, and function calling. It is a small language model (SLM) optimized through distillation, pruning and quantization for speed and on-device deployment. It is a fine-tuned version of [nvidia/Minitron-4B-Base](https://huggingface.co/nvidia/Minitron-4B-Base), which was pruned and distilled from [Nemotron-4 15B](https://arxiv.org/abs/2402.16819) using [our LLM compression technique](https://arxiv.org/abs/2407.14679). This instruct model is optimized for roleplay, RAG QA, and function calling in English. It supports a context length of 4,096 tokens. This model is ready for commercial use.
+"""
 
-Try this model on [build.nvidia.com](https://build.nvidia.com/nvidia/nemotron-mini-4b-instruct).
+presentation1 = """Try this model on [build.nvidia.com](https://build.nvidia.com/nvidia/nemotron-mini-4b-instruct).
 
 **Model Developer:** NVIDIA
 
-**Model Dates:** Nemotron-Mini-4B-Instruct was trained between February 2024 and Aug 2024.
+**Model Dates:** 🤖Nemotron-Mini-4B-Instruct was trained between February 2024 and Aug 2024.
 
 ## License
 
-[NVIDIA Community Model License](https://huggingface.co/nvidia/Nemotron-Mini-4B-Instruct/blob/main/nvidia-community-model-license-aug2024.pdf)
+[NVIDIA Community Model License](https://huggingface.co/nvidia/Nemotron-Mini-4B-Instruct/blob/main/nvidia-community-model-license-aug2024.pdf)"""
 
+presentation2 = """
 ## Model Architecture
 
 Nemotron-Mini-4B-Instruct uses a model embedding size of 3072, 32 attention heads, and an MLP intermediate dimension of 9216. It also uses Grouped-Query Attention (GQA) and Rotary Position Embeddings (RoPE).
 
 **Architecture Type:** Transformer Decoder (auto-regressive language model)
 
-**Network Architecture:** Nemotron-4
-
-"""
+**Network Architecture:** Nemotron-4 """
 
 customtool = """{
 "name": "custom_tool",
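The presentation2 string quotes concrete architecture numbers (3072 embedding size, 32 attention heads, 9216 MLP intermediate dimension, GQA). A minimal sketch for checking those figures against the published checkpoint, assuming the model's config exposes the standard transformers field names:

```python
from transformers import AutoConfig

# Fetch the model config and compare it with the numbers quoted in presentation2.
# Attribute names below assume the usual transformers config fields for this model.
config = AutoConfig.from_pretrained("nvidia/Nemotron-Mini-4B-Instruct")
print(config.hidden_size)          # embedding size, expected 3072
print(config.num_attention_heads)  # expected 32
print(config.intermediate_size)    # MLP intermediate dimension, expected 9216
print(config.num_key_value_heads)  # fewer KV heads than attention heads implies GQA
```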