Jofthomas (HF staff) committed
Commit 08fc296
1 Parent(s): c265f4c

Upload 3 files

Files changed (3)
  1. README.md +13 -0
  2. app.py +81 -0
  3. requirements.txt +1 -0
README.md ADDED
@@ -0,0 +1,13 @@
+ ---
+ title: Chat Template Viewer
+ emoji: 💬
+ colorFrom: gray
+ colorTo: yellow
+ sdk: gradio
+ sdk_version: 4.5.0
+ app_file: app.py
+ pinned: false
+ license: mit
+ ---
+
+ Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
app.py ADDED
@@ -0,0 +1,81 @@
+ import gradio as gr
+ from transformers import AutoTokenizer
+ import json
+ import os
+ from huggingface_hub import login
+
+ HUGGINGFACEHUB_API_TOKEN = os.environ.get("HF_TOKEN")
+
+ demo_conversation = """[
+     {"role": "system", "content": "You are a helpful chatbot."},
+     {"role": "user", "content": "Hi there!"},
+     {"role": "assistant", "content": "Hello, human!"},
+     {"role": "user", "content": "Can I ask a question?"}
+ ]"""
+
+ description_text = """# Chat Template Viewer
+ ### This space is a helper to learn more about [Chat Templates](https://huggingface.co/docs/transformers/main/en/chat_templating).
+ """
+
+ # Example tool definition passed to apply_chat_template so tool-use templates can be previewed.
+ default_tools = [{"type": "function", "function": {"name": "get_current_weather", "description": "Get the current weather", "parameters": {"type": "object", "properties": {"location": {"type": "string", "description": "The city and state, e.g. San Francisco, CA"}, "format": {"type": "string", "enum": ["celsius", "fahrenheit"], "description": "The temperature unit to use. Infer this from the user's location."}}, "required": ["location", "format"]}}}]
+
+ def get_template_names(model_name):
+     """Return the named chat templates of the model's tokenizer, or [] if it has a single unnamed one."""
+     try:
+         tokenizer = AutoTokenizer.from_pretrained(model_name)
+         if isinstance(tokenizer.chat_template, dict):
+             return list(tokenizer.chat_template.keys())
+         else:
+             return []
+     except Exception:
+         return []
+
+ def update_template_dropdown(model_name):
+     # Refresh the template dropdown whenever the model name changes.
+     template_names = get_template_names(model_name)
+     return gr.update(choices=template_names, value=None)
+
+ def apply_chat_template(model_name, test_conversation, add_generation_prompt, cleanup_whitespace, template_name, hf_token, kwargs):
+     try:
+         if hf_token:
+             login(token=hf_token)
+         tokenizer = AutoTokenizer.from_pretrained(model_name)
+     except Exception:
+         return f"Model {model_name} could not be loaded, or the HF token is invalid."
+     try:
+         conversation = json.loads(test_conversation)
+         # Use the selected named template if there is one; otherwise fall back to the tokenizer's default.
+         template = tokenizer.chat_template.get(template_name) if template_name else None
+         print(kwargs)  # additional kwargs from the JSON input are currently only logged
+         # Render the conversation (and tool definitions) as a prompt string rather than token ids.
+         formatted = tokenizer.apply_chat_template(conversation, chat_template=template, tokenize=False, add_generation_prompt=add_generation_prompt, tools=default_tools)
+         return formatted
+     except Exception as e:
+         return str(e)
+
+ with gr.Blocks() as demo:
+     model_name_input = gr.Textbox(label="Model Name", placeholder="Enter model name")
+     template_dropdown = gr.Dropdown(label="Template Name", choices=[], interactive=True)
+     conversation_input = gr.TextArea(value=demo_conversation, lines=6, label="Conversation")
+     add_generation_prompt_checkbox = gr.Checkbox(value=False, label="Add generation prompt")
+     cleanup_whitespace_checkbox = gr.Checkbox(value=True, label="Cleanup template whitespace")
+     hf_token_input = gr.Textbox(label="Hugging Face Token (optional)", placeholder="Enter your HF token")
+     kwargs_input = gr.JSON(label="Additional kwargs", value=default_tools, render=False)
+     output = gr.TextArea(label="Formatted conversation")
+
+     model_name_input.change(fn=update_template_dropdown, inputs=model_name_input, outputs=template_dropdown)
+     gr.Interface(
+         description=description_text,
+         fn=apply_chat_template,
+         inputs=[
+             model_name_input,
+             conversation_input,
+             add_generation_prompt_checkbox,
+             cleanup_whitespace_checkbox,
+             template_dropdown,
+             hf_token_input,
+             kwargs_input
+         ],
+         outputs=output
+     )
+
+ demo.launch()
requirements.txt ADDED
@@ -0,0 +1 @@
+ transformers
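
For quick local testing outside the Space, here is a minimal sketch of the same underlying call the app wraps. The model name is only an illustrative choice (any checkpoint whose tokenizer ships a chat template works), and it is not part of the committed code:

import json
from transformers import AutoTokenizer

# Illustrative model choice; substitute any model whose tokenizer defines a chat template.
tokenizer = AutoTokenizer.from_pretrained("HuggingFaceH4/zephyr-7b-beta")

conversation = [
    {"role": "system", "content": "You are a helpful chatbot."},
    {"role": "user", "content": "Hi there!"},
]

# tokenize=False returns the rendered prompt string rather than token ids,
# which is what the Space shows in its "Formatted conversation" box.
print(tokenizer.apply_chat_template(conversation, tokenize=False, add_generation_prompt=True))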