the-tokenizer-v1 / special_tokens_map.json
{
  "additional_special_tokens": [
    "<|endoftext|>",
    "<fim_prefix>",
    "<fim_middle>",
    "<fim_suffix>",
    "<fim_pad>",
    "<repo_name>",
    "<file_sep>",
    "<issue_start>",
    "<issue_comment>",
    "<issue_closed>",
    "<jupyter_start>",
    "<jupyter_text>",
    "<jupyter_code>",
    "<jupyter_output>",
    "<jupyter_script>",
    "<empty_output>",
    "<code_to_intermediate>",
    "<intermediate_to_code>",
    "<pr>",
    "<pr_status>",
    "<pr_is_merged>",
    "<pr_file>",
    "<pr_base_code>",
    "<pr_diff>",
    "<pr_diff_hunk>",
    "<pr_comment>",
    "<pr_event_id>",
    "<pr_review>",
    "<pr_review_state>",
    "<pr_review_comment>",
    "<pr_in_reply_to_review_id>",
    "<pr_in_reply_to_comment_id>",
    "<pr_diff_hunk_comment_line>"
  ],
  "bos_token": "<|endoftext|>",
  "eos_token": "<|endoftext|>",
  "unk_token": "<|endoftext|>"
}
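
When loaded with the transformers library, this map pins the bos/eos/unk tokens to <|endoftext|> and registers every entry in additional_special_tokens as an atomic token the tokenizer will never split. A minimal sketch, assuming the repo id lvwerra/the-tokenizer-v1 from the page header (substitute the actual repo id or a local directory containing this file):

# Hypothetical usage sketch; the repo id is inferred from the page header.
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("lvwerra/the-tokenizer-v1")

# bos, eos, and unk all resolve to the same token, as declared in this file.
print(tokenizer.bos_token, tokenizer.eos_token, tokenizer.unk_token)
# -> <|endoftext|> <|endoftext|> <|endoftext|>

# Additional special tokens stay intact, so a fill-in-the-middle prompt
# keeps each sentinel (<fim_prefix>, <fim_suffix>, <fim_middle>) as one id.
fim_prompt = "<fim_prefix>def add(a, b):<fim_suffix>\n<fim_middle>"
ids = tokenizer(fim_prompt)["input_ids"]
print(tokenizer.convert_ids_to_tokens(ids))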