repo_id | file_path | content | __index_level_0__
---|---|---|---|
hf_public_repos/tokenizers/tokenizers/src | hf_public_repos/tokenizers/tokenizers/src/pre_tokenizers/punctuation.rs | use serde::{Deserialize, Serialize};
use crate::tokenizer::{PreTokenizedString, PreTokenizer, Result, SplitDelimiterBehavior};
use crate::utils::macro_rules_attribute;
use unicode_categories::UnicodeCategories;
fn is_punc(x: char) -> bool {
char::is_ascii_punctuation(&x) || x.is_punctuation()
}
#[derive(Copy, Clone, Debug, PartialEq, Eq)]
#[macro_rules_attribute(impl_serde_type!)]
pub struct Punctuation {
#[serde(default = "default_split")]
behavior: SplitDelimiterBehavior,
}
fn default_split() -> SplitDelimiterBehavior {
SplitDelimiterBehavior::Isolated
}
impl Punctuation {
pub fn new(behavior: SplitDelimiterBehavior) -> Self {
Self { behavior }
}
}
impl Default for Punctuation {
fn default() -> Self {
Self::new(SplitDelimiterBehavior::Isolated)
}
}
impl PreTokenizer for Punctuation {
fn pre_tokenize(&self, pretokenized: &mut PreTokenizedString) -> Result<()> {
pretokenized.split(|_, s| s.split(is_punc, self.behavior))
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::{OffsetReferential, OffsetType};
#[test]
fn punctuation_basic() {
let pretok = Punctuation::default();
let mut pretokenized: PreTokenizedString = "Hey friend!     How are you?!?".into();
pretok.pre_tokenize(&mut pretokenized).unwrap();
assert_eq!(
pretokenized
.get_splits(OffsetReferential::Original, OffsetType::Byte)
.into_iter()
.map(|(s, o, _)| (s, o))
.collect::<Vec<_>>(),
vec![
("Hey friend", (0, 10)),
("!", (10, 11)),
(" How are you", (11, 27)),
("?", (27, 28)),
("!", (28, 29)),
("?", (29, 30)),
]
);
}
#[test]
fn deserialization() {
let punctuation: Punctuation = serde_json::from_str(r#"{"type": "Punctuation"}"#).unwrap();
assert_eq!(punctuation, Punctuation::default());
assert_eq!(
punctuation,
Punctuation::new(SplitDelimiterBehavior::Isolated)
);
}
#[test]
#[should_panic]
fn deserialization_erroneous() {
let _punctuation: Punctuation =
serde_json::from_str(r#"{"type": "WhitespaceSplit"}"#).unwrap();
}
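// Illustrative sketch, not part of the upstream test suite: with the
// `Removed` behavior the punctuation delimiters are dropped instead of
// being kept as standalone splits.
#[test]
fn punctuation_removed_behavior() {
let pretok = Punctuation::new(SplitDelimiterBehavior::Removed);
let mut pretokenized: PreTokenizedString = "Hey friend!".into();
pretok.pre_tokenize(&mut pretokenized).unwrap();
assert_eq!(
pretokenized
.get_splits(OffsetReferential::Original, OffsetType::Byte)
.into_iter()
.map(|(s, o, _)| (s, o))
.collect::<Vec<_>>(),
vec![("Hey friend", (0, 10))]
);
}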
}
| 0 |
hf_public_repos/tokenizers/tokenizers/src | hf_public_repos/tokenizers/tokenizers/src/pre_tokenizers/metaspace.rs | use crate::tokenizer::{Decoder, PreTokenizedString, PreTokenizer, Result, SplitDelimiterBehavior};
use serde::{Deserialize, Deserializer, Serialize};
/// Enum representing options for the metaspace prepending scheme.
#[derive(Debug, Clone, PartialEq, Serialize, Eq, Deserialize, Copy)]
#[serde(rename_all = "snake_case")]
pub enum PrependScheme {
/// Specifies that the replacement should be prepended only once, on the first split.
First,
/// Specifies that the replacement should never be prepended.
Never,
/// Specifies that the replacement should always be prepended.
Always,
}
#[derive(Debug, Clone, PartialEq, Serialize, Eq)]
/// Replaces all whitespace with the provided meta character and then
/// splits on this character
#[serde(tag = "type")]
pub struct Metaspace {
replacement: char,
pub add_prefix_space: bool,
pub prepend_scheme: PrependScheme,
#[serde(skip)]
str_rep: String,
}
impl<'de> Deserialize<'de> for Metaspace {
fn deserialize<D>(deserializer: D) -> std::result::Result<Self, D::Error>
where
D: Deserializer<'de>,
{
#[derive(Deserialize)]
enum Type {
Metaspace,
}
fn default_prepend_scheme_value() -> PrependScheme {
PrependScheme::Always
}
#[derive(Deserialize)]
pub struct MetaspaceHelper {
#[serde(rename = "type")]
_type: Type,
replacement: char,
pub add_prefix_space: bool,
#[serde(default = "default_prepend_scheme_value")]
pub prepend_scheme: PrependScheme,
#[serde(skip, rename = "str_rep")]
_str_rep: String,
}
let helper = MetaspaceHelper::deserialize(deserializer)?;
let instance = Self::new_with_prepend_scheme(
helper.replacement,
helper.add_prefix_space,
helper.prepend_scheme,
);
Ok(instance)
}
}
impl Metaspace {
pub fn new(replacement: char, add_prefix_space: bool) -> Self {
Self::new_with_prepend_scheme(
replacement,
add_prefix_space,
PrependScheme::Always, // always prepend for legacy purpose
)
}
pub fn new_with_prepend_scheme(
replacement: char,
add_prefix_space: bool,
prepend_scheme: PrependScheme,
) -> Self {
Self {
replacement,
str_rep: replacement.to_string(),
add_prefix_space,
prepend_scheme,
}
}
pub fn get_replacement(&self) -> char {
self.replacement
}
pub fn set_replacement(&mut self, replacement: char) {
self.replacement = replacement;
self.str_rep = replacement.to_string();
}
pub fn get_prepend_scheme(&self) -> PrependScheme {
self.prepend_scheme
}
pub fn set_prepend_scheme(&mut self, scheme: PrependScheme) {
self.prepend_scheme = scheme;
}
}
impl Default for Metaspace {
fn default() -> Self {
Self::new('▁', true)
}
}
impl PreTokenizer for Metaspace {
fn pre_tokenize(&self, pretokenized: &mut PreTokenizedString) -> Result<()> {
let mut first_split = true;
pretokenized.split(|_, mut normalized| {
normalized.replace(' ', &self.str_rep)?;
if self.add_prefix_space && !normalized.get().starts_with(self.replacement) {
if self.prepend_scheme == PrependScheme::Always {
normalized.prepend(&self.str_rep);
} else if self.prepend_scheme == PrependScheme::First && first_split {
normalized.prepend(&self.str_rep);
first_split = false;
}
} else {
first_split = false;
}
normalized.split(self.replacement, SplitDelimiterBehavior::MergedWithNext)
})
}
}
impl Decoder for Metaspace {
fn decode_chain(&self, tokens: Vec<String>) -> Result<Vec<String>> {
Ok(tokens
.iter()
.enumerate()
.map(|(i, token)| {
token
.chars()
.flat_map(|c| {
if c == self.replacement {
if i == 0 && self.add_prefix_space {
None
} else {
Some(' ')
}
} else {
Some(c)
}
})
.collect::<String>()
})
.collect())
}
}
#[cfg(test)]
mod tests {
use regex::Regex;
use super::*;
use crate::{OffsetReferential, OffsetType};
#[test]
fn serialization() {
let metaspace = Metaspace::new('_', true);
let metaspace_s = r#"{"type":"Metaspace","replacement":"_","add_prefix_space":true,"prepend_scheme":"always"}"#;
assert_eq!(serde_json::to_string(&metaspace).unwrap(), metaspace_s);
assert_eq!(
serde_json::from_str::<Metaspace>(metaspace_s).unwrap(),
metaspace
);
// Also check it can deserialize previous versions
let metaspace = Metaspace::new('_', true);
let metaspace_s = r#"{"type":"Metaspace","str_rep":"_","replacement":"_","add_prefix_space":true,"prepend_scheme":"always"}"#;
assert_eq!(
serde_json::from_str::<Metaspace>(metaspace_s).unwrap(),
metaspace
);
let metaspace_parsed: Metaspace = serde_json::from_str(
r#"{"type":"Metaspace","replacement":"_","add_prefix_space":true}"#,
)
.unwrap();
assert_eq!(metaspace_parsed, metaspace);
}
#[test]
fn basic() {
let pretok = Metaspace::new('▁', true);
let mut pretokenized = PreTokenizedString::from("Hey friend!");
pretok.pre_tokenize(&mut pretokenized).unwrap();
assert_eq!(
pretokenized
.get_splits(OffsetReferential::Normalized, OffsetType::Byte)
.into_iter()
.map(|(s, o, _)| (s, o))
.collect::<Vec<_>>(),
vec![("▁Hey", (0, 6)), ("▁friend!", (6, 16))]
);
assert_eq!(
pretokenized
.get_splits(OffsetReferential::Original, OffsetType::Byte)
.into_iter()
.map(|(s, o, _)| (s, o))
.collect::<Vec<_>>(),
vec![("▁Hey", (0, 3)), ("▁friend!", (3, 11))]
);
}
#[test]
fn multiple_spaces() {
let pretok = Metaspace::new('▁', true);
let mut pretokenized = PreTokenizedString::from("Hey   friend!");
pretok.pre_tokenize(&mut pretokenized).unwrap();
assert_eq!(
pretokenized
.get_splits(OffsetReferential::Normalized, OffsetType::Byte)
.into_iter()
.map(|(s, o, _)| (s, o))
.collect::<Vec<_>>(),
vec![
("▁Hey", (0, 6)),
("▁", (6, 9)),
("▁", (9, 12)),
("▁friend!", (12, 22)),
]
);
assert_eq!(
pretokenized
.get_splits(OffsetReferential::Original, OffsetType::Byte)
.into_iter()
.map(|(s, o, _)| (s, o))
.collect::<Vec<_>>(),
vec![
("▁Hey", (0, 3)),
("▁", (3, 4)),
("▁", (4, 5)),
("▁friend!", (5, 13)),
]
);
}
#[test]
fn non_legacy_meta_space() {
assert_eq!(
Metaspace::new('▁', true),
Metaspace::new_with_prepend_scheme('▁', true, PrependScheme::Always)
);
let mut pretok = Metaspace::new('▁', true);
pretok.set_prepend_scheme(PrependScheme::Always);
assert_eq!(
pretok,
Metaspace::new_with_prepend_scheme('▁', true, PrependScheme::Always)
);
pretok.set_prepend_scheme(PrependScheme::Never);
assert_eq!(
pretok,
Metaspace::new_with_prepend_scheme('▁', true, PrependScheme::Never)
);
pretok.set_prepend_scheme(PrependScheme::First);
assert_eq!(
pretok,
Metaspace::new_with_prepend_scheme('▁', true, PrependScheme::First)
);
let mut pretokenized = PreTokenizedString::from("Hey my friend <s>how▁are you");
let re_ref = Regex::new(r"(<s>)").unwrap();
pretokenized
.split(|_, sequence| sequence.split(&re_ref, SplitDelimiterBehavior::Isolated))
.expect("Bad split");
pretok.pre_tokenize(&mut pretokenized).unwrap();
assert_eq!(
pretokenized
.get_splits(OffsetReferential::Normalized, OffsetType::Byte)
.into_iter()
.map(|(s, o, _)| (s, o))
.collect::<Vec<_>>(),
vec![
("▁Hey", (0, 6)),
("▁my", (6, 11)),
("▁friend", (11, 20)),
("▁", (20, 23)),
("<s>", (23, 26)),
("how", (26, 29)),
("▁are", (29, 35)),
("▁you", (35, 41))
]
);
pretok.set_prepend_scheme(PrependScheme::Always);
pretok.pre_tokenize(&mut pretokenized).unwrap();
assert_eq!(
pretokenized
.get_splits(OffsetReferential::Normalized, OffsetType::Byte)
.into_iter()
.map(|(s, o, _)| (s, o))
.collect::<Vec<_>>(),
vec![
("▁Hey", (0, 6)),
("▁my", (6, 11)),
("▁friend", (11, 20)),
("▁", (20, 23)),
("▁<s>", (23, 29)),
("▁how", (29, 35)),
("▁are", (35, 41)),
("▁you", (41, 47))
]
);
pretok.set_prepend_scheme(PrependScheme::First);
let mut pretokenized = PreTokenizedString::from(" Hey <s>how"); // test with prefix
pretokenized
.split(|_, sequence| sequence.split(&re_ref, SplitDelimiterBehavior::Isolated))
.expect("Bad split");
pretok.pre_tokenize(&mut pretokenized).unwrap();
assert_eq!(
pretokenized
.get_splits(OffsetReferential::Normalized, OffsetType::Byte)
.into_iter()
.map(|(s, o, _)| (s, o))
.collect::<Vec<_>>(),
vec![
("▁Hey", (0, 6)),
("▁", (6, 9)),
("<s>", (9, 12)),
("how", (12, 15))
]
);
let mut pretokenized = PreTokenizedString::from(" Hey <s>how <s>are <s> you"); // test with many splits
pretokenized
.split(|_, sequence| sequence.split(&re_ref, SplitDelimiterBehavior::Isolated))
.expect("Bad split");
pretok.pre_tokenize(&mut pretokenized).unwrap();
assert_eq!(
pretokenized
.get_splits(OffsetReferential::Normalized, OffsetType::Byte)
.into_iter()
.map(|(s, o, _)| (s, o))
.collect::<Vec<_>>(),
vec![
("▁Hey", (0, 6)),
("▁", (6, 9)),
("<s>", (9, 12)),
("how", (12, 15)),
("▁", (15, 18)),
("<s>", (18, 21)),
("are", (21, 24)),
("▁", (24, 27)),
("<s>", (27, 30)),
("▁you", (30, 36))
]
);
}
#[test]
fn decode() {
let decoder = Metaspace::new('▁', true);
let res = decoder
.decode_chain(vec!["▁Hey".into(), "▁friend!".into()])
.unwrap();
assert_eq!(res, vec!["Hey", " friend!"])
}
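// Illustrative sketch, not part of the upstream test suite: with
// `add_prefix_space = false` the leading replacement character of the first
// token is decoded back to a regular space instead of being stripped.
#[test]
fn decode_without_prefix_space() {
let decoder = Metaspace::new('▁', false);
let res = decoder
.decode_chain(vec!["▁Hey".into(), "▁friend!".into()])
.unwrap();
assert_eq!(res, vec![" Hey", " friend!"])
}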
}
| 0 |
hf_public_repos/tokenizers/tokenizers/src | hf_public_repos/tokenizers/tokenizers/src/pre_tokenizers/byte_level.rs | use std::collections::{HashMap, HashSet};
use crate::utils::SysRegex;
use serde::{Deserialize, Serialize};
use crate::tokenizer::{
Decoder, Encoding, PostProcessor, PreTokenizedString, PreTokenizer, Result,
SplitDelimiterBehavior,
};
use crate::utils::macro_rules_attribute;
fn bytes_char() -> HashMap<u8, char> {
let mut bs: Vec<u8> = vec![];
bs.extend(b'!'..=b'~');
bs.extend(b'\xA1'..=b'\xAC');
bs.extend(b'\xAE'..=b'\xFF');
let mut cs: Vec<u32> = bs.iter().map(|i| *i as u32).collect();
let mut n = 0;
for b in 0..=255u8 {
if !bs.contains(&b) {
bs.push(b);
cs.push(u32::pow(2, 8) + n);
n += 1;
}
}
bs.into_iter()
.zip(cs)
.map(|(f, t)| (f, unsafe { std::char::from_u32_unchecked(t) }))
.collect()
}
lazy_static! {
static ref RE: SysRegex = SysRegex::new(
r"'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+"
)
.unwrap();
static ref BYTES_CHAR: HashMap<u8, char> = bytes_char();
static ref CHAR_BYTES: HashMap<char, u8> =
bytes_char().into_iter().map(|(b, c)| (c, b)).collect();
}
#[derive(Copy, Clone, Debug, PartialEq, Eq)]
/// Provides all the necessary steps to handle the BPE tokenization at the byte-level. Takes care
/// of all the required processing steps to transform a UTF-8 string as needed before and after the
/// BPE model does its job.
#[macro_rules_attribute(impl_serde_type!)]
#[non_exhaustive]
pub struct ByteLevel {
/// Whether to add a leading space to the first word. This allows the leading word to be
/// treated just like any other word.
pub add_prefix_space: bool,
/// Whether the post-processing step should trim offsets to avoid including whitespace.
pub trim_offsets: bool,
/// Whether to use the standard GPT2 regex for whitespace splitting.
/// Set it to `false` if you want to use your own splitting.
#[serde(default = "default_true")]
pub use_regex: bool,
}
fn default_true() -> bool {
true
}
impl Default for ByteLevel {
fn default() -> Self {
Self {
add_prefix_space: true,
trim_offsets: true,
use_regex: true,
}
}
}
impl ByteLevel {
pub fn new(add_prefix_space: bool, trim_offsets: bool, use_regex: bool) -> Self {
Self {
add_prefix_space,
trim_offsets,
use_regex,
}
}
pub fn alphabet() -> HashSet<char> {
BYTES_CHAR.values().copied().collect()
}
#[must_use]
pub fn add_prefix_space(mut self, v: bool) -> Self {
self.add_prefix_space = v;
self
}
#[must_use]
pub fn trim_offsets(mut self, v: bool) -> Self {
self.trim_offsets = v;
self
}
#[must_use]
pub fn use_regex(mut self, v: bool) -> Self {
self.use_regex = v;
self
}
}
/// As a `PreTokenizer`, `ByteLevel` is in charge of transforming all the unicode characters into
/// their byte-level counterpart. It also splits the input according to the configured regex.
// TODO: Give the ability to modify this regex
impl PreTokenizer for ByteLevel {
fn pre_tokenize(&self, pretokenized: &mut PreTokenizedString) -> Result<()> {
let re_ref: &SysRegex = &RE;
pretokenized.split(|_, mut normalized| {
if self.add_prefix_space && !normalized.get().starts_with(' ') {
normalized.prepend(" ");
}
if self.use_regex {
normalized.split(re_ref, SplitDelimiterBehavior::Isolated)
} else {
Ok(vec![normalized])
}
})?;
pretokenized.normalize(|normalized| {
let s = normalized.get();
let mut transformations: Vec<(char, isize)> = Vec::with_capacity(s.len());
let mut i = 0;
for cur_char in s.chars() {
let size = cur_char.len_utf8();
let bytes = s[i..i + size].as_bytes();
i += size;
transformations.extend(
bytes
.iter()
.enumerate()
.map(|(i, b)| (BYTES_CHAR[b], isize::from(i > 0))),
);
}
normalized.transform(transformations, 0);
Ok(())
})
}
}
/// As a `Decoder`, `ByteLevel` is in charge of converting any byte-level characters to their
/// unicode counterpart, before merging everything back into a single String.
/// This decoder will consume the tokens and merge them in one step to alleviate
/// the fact that a single decoded token might be a byte sequence that is not
/// representable as a String.
impl Decoder for ByteLevel {
fn decode_chain(&self, tokens: Vec<String>) -> Result<Vec<String>> {
let toks = tokens
.into_iter()
.flat_map(|t| {
t.chars()
.try_fold(vec![], |mut acc, c| {
CHAR_BYTES.get(&c).map(|b| {
acc.push(*b);
acc
})
})
.unwrap_or_else(|| t.as_bytes().to_vec())
})
.collect::<Vec<u8>>();
Ok(vec![String::from_utf8_lossy(&toks).to_string()])
}
}
/// As a `PostProcessor`, `ByteLevel` is in charge of trimming the offsets if necessary.
impl PostProcessor for ByteLevel {
fn added_tokens(&self, _is_pair: bool) -> usize {
0
}
fn process_encodings(
&self,
mut encodings: Vec<Encoding>,
_add_special_tokens: bool,
) -> Result<Vec<Encoding>> {
if self.trim_offsets {
for encoding in encodings.iter_mut() {
process_offsets(encoding, self.add_prefix_space);
encoding
.get_overflowing_mut()
.iter_mut()
.for_each(|encoding| process_offsets(encoding, self.add_prefix_space));
}
}
for (i, encoding) in encodings.iter_mut().enumerate() {
encoding.set_sequence_id(i);
}
Ok(encodings)
//<dyn PostProcessor>::default_process(encodings, add_special_tokens)
}
}
pub fn process_offsets(encoding: &mut Encoding, add_prefix_space: bool) {
encoding.process_tokens_with_offsets_mut(|(i, (token, offsets))| {
let mut leading_spaces = token
.chars()
.take_while(|c| *c == BYTES_CHAR[&b' '] || c.is_whitespace())
.count();
let trailing_spaces = token
.chars()
.rev()
.take_while(|c| *c == BYTES_CHAR[&b' '] || c.is_whitespace())
.count();
if leading_spaces > 0 || trailing_spaces > 0 {
if leading_spaces > 0 {
// If user uses `is_pretokenized=True` we might have
// offsets that might begin at the start of the string but are
// NOT the first token.
let is_first = i == 0 || offsets.0 == 0;
if is_first && add_prefix_space && leading_spaces == 1 {
// If we are processing the first pair of offsets, with `add_prefix_space`,
// then we shouldn't remove anything we added. If there are more than one
// leading spaces though, it means we didn't add them, and they should be
// removed.
leading_spaces = 0;
}
offsets.0 = std::cmp::min(offsets.0 + leading_spaces, offsets.1);
}
if trailing_spaces > 0 && offsets.1 >= trailing_spaces {
offsets.1 = std::cmp::max(offsets.1 - trailing_spaces, offsets.0);
}
}
});
}
#[cfg(test)]
mod tests {
use super::*;
use crate::tokenizer::{
Decoder, Encoding, OffsetReferential, OffsetType, PostProcessor, PreTokenizedString,
PreTokenizer,
};
use std::iter::FromIterator;
#[test]
fn pre_tokenization() {
let bytelevel = ByteLevel::default().add_prefix_space(false);
let mut pretokenized: PreTokenizedString = "Hello my friend, how is your day going?".into();
bytelevel.pre_tokenize(&mut pretokenized).unwrap();
assert_eq!(
pretokenized
.get_splits(OffsetReferential::Original, OffsetType::Byte)
.into_iter()
.map(|(s, o, _)| (s, o))
.collect::<Vec<_>>(),
vec![
("Hello", (0, 5)),
("Ġmy", (5, 8)),
("Ġfriend", (8, 15)),
(",", (15, 16)),
("Ġhow", (16, 20)),
("Ġis", (20, 23)),
("Ġyour", (23, 28)),
("Ġday", (28, 32)),
("Ġgoing", (32, 38)),
("?", (38, 39))
]
);
}
#[test]
fn pre_tokenization_no_regex() {
let bytelevel = ByteLevel::default().use_regex(false);
let mut pretokenized: PreTokenizedString = "Hello my friend, how is your day going?".into();
bytelevel.pre_tokenize(&mut pretokenized).unwrap();
assert_eq!(
pretokenized
.get_splits(OffsetReferential::Original, OffsetType::Byte)
.into_iter()
.map(|(s, o, _)| (s, o))
.collect::<Vec<_>>(),
vec![("ĠHelloĠmyĠfriend,ĠhowĠisĠyourĠdayĠgoing?", (0, 39))]
);
}
#[test]
fn decoding() {
let bytelevel = ByteLevel::default().add_prefix_space(false);
assert_eq!(
bytelevel
.decode_chain(
vec![
"Hello", "Ġmy", "Ġfriend", ",", "Ġhow", "Ġis", "Ġyour", "Ġday", "Ġgoing",
"?"
]
.into_iter()
.map(|s| s.into())
.collect::<Vec<String>>()
)
.unwrap(),
vec!["Hello my friend, how is your day going?"]
);
}
#[test]
fn add_prefix_space() {
let bytelevel = ByteLevel::default().add_prefix_space(true);
for s in &[
" Hello my friend, how is your day going?",
"Hello my friend, how is your day going?",
] {
let mut pretokenized = PreTokenizedString::from(*s);
bytelevel.pre_tokenize(&mut pretokenized).unwrap();
assert_eq!(
pretokenized
.get_splits(OffsetReferential::Normalized, OffsetType::Byte)
.into_iter()
.map(|(s, o, _)| (s, o))
.collect::<Vec<_>>(),
vec![
("ĠHello", (0, 7)),
("Ġmy", (7, 11)),
("Ġfriend", (11, 19)),
(",", (19, 20)),
("Ġhow", (20, 25)),
("Ġis", (25, 29)),
("Ġyour", (29, 35)),
("Ġday", (35, 40)),
("Ġgoing", (40, 47)),
("?", (47, 48))
]
);
}
}
#[test]
fn decode_works_on_separated_tokens() {
let samples = vec![
"A Nuskhuri abbreviation of იესუ ქრისტე ( iesu kriste ) \" Jesus Christ \"",
"An equal number have descenders , like p or q in English \
: გ , დ , ე , ვ , კ , ლ , ჟ , ტ , უ , ფ , ღ , ყ , ც",
];
let bytelevel = ByteLevel::default().add_prefix_space(false);
for sample in samples {
let mut pretokenized = PreTokenizedString::from(sample);
bytelevel.pre_tokenize(&mut pretokenized).unwrap();
let separated_tokens = pretokenized
.get_splits(OffsetReferential::Original, OffsetType::Byte)
.iter()
.flat_map(|(s, _, _)| s.split("").map(|t| t.into()))
.collect::<Vec<_>>();
assert_eq!(
sample,
bytelevel.decode_chain(separated_tokens).unwrap().join("")
);
}
}
#[test]
fn handling_of_newlines() {
let mut pretokenized = PreTokenizedString::from("Hello there\nHello there");
let bytelevel = ByteLevel::default().add_prefix_space(false);
bytelevel.pre_tokenize(&mut pretokenized).unwrap();
assert_eq!(
pretokenized
.get_splits(OffsetReferential::Original, OffsetType::Byte)
.into_iter()
.map(|(s, o, _)| (s, o))
.collect::<Vec<_>>(),
vec![
("Hello", (0, 5)),
("Ġthere", (5, 11)),
("Ċ", (11, 12)),
("Hello", (12, 17)),
("Ġthere", (17, 23))
]
);
}
#[test]
fn handling_of_multiple_whitespaces() {
let mut pretokenized = PreTokenizedString::from("Hello there       dear");
let bytelevel = ByteLevel::default().add_prefix_space(false);
bytelevel.pre_tokenize(&mut pretokenized).unwrap();
assert_eq!(
pretokenized
.get_splits(OffsetReferential::Original, OffsetType::Byte)
.into_iter()
.map(|(s, o, _)| (s, o))
.collect::<Vec<_>>(),
vec![
("Hello", (0, 5)),
("Ġthere", (5, 11)),
("ĠĠĠĠĠĠ", (11, 17)),
("Ġdear", (17, 22))
]
);
}
#[test]
fn offsets_when_char_split_up() {
let input = "i⭢j";
let mut pretokenized = PreTokenizedString::from(input);
let bytelevel = ByteLevel::default().add_prefix_space(false);
bytelevel.pre_tokenize(&mut pretokenized).unwrap();
assert_eq!(
pretokenized
.get_splits(OffsetReferential::Original, OffsetType::Byte)
.into_iter()
.map(|(s, o, _)| (s, o))
.collect::<Vec<_>>(),
vec![("i", (0, 1)), ("âŃ¢", (1, 4)), ("j", (4, 5))]
);
assert_eq!(
pretokenized
.get_splits(OffsetReferential::Normalized, OffsetType::Byte)
.into_iter()
.map(|(s, o, _)| (s, o))
.collect::<Vec<_>>(),
vec![("i", (0, 1)), ("âŃ¢", (1, 7)), ("j", (7, 8))]
);
assert_eq!(
pretokenized
.get_splits(OffsetReferential::Original, OffsetType::Byte)
.into_iter()
.map(|(_, o, _)| &input[o.0..o.1])
.collect::<Vec<_>>(),
vec!["i", "⭢", "j"]
);
}
#[test]
fn processor_trims_offsets_pre_tokenized() {
// If user uses `is_pretokenized=True` we might have
// offsets that might begin at the start of the string but are
// NOT the first token.
let mut encoding = Encoding::new(
vec![0; 5],
vec![],
vec!["Ġl".into(), "ove".into(), "Ġl".into(), "ove".into()],
vec![],
vec![(0, 1), (1, 4), (0, 1), (1, 4)],
vec![],
vec![],
vec![],
HashMap::new(),
);
process_offsets(&mut encoding, true);
assert_eq!(
encoding,
Encoding::new(
vec![0; 5],
vec![],
vec!["Ġl".into(), "ove".into(), "Ġl".into(), "ove".into()],
vec![],
vec![(0, 1), (1, 4), (0, 1), (1, 4)],
vec![],
vec![],
vec![],
HashMap::new(),
)
);
}
#[test]
fn processor_trims_offsets() {
let start = Encoding::new(
vec![0; 5],
vec![],
vec![
"Ġ".into(),
"ĠĠĠĠHelloĠĠ".into(),
"ĠĠHello".into(),
"HelloĠĠ".into(),
"ĠĠĠĠ".into(),
],
vec![],
vec![(0, 1), (0, 11), (11, 18), (18, 25), (25, 29)],
vec![],
vec![],
vec![],
HashMap::new(),
);
let expected = Encoding::new(
vec![0; 5],
vec![0; 5],
vec![
"Ġ".into(),
"ĠĠĠĠHelloĠĠ".into(),
"ĠĠHello".into(),
"HelloĠĠ".into(),
"ĠĠĠĠ".into(),
],
vec![],
vec![(0, 0), (4, 9), (13, 18), (18, 23), (29, 29)],
vec![],
vec![],
vec![],
HashMap::from_iter(vec![(0, 0..5)]),
);
let bytelevel = ByteLevel::default().trim_offsets(true);
assert_eq!(
expected,
bytelevel.process(start.clone(), None, false).unwrap()
);
let pair_expected = Encoding::new(
vec![0; 10],
vec![0, 0, 0, 0, 0, 1, 1, 1, 1, 1],
vec![
"Ġ".into(),
"ĠĠĠĠHelloĠĠ".into(),
"ĠĠHello".into(),
"HelloĠĠ".into(),
"ĠĠĠĠ".into(),
"Ġ".into(),
"ĠĠĠĠHelloĠĠ".into(),
"ĠĠHello".into(),
"HelloĠĠ".into(),
"ĠĠĠĠ".into(),
],
vec![],
vec![
(0, 0),
(4, 9),
(13, 18),
(18, 23),
(29, 29),
(0, 0),
(4, 9),
(13, 18),
(18, 23),
(29, 29),
],
vec![],
vec![],
vec![],
HashMap::from_iter(vec![(0, 0..5), (1, 5..10)]),
);
assert_eq!(
pair_expected,
bytelevel
.process(start.clone(), Some(start), false)
.unwrap()
);
}
#[test]
fn decode_unknown_characters() {
let byte_level = ByteLevel::default();
assert_eq!(
byte_level
.decode_chain(vec![
"Hello".into(),
"Ġthere".into(),
"Ġdear".into(),
"Ġfriend!".into(),
"Ġ".into(),
"[PA D]".into()
])
.unwrap(),
vec!["Hello there dear friend! [PA D]"]
);
}
#[test]
fn deserialization() {
// Before use_regex
let byte_level: ByteLevel = serde_json::from_str(
r#"{"type": "ByteLevel", "add_prefix_space": true, "trim_offsets": false}"#,
)
.unwrap();
assert!(byte_level.use_regex);
// Loading works, new future BC test.
let byte_level: ByteLevel = serde_json::from_str(
r#"{"type": "ByteLevel", "add_prefix_space": true, "trim_offsets": false, "use_regex": true}"#,
)
.unwrap();
assert!(byte_level.use_regex);
let byte_level: ByteLevel = serde_json::from_str(
r#"{"type": "ByteLevel", "add_prefix_space": true, "trim_offsets": false, "use_regex": false}"#,
)
.unwrap();
assert!(!byte_level.use_regex);
}
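// Illustrative sketch, not part of the upstream test suite: `bytes_char`
// maps every possible byte to a distinct printable character, so the
// byte-level alphabet always contains exactly 256 entries.
#[test]
fn alphabet_covers_all_bytes() {
assert_eq!(ByteLevel::alphabet().len(), 256);
}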
}
| 0 |
hf_public_repos/tokenizers/tokenizers/src | hf_public_repos/tokenizers/tokenizers/src/pre_tokenizers/mod.rs | pub mod bert;
pub mod byte_level;
pub mod delimiter;
pub mod digits;
pub mod metaspace;
pub mod punctuation;
pub mod sequence;
pub mod split;
pub mod unicode_scripts;
pub mod whitespace;
use serde::{Deserialize, Serialize};
use crate::pre_tokenizers::bert::BertPreTokenizer;
use crate::pre_tokenizers::byte_level::ByteLevel;
use crate::pre_tokenizers::delimiter::CharDelimiterSplit;
use crate::pre_tokenizers::digits::Digits;
use crate::pre_tokenizers::metaspace::Metaspace;
use crate::pre_tokenizers::punctuation::Punctuation;
use crate::pre_tokenizers::sequence::Sequence;
use crate::pre_tokenizers::split::Split;
use crate::pre_tokenizers::unicode_scripts::UnicodeScripts;
use crate::pre_tokenizers::whitespace::{Whitespace, WhitespaceSplit};
use crate::{PreTokenizedString, PreTokenizer};
#[derive(Deserialize, Serialize, Clone, Debug, PartialEq)]
#[serde(untagged)]
pub enum PreTokenizerWrapper {
BertPreTokenizer(BertPreTokenizer),
ByteLevel(ByteLevel),
Delimiter(CharDelimiterSplit),
Metaspace(Metaspace),
Whitespace(Whitespace),
Sequence(Sequence),
Split(Split),
Punctuation(Punctuation),
WhitespaceSplit(WhitespaceSplit),
Digits(Digits),
UnicodeScripts(UnicodeScripts),
}
impl PreTokenizer for PreTokenizerWrapper {
fn pre_tokenize(&self, normalized: &mut PreTokenizedString) -> crate::Result<()> {
match self {
Self::BertPreTokenizer(bpt) => bpt.pre_tokenize(normalized),
Self::ByteLevel(bpt) => bpt.pre_tokenize(normalized),
Self::Delimiter(dpt) => dpt.pre_tokenize(normalized),
Self::Metaspace(mspt) => mspt.pre_tokenize(normalized),
Self::Whitespace(wspt) => wspt.pre_tokenize(normalized),
Self::Punctuation(tok) => tok.pre_tokenize(normalized),
Self::Sequence(tok) => tok.pre_tokenize(normalized),
Self::Split(tok) => tok.pre_tokenize(normalized),
Self::WhitespaceSplit(wspt) => wspt.pre_tokenize(normalized),
Self::Digits(wspt) => wspt.pre_tokenize(normalized),
Self::UnicodeScripts(us) => us.pre_tokenize(normalized),
}
}
}
impl_enum_from!(BertPreTokenizer, PreTokenizerWrapper, BertPreTokenizer);
impl_enum_from!(ByteLevel, PreTokenizerWrapper, ByteLevel);
impl_enum_from!(CharDelimiterSplit, PreTokenizerWrapper, Delimiter);
impl_enum_from!(Whitespace, PreTokenizerWrapper, Whitespace);
impl_enum_from!(Punctuation, PreTokenizerWrapper, Punctuation);
impl_enum_from!(Sequence, PreTokenizerWrapper, Sequence);
impl_enum_from!(Split, PreTokenizerWrapper, Split);
impl_enum_from!(Metaspace, PreTokenizerWrapper, Metaspace);
impl_enum_from!(WhitespaceSplit, PreTokenizerWrapper, WhitespaceSplit);
impl_enum_from!(Digits, PreTokenizerWrapper, Digits);
impl_enum_from!(UnicodeScripts, PreTokenizerWrapper, UnicodeScripts);
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_deserialize() {
let pre_tokenizer: PreTokenizerWrapper = serde_json::from_str(r#"{"type":"Sequence","pretokenizers":[{"type":"WhitespaceSplit"},{"type":"Metaspace","replacement":"▁","str_rep":"▁","add_prefix_space":true}]}"#).unwrap();
assert_eq!(
pre_tokenizer,
PreTokenizerWrapper::Sequence(Sequence::new(vec![
PreTokenizerWrapper::WhitespaceSplit(WhitespaceSplit {}),
PreTokenizerWrapper::Metaspace(Metaspace::new('▁', true))
]))
);
let pre_tokenizer: PreTokenizerWrapper = serde_json::from_str(
r#"{"type":"Metaspace","replacement":"▁","add_prefix_space":true}"#,
)
.unwrap();
assert_eq!(
pre_tokenizer,
PreTokenizerWrapper::Metaspace(Metaspace::new('▁', true))
);
let pre_tokenizer: PreTokenizerWrapper = serde_json::from_str(r#"{"type":"Sequence","pretokenizers":[{"type":"WhitespaceSplit"},{"type":"Metaspace","replacement":"▁","add_prefix_space":true}]}"#).unwrap();
assert_eq!(
pre_tokenizer,
PreTokenizerWrapper::Sequence(Sequence::new(vec![
PreTokenizerWrapper::WhitespaceSplit(WhitespaceSplit {}),
PreTokenizerWrapper::Metaspace(Metaspace::new('▁', true))
]))
);
let pre_tokenizer: PreTokenizerWrapper = serde_json::from_str(
r#"{"type":"Metaspace","replacement":"▁","add_prefix_space":true, "prepend_scheme":"first"}"#,
)
.unwrap();
assert_eq!(
pre_tokenizer,
PreTokenizerWrapper::Metaspace(Metaspace::new_with_prepend_scheme(
'▁',
true,
metaspace::PrependScheme::First
))
);
let pre_tokenizer: PreTokenizerWrapper = serde_json::from_str(
r#"{"type":"Metaspace","replacement":"▁","add_prefix_space":true, "prepend_scheme":"always"}"#,
)
.unwrap();
assert_eq!(
pre_tokenizer,
PreTokenizerWrapper::Metaspace(Metaspace::new_with_prepend_scheme(
'▁',
true,
metaspace::PrependScheme::Always
))
);
}
#[test]
fn test_deserialize_whitespace_split() {
let pre_tokenizer: PreTokenizerWrapper =
serde_json::from_str(r#"{"type":"WhitespaceSplit"}"#).unwrap();
assert_eq!(
pre_tokenizer,
PreTokenizerWrapper::WhitespaceSplit(WhitespaceSplit {})
);
}
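// Illustrative sketch, not part of the upstream test suite: the
// `impl_enum_from!` invocations above give every concrete pre-tokenizer a
// `From` impl into the wrapper enum, so `.into()` wraps it directly.
#[test]
fn test_wrapper_from_impl() {
let wrapped: PreTokenizerWrapper = Whitespace {}.into();
assert_eq!(wrapped, PreTokenizerWrapper::Whitespace(Whitespace {}));
}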
}
| 0 |
hf_public_repos/tokenizers/tokenizers/src | hf_public_repos/tokenizers/tokenizers/src/pre_tokenizers/whitespace.rs | use regex::Regex;
use crate::tokenizer::{
pattern::Invert, PreTokenizedString, PreTokenizer, Result, SplitDelimiterBehavior,
};
use crate::utils::macro_rules_attribute;
#[derive(Clone, Debug, PartialEq, Eq)]
#[macro_rules_attribute(impl_serde_type!)]
pub struct Whitespace;
impl Default for Whitespace {
fn default() -> Self {
Self
}
}
impl PreTokenizer for Whitespace {
fn pre_tokenize(&self, pretokenized: &mut PreTokenizedString) -> Result<()> {
lazy_static! {
static ref RE: Regex = Regex::new(r"\w+|[^\w\s]+").unwrap();
}
let re_ref: &Regex = &RE;
pretokenized.split(|_, normalized| {
normalized.split(Invert(re_ref), SplitDelimiterBehavior::Removed)
})
}
}
#[derive(Copy, Clone, Debug, PartialEq, Eq)]
#[macro_rules_attribute(impl_serde_type!)]
pub struct WhitespaceSplit;
impl PreTokenizer for WhitespaceSplit {
fn pre_tokenize(&self, pretokenized: &mut PreTokenizedString) -> Result<()> {
pretokenized.split(|_, normalized| {
normalized.split(char::is_whitespace, SplitDelimiterBehavior::Removed)
})
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::{OffsetReferential, OffsetType, PreTokenizer};
#[test]
fn basic() {
let tests = vec![
(
"Hey man!",
vec![("Hey", (0, 3)), ("man", (4, 7)), ("!", (7, 8))],
),
(
"How are you doing?",
vec![
("How", (0, 3)),
("are", (4, 7)),
("you", (8, 11)),
("doing", (12, 17)),
("?", (17, 18)),
],
),
("\n", vec![]),
];
let pretok = Whitespace {};
for (s, res) in tests {
let mut pretokenized = PreTokenizedString::from(s);
pretok.pre_tokenize(&mut pretokenized).unwrap();
assert_eq!(
pretokenized
.get_splits(OffsetReferential::Original, OffsetType::Byte)
.into_iter()
.map(|(s, o, _)| (s, o))
.collect::<Vec<_>>(),
res
);
}
}
#[test]
fn whitespace_split() {
let tests = vec![
("Hey man!", vec![("Hey", (0, 3)), ("man!", (4, 8))]),
(
"Hey, man, Good?",
vec![("Hey,", (0, 4)), ("man,", (5, 9)), ("Good?", (10, 15))],
),
];
let pretok = WhitespaceSplit;
for (s, res) in tests {
let mut pretokenized = PreTokenizedString::from(s);
pretok.pre_tokenize(&mut pretokenized).unwrap();
assert_eq!(
pretokenized
.get_splits(OffsetReferential::Original, OffsetType::Byte)
.into_iter()
.map(|(s, o, _)| (s, o))
.collect::<Vec<_>>(),
res
);
}
}
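// Illustrative sketch, not part of the upstream test suite: on the same
// input, `Whitespace` isolates trailing punctuation while `WhitespaceSplit`
// keeps it attached to the preceding word.
#[test]
fn whitespace_vs_whitespace_split() {
let mut a = PreTokenizedString::from("Hey man!");
Whitespace {}.pre_tokenize(&mut a).unwrap();
assert_eq!(
a.get_splits(OffsetReferential::Original, OffsetType::Byte)
.into_iter()
.map(|(s, _, _)| s)
.collect::<Vec<_>>(),
vec!["Hey", "man", "!"]
);
let mut b = PreTokenizedString::from("Hey man!");
WhitespaceSplit.pre_tokenize(&mut b).unwrap();
assert_eq!(
b.get_splits(OffsetReferential::Original, OffsetType::Byte)
.into_iter()
.map(|(s, _, _)| s)
.collect::<Vec<_>>(),
vec!["Hey", "man!"]
);
}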
}
| 0 |
hf_public_repos/tokenizers/tokenizers/src | hf_public_repos/tokenizers/tokenizers/src/pre_tokenizers/delimiter.rs | use serde::{Deserialize, Serialize};
use crate::tokenizer::{PreTokenizedString, PreTokenizer, Result, SplitDelimiterBehavior};
use crate::utils::macro_rules_attribute;
#[derive(Copy, Clone, Debug, PartialEq, Eq)]
#[non_exhaustive]
#[macro_rules_attribute(impl_serde_type!)]
pub struct CharDelimiterSplit {
pub delimiter: char,
}
impl CharDelimiterSplit {
pub fn new(delimiter: char) -> Self {
Self { delimiter }
}
}
impl PreTokenizer for CharDelimiterSplit {
fn pre_tokenize(&self, pretokenized: &mut PreTokenizedString) -> Result<()> {
// TODO: Maybe add the option to specify the behavior
pretokenized.split(|_, normalized| {
normalized.split(self.delimiter, SplitDelimiterBehavior::Removed)
})
}
}
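// The upstream file ships without tests; the module below is an illustrative
// sketch only, showing that the delimiter character is removed from the
// resulting splits.
#[cfg(test)]
mod tests {
use super::*;
use crate::{OffsetReferential, OffsetType};
#[test]
fn char_delimiter_split() {
let pretok = CharDelimiterSplit::new('-');
let mut pretokenized = PreTokenizedString::from("Hey-friend");
pretok.pre_tokenize(&mut pretokenized).unwrap();
assert_eq!(
pretokenized
.get_splits(OffsetReferential::Original, OffsetType::Byte)
.into_iter()
.map(|(s, o, _)| (s, o))
.collect::<Vec<_>>(),
vec![("Hey", (0, 3)), ("friend", (4, 10))]
);
}
}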
| 0 |
hf_public_repos/tokenizers/tokenizers/src | hf_public_repos/tokenizers/tokenizers/src/pre_tokenizers/digits.rs | use serde::{Deserialize, Serialize};
use crate::tokenizer::{PreTokenizedString, PreTokenizer, Result, SplitDelimiterBehavior};
use crate::utils::macro_rules_attribute;
#[derive(Clone, Debug, PartialEq, Eq)]
/// Pre-tokenizes numbers into single tokens. If `individual_digits` is set
/// to true, then all digits are split into individual tokens.
#[non_exhaustive]
#[macro_rules_attribute(impl_serde_type!)]
pub struct Digits {
pub individual_digits: bool,
}
impl Digits {
pub fn new(individual_digits: bool) -> Self {
Self { individual_digits }
}
}
impl Default for Digits {
fn default() -> Self {
Self::new(false)
}
}
impl PreTokenizer for Digits {
fn pre_tokenize(&self, pretokenized: &mut PreTokenizedString) -> Result<()> {
if self.individual_digits {
pretokenized.split(|_, normalized| {
normalized.split(char::is_numeric, SplitDelimiterBehavior::Isolated)
})
} else {
pretokenized.split(|_, normalized| {
normalized.split(char::is_numeric, SplitDelimiterBehavior::Contiguous)
})
}
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::{OffsetReferential, OffsetType};
#[test]
fn numbers() {
let pretok = Digits::new(false);
let mut pretokenized = PreTokenizedString::from("Hey 123 friend!");
pretok.pre_tokenize(&mut pretokenized).unwrap();
assert_eq!(
pretokenized
.get_splits(OffsetReferential::Normalized, OffsetType::Byte)
.into_iter()
.map(|(s, o, _)| (s, o))
.collect::<Vec<_>>(),
vec![("Hey ", (0, 4)), ("123", (4, 7)), (" friend!", (7, 15))]
);
assert_eq!(
pretokenized
.get_splits(OffsetReferential::Original, OffsetType::Byte)
.into_iter()
.map(|(s, o, _)| (s, o))
.collect::<Vec<_>>(),
vec![("Hey ", (0, 4)), ("123", (4, 7)), (" friend!", (7, 15))]
);
}
#[test]
fn individual_digits() {
let pretok = Digits::new(true);
let mut pretokenized = PreTokenizedString::from("Hey 123 friend!");
pretok.pre_tokenize(&mut pretokenized).unwrap();
assert_eq!(
pretokenized
.get_splits(OffsetReferential::Normalized, OffsetType::Byte)
.into_iter()
.map(|(s, o, _)| (s, o))
.collect::<Vec<_>>(),
vec![
("Hey ", (0, 4)),
("1", (4, 5)),
("2", (5, 6)),
("3", (6, 7)),
(" friend!", (7, 15))
]
);
assert_eq!(
pretokenized
.get_splits(OffsetReferential::Original, OffsetType::Byte)
.into_iter()
.map(|(s, o, _)| (s, o))
.collect::<Vec<_>>(),
vec![
("Hey ", (0, 4)),
("1", (4, 5)),
("2", (5, 6)),
("3", (6, 7)),
(" friend!", (7, 15))
]
);
}
}
| 0 |
hf_public_repos/tokenizers/tokenizers/src | hf_public_repos/tokenizers/tokenizers/src/pre_tokenizers/bert.rs | use crate::tokenizer::{PreTokenizedString, PreTokenizer, Result, SplitDelimiterBehavior};
use crate::utils::macro_rules_attribute;
use unicode_categories::UnicodeCategories;
fn is_bert_punc(x: char) -> bool {
char::is_ascii_punctuation(&x) || x.is_punctuation()
}
#[derive(Copy, Clone, Debug, PartialEq, Eq)]
#[macro_rules_attribute(impl_serde_type!)]
pub struct BertPreTokenizer;
impl PreTokenizer for BertPreTokenizer {
fn pre_tokenize(&self, pretokenized: &mut PreTokenizedString) -> Result<()> {
pretokenized.split(|_, s| s.split(char::is_whitespace, SplitDelimiterBehavior::Removed))?;
pretokenized.split(|_, s| s.split(is_bert_punc, SplitDelimiterBehavior::Isolated))
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::{NormalizedString, OffsetReferential, OffsetType};
#[test]
fn basic() {
let pretok = BertPreTokenizer;
let mut pretokenized: PreTokenizedString = "Hey friend!     How are you?!?".into();
pretok.pre_tokenize(&mut pretokenized).unwrap();
assert_eq!(
pretokenized
.get_splits(OffsetReferential::Original, OffsetType::Byte)
.into_iter()
.map(|(s, o, _)| (s, o))
.collect::<Vec<_>>(),
vec![
("Hey", (0, 3)),
("friend", (4, 10)),
("!", (10, 11)),
("How", (16, 19)),
("are", (20, 23)),
("you", (24, 27)),
("?", (27, 28)),
("!", (28, 29)),
("?", (29, 30)),
]
);
}
#[test]
fn chinese_chars() {
let mut n = NormalizedString::from("野口里佳 Noguchi Rika");
n.transform(
n.get().to_owned().chars().flat_map(|c| {
if (c as usize) > 0x4E00 {
vec![(' ', 0), (c, 1), (' ', 1)]
} else {
vec![(c, 0)]
}
}),
0,
);
let mut pretokenized = n.into();
let pretok = BertPreTokenizer;
pretok.pre_tokenize(&mut pretokenized).unwrap();
assert_eq!(
pretokenized
.get_splits(OffsetReferential::Original, OffsetType::Byte)
.into_iter()
.map(|(s, o, _)| (s, o))
.collect::<Vec<_>>(),
vec![
("野", (0, 3)),
("口", (3, 6)),
("里", (6, 9)),
("佳", (9, 12)),
("Noguchi", (13, 20)),
("Rika", (21, 25))
]
);
}
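// Illustrative sketch, not part of the upstream test suite: `is_bert_punc`
// accepts both ASCII punctuation and general Unicode punctuation, but not
// letters or whitespace.
#[test]
fn punctuation_predicate() {
assert!(is_bert_punc('!'));
assert!(is_bert_punc('。'));
assert!(!is_bert_punc('a'));
assert!(!is_bert_punc(' '));
}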
}
| 0 |
hf_public_repos/tokenizers/tokenizers/src | hf_public_repos/tokenizers/tokenizers/src/pre_tokenizers/split.rs | use crate::utils::SysRegex;
use serde::{Deserialize, Deserializer, Serialize};
use crate::tokenizer::{
pattern::Invert, PreTokenizedString, PreTokenizer, Result, SplitDelimiterBehavior,
};
/// Represents the different patterns that `Split` can use
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize, Eq)]
pub enum SplitPattern {
String(String),
Regex(String),
}
impl From<String> for SplitPattern {
fn from(v: String) -> Self {
Self::String(v)
}
}
impl From<&str> for SplitPattern {
fn from(v: &str) -> Self {
Self::String(v.to_owned())
}
}
#[derive(Debug, Serialize)]
#[serde(tag = "type")]
pub struct Split {
pattern: SplitPattern,
#[serde(skip)]
regex: SysRegex,
behavior: SplitDelimiterBehavior,
invert: bool,
}
impl<'de> Deserialize<'de> for Split {
fn deserialize<D>(deserializer: D) -> std::result::Result<Self, D::Error>
where
D: Deserializer<'de>,
{
#[derive(Deserialize)]
enum Type {
Split,
}
#[derive(Deserialize)]
pub struct SplitHelper {
#[serde(rename = "type")]
_type: Type,
pattern: SplitPattern,
behavior: SplitDelimiterBehavior,
invert: bool,
}
let helper = SplitHelper::deserialize(deserializer)?;
Self::new(helper.pattern, helper.behavior, helper.invert).map_err(serde::de::Error::custom)
}
}
impl Clone for Split {
fn clone(&self) -> Self {
Self::new(self.pattern.clone(), self.behavior, self.invert).unwrap()
}
}
impl PartialEq for Split {
fn eq(&self, other: &Self) -> bool {
self.pattern == other.pattern
&& self.behavior == other.behavior
&& self.invert == other.invert
}
}
impl Split {
pub fn new<I: Into<SplitPattern>>(
pattern: I,
behavior: SplitDelimiterBehavior,
invert: bool,
) -> Result<Self> {
let pattern: SplitPattern = pattern.into();
let regex = match &pattern {
SplitPattern::String(s) => SysRegex::new(®ex::escape(s))?,
SplitPattern::Regex(r) => SysRegex::new(r)?,
};
Ok(Self {
pattern,
regex,
behavior,
invert,
})
}
}
impl PreTokenizer for Split {
fn pre_tokenize(&self, pretokenized: &mut PreTokenizedString) -> Result<()> {
if self.invert {
pretokenized.split(|_, normalized| normalized.split(Invert(&self.regex), self.behavior))
} else {
pretokenized.split(|_, normalized| normalized.split(&self.regex, self.behavior))
}
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::{OffsetReferential, OffsetType, PreTokenizer};
use SplitDelimiterBehavior::*;
#[test]
fn basic() {
let tests = vec![
(
Removed,
"How are you doing?",
vec![
("How", (0, 3)),
("are", (4, 7)),
("you", (8, 11)),
("doing", (12, 17)),
("?", (17, 18)),
],
),
(
Isolated,
"How are you doing?",
vec![
("How", (0, 3)),
(" ", (3, 4)),
("are", (4, 7)),
(" ", (7, 8)),
("you", (8, 11)),
(" ", (11, 12)),
("doing", (12, 17)),
("?", (17, 18)),
],
),
(
MergedWithPrevious,
"How are you doing?",
vec![
("How ", (0, 4)),
("are ", (4, 8)),
("you ", (8, 12)),
("doing", (12, 17)),
("?", (17, 18)),
],
),
(
MergedWithNext,
"How are you doing?",
vec![
("How", (0, 3)),
(" are", (3, 7)),
(" you", (7, 11)),
(" doing", (11, 17)),
("?", (17, 18)),
],
),
(
Contiguous,
"How are you doing?",
vec![
("How", (0, 3)),
(" ", (3, 4)),
("are", (4, 7)),
(" ", (7, 8)),
("you", (8, 11)),
(" ", (11, 12)),
("doing?", (12, 18)),
],
),
];
// use whitespace regex
let regex = SplitPattern::Regex(r"\w+|[^\w\s]+".into());
for (behavior, s, res) in tests {
let mut pretokenized = PreTokenizedString::from(s);
let pretok = Split::new(regex.clone(), behavior, true).unwrap();
pretok.pre_tokenize(&mut pretokenized).unwrap();
assert_eq!(
pretokenized
.get_splits(OffsetReferential::Original, OffsetType::Byte)
.into_iter()
.map(|(s, o, _)| (s, o))
.collect::<Vec<_>>(),
res
);
}
}
#[test]
fn regex_string() {
let mut pretok_str_for_regex = PreTokenizedString::from("Hey, man!");
let mut pretok_str_for_string = pretok_str_for_regex.clone();
// pre-tokenizer splits on " " - one from Regex, one from string
let pretokenizer_regex = Split::new(
SplitPattern::Regex(r"\s+".into()),
SplitDelimiterBehavior::Removed,
false,
)
.unwrap();
let pretokenizer_string = Split::new(" ", SplitDelimiterBehavior::Removed, false).unwrap();
pretokenizer_regex
.pre_tokenize(&mut pretok_str_for_regex)
.unwrap();
pretokenizer_string
.pre_tokenize(&mut pretok_str_for_string)
.unwrap();
assert_eq!(pretok_str_for_regex, pretok_str_for_string);
}
#[test]
fn invert() {
let mut pretok_str = PreTokenizedString::from("Hello Hello Hello");
let mut pretok_str_for_invert = pretok_str.clone();
// one pre-tokenizer splits on " " - one splits inverted on "Hello"
let pretokenizer = Split::new(" ", SplitDelimiterBehavior::Removed, false).unwrap();
let pretokenizer_invert =
Split::new("Hello", SplitDelimiterBehavior::Removed, true).unwrap();
pretokenizer.pre_tokenize(&mut pretok_str).unwrap();
pretokenizer_invert
.pre_tokenize(&mut pretok_str_for_invert)
.unwrap();
assert_eq!(pretok_str, pretok_str_for_invert);
}
#[test]
fn serialization() {
use SplitDelimiterBehavior::*;
let split = Split::new("Hello", Removed, true).unwrap();
let split_s =
r#"{"type":"Split","pattern":{"String":"Hello"},"behavior":"Removed","invert":true}"#;
assert_eq!(serde_json::to_string(&split).unwrap(), split_s);
assert_eq!(serde_json::from_str::<Split>(split_s).unwrap(), split);
let split = Split::new(SplitPattern::Regex(r"\s+".into()), Isolated, false).unwrap();
let split_s =
r#"{"type":"Split","pattern":{"Regex":"\\s+"},"behavior":"Isolated","invert":false}"#;
assert_eq!(serde_json::to_string(&split).unwrap(), split_s);
assert_eq!(serde_json::from_str::<Split>(split_s).unwrap(), split);
}
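// Illustrative sketch, not part of the upstream test suite: `SplitPattern`
// implements `From<&str>` and `From<String>`, so plain strings become the
// `String` variant and are later regex-escaped by `Split::new`.
#[test]
fn split_pattern_from_impls() {
assert_eq!(
SplitPattern::from("Hello"),
SplitPattern::String("Hello".into())
);
assert_eq!(
SplitPattern::from(String::from("Hello")),
SplitPattern::String("Hello".into())
);
}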
}
| 0 |
hf_public_repos/tokenizers/tokenizers/src/pre_tokenizers | hf_public_repos/tokenizers/tokenizers/src/pre_tokenizers/unicode_scripts/mod.rs | mod pre_tokenizer;
mod scripts;
// Re-export the PreTokenizer
pub use pre_tokenizer::UnicodeScripts;
| 0 |
hf_public_repos/tokenizers/tokenizers/src/pre_tokenizers | hf_public_repos/tokenizers/tokenizers/src/pre_tokenizers/unicode_scripts/scripts.rs | // Generated by modified Perl script at https://github.com/google/sentencepiece/blob/master/data/gen_unicode_scripts_code.pl
// Unicode scripts : https://gist.github.com/Narsil/07556f26dc84a6baeff4d499e68d3cd2
// Rust adaptation : https://gist.github.com/Narsil/1df9fbbf5296a8d4d62de55dcb2fe700
#[derive(PartialEq, Debug, Clone, Copy, Eq)]
pub enum Script {
Any,
Adlam,
Ahom,
AnatolianHieroglyphs,
Arabic,
Armenian,
Avestan,
Balinese,
Bamum,
BassaVah,
Batak,
Bengali,
Bhaiksuki,
Bopomofo,
Brahmi,
Braille,
Buginese,
Buhid,
CanadianAboriginal,
Carian,
CaucasianAlbanian,
Chakma,
Cham,
Cherokee,
Common,
Coptic,
Cuneiform,
Cypriot,
Cyrillic,
Deseret,
Devanagari,
Duployan,
EgyptianHieroglyphs,
Elbasan,
Ethiopic,
Georgian,
Glagolitic,
Gothic,
Grantha,
Greek,
Gujarati,
Gurmukhi,
Han,
Hangul,
Hanunoo,
Hatran,
Hebrew,
Hiragana,
ImperialAramaic,
Inherited,
InscriptionalPahlavi,
InscriptionalParthian,
Javanese,
Kaithi,
Kannada,
Katakana,
KayahLi,
Kharoshthi,
Khmer,
Khojki,
Khudawadi,
Lao,
Latin,
Lepcha,
Limbu,
LinearA,
LinearB,
Lisu,
Lycian,
Lydian,
Mahajani,
Malayalam,
Mandaic,
Manichaean,
Marchen,
MeeteiMayek,
MendeKikakui,
MeroiticCursive,
MeroiticHieroglyphs,
Miao,
Modi,
Mongolian,
Mro,
Multani,
Myanmar,
Nabataean,
NewTaiLue,
Newa,
Nko,
Ogham,
OlChiki,
OldHungarian,
OldItalic,
OldNorthArabian,
OldPermic,
OldPersian,
OldSouthArabian,
OldTurkic,
Oriya,
Osage,
Osmanya,
PahawhHmong,
Palmyrene,
PauCinHau,
PhagsPa,
Phoenician,
PsalterPahlavi,
Rejang,
Runic,
Samaritan,
Saurashtra,
Sharada,
Shavian,
Siddham,
SignWriting,
Sinhala,
SoraSompeng,
Sundanese,
SylotiNagri,
Syriac,
Tagalog,
Tagbanwa,
TaiLe,
TaiTham,
TaiViet,
Takri,
Tamil,
Tangut,
Telugu,
Thaana,
Thai,
Tibetan,
Tifinagh,
Tirhuta,
Ugaritic,
Vai,
WarangCiti,
Yi,
}
pub fn get_script(c: char) -> Script {
match c as u32 {
0x0000..=0x001F => Script::Common,
0x0020 => Script::Common,
0x0021..=0x0023 => Script::Common,
0x0024 => Script::Common,
0x0025..=0x0027 => Script::Common,
0x0028 => Script::Common,
0x0029 => Script::Common,
0x002A => Script::Common,
0x002B => Script::Common,
0x002C => Script::Common,
0x002D => Script::Common,
0x002E..=0x002F => Script::Common,
0x0030..=0x0039 => Script::Common,
0x003A..=0x003B => Script::Common,
0x003C..=0x003E => Script::Common,
0x003F..=0x0040 => Script::Common,
0x005B => Script::Common,
0x005C => Script::Common,
0x005D => Script::Common,
0x005E => Script::Common,
0x005F => Script::Common,
0x0060 => Script::Common,
0x007B => Script::Common,
0x007C => Script::Common,
0x007D => Script::Common,
0x007E => Script::Common,
0x007F..=0x009F => Script::Common,
0x00A0 => Script::Common,
0x00A1 => Script::Common,
0x00A2..=0x00A5 => Script::Common,
0x00A6 => Script::Common,
0x00A7 => Script::Common,
0x00A8 => Script::Common,
0x00A9 => Script::Common,
0x00AB => Script::Common,
0x00AC => Script::Common,
0x00AD => Script::Common,
0x00AE => Script::Common,
0x00AF => Script::Common,
0x00B0 => Script::Common,
0x00B1 => Script::Common,
0x00B2..=0x00B3 => Script::Common,
0x00B4 => Script::Common,
0x00B5 => Script::Common,
0x00B6..=0x00B7 => Script::Common,
0x00B8 => Script::Common,
0x00B9 => Script::Common,
0x00BB => Script::Common,
0x00BC..=0x00BE => Script::Common,
0x00BF => Script::Common,
0x00D7 => Script::Common,
0x00F7 => Script::Common,
0x02B9..=0x02C1 => Script::Common,
0x02C2..=0x02C5 => Script::Common,
0x02C6..=0x02D1 => Script::Common,
0x02D2..=0x02DF => Script::Common,
0x02E5..=0x02E9 => Script::Common,
0x02EC => Script::Common,
0x02ED => Script::Common,
0x02EE => Script::Common,
0x02EF..=0x02FF => Script::Common,
0x0374 => Script::Common,
0x037E => Script::Common,
0x0385 => Script::Common,
0x0387 => Script::Common,
0x0589 => Script::Common,
0x0605 => Script::Common,
0x060C => Script::Common,
0x061B => Script::Common,
0x061C => Script::Common,
0x061F => Script::Common,
0x0640 => Script::Common,
0x06DD => Script::Common,
0x08E2 => Script::Common,
0x0964..=0x0965 => Script::Common,
0x0E3F => Script::Common,
0x0FD5..=0x0FD8 => Script::Common,
0x10FB => Script::Common,
0x16EB..=0x16ED => Script::Common,
0x1735..=0x1736 => Script::Common,
0x1802..=0x1803 => Script::Common,
0x1805 => Script::Common,
0x1CD3 => Script::Common,
0x1CE1 => Script::Common,
0x1CE9..=0x1CEC => Script::Common,
0x1CEE..=0x1CF1 => Script::Common,
0x1CF2..=0x1CF3 => Script::Common,
0x1CF5..=0x1CF6 => Script::Common,
0x2000..=0x200A => Script::Common,
0x200B => Script::Common,
0x200E..=0x200F => Script::Common,
0x2010..=0x2015 => Script::Common,
0x2016..=0x2017 => Script::Common,
0x2018 => Script::Common,
0x2019 => Script::Common,
0x201A => Script::Common,
0x201B..=0x201C => Script::Common,
0x201D => Script::Common,
0x201E => Script::Common,
0x201F => Script::Common,
0x2020..=0x2027 => Script::Common,
0x2028 => Script::Common,
0x2029 => Script::Common,
0x202A..=0x202E => Script::Common,
0x202F => Script::Common,
0x2030..=0x2038 => Script::Common,
0x2039 => Script::Common,
0x203A => Script::Common,
0x203B..=0x203E => Script::Common,
0x203F..=0x2040 => Script::Common,
0x2041..=0x2043 => Script::Common,
0x2044 => Script::Common,
0x2045 => Script::Common,
0x2046 => Script::Common,
0x2047..=0x2051 => Script::Common,
0x2052 => Script::Common,
0x2053 => Script::Common,
0x2054 => Script::Common,
0x2055..=0x205E => Script::Common,
0x205F => Script::Common,
0x2060..=0x2064 => Script::Common,
0x2066..=0x206F => Script::Common,
0x2070 => Script::Common,
0x2074..=0x2079 => Script::Common,
0x207A..=0x207C => Script::Common,
0x207D => Script::Common,
0x207E => Script::Common,
0x2080..=0x2089 => Script::Common,
0x208A..=0x208C => Script::Common,
0x208D => Script::Common,
0x208E => Script::Common,
0x20A0..=0x20BE => Script::Common,
0x2100..=0x2101 => Script::Common,
0x2102 => Script::Common,
0x2103..=0x2106 => Script::Common,
0x2107 => Script::Common,
0x2108..=0x2109 => Script::Common,
0x210A..=0x2113 => Script::Common,
0x2114 => Script::Common,
0x2115 => Script::Common,
0x2116..=0x2117 => Script::Common,
0x2118 => Script::Common,
0x2119..=0x211D => Script::Common,
0x211E..=0x2123 => Script::Common,
0x2124 => Script::Common,
0x2125 => Script::Common,
0x2127 => Script::Common,
0x2128 => Script::Common,
0x2129 => Script::Common,
0x212C..=0x212D => Script::Common,
0x212E => Script::Common,
0x212F..=0x2131 => Script::Common,
0x2133..=0x2134 => Script::Common,
0x2135..=0x2138 => Script::Common,
0x2139 => Script::Common,
0x213A..=0x213B => Script::Common,
0x213C..=0x213F => Script::Common,
0x2140..=0x2144 => Script::Common,
0x2145..=0x2149 => Script::Common,
0x214A => Script::Common,
0x214B => Script::Common,
0x214C..=0x214D => Script::Common,
0x214F => Script::Common,
0x2150..=0x215F => Script::Common,
0x2189 => Script::Common,
0x218A..=0x218B => Script::Common,
0x2190..=0x2194 => Script::Common,
0x2195..=0x2199 => Script::Common,
0x219A..=0x219B => Script::Common,
0x219C..=0x219F => Script::Common,
0x21A0 => Script::Common,
0x21A1..=0x21A2 => Script::Common,
0x21A3 => Script::Common,
0x21A4..=0x21A5 => Script::Common,
0x21A6 => Script::Common,
0x21A7..=0x21AD => Script::Common,
0x21AE => Script::Common,
0x21AF..=0x21CD => Script::Common,
0x21CE..=0x21CF => Script::Common,
0x21D0..=0x21D1 => Script::Common,
0x21D2 => Script::Common,
0x21D3 => Script::Common,
0x21D4 => Script::Common,
0x21D5..=0x21F3 => Script::Common,
0x21F4..=0x22FF => Script::Common,
0x2300..=0x2307 => Script::Common,
0x2308 => Script::Common,
0x2309 => Script::Common,
0x230A => Script::Common,
0x230B => Script::Common,
0x230C..=0x231F => Script::Common,
0x2320..=0x2321 => Script::Common,
0x2322..=0x2328 => Script::Common,
0x2329 => Script::Common,
0x232A => Script::Common,
0x232B..=0x237B => Script::Common,
0x237C => Script::Common,
0x237D..=0x239A => Script::Common,
0x239B..=0x23B3 => Script::Common,
0x23B4..=0x23DB => Script::Common,
0x23DC..=0x23E1 => Script::Common,
0x23E2..=0x23FE => Script::Common,
0x2400..=0x2426 => Script::Common,
0x2440..=0x244A => Script::Common,
0x2460..=0x249B => Script::Common,
0x249C..=0x24E9 => Script::Common,
0x24EA..=0x24FF => Script::Common,
0x2500..=0x25B6 => Script::Common,
0x25B7 => Script::Common,
0x25B8..=0x25C0 => Script::Common,
0x25C1 => Script::Common,
0x25C2..=0x25F7 => Script::Common,
0x25F8..=0x25FF => Script::Common,
0x2600..=0x266E => Script::Common,
0x266F => Script::Common,
0x2670..=0x2767 => Script::Common,
0x2768 => Script::Common,
0x2769 => Script::Common,
0x276A => Script::Common,
0x276B => Script::Common,
0x276C => Script::Common,
0x276D => Script::Common,
0x276E => Script::Common,
0x276F => Script::Common,
0x2770 => Script::Common,
0x2771 => Script::Common,
0x2772 => Script::Common,
0x2773 => Script::Common,
0x2774 => Script::Common,
0x2775 => Script::Common,
0x2776..=0x2793 => Script::Common,
0x2794..=0x27BF => Script::Common,
0x27C0..=0x27C4 => Script::Common,
0x27C5 => Script::Common,
0x27C6 => Script::Common,
0x27C7..=0x27E5 => Script::Common,
0x27E6 => Script::Common,
0x27E7 => Script::Common,
0x27E8 => Script::Common,
0x27E9 => Script::Common,
0x27EA => Script::Common,
0x27EB => Script::Common,
0x27EC => Script::Common,
0x27ED => Script::Common,
0x27EE => Script::Common,
0x27EF => Script::Common,
0x27F0..=0x27FF => Script::Common,
0x2900..=0x2982 => Script::Common,
0x2983 => Script::Common,
0x2984 => Script::Common,
0x2985 => Script::Common,
0x2986 => Script::Common,
0x2987 => Script::Common,
0x2988 => Script::Common,
0x2989 => Script::Common,
0x298A => Script::Common,
0x298B => Script::Common,
0x298C => Script::Common,
0x298D => Script::Common,
0x298E => Script::Common,
0x298F => Script::Common,
0x2990 => Script::Common,
0x2991 => Script::Common,
0x2992 => Script::Common,
0x2993 => Script::Common,
0x2994 => Script::Common,
0x2995 => Script::Common,
0x2996 => Script::Common,
0x2997 => Script::Common,
0x2998 => Script::Common,
0x2999..=0x29D7 => Script::Common,
0x29D8 => Script::Common,
0x29D9 => Script::Common,
0x29DA => Script::Common,
0x29DB => Script::Common,
0x29DC..=0x29FB => Script::Common,
0x29FC => Script::Common,
0x29FD => Script::Common,
0x29FE..=0x2AFF => Script::Common,
0x2B00..=0x2B2F => Script::Common,
0x2B30..=0x2B44 => Script::Common,
0x2B45..=0x2B46 => Script::Common,
0x2B47..=0x2B4C => Script::Common,
0x2B4D..=0x2B73 => Script::Common,
0x2B76..=0x2B95 => Script::Common,
0x2B98..=0x2BB9 => Script::Common,
0x2BBD..=0x2BC8 => Script::Common,
0x2BCA..=0x2BD1 => Script::Common,
0x2BEC..=0x2BEF => Script::Common,
0x2E00..=0x2E01 => Script::Common,
0x2E02 => Script::Common,
0x2E03 => Script::Common,
0x2E04 => Script::Common,
0x2E05 => Script::Common,
0x2E06..=0x2E08 => Script::Common,
0x2E09 => Script::Common,
0x2E0A => Script::Common,
0x2E0B => Script::Common,
0x2E0C => Script::Common,
0x2E0D => Script::Common,
0x2E0E..=0x2E16 => Script::Common,
0x2E17 => Script::Common,
0x2E18..=0x2E19 => Script::Common,
0x2E1A => Script::Common,
0x2E1B => Script::Common,
0x2E1C => Script::Common,
0x2E1D => Script::Common,
0x2E1E..=0x2E1F => Script::Common,
0x2E20 => Script::Common,
0x2E21 => Script::Common,
0x2E22 => Script::Common,
0x2E23 => Script::Common,
0x2E24 => Script::Common,
0x2E25 => Script::Common,
0x2E26 => Script::Common,
0x2E27 => Script::Common,
0x2E28 => Script::Common,
0x2E29 => Script::Common,
0x2E2A..=0x2E2E => Script::Common,
0x2E2F => Script::Common,
0x2E30..=0x2E39 => Script::Common,
0x2E3A..=0x2E3B => Script::Common,
0x2E3C..=0x2E3F => Script::Common,
0x2E40 => Script::Common,
0x2E41 => Script::Common,
0x2E42 => Script::Common,
0x2E43..=0x2E44 => Script::Common,
0x2FF0..=0x2FFB => Script::Common,
0x3000 => Script::Common,
0x3001..=0x3003 => Script::Common,
0x3004 => Script::Common,
0x3006 => Script::Common,
0x3008 => Script::Common,
0x3009 => Script::Common,
0x300A => Script::Common,
0x300B => Script::Common,
0x300C => Script::Common,
0x300D => Script::Common,
0x300E => Script::Common,
0x300F => Script::Common,
0x3010 => Script::Common,
0x3011 => Script::Common,
0x3012..=0x3013 => Script::Common,
0x3014 => Script::Common,
0x3015 => Script::Common,
0x3016 => Script::Common,
0x3017 => Script::Common,
0x3018 => Script::Common,
0x3019 => Script::Common,
0x301A => Script::Common,
0x301B => Script::Common,
0x301C => Script::Common,
0x301D => Script::Common,
0x301E..=0x301F => Script::Common,
0x3020 => Script::Common,
0x3030 => Script::Common,
0x3031..=0x3035 => Script::Common,
0x3036..=0x3037 => Script::Common,
0x303C => Script::Common,
0x303D => Script::Common,
0x303E..=0x303F => Script::Common,
0x309B..=0x309C => Script::Common,
0x30A0 => Script::Common,
0x30FB => Script::Common,
0x30FC => Script::Common,
0x3190..=0x3191 => Script::Common,
0x3192..=0x3195 => Script::Common,
0x3196..=0x319F => Script::Common,
0x31C0..=0x31E3 => Script::Common,
0x3220..=0x3229 => Script::Common,
0x322A..=0x3247 => Script::Common,
0x3248..=0x324F => Script::Common,
0x3250 => Script::Common,
0x3251..=0x325F => Script::Common,
0x327F => Script::Common,
0x3280..=0x3289 => Script::Common,
0x328A..=0x32B0 => Script::Common,
0x32B1..=0x32BF => Script::Common,
0x32C0..=0x32CF => Script::Common,
0x3358..=0x33FF => Script::Common,
0x4DC0..=0x4DFF => Script::Common,
0xA700..=0xA716 => Script::Common,
0xA717..=0xA71F => Script::Common,
0xA720..=0xA721 => Script::Common,
0xA788 => Script::Common,
0xA789..=0xA78A => Script::Common,
0xA830..=0xA835 => Script::Common,
0xA836..=0xA837 => Script::Common,
0xA838 => Script::Common,
0xA839 => Script::Common,
0xA92E => Script::Common,
0xA9CF => Script::Common,
0xAB5B => Script::Common,
0xFD3E => Script::Common,
0xFD3F => Script::Common,
0xFE10..=0xFE16 => Script::Common,
0xFE17 => Script::Common,
0xFE18 => Script::Common,
0xFE19 => Script::Common,
0xFE30 => Script::Common,
0xFE31..=0xFE32 => Script::Common,
0xFE33..=0xFE34 => Script::Common,
0xFE35 => Script::Common,
0xFE36 => Script::Common,
0xFE37 => Script::Common,
0xFE38 => Script::Common,
0xFE39 => Script::Common,
0xFE3A => Script::Common,
0xFE3B => Script::Common,
0xFE3C => Script::Common,
0xFE3D => Script::Common,
0xFE3E => Script::Common,
0xFE3F => Script::Common,
0xFE40 => Script::Common,
0xFE41 => Script::Common,
0xFE42 => Script::Common,
0xFE43 => Script::Common,
0xFE44 => Script::Common,
0xFE45..=0xFE46 => Script::Common,
0xFE47 => Script::Common,
0xFE48 => Script::Common,
0xFE49..=0xFE4C => Script::Common,
0xFE4D..=0xFE4F => Script::Common,
0xFE50..=0xFE52 => Script::Common,
0xFE54..=0xFE57 => Script::Common,
0xFE58 => Script::Common,
0xFE59 => Script::Common,
0xFE5A => Script::Common,
0xFE5B => Script::Common,
0xFE5C => Script::Common,
0xFE5D => Script::Common,
0xFE5E => Script::Common,
0xFE5F..=0xFE61 => Script::Common,
0xFE62 => Script::Common,
0xFE63 => Script::Common,
0xFE64..=0xFE66 => Script::Common,
0xFE68 => Script::Common,
0xFE69 => Script::Common,
0xFE6A..=0xFE6B => Script::Common,
0xFEFF => Script::Common,
0xFF01..=0xFF03 => Script::Common,
0xFF04 => Script::Common,
0xFF05..=0xFF07 => Script::Common,
0xFF08 => Script::Common,
0xFF09 => Script::Common,
0xFF0A => Script::Common,
0xFF0B => Script::Common,
0xFF0C => Script::Common,
0xFF0D => Script::Common,
0xFF0E..=0xFF0F => Script::Common,
0xFF10..=0xFF19 => Script::Common,
0xFF1A..=0xFF1B => Script::Common,
0xFF1C..=0xFF1E => Script::Common,
0xFF1F..=0xFF20 => Script::Common,
0xFF3B => Script::Common,
0xFF3C => Script::Common,
0xFF3D => Script::Common,
0xFF3E => Script::Common,
0xFF3F => Script::Common,
0xFF40 => Script::Common,
0xFF5B => Script::Common,
0xFF5C => Script::Common,
0xFF5D => Script::Common,
0xFF5E => Script::Common,
0xFF5F => Script::Common,
0xFF60 => Script::Common,
0xFF61 => Script::Common,
0xFF62 => Script::Common,
0xFF63 => Script::Common,
0xFF64..=0xFF65 => Script::Common,
0xFF70 => Script::Common,
0xFF9E..=0xFF9F => Script::Common,
0xFFE0..=0xFFE1 => Script::Common,
0xFFE2 => Script::Common,
0xFFE3 => Script::Common,
0xFFE4 => Script::Common,
0xFFE5..=0xFFE6 => Script::Common,
0xFFE8 => Script::Common,
0xFFE9..=0xFFEC => Script::Common,
0xFFED..=0xFFEE => Script::Common,
0xFFF9..=0xFFFB => Script::Common,
0xFFFC..=0xFFFD => Script::Common,
0x10100..=0x10102 => Script::Common,
0x10107..=0x10133 => Script::Common,
0x10137..=0x1013F => Script::Common,
0x10190..=0x1019B => Script::Common,
0x101D0..=0x101FC => Script::Common,
0x102E1..=0x102FB => Script::Common,
0x1BCA0..=0x1BCA3 => Script::Common,
0x1D000..=0x1D0F5 => Script::Common,
0x1D100..=0x1D126 => Script::Common,
0x1D129..=0x1D164 => Script::Common,
0x1D165..=0x1D166 => Script::Common,
0x1D16A..=0x1D16C => Script::Common,
0x1D16D..=0x1D172 => Script::Common,
0x1D173..=0x1D17A => Script::Common,
0x1D183..=0x1D184 => Script::Common,
0x1D18C..=0x1D1A9 => Script::Common,
0x1D1AE..=0x1D1E8 => Script::Common,
0x1D300..=0x1D356 => Script::Common,
0x1D360..=0x1D371 => Script::Common,
0x1D400..=0x1D454 => Script::Common,
0x1D456..=0x1D49C => Script::Common,
0x1D49E..=0x1D49F => Script::Common,
0x1D4A2 => Script::Common,
0x1D4A5..=0x1D4A6 => Script::Common,
0x1D4A9..=0x1D4AC => Script::Common,
0x1D4AE..=0x1D4B9 => Script::Common,
0x1D4BB => Script::Common,
0x1D4BD..=0x1D4C3 => Script::Common,
0x1D4C5..=0x1D505 => Script::Common,
0x1D507..=0x1D50A => Script::Common,
0x1D50D..=0x1D514 => Script::Common,
0x1D516..=0x1D51C => Script::Common,
0x1D51E..=0x1D539 => Script::Common,
0x1D53B..=0x1D53E => Script::Common,
0x1D540..=0x1D544 => Script::Common,
0x1D546 => Script::Common,
0x1D54A..=0x1D550 => Script::Common,
0x1D552..=0x1D6A5 => Script::Common,
0x1D6A8..=0x1D6C0 => Script::Common,
0x1D6C1 => Script::Common,
0x1D6C2..=0x1D6DA => Script::Common,
0x1D6DB => Script::Common,
0x1D6DC..=0x1D6FA => Script::Common,
0x1D6FB => Script::Common,
0x1D6FC..=0x1D714 => Script::Common,
0x1D715 => Script::Common,
0x1D716..=0x1D734 => Script::Common,
0x1D735 => Script::Common,
0x1D736..=0x1D74E => Script::Common,
0x1D74F => Script::Common,
0x1D750..=0x1D76E => Script::Common,
0x1D76F => Script::Common,
0x1D770..=0x1D788 => Script::Common,
0x1D789 => Script::Common,
0x1D78A..=0x1D7A8 => Script::Common,
0x1D7A9 => Script::Common,
0x1D7AA..=0x1D7C2 => Script::Common,
0x1D7C3 => Script::Common,
0x1D7C4..=0x1D7CB => Script::Common,
0x1D7CE..=0x1D7FF => Script::Common,
0x1F000..=0x1F02B => Script::Common,
0x1F030..=0x1F093 => Script::Common,
0x1F0A0..=0x1F0AE => Script::Common,
0x1F0B1..=0x1F0BF => Script::Common,
0x1F0C1..=0x1F0CF => Script::Common,
0x1F0D1..=0x1F0F5 => Script::Common,
0x1F100..=0x1F10C => Script::Common,
0x1F110..=0x1F12E => Script::Common,
0x1F130..=0x1F16B => Script::Common,
0x1F170..=0x1F1AC => Script::Common,
0x1F1E6..=0x1F1FF => Script::Common,
0x1F201..=0x1F202 => Script::Common,
0x1F210..=0x1F23B => Script::Common,
0x1F240..=0x1F248 => Script::Common,
0x1F250..=0x1F251 => Script::Common,
0x1F300..=0x1F3FA => Script::Common,
0x1F3FB..=0x1F3FF => Script::Common,
0x1F400..=0x1F6D2 => Script::Common,
0x1F6E0..=0x1F6EC => Script::Common,
0x1F6F0..=0x1F6F6 => Script::Common,
0x1F700..=0x1F773 => Script::Common,
0x1F780..=0x1F7D4 => Script::Common,
0x1F800..=0x1F80B => Script::Common,
0x1F810..=0x1F847 => Script::Common,
0x1F850..=0x1F859 => Script::Common,
0x1F860..=0x1F887 => Script::Common,
0x1F890..=0x1F8AD => Script::Common,
0x1F910..=0x1F91E => Script::Common,
0x1F920..=0x1F927 => Script::Common,
0x1F930 => Script::Common,
0x1F933..=0x1F93E => Script::Common,
0x1F940..=0x1F94B => Script::Common,
0x1F950..=0x1F95E => Script::Common,
0x1F980..=0x1F991 => Script::Common,
0x1F9C0 => Script::Common,
0xE0001 => Script::Common,
0xE0020..=0xE007F => Script::Common,
0x0041..=0x005A => Script::Latin,
0x0061..=0x007A => Script::Latin,
0x00AA => Script::Latin,
0x00BA => Script::Latin,
0x00C0..=0x00D6 => Script::Latin,
0x00D8..=0x00F6 => Script::Latin,
0x00F8..=0x01BA => Script::Latin,
0x01BB => Script::Latin,
0x01BC..=0x01BF => Script::Latin,
0x01C0..=0x01C3 => Script::Latin,
0x01C4..=0x0293 => Script::Latin,
0x0294 => Script::Latin,
0x0295..=0x02AF => Script::Latin,
0x02B0..=0x02B8 => Script::Latin,
0x02E0..=0x02E4 => Script::Latin,
0x1D00..=0x1D25 => Script::Latin,
0x1D2C..=0x1D5C => Script::Latin,
0x1D62..=0x1D65 => Script::Latin,
0x1D6B..=0x1D77 => Script::Latin,
0x1D79..=0x1D9A => Script::Latin,
0x1D9B..=0x1DBE => Script::Latin,
0x1E00..=0x1EFF => Script::Latin,
0x2071 => Script::Latin,
0x207F => Script::Latin,
0x2090..=0x209C => Script::Latin,
0x212A..=0x212B => Script::Latin,
0x2132 => Script::Latin,
0x214E => Script::Latin,
0x2160..=0x2182 => Script::Latin,
0x2183..=0x2184 => Script::Latin,
0x2185..=0x2188 => Script::Latin,
0x2C60..=0x2C7B => Script::Latin,
0x2C7C..=0x2C7D => Script::Latin,
0x2C7E..=0x2C7F => Script::Latin,
0xA722..=0xA76F => Script::Latin,
0xA770 => Script::Latin,
0xA771..=0xA787 => Script::Latin,
0xA78B..=0xA78E => Script::Latin,
0xA78F => Script::Latin,
0xA790..=0xA7AE => Script::Latin,
0xA7B0..=0xA7B7 => Script::Latin,
0xA7F7 => Script::Latin,
0xA7F8..=0xA7F9 => Script::Latin,
0xA7FA => Script::Latin,
0xA7FB..=0xA7FF => Script::Latin,
0xAB30..=0xAB5A => Script::Latin,
0xAB5C..=0xAB5F => Script::Latin,
0xAB60..=0xAB64 => Script::Latin,
0xFB00..=0xFB06 => Script::Latin,
0xFF21..=0xFF3A => Script::Latin,
0xFF41..=0xFF5A => Script::Latin,
0x0370..=0x0373 => Script::Greek,
0x0375 => Script::Greek,
0x0376..=0x0377 => Script::Greek,
0x037A => Script::Greek,
0x037B..=0x037D => Script::Greek,
0x037F => Script::Greek,
0x0384 => Script::Greek,
0x0386 => Script::Greek,
0x0388..=0x038A => Script::Greek,
0x038C => Script::Greek,
0x038E..=0x03A1 => Script::Greek,
0x03A3..=0x03E1 => Script::Greek,
0x03F0..=0x03F5 => Script::Greek,
0x03F6 => Script::Greek,
0x03F7..=0x03FF => Script::Greek,
0x1D26..=0x1D2A => Script::Greek,
0x1D5D..=0x1D61 => Script::Greek,
0x1D66..=0x1D6A => Script::Greek,
0x1DBF => Script::Greek,
0x1F00..=0x1F15 => Script::Greek,
0x1F18..=0x1F1D => Script::Greek,
0x1F20..=0x1F45 => Script::Greek,
0x1F48..=0x1F4D => Script::Greek,
0x1F50..=0x1F57 => Script::Greek,
0x1F59 => Script::Greek,
0x1F5B => Script::Greek,
0x1F5D => Script::Greek,
0x1F5F..=0x1F7D => Script::Greek,
0x1F80..=0x1FB4 => Script::Greek,
0x1FB6..=0x1FBC => Script::Greek,
0x1FBD => Script::Greek,
0x1FBE => Script::Greek,
0x1FBF..=0x1FC1 => Script::Greek,
0x1FC2..=0x1FC4 => Script::Greek,
0x1FC6..=0x1FCC => Script::Greek,
0x1FCD..=0x1FCF => Script::Greek,
0x1FD0..=0x1FD3 => Script::Greek,
0x1FD6..=0x1FDB => Script::Greek,
0x1FDD..=0x1FDF => Script::Greek,
0x1FE0..=0x1FEC => Script::Greek,
0x1FED..=0x1FEF => Script::Greek,
0x1FF2..=0x1FF4 => Script::Greek,
0x1FF6..=0x1FFC => Script::Greek,
0x1FFD..=0x1FFE => Script::Greek,
0x2126 => Script::Greek,
0xAB65 => Script::Greek,
0x10140..=0x10174 => Script::Greek,
0x10175..=0x10178 => Script::Greek,
0x10179..=0x10189 => Script::Greek,
0x1018A..=0x1018B => Script::Greek,
0x1018C..=0x1018E => Script::Greek,
0x101A0 => Script::Greek,
0x1D200..=0x1D241 => Script::Greek,
0x1D242..=0x1D244 => Script::Greek,
0x1D245 => Script::Greek,
0x0400..=0x0481 => Script::Cyrillic,
0x0482 => Script::Cyrillic,
0x0483..=0x0484 => Script::Cyrillic,
0x0487 => Script::Cyrillic,
0x0488..=0x0489 => Script::Cyrillic,
0x048A..=0x052F => Script::Cyrillic,
0x1C80..=0x1C88 => Script::Cyrillic,
0x1D2B => Script::Cyrillic,
0x1D78 => Script::Cyrillic,
0x2DE0..=0x2DFF => Script::Cyrillic,
0xA640..=0xA66D => Script::Cyrillic,
0xA66E => Script::Cyrillic,
0xA66F => Script::Cyrillic,
0xA670..=0xA672 => Script::Cyrillic,
0xA673 => Script::Cyrillic,
0xA674..=0xA67D => Script::Cyrillic,
0xA67E => Script::Cyrillic,
0xA67F => Script::Cyrillic,
0xA680..=0xA69B => Script::Cyrillic,
0xA69C..=0xA69D => Script::Cyrillic,
0xA69E..=0xA69F => Script::Cyrillic,
0xFE2E..=0xFE2F => Script::Cyrillic,
0x0531..=0x0556 => Script::Armenian,
0x0559 => Script::Armenian,
0x055A..=0x055F => Script::Armenian,
0x0561..=0x0587 => Script::Armenian,
0x058A => Script::Armenian,
0x058D..=0x058E => Script::Armenian,
0x058F => Script::Armenian,
0xFB13..=0xFB17 => Script::Armenian,
0x0591..=0x05BD => Script::Hebrew,
0x05BE => Script::Hebrew,
0x05BF => Script::Hebrew,
0x05C0 => Script::Hebrew,
0x05C1..=0x05C2 => Script::Hebrew,
0x05C3 => Script::Hebrew,
0x05C4..=0x05C5 => Script::Hebrew,
0x05C6 => Script::Hebrew,
0x05C7 => Script::Hebrew,
0x05D0..=0x05EA => Script::Hebrew,
0x05F0..=0x05F2 => Script::Hebrew,
0x05F3..=0x05F4 => Script::Hebrew,
0xFB1D => Script::Hebrew,
0xFB1E => Script::Hebrew,
0xFB1F..=0xFB28 => Script::Hebrew,
0xFB29 => Script::Hebrew,
0xFB2A..=0xFB36 => Script::Hebrew,
0xFB38..=0xFB3C => Script::Hebrew,
0xFB3E => Script::Hebrew,
0xFB40..=0xFB41 => Script::Hebrew,
0xFB43..=0xFB44 => Script::Hebrew,
0xFB46..=0xFB4F => Script::Hebrew,
0x0600..=0x0604 => Script::Arabic,
0x0606..=0x0608 => Script::Arabic,
0x0609..=0x060A => Script::Arabic,
0x060B => Script::Arabic,
0x060D => Script::Arabic,
0x060E..=0x060F => Script::Arabic,
0x0610..=0x061A => Script::Arabic,
0x061E => Script::Arabic,
0x0620..=0x063F => Script::Arabic,
0x0641..=0x064A => Script::Arabic,
0x0656..=0x065F => Script::Arabic,
0x0660..=0x0669 => Script::Arabic,
0x066A..=0x066D => Script::Arabic,
0x066E..=0x066F => Script::Arabic,
0x0671..=0x06D3 => Script::Arabic,
0x06D4 => Script::Arabic,
0x06D5 => Script::Arabic,
0x06D6..=0x06DC => Script::Arabic,
0x06DE => Script::Arabic,
0x06DF..=0x06E4 => Script::Arabic,
0x06E5..=0x06E6 => Script::Arabic,
0x06E7..=0x06E8 => Script::Arabic,
0x06E9 => Script::Arabic,
0x06EA..=0x06ED => Script::Arabic,
0x06EE..=0x06EF => Script::Arabic,
0x06F0..=0x06F9 => Script::Arabic,
0x06FA..=0x06FC => Script::Arabic,
0x06FD..=0x06FE => Script::Arabic,
0x06FF => Script::Arabic,
0x0750..=0x077F => Script::Arabic,
0x08A0..=0x08B4 => Script::Arabic,
0x08B6..=0x08BD => Script::Arabic,
0x08D4..=0x08E1 => Script::Arabic,
0x08E3..=0x08FF => Script::Arabic,
0xFB50..=0xFBB1 => Script::Arabic,
0xFBB2..=0xFBC1 => Script::Arabic,
0xFBD3..=0xFD3D => Script::Arabic,
0xFD50..=0xFD8F => Script::Arabic,
0xFD92..=0xFDC7 => Script::Arabic,
0xFDF0..=0xFDFB => Script::Arabic,
0xFDFC => Script::Arabic,
0xFDFD => Script::Arabic,
0xFE70..=0xFE74 => Script::Arabic,
0xFE76..=0xFEFC => Script::Arabic,
0x10E60..=0x10E7E => Script::Arabic,
0x1EE00..=0x1EE03 => Script::Arabic,
0x1EE05..=0x1EE1F => Script::Arabic,
0x1EE21..=0x1EE22 => Script::Arabic,
0x1EE24 => Script::Arabic,
0x1EE27 => Script::Arabic,
0x1EE29..=0x1EE32 => Script::Arabic,
0x1EE34..=0x1EE37 => Script::Arabic,
0x1EE39 => Script::Arabic,
0x1EE3B => Script::Arabic,
0x1EE42 => Script::Arabic,
0x1EE47 => Script::Arabic,
0x1EE49 => Script::Arabic,
0x1EE4B => Script::Arabic,
0x1EE4D..=0x1EE4F => Script::Arabic,
0x1EE51..=0x1EE52 => Script::Arabic,
0x1EE54 => Script::Arabic,
0x1EE57 => Script::Arabic,
0x1EE59 => Script::Arabic,
0x1EE5B => Script::Arabic,
0x1EE5D => Script::Arabic,
0x1EE5F => Script::Arabic,
0x1EE61..=0x1EE62 => Script::Arabic,
0x1EE64 => Script::Arabic,
0x1EE67..=0x1EE6A => Script::Arabic,
0x1EE6C..=0x1EE72 => Script::Arabic,
0x1EE74..=0x1EE77 => Script::Arabic,
0x1EE79..=0x1EE7C => Script::Arabic,
0x1EE7E => Script::Arabic,
0x1EE80..=0x1EE89 => Script::Arabic,
0x1EE8B..=0x1EE9B => Script::Arabic,
0x1EEA1..=0x1EEA3 => Script::Arabic,
0x1EEA5..=0x1EEA9 => Script::Arabic,
0x1EEAB..=0x1EEBB => Script::Arabic,
0x1EEF0..=0x1EEF1 => Script::Arabic,
0x0700..=0x070D => Script::Syriac,
0x070F => Script::Syriac,
0x0710 => Script::Syriac,
0x0711 => Script::Syriac,
0x0712..=0x072F => Script::Syriac,
0x0730..=0x074A => Script::Syriac,
0x074D..=0x074F => Script::Syriac,
0x0780..=0x07A5 => Script::Thaana,
0x07A6..=0x07B0 => Script::Thaana,
0x07B1 => Script::Thaana,
0x0900..=0x0902 => Script::Devanagari,
0x0903 => Script::Devanagari,
0x0904..=0x0939 => Script::Devanagari,
0x093A => Script::Devanagari,
0x093B => Script::Devanagari,
0x093C => Script::Devanagari,
0x093D => Script::Devanagari,
0x093E..=0x0940 => Script::Devanagari,
0x0941..=0x0948 => Script::Devanagari,
0x0949..=0x094C => Script::Devanagari,
0x094D => Script::Devanagari,
0x094E..=0x094F => Script::Devanagari,
0x0950 => Script::Devanagari,
0x0953..=0x0957 => Script::Devanagari,
0x0958..=0x0961 => Script::Devanagari,
0x0962..=0x0963 => Script::Devanagari,
0x0966..=0x096F => Script::Devanagari,
0x0970 => Script::Devanagari,
0x0971 => Script::Devanagari,
0x0972..=0x097F => Script::Devanagari,
0xA8E0..=0xA8F1 => Script::Devanagari,
0xA8F2..=0xA8F7 => Script::Devanagari,
0xA8F8..=0xA8FA => Script::Devanagari,
0xA8FB => Script::Devanagari,
0xA8FC => Script::Devanagari,
0xA8FD => Script::Devanagari,
0x0980 => Script::Bengali,
0x0981 => Script::Bengali,
0x0982..=0x0983 => Script::Bengali,
0x0985..=0x098C => Script::Bengali,
0x098F..=0x0990 => Script::Bengali,
0x0993..=0x09A8 => Script::Bengali,
0x09AA..=0x09B0 => Script::Bengali,
0x09B2 => Script::Bengali,
0x09B6..=0x09B9 => Script::Bengali,
0x09BC => Script::Bengali,
0x09BD => Script::Bengali,
0x09BE..=0x09C0 => Script::Bengali,
0x09C1..=0x09C4 => Script::Bengali,
0x09C7..=0x09C8 => Script::Bengali,
0x09CB..=0x09CC => Script::Bengali,
0x09CD => Script::Bengali,
0x09CE => Script::Bengali,
0x09D7 => Script::Bengali,
0x09DC..=0x09DD => Script::Bengali,
0x09DF..=0x09E1 => Script::Bengali,
0x09E2..=0x09E3 => Script::Bengali,
0x09E6..=0x09EF => Script::Bengali,
0x09F0..=0x09F1 => Script::Bengali,
0x09F2..=0x09F3 => Script::Bengali,
0x09F4..=0x09F9 => Script::Bengali,
0x09FA => Script::Bengali,
0x09FB => Script::Bengali,
0x0A01..=0x0A02 => Script::Gurmukhi,
0x0A03 => Script::Gurmukhi,
0x0A05..=0x0A0A => Script::Gurmukhi,
0x0A0F..=0x0A10 => Script::Gurmukhi,
0x0A13..=0x0A28 => Script::Gurmukhi,
0x0A2A..=0x0A30 => Script::Gurmukhi,
0x0A32..=0x0A33 => Script::Gurmukhi,
0x0A35..=0x0A36 => Script::Gurmukhi,
0x0A38..=0x0A39 => Script::Gurmukhi,
0x0A3C => Script::Gurmukhi,
0x0A3E..=0x0A40 => Script::Gurmukhi,
0x0A41..=0x0A42 => Script::Gurmukhi,
0x0A47..=0x0A48 => Script::Gurmukhi,
0x0A4B..=0x0A4D => Script::Gurmukhi,
0x0A51 => Script::Gurmukhi,
0x0A59..=0x0A5C => Script::Gurmukhi,
0x0A5E => Script::Gurmukhi,
0x0A66..=0x0A6F => Script::Gurmukhi,
0x0A70..=0x0A71 => Script::Gurmukhi,
0x0A72..=0x0A74 => Script::Gurmukhi,
0x0A75 => Script::Gurmukhi,
0x0A81..=0x0A82 => Script::Gujarati,
0x0A83 => Script::Gujarati,
0x0A85..=0x0A8D => Script::Gujarati,
0x0A8F..=0x0A91 => Script::Gujarati,
0x0A93..=0x0AA8 => Script::Gujarati,
0x0AAA..=0x0AB0 => Script::Gujarati,
0x0AB2..=0x0AB3 => Script::Gujarati,
0x0AB5..=0x0AB9 => Script::Gujarati,
0x0ABC => Script::Gujarati,
0x0ABD => Script::Gujarati,
0x0ABE..=0x0AC0 => Script::Gujarati,
0x0AC1..=0x0AC5 => Script::Gujarati,
0x0AC7..=0x0AC8 => Script::Gujarati,
0x0AC9 => Script::Gujarati,
0x0ACB..=0x0ACC => Script::Gujarati,
0x0ACD => Script::Gujarati,
0x0AD0 => Script::Gujarati,
0x0AE0..=0x0AE1 => Script::Gujarati,
0x0AE2..=0x0AE3 => Script::Gujarati,
0x0AE6..=0x0AEF => Script::Gujarati,
0x0AF0 => Script::Gujarati,
0x0AF1 => Script::Gujarati,
0x0AF9 => Script::Gujarati,
0x0B01 => Script::Oriya,
0x0B02..=0x0B03 => Script::Oriya,
0x0B05..=0x0B0C => Script::Oriya,
0x0B0F..=0x0B10 => Script::Oriya,
0x0B13..=0x0B28 => Script::Oriya,
0x0B2A..=0x0B30 => Script::Oriya,
0x0B32..=0x0B33 => Script::Oriya,
0x0B35..=0x0B39 => Script::Oriya,
0x0B3C => Script::Oriya,
0x0B3D => Script::Oriya,
0x0B3E => Script::Oriya,
0x0B3F => Script::Oriya,
0x0B40 => Script::Oriya,
0x0B41..=0x0B44 => Script::Oriya,
0x0B47..=0x0B48 => Script::Oriya,
0x0B4B..=0x0B4C => Script::Oriya,
0x0B4D => Script::Oriya,
0x0B56 => Script::Oriya,
0x0B57 => Script::Oriya,
0x0B5C..=0x0B5D => Script::Oriya,
0x0B5F..=0x0B61 => Script::Oriya,
0x0B62..=0x0B63 => Script::Oriya,
0x0B66..=0x0B6F => Script::Oriya,
0x0B70 => Script::Oriya,
0x0B71 => Script::Oriya,
0x0B72..=0x0B77 => Script::Oriya,
0x0B82 => Script::Tamil,
0x0B83 => Script::Tamil,
0x0B85..=0x0B8A => Script::Tamil,
0x0B8E..=0x0B90 => Script::Tamil,
0x0B92..=0x0B95 => Script::Tamil,
0x0B99..=0x0B9A => Script::Tamil,
0x0B9C => Script::Tamil,
0x0B9E..=0x0B9F => Script::Tamil,
0x0BA3..=0x0BA4 => Script::Tamil,
0x0BA8..=0x0BAA => Script::Tamil,
0x0BAE..=0x0BB9 => Script::Tamil,
0x0BBE..=0x0BBF => Script::Tamil,
0x0BC0 => Script::Tamil,
0x0BC1..=0x0BC2 => Script::Tamil,
0x0BC6..=0x0BC8 => Script::Tamil,
0x0BCA..=0x0BCC => Script::Tamil,
0x0BCD => Script::Tamil,
0x0BD0 => Script::Tamil,
0x0BD7 => Script::Tamil,
0x0BE6..=0x0BEF => Script::Tamil,
0x0BF0..=0x0BF2 => Script::Tamil,
0x0BF3..=0x0BF8 => Script::Tamil,
0x0BF9 => Script::Tamil,
0x0BFA => Script::Tamil,
0x0C00 => Script::Telugu,
0x0C01..=0x0C03 => Script::Telugu,
0x0C05..=0x0C0C => Script::Telugu,
0x0C0E..=0x0C10 => Script::Telugu,
0x0C12..=0x0C28 => Script::Telugu,
0x0C2A..=0x0C39 => Script::Telugu,
0x0C3D => Script::Telugu,
0x0C3E..=0x0C40 => Script::Telugu,
0x0C41..=0x0C44 => Script::Telugu,
0x0C46..=0x0C48 => Script::Telugu,
0x0C4A..=0x0C4D => Script::Telugu,
0x0C55..=0x0C56 => Script::Telugu,
0x0C58..=0x0C5A => Script::Telugu,
0x0C60..=0x0C61 => Script::Telugu,
0x0C62..=0x0C63 => Script::Telugu,
0x0C66..=0x0C6F => Script::Telugu,
0x0C78..=0x0C7E => Script::Telugu,
0x0C7F => Script::Telugu,
0x0C80 => Script::Kannada,
0x0C81 => Script::Kannada,
0x0C82..=0x0C83 => Script::Kannada,
0x0C85..=0x0C8C => Script::Kannada,
0x0C8E..=0x0C90 => Script::Kannada,
0x0C92..=0x0CA8 => Script::Kannada,
0x0CAA..=0x0CB3 => Script::Kannada,
0x0CB5..=0x0CB9 => Script::Kannada,
0x0CBC => Script::Kannada,
0x0CBD => Script::Kannada,
0x0CBE => Script::Kannada,
0x0CBF => Script::Kannada,
0x0CC0..=0x0CC4 => Script::Kannada,
0x0CC6 => Script::Kannada,
0x0CC7..=0x0CC8 => Script::Kannada,
0x0CCA..=0x0CCB => Script::Kannada,
0x0CCC..=0x0CCD => Script::Kannada,
0x0CD5..=0x0CD6 => Script::Kannada,
0x0CDE => Script::Kannada,
0x0CE0..=0x0CE1 => Script::Kannada,
0x0CE2..=0x0CE3 => Script::Kannada,
0x0CE6..=0x0CEF => Script::Kannada,
0x0CF1..=0x0CF2 => Script::Kannada,
0x0D01 => Script::Malayalam,
0x0D02..=0x0D03 => Script::Malayalam,
0x0D05..=0x0D0C => Script::Malayalam,
0x0D0E..=0x0D10 => Script::Malayalam,
0x0D12..=0x0D3A => Script::Malayalam,
0x0D3D => Script::Malayalam,
0x0D3E..=0x0D40 => Script::Malayalam,
0x0D41..=0x0D44 => Script::Malayalam,
0x0D46..=0x0D48 => Script::Malayalam,
0x0D4A..=0x0D4C => Script::Malayalam,
0x0D4D => Script::Malayalam,
0x0D4E => Script::Malayalam,
0x0D4F => Script::Malayalam,
0x0D54..=0x0D56 => Script::Malayalam,
0x0D57 => Script::Malayalam,
0x0D58..=0x0D5E => Script::Malayalam,
0x0D5F..=0x0D61 => Script::Malayalam,
0x0D62..=0x0D63 => Script::Malayalam,
0x0D66..=0x0D6F => Script::Malayalam,
0x0D70..=0x0D78 => Script::Malayalam,
0x0D79 => Script::Malayalam,
0x0D7A..=0x0D7F => Script::Malayalam,
0x0D82..=0x0D83 => Script::Sinhala,
0x0D85..=0x0D96 => Script::Sinhala,
0x0D9A..=0x0DB1 => Script::Sinhala,
0x0DB3..=0x0DBB => Script::Sinhala,
0x0DBD => Script::Sinhala,
0x0DC0..=0x0DC6 => Script::Sinhala,
0x0DCA => Script::Sinhala,
0x0DCF..=0x0DD1 => Script::Sinhala,
0x0DD2..=0x0DD4 => Script::Sinhala,
0x0DD6 => Script::Sinhala,
0x0DD8..=0x0DDF => Script::Sinhala,
0x0DE6..=0x0DEF => Script::Sinhala,
0x0DF2..=0x0DF3 => Script::Sinhala,
0x0DF4 => Script::Sinhala,
0x111E1..=0x111F4 => Script::Sinhala,
0x0E01..=0x0E30 => Script::Thai,
0x0E31 => Script::Thai,
0x0E32..=0x0E33 => Script::Thai,
0x0E34..=0x0E3A => Script::Thai,
0x0E40..=0x0E45 => Script::Thai,
0x0E46 => Script::Thai,
0x0E47..=0x0E4E => Script::Thai,
0x0E4F => Script::Thai,
0x0E50..=0x0E59 => Script::Thai,
0x0E5A..=0x0E5B => Script::Thai,
0x0E81..=0x0E82 => Script::Lao,
0x0E84 => Script::Lao,
0x0E87..=0x0E88 => Script::Lao,
0x0E8A => Script::Lao,
0x0E8D => Script::Lao,
0x0E94..=0x0E97 => Script::Lao,
0x0E99..=0x0E9F => Script::Lao,
0x0EA1..=0x0EA3 => Script::Lao,
0x0EA5 => Script::Lao,
0x0EA7 => Script::Lao,
0x0EAA..=0x0EAB => Script::Lao,
0x0EAD..=0x0EB0 => Script::Lao,
0x0EB1 => Script::Lao,
0x0EB2..=0x0EB3 => Script::Lao,
0x0EB4..=0x0EB9 => Script::Lao,
0x0EBB..=0x0EBC => Script::Lao,
0x0EBD => Script::Lao,
0x0EC0..=0x0EC4 => Script::Lao,
0x0EC6 => Script::Lao,
0x0EC8..=0x0ECD => Script::Lao,
0x0ED0..=0x0ED9 => Script::Lao,
0x0EDC..=0x0EDF => Script::Lao,
0x0F00 => Script::Tibetan,
0x0F01..=0x0F03 => Script::Tibetan,
0x0F04..=0x0F12 => Script::Tibetan,
0x0F13 => Script::Tibetan,
0x0F14 => Script::Tibetan,
0x0F15..=0x0F17 => Script::Tibetan,
0x0F18..=0x0F19 => Script::Tibetan,
0x0F1A..=0x0F1F => Script::Tibetan,
0x0F20..=0x0F29 => Script::Tibetan,
0x0F2A..=0x0F33 => Script::Tibetan,
0x0F34 => Script::Tibetan,
0x0F35 => Script::Tibetan,
0x0F36 => Script::Tibetan,
0x0F37 => Script::Tibetan,
0x0F38 => Script::Tibetan,
0x0F39 => Script::Tibetan,
0x0F3A => Script::Tibetan,
0x0F3B => Script::Tibetan,
0x0F3C => Script::Tibetan,
0x0F3D => Script::Tibetan,
0x0F3E..=0x0F3F => Script::Tibetan,
0x0F40..=0x0F47 => Script::Tibetan,
0x0F49..=0x0F6C => Script::Tibetan,
0x0F71..=0x0F7E => Script::Tibetan,
0x0F7F => Script::Tibetan,
0x0F80..=0x0F84 => Script::Tibetan,
0x0F85 => Script::Tibetan,
0x0F86..=0x0F87 => Script::Tibetan,
0x0F88..=0x0F8C => Script::Tibetan,
0x0F8D..=0x0F97 => Script::Tibetan,
0x0F99..=0x0FBC => Script::Tibetan,
0x0FBE..=0x0FC5 => Script::Tibetan,
0x0FC6 => Script::Tibetan,
0x0FC7..=0x0FCC => Script::Tibetan,
0x0FCE..=0x0FCF => Script::Tibetan,
0x0FD0..=0x0FD4 => Script::Tibetan,
0x0FD9..=0x0FDA => Script::Tibetan,
0x1000..=0x102A => Script::Myanmar,
0x102B..=0x102C => Script::Myanmar,
0x102D..=0x1030 => Script::Myanmar,
0x1031 => Script::Myanmar,
0x1032..=0x1037 => Script::Myanmar,
0x1038 => Script::Myanmar,
0x1039..=0x103A => Script::Myanmar,
0x103B..=0x103C => Script::Myanmar,
0x103D..=0x103E => Script::Myanmar,
0x103F => Script::Myanmar,
0x1040..=0x1049 => Script::Myanmar,
0x104A..=0x104F => Script::Myanmar,
0x1050..=0x1055 => Script::Myanmar,
0x1056..=0x1057 => Script::Myanmar,
0x1058..=0x1059 => Script::Myanmar,
0x105A..=0x105D => Script::Myanmar,
0x105E..=0x1060 => Script::Myanmar,
0x1061 => Script::Myanmar,
0x1062..=0x1064 => Script::Myanmar,
0x1065..=0x1066 => Script::Myanmar,
0x1067..=0x106D => Script::Myanmar,
0x106E..=0x1070 => Script::Myanmar,
0x1071..=0x1074 => Script::Myanmar,
0x1075..=0x1081 => Script::Myanmar,
0x1082 => Script::Myanmar,
0x1083..=0x1084 => Script::Myanmar,
0x1085..=0x1086 => Script::Myanmar,
0x1087..=0x108C => Script::Myanmar,
0x108D => Script::Myanmar,
0x108E => Script::Myanmar,
0x108F => Script::Myanmar,
0x1090..=0x1099 => Script::Myanmar,
0x109A..=0x109C => Script::Myanmar,
0x109D => Script::Myanmar,
0x109E..=0x109F => Script::Myanmar,
0xA9E0..=0xA9E4 => Script::Myanmar,
0xA9E5 => Script::Myanmar,
0xA9E6 => Script::Myanmar,
0xA9E7..=0xA9EF => Script::Myanmar,
0xA9F0..=0xA9F9 => Script::Myanmar,
0xA9FA..=0xA9FE => Script::Myanmar,
0xAA60..=0xAA6F => Script::Myanmar,
0xAA70 => Script::Myanmar,
0xAA71..=0xAA76 => Script::Myanmar,
0xAA77..=0xAA79 => Script::Myanmar,
0xAA7A => Script::Myanmar,
0xAA7B => Script::Myanmar,
0xAA7C => Script::Myanmar,
0xAA7D => Script::Myanmar,
0xAA7E..=0xAA7F => Script::Myanmar,
0x10A0..=0x10C5 => Script::Georgian,
0x10C7 => Script::Georgian,
0x10CD => Script::Georgian,
0x10D0..=0x10FA => Script::Georgian,
0x10FC => Script::Georgian,
0x10FD..=0x10FF => Script::Georgian,
0x2D00..=0x2D25 => Script::Georgian,
0x2D27 => Script::Georgian,
0x2D2D => Script::Georgian,
0x1100..=0x11FF => Script::Hangul,
0x302E..=0x302F => Script::Hangul,
0x3131..=0x318E => Script::Hangul,
0x3200..=0x321E => Script::Hangul,
0x3260..=0x327E => Script::Hangul,
0xA960..=0xA97C => Script::Hangul,
0xAC00..=0xD7A3 => Script::Hangul,
0xD7B0..=0xD7C6 => Script::Hangul,
0xD7CB..=0xD7FB => Script::Hangul,
0xFFA0..=0xFFBE => Script::Hangul,
0xFFC2..=0xFFC7 => Script::Hangul,
0xFFCA..=0xFFCF => Script::Hangul,
0xFFD2..=0xFFD7 => Script::Hangul,
0xFFDA..=0xFFDC => Script::Hangul,
0x1200..=0x1248 => Script::Ethiopic,
0x124A..=0x124D => Script::Ethiopic,
0x1250..=0x1256 => Script::Ethiopic,
0x1258 => Script::Ethiopic,
0x125A..=0x125D => Script::Ethiopic,
0x1260..=0x1288 => Script::Ethiopic,
0x128A..=0x128D => Script::Ethiopic,
0x1290..=0x12B0 => Script::Ethiopic,
0x12B2..=0x12B5 => Script::Ethiopic,
0x12B8..=0x12BE => Script::Ethiopic,
0x12C0 => Script::Ethiopic,
0x12C2..=0x12C5 => Script::Ethiopic,
0x12C8..=0x12D6 => Script::Ethiopic,
0x12D8..=0x1310 => Script::Ethiopic,
0x1312..=0x1315 => Script::Ethiopic,
0x1318..=0x135A => Script::Ethiopic,
0x135D..=0x135F => Script::Ethiopic,
0x1360..=0x1368 => Script::Ethiopic,
0x1369..=0x137C => Script::Ethiopic,
0x1380..=0x138F => Script::Ethiopic,
0x1390..=0x1399 => Script::Ethiopic,
0x2D80..=0x2D96 => Script::Ethiopic,
0x2DA0..=0x2DA6 => Script::Ethiopic,
0x2DA8..=0x2DAE => Script::Ethiopic,
0x2DB0..=0x2DB6 => Script::Ethiopic,
0x2DB8..=0x2DBE => Script::Ethiopic,
0x2DC0..=0x2DC6 => Script::Ethiopic,
0x2DC8..=0x2DCE => Script::Ethiopic,
0x2DD0..=0x2DD6 => Script::Ethiopic,
0x2DD8..=0x2DDE => Script::Ethiopic,
0xAB01..=0xAB06 => Script::Ethiopic,
0xAB09..=0xAB0E => Script::Ethiopic,
0xAB11..=0xAB16 => Script::Ethiopic,
0xAB20..=0xAB26 => Script::Ethiopic,
0xAB28..=0xAB2E => Script::Ethiopic,
0x13A0..=0x13F5 => Script::Cherokee,
0x13F8..=0x13FD => Script::Cherokee,
0xAB70..=0xABBF => Script::Cherokee,
0x1400 => Script::CanadianAboriginal,
0x1401..=0x166C => Script::CanadianAboriginal,
0x166D..=0x166E => Script::CanadianAboriginal,
0x166F..=0x167F => Script::CanadianAboriginal,
0x18B0..=0x18F5 => Script::CanadianAboriginal,
0x1680 => Script::Ogham,
0x1681..=0x169A => Script::Ogham,
0x169B => Script::Ogham,
0x169C => Script::Ogham,
0x16A0..=0x16EA => Script::Runic,
0x16EE..=0x16F0 => Script::Runic,
0x16F1..=0x16F8 => Script::Runic,
0x1780..=0x17B3 => Script::Khmer,
0x17B4..=0x17B5 => Script::Khmer,
0x17B6 => Script::Khmer,
0x17B7..=0x17BD => Script::Khmer,
0x17BE..=0x17C5 => Script::Khmer,
0x17C6 => Script::Khmer,
0x17C7..=0x17C8 => Script::Khmer,
0x17C9..=0x17D3 => Script::Khmer,
0x17D4..=0x17D6 => Script::Khmer,
0x17D7 => Script::Khmer,
0x17D8..=0x17DA => Script::Khmer,
0x17DB => Script::Khmer,
0x17DC => Script::Khmer,
0x17DD => Script::Khmer,
0x17E0..=0x17E9 => Script::Khmer,
0x17F0..=0x17F9 => Script::Khmer,
0x19E0..=0x19FF => Script::Khmer,
0x1800..=0x1801 => Script::Mongolian,
0x1804 => Script::Mongolian,
0x1806 => Script::Mongolian,
0x1807..=0x180A => Script::Mongolian,
0x180B..=0x180D => Script::Mongolian,
0x180E => Script::Mongolian,
0x1810..=0x1819 => Script::Mongolian,
0x1820..=0x1842 => Script::Mongolian,
0x1843 => Script::Mongolian,
0x1844..=0x1877 => Script::Mongolian,
0x1880..=0x1884 => Script::Mongolian,
0x1885..=0x1886 => Script::Mongolian,
0x1887..=0x18A8 => Script::Mongolian,
0x18A9 => Script::Mongolian,
0x18AA => Script::Mongolian,
0x11660..=0x1166C => Script::Mongolian,
0x3041..=0x3096 => Script::Hiragana,
0x309D..=0x309E => Script::Hiragana,
0x309F => Script::Hiragana,
0x1B001 => Script::Hiragana,
0x1F200 => Script::Hiragana,
0x30A1..=0x30FA => Script::Katakana,
0x30FD..=0x30FE => Script::Katakana,
0x30FF => Script::Katakana,
0x31F0..=0x31FF => Script::Katakana,
0x32D0..=0x32FE => Script::Katakana,
0x3300..=0x3357 => Script::Katakana,
0xFF66..=0xFF6F => Script::Katakana,
0xFF71..=0xFF9D => Script::Katakana,
0x1B000 => Script::Katakana,
0x02EA..=0x02EB => Script::Bopomofo,
0x3105..=0x312D => Script::Bopomofo,
0x31A0..=0x31BA => Script::Bopomofo,
0x2E80..=0x2E99 => Script::Han,
0x2E9B..=0x2EF3 => Script::Han,
0x2F00..=0x2FD5 => Script::Han,
0x3005 => Script::Han,
0x3007 => Script::Han,
0x3021..=0x3029 => Script::Han,
0x3038..=0x303A => Script::Han,
0x303B => Script::Han,
0x3400..=0x4DB5 => Script::Han,
0x4E00..=0x9FD5 => Script::Han,
0xF900..=0xFA6D => Script::Han,
0xFA70..=0xFAD9 => Script::Han,
0x20000..=0x2A6D6 => Script::Han,
0x2A700..=0x2B734 => Script::Han,
0x2B740..=0x2B81D => Script::Han,
0x2B820..=0x2CEA1 => Script::Han,
0x2F800..=0x2FA1D => Script::Han,
0xA000..=0xA014 => Script::Yi,
0xA015 => Script::Yi,
0xA016..=0xA48C => Script::Yi,
0xA490..=0xA4C6 => Script::Yi,
0x10300..=0x1031F => Script::OldItalic,
0x10320..=0x10323 => Script::OldItalic,
0x10330..=0x10340 => Script::Gothic,
0x10341 => Script::Gothic,
0x10342..=0x10349 => Script::Gothic,
0x1034A => Script::Gothic,
0x10400..=0x1044F => Script::Deseret,
0x0300..=0x036F => Script::Inherited,
0x0485..=0x0486 => Script::Inherited,
0x064B..=0x0655 => Script::Inherited,
0x0670 => Script::Inherited,
0x0951..=0x0952 => Script::Inherited,
0x1AB0..=0x1ABD => Script::Inherited,
0x1ABE => Script::Inherited,
0x1CD0..=0x1CD2 => Script::Inherited,
0x1CD4..=0x1CE0 => Script::Inherited,
0x1CE2..=0x1CE8 => Script::Inherited,
0x1CED => Script::Inherited,
0x1CF4 => Script::Inherited,
0x1CF8..=0x1CF9 => Script::Inherited,
0x1DC0..=0x1DF5 => Script::Inherited,
0x1DFB..=0x1DFF => Script::Inherited,
0x200C..=0x200D => Script::Inherited,
0x20D0..=0x20DC => Script::Inherited,
0x20DD..=0x20E0 => Script::Inherited,
0x20E1 => Script::Inherited,
0x20E2..=0x20E4 => Script::Inherited,
0x20E5..=0x20F0 => Script::Inherited,
0x302A..=0x302D => Script::Inherited,
0x3099..=0x309A => Script::Inherited,
0xFE00..=0xFE0F => Script::Inherited,
0xFE20..=0xFE2D => Script::Inherited,
0x101FD => Script::Inherited,
0x102E0 => Script::Inherited,
0x1D167..=0x1D169 => Script::Inherited,
0x1D17B..=0x1D182 => Script::Inherited,
0x1D185..=0x1D18B => Script::Inherited,
0x1D1AA..=0x1D1AD => Script::Inherited,
0xE0100..=0xE01EF => Script::Inherited,
0x1700..=0x170C => Script::Tagalog,
0x170E..=0x1711 => Script::Tagalog,
0x1712..=0x1714 => Script::Tagalog,
0x1720..=0x1731 => Script::Hanunoo,
0x1732..=0x1734 => Script::Hanunoo,
0x1740..=0x1751 => Script::Buhid,
0x1752..=0x1753 => Script::Buhid,
0x1760..=0x176C => Script::Tagbanwa,
0x176E..=0x1770 => Script::Tagbanwa,
0x1772..=0x1773 => Script::Tagbanwa,
0x1900..=0x191E => Script::Limbu,
0x1920..=0x1922 => Script::Limbu,
0x1923..=0x1926 => Script::Limbu,
0x1927..=0x1928 => Script::Limbu,
0x1929..=0x192B => Script::Limbu,
0x1930..=0x1931 => Script::Limbu,
0x1932 => Script::Limbu,
0x1933..=0x1938 => Script::Limbu,
0x1939..=0x193B => Script::Limbu,
0x1940 => Script::Limbu,
0x1944..=0x1945 => Script::Limbu,
0x1946..=0x194F => Script::Limbu,
0x1950..=0x196D => Script::TaiLe,
0x1970..=0x1974 => Script::TaiLe,
0x10000..=0x1000B => Script::LinearB,
0x1000D..=0x10026 => Script::LinearB,
0x10028..=0x1003A => Script::LinearB,
0x1003C..=0x1003D => Script::LinearB,
0x1003F..=0x1004D => Script::LinearB,
0x10050..=0x1005D => Script::LinearB,
0x10080..=0x100FA => Script::LinearB,
0x10380..=0x1039D => Script::Ugaritic,
0x1039F => Script::Ugaritic,
0x10450..=0x1047F => Script::Shavian,
0x10480..=0x1049D => Script::Osmanya,
0x104A0..=0x104A9 => Script::Osmanya,
0x10800..=0x10805 => Script::Cypriot,
0x10808 => Script::Cypriot,
0x1080A..=0x10835 => Script::Cypriot,
0x10837..=0x10838 => Script::Cypriot,
0x1083C => Script::Cypriot,
0x1083F => Script::Cypriot,
0x2800..=0x28FF => Script::Braille,
0x1A00..=0x1A16 => Script::Buginese,
0x1A17..=0x1A18 => Script::Buginese,
0x1A19..=0x1A1A => Script::Buginese,
0x1A1B => Script::Buginese,
0x1A1E..=0x1A1F => Script::Buginese,
0x03E2..=0x03EF => Script::Coptic,
0x2C80..=0x2CE4 => Script::Coptic,
0x2CE5..=0x2CEA => Script::Coptic,
0x2CEB..=0x2CEE => Script::Coptic,
0x2CEF..=0x2CF1 => Script::Coptic,
0x2CF2..=0x2CF3 => Script::Coptic,
0x2CF9..=0x2CFC => Script::Coptic,
0x2CFD => Script::Coptic,
0x2CFE..=0x2CFF => Script::Coptic,
0x1980..=0x19AB => Script::NewTaiLue,
0x19B0..=0x19C9 => Script::NewTaiLue,
0x19D0..=0x19D9 => Script::NewTaiLue,
0x19DA => Script::NewTaiLue,
0x19DE..=0x19DF => Script::NewTaiLue,
0x2C00..=0x2C2E => Script::Glagolitic,
0x2C30..=0x2C5E => Script::Glagolitic,
0x1E000..=0x1E006 => Script::Glagolitic,
0x1E008..=0x1E018 => Script::Glagolitic,
0x1E01B..=0x1E021 => Script::Glagolitic,
0x1E023..=0x1E024 => Script::Glagolitic,
0x1E026..=0x1E02A => Script::Glagolitic,
0x2D30..=0x2D67 => Script::Tifinagh,
0x2D6F => Script::Tifinagh,
0x2D70 => Script::Tifinagh,
0x2D7F => Script::Tifinagh,
0xA800..=0xA801 => Script::SylotiNagri,
0xA802 => Script::SylotiNagri,
0xA803..=0xA805 => Script::SylotiNagri,
0xA806 => Script::SylotiNagri,
0xA807..=0xA80A => Script::SylotiNagri,
0xA80B => Script::SylotiNagri,
0xA80C..=0xA822 => Script::SylotiNagri,
0xA823..=0xA824 => Script::SylotiNagri,
0xA825..=0xA826 => Script::SylotiNagri,
0xA827 => Script::SylotiNagri,
0xA828..=0xA82B => Script::SylotiNagri,
0x103A0..=0x103C3 => Script::OldPersian,
0x103C8..=0x103CF => Script::OldPersian,
0x103D0 => Script::OldPersian,
0x103D1..=0x103D5 => Script::OldPersian,
0x10A00 => Script::Kharoshthi,
0x10A01..=0x10A03 => Script::Kharoshthi,
0x10A05..=0x10A06 => Script::Kharoshthi,
0x10A0C..=0x10A0F => Script::Kharoshthi,
0x10A10..=0x10A13 => Script::Kharoshthi,
0x10A15..=0x10A17 => Script::Kharoshthi,
0x10A19..=0x10A33 => Script::Kharoshthi,
0x10A38..=0x10A3A => Script::Kharoshthi,
0x10A3F => Script::Kharoshthi,
0x10A40..=0x10A47 => Script::Kharoshthi,
0x10A50..=0x10A58 => Script::Kharoshthi,
0x1B00..=0x1B03 => Script::Balinese,
0x1B04 => Script::Balinese,
0x1B05..=0x1B33 => Script::Balinese,
0x1B34 => Script::Balinese,
0x1B35 => Script::Balinese,
0x1B36..=0x1B3A => Script::Balinese,
0x1B3B => Script::Balinese,
0x1B3C => Script::Balinese,
0x1B3D..=0x1B41 => Script::Balinese,
0x1B42 => Script::Balinese,
0x1B43..=0x1B44 => Script::Balinese,
0x1B45..=0x1B4B => Script::Balinese,
0x1B50..=0x1B59 => Script::Balinese,
0x1B5A..=0x1B60 => Script::Balinese,
0x1B61..=0x1B6A => Script::Balinese,
0x1B6B..=0x1B73 => Script::Balinese,
0x1B74..=0x1B7C => Script::Balinese,
0x12000..=0x12399 => Script::Cuneiform,
0x12400..=0x1246E => Script::Cuneiform,
0x12470..=0x12474 => Script::Cuneiform,
0x12480..=0x12543 => Script::Cuneiform,
0x10900..=0x10915 => Script::Phoenician,
0x10916..=0x1091B => Script::Phoenician,
0x1091F => Script::Phoenician,
0xA840..=0xA873 => Script::PhagsPa,
0xA874..=0xA877 => Script::PhagsPa,
0x07C0..=0x07C9 => Script::Nko,
0x07CA..=0x07EA => Script::Nko,
0x07EB..=0x07F3 => Script::Nko,
0x07F4..=0x07F5 => Script::Nko,
0x07F6 => Script::Nko,
0x07F7..=0x07F9 => Script::Nko,
0x07FA => Script::Nko,
0x1B80..=0x1B81 => Script::Sundanese,
0x1B82 => Script::Sundanese,
0x1B83..=0x1BA0 => Script::Sundanese,
0x1BA1 => Script::Sundanese,
0x1BA2..=0x1BA5 => Script::Sundanese,
0x1BA6..=0x1BA7 => Script::Sundanese,
0x1BA8..=0x1BA9 => Script::Sundanese,
0x1BAA => Script::Sundanese,
0x1BAB..=0x1BAD => Script::Sundanese,
0x1BAE..=0x1BAF => Script::Sundanese,
0x1BB0..=0x1BB9 => Script::Sundanese,
0x1BBA..=0x1BBF => Script::Sundanese,
0x1CC0..=0x1CC7 => Script::Sundanese,
0x1C00..=0x1C23 => Script::Lepcha,
0x1C24..=0x1C2B => Script::Lepcha,
0x1C2C..=0x1C33 => Script::Lepcha,
0x1C34..=0x1C35 => Script::Lepcha,
0x1C36..=0x1C37 => Script::Lepcha,
0x1C3B..=0x1C3F => Script::Lepcha,
0x1C40..=0x1C49 => Script::Lepcha,
0x1C4D..=0x1C4F => Script::Lepcha,
0x1C50..=0x1C59 => Script::OlChiki,
0x1C5A..=0x1C77 => Script::OlChiki,
0x1C78..=0x1C7D => Script::OlChiki,
0x1C7E..=0x1C7F => Script::OlChiki,
0xA500..=0xA60B => Script::Vai,
0xA60C => Script::Vai,
0xA60D..=0xA60F => Script::Vai,
0xA610..=0xA61F => Script::Vai,
0xA620..=0xA629 => Script::Vai,
0xA62A..=0xA62B => Script::Vai,
0xA880..=0xA881 => Script::Saurashtra,
0xA882..=0xA8B3 => Script::Saurashtra,
0xA8B4..=0xA8C3 => Script::Saurashtra,
0xA8C4..=0xA8C5 => Script::Saurashtra,
0xA8CE..=0xA8CF => Script::Saurashtra,
0xA8D0..=0xA8D9 => Script::Saurashtra,
0xA900..=0xA909 => Script::KayahLi,
0xA90A..=0xA925 => Script::KayahLi,
0xA926..=0xA92D => Script::KayahLi,
0xA92F => Script::KayahLi,
0xA930..=0xA946 => Script::Rejang,
0xA947..=0xA951 => Script::Rejang,
0xA952..=0xA953 => Script::Rejang,
0xA95F => Script::Rejang,
0x10280..=0x1029C => Script::Lycian,
0x102A0..=0x102D0 => Script::Carian,
0x10920..=0x10939 => Script::Lydian,
0x1093F => Script::Lydian,
0xAA00..=0xAA28 => Script::Cham,
0xAA29..=0xAA2E => Script::Cham,
0xAA2F..=0xAA30 => Script::Cham,
0xAA31..=0xAA32 => Script::Cham,
0xAA33..=0xAA34 => Script::Cham,
0xAA35..=0xAA36 => Script::Cham,
0xAA40..=0xAA42 => Script::Cham,
0xAA43 => Script::Cham,
0xAA44..=0xAA4B => Script::Cham,
0xAA4C => Script::Cham,
0xAA4D => Script::Cham,
0xAA50..=0xAA59 => Script::Cham,
0xAA5C..=0xAA5F => Script::Cham,
0x1A20..=0x1A54 => Script::TaiTham,
0x1A55 => Script::TaiTham,
0x1A56 => Script::TaiTham,
0x1A57 => Script::TaiTham,
0x1A58..=0x1A5E => Script::TaiTham,
0x1A60 => Script::TaiTham,
0x1A61 => Script::TaiTham,
0x1A62 => Script::TaiTham,
0x1A63..=0x1A64 => Script::TaiTham,
0x1A65..=0x1A6C => Script::TaiTham,
0x1A6D..=0x1A72 => Script::TaiTham,
0x1A73..=0x1A7C => Script::TaiTham,
0x1A7F => Script::TaiTham,
0x1A80..=0x1A89 => Script::TaiTham,
0x1A90..=0x1A99 => Script::TaiTham,
0x1AA0..=0x1AA6 => Script::TaiTham,
0x1AA7 => Script::TaiTham,
0x1AA8..=0x1AAD => Script::TaiTham,
0xAA80..=0xAAAF => Script::TaiViet,
0xAAB0 => Script::TaiViet,
0xAAB1 => Script::TaiViet,
0xAAB2..=0xAAB4 => Script::TaiViet,
0xAAB5..=0xAAB6 => Script::TaiViet,
0xAAB7..=0xAAB8 => Script::TaiViet,
0xAAB9..=0xAABD => Script::TaiViet,
0xAABE..=0xAABF => Script::TaiViet,
0xAAC0 => Script::TaiViet,
0xAAC1 => Script::TaiViet,
0xAAC2 => Script::TaiViet,
0xAADB..=0xAADC => Script::TaiViet,
0xAADD => Script::TaiViet,
0xAADE..=0xAADF => Script::TaiViet,
0x10B00..=0x10B35 => Script::Avestan,
0x10B39..=0x10B3F => Script::Avestan,
0x13000..=0x1342E => Script::EgyptianHieroglyphs,
0x0800..=0x0815 => Script::Samaritan,
0x0816..=0x0819 => Script::Samaritan,
0x081A => Script::Samaritan,
0x081B..=0x0823 => Script::Samaritan,
0x0824 => Script::Samaritan,
0x0825..=0x0827 => Script::Samaritan,
0x0828 => Script::Samaritan,
0x0829..=0x082D => Script::Samaritan,
0x0830..=0x083E => Script::Samaritan,
0xA4D0..=0xA4F7 => Script::Lisu,
0xA4F8..=0xA4FD => Script::Lisu,
0xA4FE..=0xA4FF => Script::Lisu,
0xA6A0..=0xA6E5 => Script::Bamum,
0xA6E6..=0xA6EF => Script::Bamum,
0xA6F0..=0xA6F1 => Script::Bamum,
0xA6F2..=0xA6F7 => Script::Bamum,
0x16800..=0x16A38 => Script::Bamum,
0xA980..=0xA982 => Script::Javanese,
0xA983 => Script::Javanese,
0xA984..=0xA9B2 => Script::Javanese,
0xA9B3 => Script::Javanese,
0xA9B4..=0xA9B5 => Script::Javanese,
0xA9B6..=0xA9B9 => Script::Javanese,
0xA9BA..=0xA9BB => Script::Javanese,
0xA9BC => Script::Javanese,
0xA9BD..=0xA9C0 => Script::Javanese,
0xA9C1..=0xA9CD => Script::Javanese,
0xA9D0..=0xA9D9 => Script::Javanese,
0xA9DE..=0xA9DF => Script::Javanese,
0xAAE0..=0xAAEA => Script::MeeteiMayek,
0xAAEB => Script::MeeteiMayek,
0xAAEC..=0xAAED => Script::MeeteiMayek,
0xAAEE..=0xAAEF => Script::MeeteiMayek,
0xAAF0..=0xAAF1 => Script::MeeteiMayek,
0xAAF2 => Script::MeeteiMayek,
0xAAF3..=0xAAF4 => Script::MeeteiMayek,
0xAAF5 => Script::MeeteiMayek,
0xAAF6 => Script::MeeteiMayek,
0xABC0..=0xABE2 => Script::MeeteiMayek,
0xABE3..=0xABE4 => Script::MeeteiMayek,
0xABE5 => Script::MeeteiMayek,
0xABE6..=0xABE7 => Script::MeeteiMayek,
0xABE8 => Script::MeeteiMayek,
0xABE9..=0xABEA => Script::MeeteiMayek,
0xABEB => Script::MeeteiMayek,
0xABEC => Script::MeeteiMayek,
0xABED => Script::MeeteiMayek,
0xABF0..=0xABF9 => Script::MeeteiMayek,
0x10840..=0x10855 => Script::ImperialAramaic,
0x10857 => Script::ImperialAramaic,
0x10858..=0x1085F => Script::ImperialAramaic,
0x10A60..=0x10A7C => Script::OldSouthArabian,
0x10A7D..=0x10A7E => Script::OldSouthArabian,
0x10A7F => Script::OldSouthArabian,
0x10B40..=0x10B55 => Script::InscriptionalParthian,
0x10B58..=0x10B5F => Script::InscriptionalParthian,
0x10B60..=0x10B72 => Script::InscriptionalPahlavi,
0x10B78..=0x10B7F => Script::InscriptionalPahlavi,
0x10C00..=0x10C48 => Script::OldTurkic,
0x11080..=0x11081 => Script::Kaithi,
0x11082 => Script::Kaithi,
0x11083..=0x110AF => Script::Kaithi,
0x110B0..=0x110B2 => Script::Kaithi,
0x110B3..=0x110B6 => Script::Kaithi,
0x110B7..=0x110B8 => Script::Kaithi,
0x110B9..=0x110BA => Script::Kaithi,
0x110BB..=0x110BC => Script::Kaithi,
0x110BD => Script::Kaithi,
0x110BE..=0x110C1 => Script::Kaithi,
0x1BC0..=0x1BE5 => Script::Batak,
0x1BE6 => Script::Batak,
0x1BE7 => Script::Batak,
0x1BE8..=0x1BE9 => Script::Batak,
0x1BEA..=0x1BEC => Script::Batak,
0x1BED => Script::Batak,
0x1BEE => Script::Batak,
0x1BEF..=0x1BF1 => Script::Batak,
0x1BF2..=0x1BF3 => Script::Batak,
0x1BFC..=0x1BFF => Script::Batak,
0x11000 => Script::Brahmi,
0x11001 => Script::Brahmi,
0x11002 => Script::Brahmi,
0x11003..=0x11037 => Script::Brahmi,
0x11038..=0x11046 => Script::Brahmi,
0x11047..=0x1104D => Script::Brahmi,
0x11052..=0x11065 => Script::Brahmi,
0x11066..=0x1106F => Script::Brahmi,
0x1107F => Script::Brahmi,
0x0840..=0x0858 => Script::Mandaic,
0x0859..=0x085B => Script::Mandaic,
0x085E => Script::Mandaic,
0x11100..=0x11102 => Script::Chakma,
0x11103..=0x11126 => Script::Chakma,
0x11127..=0x1112B => Script::Chakma,
0x1112C => Script::Chakma,
0x1112D..=0x11134 => Script::Chakma,
0x11136..=0x1113F => Script::Chakma,
0x11140..=0x11143 => Script::Chakma,
0x109A0..=0x109B7 => Script::MeroiticCursive,
0x109BC..=0x109BD => Script::MeroiticCursive,
0x109BE..=0x109BF => Script::MeroiticCursive,
0x109C0..=0x109CF => Script::MeroiticCursive,
0x109D2..=0x109FF => Script::MeroiticCursive,
0x10980..=0x1099F => Script::MeroiticHieroglyphs,
0x16F00..=0x16F44 => Script::Miao,
0x16F50 => Script::Miao,
0x16F51..=0x16F7E => Script::Miao,
0x16F8F..=0x16F92 => Script::Miao,
0x16F93..=0x16F9F => Script::Miao,
0x11180..=0x11181 => Script::Sharada,
0x11182 => Script::Sharada,
0x11183..=0x111B2 => Script::Sharada,
0x111B3..=0x111B5 => Script::Sharada,
0x111B6..=0x111BE => Script::Sharada,
0x111BF..=0x111C0 => Script::Sharada,
0x111C1..=0x111C4 => Script::Sharada,
0x111C5..=0x111C9 => Script::Sharada,
0x111CA..=0x111CC => Script::Sharada,
0x111CD => Script::Sharada,
0x111D0..=0x111D9 => Script::Sharada,
0x111DA => Script::Sharada,
0x111DB => Script::Sharada,
0x111DC => Script::Sharada,
0x111DD..=0x111DF => Script::Sharada,
0x110D0..=0x110E8 => Script::SoraSompeng,
0x110F0..=0x110F9 => Script::SoraSompeng,
0x11680..=0x116AA => Script::Takri,
0x116AB => Script::Takri,
0x116AC => Script::Takri,
0x116AD => Script::Takri,
0x116AE..=0x116AF => Script::Takri,
0x116B0..=0x116B5 => Script::Takri,
0x116B6 => Script::Takri,
0x116B7 => Script::Takri,
0x116C0..=0x116C9 => Script::Takri,
0x10530..=0x10563 => Script::CaucasianAlbanian,
0x1056F => Script::CaucasianAlbanian,
0x16AD0..=0x16AED => Script::BassaVah,
0x16AF0..=0x16AF4 => Script::BassaVah,
0x16AF5 => Script::BassaVah,
0x1BC00..=0x1BC6A => Script::Duployan,
0x1BC70..=0x1BC7C => Script::Duployan,
0x1BC80..=0x1BC88 => Script::Duployan,
0x1BC90..=0x1BC99 => Script::Duployan,
0x1BC9C => Script::Duployan,
0x1BC9D..=0x1BC9E => Script::Duployan,
0x1BC9F => Script::Duployan,
0x10500..=0x10527 => Script::Elbasan,
0x11300..=0x11301 => Script::Grantha,
0x11302..=0x11303 => Script::Grantha,
0x11305..=0x1130C => Script::Grantha,
0x1130F..=0x11310 => Script::Grantha,
0x11313..=0x11328 => Script::Grantha,
0x1132A..=0x11330 => Script::Grantha,
0x11332..=0x11333 => Script::Grantha,
0x11335..=0x11339 => Script::Grantha,
0x1133C => Script::Grantha,
0x1133D => Script::Grantha,
0x1133E..=0x1133F => Script::Grantha,
0x11340 => Script::Grantha,
0x11341..=0x11344 => Script::Grantha,
0x11347..=0x11348 => Script::Grantha,
0x1134B..=0x1134D => Script::Grantha,
0x11350 => Script::Grantha,
0x11357 => Script::Grantha,
0x1135D..=0x11361 => Script::Grantha,
0x11362..=0x11363 => Script::Grantha,
0x11366..=0x1136C => Script::Grantha,
0x11370..=0x11374 => Script::Grantha,
0x16B00..=0x16B2F => Script::PahawhHmong,
0x16B30..=0x16B36 => Script::PahawhHmong,
0x16B37..=0x16B3B => Script::PahawhHmong,
0x16B3C..=0x16B3F => Script::PahawhHmong,
0x16B40..=0x16B43 => Script::PahawhHmong,
0x16B44 => Script::PahawhHmong,
0x16B45 => Script::PahawhHmong,
0x16B50..=0x16B59 => Script::PahawhHmong,
0x16B5B..=0x16B61 => Script::PahawhHmong,
0x16B63..=0x16B77 => Script::PahawhHmong,
0x16B7D..=0x16B8F => Script::PahawhHmong,
0x11200..=0x11211 => Script::Khojki,
0x11213..=0x1122B => Script::Khojki,
0x1122C..=0x1122E => Script::Khojki,
0x1122F..=0x11231 => Script::Khojki,
0x11232..=0x11233 => Script::Khojki,
0x11234 => Script::Khojki,
0x11235 => Script::Khojki,
0x11236..=0x11237 => Script::Khojki,
0x11238..=0x1123D => Script::Khojki,
0x1123E => Script::Khojki,
0x10600..=0x10736 => Script::LinearA,
0x10740..=0x10755 => Script::LinearA,
0x10760..=0x10767 => Script::LinearA,
0x11150..=0x11172 => Script::Mahajani,
0x11173 => Script::Mahajani,
0x11174..=0x11175 => Script::Mahajani,
0x11176 => Script::Mahajani,
0x10AC0..=0x10AC7 => Script::Manichaean,
0x10AC8 => Script::Manichaean,
0x10AC9..=0x10AE4 => Script::Manichaean,
0x10AE5..=0x10AE6 => Script::Manichaean,
0x10AEB..=0x10AEF => Script::Manichaean,
0x10AF0..=0x10AF6 => Script::Manichaean,
0x1E800..=0x1E8C4 => Script::MendeKikakui,
0x1E8C7..=0x1E8CF => Script::MendeKikakui,
0x1E8D0..=0x1E8D6 => Script::MendeKikakui,
0x11600..=0x1162F => Script::Modi,
0x11630..=0x11632 => Script::Modi,
0x11633..=0x1163A => Script::Modi,
0x1163B..=0x1163C => Script::Modi,
0x1163D => Script::Modi,
0x1163E => Script::Modi,
0x1163F..=0x11640 => Script::Modi,
0x11641..=0x11643 => Script::Modi,
0x11644 => Script::Modi,
0x11650..=0x11659 => Script::Modi,
0x16A40..=0x16A5E => Script::Mro,
0x16A60..=0x16A69 => Script::Mro,
0x16A6E..=0x16A6F => Script::Mro,
0x10A80..=0x10A9C => Script::OldNorthArabian,
0x10A9D..=0x10A9F => Script::OldNorthArabian,
0x10880..=0x1089E => Script::Nabataean,
0x108A7..=0x108AF => Script::Nabataean,
0x10860..=0x10876 => Script::Palmyrene,
0x10877..=0x10878 => Script::Palmyrene,
0x10879..=0x1087F => Script::Palmyrene,
0x11AC0..=0x11AF8 => Script::PauCinHau,
0x10350..=0x10375 => Script::OldPermic,
0x10376..=0x1037A => Script::OldPermic,
0x10B80..=0x10B91 => Script::PsalterPahlavi,
0x10B99..=0x10B9C => Script::PsalterPahlavi,
0x10BA9..=0x10BAF => Script::PsalterPahlavi,
0x11580..=0x115AE => Script::Siddham,
0x115AF..=0x115B1 => Script::Siddham,
0x115B2..=0x115B5 => Script::Siddham,
0x115B8..=0x115BB => Script::Siddham,
0x115BC..=0x115BD => Script::Siddham,
0x115BE => Script::Siddham,
0x115BF..=0x115C0 => Script::Siddham,
0x115C1..=0x115D7 => Script::Siddham,
0x115D8..=0x115DB => Script::Siddham,
0x115DC..=0x115DD => Script::Siddham,
0x112B0..=0x112DE => Script::Khudawadi,
0x112DF => Script::Khudawadi,
0x112E0..=0x112E2 => Script::Khudawadi,
0x112E3..=0x112EA => Script::Khudawadi,
0x112F0..=0x112F9 => Script::Khudawadi,
0x11480..=0x114AF => Script::Tirhuta,
0x114B0..=0x114B2 => Script::Tirhuta,
0x114B3..=0x114B8 => Script::Tirhuta,
0x114B9 => Script::Tirhuta,
0x114BA => Script::Tirhuta,
0x114BB..=0x114BE => Script::Tirhuta,
0x114BF..=0x114C0 => Script::Tirhuta,
0x114C1 => Script::Tirhuta,
0x114C2..=0x114C3 => Script::Tirhuta,
0x114C4..=0x114C5 => Script::Tirhuta,
0x114C6 => Script::Tirhuta,
0x114C7 => Script::Tirhuta,
0x114D0..=0x114D9 => Script::Tirhuta,
0x118A0..=0x118DF => Script::WarangCiti,
0x118E0..=0x118E9 => Script::WarangCiti,
0x118EA..=0x118F2 => Script::WarangCiti,
0x118FF => Script::WarangCiti,
0x11700..=0x11719 => Script::Ahom,
0x1171D..=0x1171F => Script::Ahom,
0x11720..=0x11721 => Script::Ahom,
0x11722..=0x11725 => Script::Ahom,
0x11726 => Script::Ahom,
0x11727..=0x1172B => Script::Ahom,
0x11730..=0x11739 => Script::Ahom,
0x1173A..=0x1173B => Script::Ahom,
0x1173C..=0x1173E => Script::Ahom,
0x1173F => Script::Ahom,
0x14400..=0x14646 => Script::AnatolianHieroglyphs,
0x108E0..=0x108F2 => Script::Hatran,
0x108F4..=0x108F5 => Script::Hatran,
0x108FB..=0x108FF => Script::Hatran,
0x11280..=0x11286 => Script::Multani,
0x11288 => Script::Multani,
0x1128A..=0x1128D => Script::Multani,
0x1128F..=0x1129D => Script::Multani,
0x1129F..=0x112A8 => Script::Multani,
0x112A9 => Script::Multani,
0x10C80..=0x10CB2 => Script::OldHungarian,
0x10CC0..=0x10CF2 => Script::OldHungarian,
0x10CFA..=0x10CFF => Script::OldHungarian,
0x1D800..=0x1D9FF => Script::SignWriting,
0x1DA00..=0x1DA36 => Script::SignWriting,
0x1DA37..=0x1DA3A => Script::SignWriting,
0x1DA3B..=0x1DA6C => Script::SignWriting,
0x1DA6D..=0x1DA74 => Script::SignWriting,
0x1DA75 => Script::SignWriting,
0x1DA76..=0x1DA83 => Script::SignWriting,
0x1DA84 => Script::SignWriting,
0x1DA85..=0x1DA86 => Script::SignWriting,
0x1DA87..=0x1DA8B => Script::SignWriting,
0x1DA9B..=0x1DA9F => Script::SignWriting,
0x1DAA1..=0x1DAAF => Script::SignWriting,
0x1E900..=0x1E943 => Script::Adlam,
0x1E944..=0x1E94A => Script::Adlam,
0x1E950..=0x1E959 => Script::Adlam,
0x1E95E..=0x1E95F => Script::Adlam,
0x11C00..=0x11C08 => Script::Bhaiksuki,
0x11C0A..=0x11C2E => Script::Bhaiksuki,
0x11C2F => Script::Bhaiksuki,
0x11C30..=0x11C36 => Script::Bhaiksuki,
0x11C38..=0x11C3D => Script::Bhaiksuki,
0x11C3E => Script::Bhaiksuki,
0x11C3F => Script::Bhaiksuki,
0x11C40 => Script::Bhaiksuki,
0x11C41..=0x11C45 => Script::Bhaiksuki,
0x11C50..=0x11C59 => Script::Bhaiksuki,
0x11C5A..=0x11C6C => Script::Bhaiksuki,
0x11C70..=0x11C71 => Script::Marchen,
0x11C72..=0x11C8F => Script::Marchen,
0x11C92..=0x11CA7 => Script::Marchen,
0x11CA9 => Script::Marchen,
0x11CAA..=0x11CB0 => Script::Marchen,
0x11CB1 => Script::Marchen,
0x11CB2..=0x11CB3 => Script::Marchen,
0x11CB4 => Script::Marchen,
0x11CB5..=0x11CB6 => Script::Marchen,
0x11400..=0x11434 => Script::Newa,
0x11435..=0x11437 => Script::Newa,
0x11438..=0x1143F => Script::Newa,
0x11440..=0x11441 => Script::Newa,
0x11442..=0x11444 => Script::Newa,
0x11445 => Script::Newa,
0x11446 => Script::Newa,
0x11447..=0x1144A => Script::Newa,
0x1144B..=0x1144F => Script::Newa,
0x11450..=0x11459 => Script::Newa,
0x1145B => Script::Newa,
0x1145D => Script::Newa,
0x104B0..=0x104D3 => Script::Osage,
0x104D8..=0x104FB => Script::Osage,
0x16FE0 => Script::Tangut,
0x17000..=0x187EC => Script::Tangut,
0x18800..=0x18AF2 => Script::Tangut,
_ => Script::Any,
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_unicode_script() {
assert_eq!(Script::Han, get_script('京'));
assert_eq!(Script::Han, get_script('太'));
assert_eq!(Script::Hiragana, get_script('い'));
assert_eq!(Script::Katakana, get_script('グ'));
assert_eq!(Script::Common, get_script('ー'));
assert_eq!(Script::Latin, get_script('a'));
assert_eq!(Script::Latin, get_script('A'));
assert_eq!(Script::Common, get_script('0'));
assert_eq!(Script::Common, get_script('$'));
assert_eq!(Script::Common, get_script('@'));
assert_eq!(Script::Common, get_script('-'));
assert_eq!(Script::Common, get_script(' '));
assert_eq!(Script::Common, get_script('�'));
}
}
| 0 |
hf_public_repos/tokenizers/tokenizers/src/pre_tokenizers | hf_public_repos/tokenizers/tokenizers/src/pre_tokenizers/unicode_scripts/pre_tokenizer.rs | use crate::pre_tokenizers::unicode_scripts::scripts::{get_script, Script};
use crate::tokenizer::{normalizer::Range, PreTokenizedString, PreTokenizer, Result};
use crate::utils::macro_rules_attribute;
#[derive(Clone, Debug, PartialEq, Eq)]
#[macro_rules_attribute(impl_serde_type!)]
pub struct UnicodeScripts;
impl UnicodeScripts {
pub fn new() -> Self {
Self {}
}
}
impl Default for UnicodeScripts {
fn default() -> Self {
Self::new()
}
}
// This mirrors the logic of the Unigram default IsValidSentencePiece.
// It could be integrated directly within `get_script`, but keeping it separate
// makes these modifications easier to spot later; release mode should optimize
// the extra indirection away anyway.
fn fixed_script(c: char) -> Script {
let raw_script = get_script(c);
if c as u32 == 0x30FC {
Script::Han
} else if c == ' ' {
Script::Any
} else {
match raw_script {
Script::Hiragana => Script::Han,
Script::Katakana => Script::Han,
script => script,
}
}
}
impl PreTokenizer for UnicodeScripts {
fn pre_tokenize(&self, pretokenized: &mut PreTokenizedString) -> Result<()> {
pretokenized.split(|_, normalized| {
let mut last_script = None;
let mut offset = 0;
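            // Record a split boundary (byte offset) whenever the effective script
            // changes between two consecutive characters. `Script::Any` (spaces)
            // never triggers a boundary and does not update `last_script`, so
            // spaces attach to the surrounding script.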
let mut ranges: Vec<_> = normalized
.get()
.chars()
.filter_map(|c| {
let script = Some(fixed_script(c));
let result = if script != Some(Script::Any)
&& last_script != Some(Script::Any)
&& last_script != script
{
Some(offset)
} else {
None
};
offset += c.len_utf8();
if script != Some(Script::Any) {
last_script = script;
}
result
})
.collect();
ranges.push(normalized.get().len());
Ok(ranges
.windows(2)
.map(|item| {
normalized
.slice(Range::Normalized(item[0]..item[1]))
.expect("NormalizedString bad split")
})
.collect::<Vec<_>>())
})
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::OffsetReferential;
use crate::OffsetType;
#[test]
fn basic() {
let pretok = UnicodeScripts {};
let mut pretokenized = PreTokenizedString::from("どこで生れ。Yes");
pretok.pre_tokenize(&mut pretokenized).unwrap();
assert_eq!(
pretokenized
.get_splits(OffsetReferential::Normalized, OffsetType::Byte)
.into_iter()
.map(|(s, o, _)| (s, o))
.collect::<Vec<_>>(),
vec![("どこで生れ", (0, 15)), ("。", (15, 18)), ("Yes", (18, 21))]
);
assert_eq!(
pretokenized
.get_splits(OffsetReferential::Original, OffsetType::Byte)
.into_iter()
.map(|(s, o, _)| (s, o))
.collect::<Vec<_>>(),
vec![("どこで生れ", (0, 15)), ("。", (15, 18)), ("Yes", (18, 21))]
);
}
#[test]
fn spaces_are_included_in_every_script() {
let pretok = UnicodeScripts {};
let mut pretokenized = PreTokenizedString::from("Apples are りんご 林檎");
pretok.pre_tokenize(&mut pretokenized).unwrap();
assert_eq!(
pretokenized
.get_splits(OffsetReferential::Normalized, OffsetType::Byte)
.into_iter()
.map(|(s, o, _)| (s, o))
.collect::<Vec<_>>(),
vec![("Apples are ", (0, 11)), ("りんご 林檎", (11, 27))]
);
assert_eq!(
pretokenized
.get_splits(OffsetReferential::Original, OffsetType::Byte)
.into_iter()
.map(|(s, o, _)| (s, o))
.collect::<Vec<_>>(),
vec![("Apples are ", (0, 11)), ("りんご 林檎", (11, 27))]
);
}
#[test]
fn test_unicode_script() {
assert_eq!(Script::Han, fixed_script('京'));
assert_eq!(Script::Han, fixed_script('太'));
assert_eq!(Script::Han, fixed_script('い'));
assert_eq!(Script::Han, fixed_script('グ'));
assert_eq!(Script::Han, fixed_script('ー'));
assert_eq!(Script::Latin, fixed_script('a'));
assert_eq!(Script::Latin, fixed_script('A'));
assert_eq!(Script::Common, fixed_script('0'));
assert_eq!(Script::Common, fixed_script('$'));
assert_eq!(Script::Common, fixed_script('@'));
assert_eq!(Script::Common, fixed_script('-'));
assert_eq!(Script::Any, fixed_script(' '));
}
}
| 0 |
hf_public_repos/tokenizers | hf_public_repos/tokenizers/docs/Makefile | # Minimal makefile for Sphinx documentation
#
# You can set these variables from the command line, and also
# from the environment for those with `?=`
SPHINXOPTS ?=
SPHINXBUILD ?= sphinx-build
BUILDDIR ?= build
SOURCEDIR = source
# Put it first so that "make" without argument is like "make html_all".
html_all:
@echo "Generating doc for Rust"
@$(SPHINXBUILD) -M html "$(SOURCEDIR)" "$(BUILDDIR)/rust" $(SPHINXOPTS) $(O) -t rust
@echo "Generating doc for Python"
@$(SPHINXBUILD) -M html "$(SOURCEDIR)" "$(BUILDDIR)/python" $(SPHINXOPTS) $(O) -t python
@echo "Generating doc for Node.js"
@$(SPHINXBUILD) -M html "$(SOURCEDIR)" "$(BUILDDIR)/node" $(SPHINXOPTS) $(O) -t node
.PHONY: html_all Makefile
# Catch-all target: route all unknown targets to Sphinx using the new
# "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS).
%: Makefile
@$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
| 0 |
hf_public_repos/tokenizers | hf_public_repos/tokenizers/docs/README.md | ## Requirements
To generate the documentation, you need a Python environment with the following packages installed:
```bash
pip install sphinx sphinx_rtd_theme setuptools_rust
```
The `tokenizers` library must also be installed in this same environment, so that Sphinx can
generate the API Reference and cross-links properly. If you want to preview the documentation with
local modifications to the Python bindings, make sure you build the library from source.
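As a minimal sketch (the clone location and install command are illustrative and may differ for
your setup; building from source requires a Rust toolchain):
```bash
# Illustrative only: install the Python bindings in editable mode from a local
# checkout, so that Sphinx documents your modified version.
git clone https://github.com/huggingface/tokenizers
cd tokenizers/bindings/python
pip install -e .
```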
## Building the documentation
Once everything is setup, you can build the documentation automatically for all the languages
using the following command in the `/docs` folder:
```bash
make html_all
```
If you want to build only for a specific language, you can use:
```bash
make html O="-t python"
```
(Replacing `python` by the target language among `rust`, `node`, and `python`)
**NOTE**
If you are making any structural change to the documentation, it is recommended to clean the build
directory before rebuilding:
```bash
make clean && make html_all
```
| 0 |
hf_public_repos/tokenizers/docs | hf_public_repos/tokenizers/docs/source/index.rst | Tokenizers
====================================================================================================
Fast State-of-the-art tokenizers, optimized for both research and production
`🤗 Tokenizers`_ provides an implementation of today's most used tokenizers, with
a focus on performance and versatility. These tokenizers are also used in
`🤗 Transformers`_.
.. _🤗 Tokenizers: https://github.com/huggingface/tokenizers
.. _🤗 Transformers: https://github.com/huggingface/transformers
Main features:
----------------------------------------------------------------------------------------------------
- Train new vocabularies and tokenize, using today's most used tokenizers.
- Extremely fast (both training and tokenization), thanks to the Rust implementation. Takes
less than 20 seconds to tokenize a GB of text on a server's CPU.
- Easy to use, but also extremely versatile.
- Designed for both research and production.
- Full alignment tracking. Even with destructive normalization, it's always possible to get
the part of the original sentence that corresponds to any token.
- Does all the pre-processing: Truncation, Padding, and adding the special tokens your model needs.
.. toctree::
:maxdepth: 2
:caption: Getting Started
quicktour
installation/main
pipeline
components
.. toctree-tags::
:maxdepth: 3
:caption: Using 🤗 Tokenizers
:glob:
:python:tutorials/python/*
.. toctree::
:maxdepth: 3
:caption: API Reference
api/reference
.. include:: entities.inc
| 0 |
hf_public_repos/tokenizers/docs | hf_public_repos/tokenizers/docs/source/entities.inc | .. entities:: python
:global:
class
class
classmethod
class method
Tokenizer
:class:`~tokenizers.Tokenizer`
Tokenizer.train
:meth:`~tokenizers.Tokenizer.train`
Tokenizer.save
:meth:`~tokenizers.Tokenizer.save`
Tokenizer.from_file
:meth:`~tokenizers.Tokenizer.from_file`
Tokenizer.encode
:meth:`~tokenizers.Tokenizer.encode`
Tokenizer.encode_batch
:meth:`~tokenizers.Tokenizer.encode_batch`
Tokenizer.decode
:meth:`~tokenizers.Tokenizer.decode`
Tokenizer.decode_batch
:meth:`~tokenizers.Tokenizer.decode_batch`
Tokenizer.token_to_id
:meth:`~tokenizers.Tokenizer.token_to_id`
Tokenizer.enable_padding
:meth:`~tokenizers.Tokenizer.enable_padding`
Encoding
:class:`~tokenizers.Encoding`
TemplateProcessing
:class:`~tokenizers.processors.TemplateProcessing`
Normalizer
:class:`~tokenizers.normalizers.Normalizer`
normalizers.Sequence
:class:`~tokenizers.normalizers.Sequence`
pre_tokenizers.Whitespace
:class:`~tokenizers.pre_tokenizers.Whitespace`
PreTokenizer
:class:`~tokenizers.pre_tokenizers.PreTokenizer`
models.BPE
:class:`~tokenizers.models.BPE`
models.Unigram
:class:`~tokenizers.models.Unigram`
models.WordLevel
:class:`~tokenizers.models.WordLevel`
models.WordPiece
:class:`~tokenizers.models.WordPiece`
Decoder
:class:`~tokenizers.decoders.Decoder`
.. entities:: rust
:global:
class
struct
classmethod
static method
Tokenizer
:rust_struct:`~tokenizers::tokenizer::Tokenizer`
Tokenizer.train
:rust_meth:`~tokenizers::tokenizer::Tokenizer::train`
Tokenizer.save
:rust_meth:`~tokenizers::tokenizer::Tokenizer::save`
Tokenizer.from_file
:rust_meth:`~tokenizers::tokenizer::Tokenizer::from_file`
Tokenizer.encode
:rust_meth:`~tokenizers::tokenizer::Tokenizer::encode`
Tokenizer.encode_batch
:rust_meth:`~tokenizers::tokenizer::Tokenizer::encode_batch`
Tokenizer.decode
:rust_meth:`~tokenizers::tokenizer::Tokenizer::decode`
Tokenizer.decode_batch
:rust_meth:`~tokenizers::tokenizer::Tokenizer::decode_batch`
Tokenizer.token_to_id
:rust_meth:`~tokenizers::tokenizer::Tokenizer::token_to_id`
Tokenizer.enable_padding
:rust_meth:`~tokenizers::tokenizer::Tokenizer::enable_padding`
Encoding
:rust_struct:`~tokenizers::tokenizer::Encoding`
TemplateProcessing
:rust_struct:`~tokenizers::processors::template::TemplateProcessing`
Normalizer
:rust_trait:`~tokenizers::tokenizer::Normalizer`
normalizers.Sequence
:rust_struct:`~tokenizers::normalizers::utils::Sequence`
pre_tokenizers.Whitespace
:rust_struct:`~tokenizers::pre_tokenizers::whitespace::Whitespace`
PreTokenizer
:rust_trait:`~tokenizers::tokenizer::PreTokenizer`
models.BPE
:rust_struct:`~tokenizers::models::bpe::BPE`
models.Unigram
:rust_struct:`~tokenizers::models::unigram::Unigram`
models.WordLevel
:rust_struct:`~tokenizers::models::wordlevel::WordLevel`
models.WordPiece
:rust_struct:`~tokenizers::models::wordpiece::WordPiece`
Decoder
:rust_trait:`~tokenizers::tokenizer::Decoder`
.. entities:: node
:global:
class
class
classmethod
static method
Tokenizer
:obj:`Tokenizer`
Tokenizer.train
:obj:`Tokenizer.train()`
Tokenizer.save
:obj:`Tokenizer.save()`
Tokenizer.from_file
:obj:`Tokenizer.fromFile()`
Tokenizer.encode
:obj:`Tokenizer.encode()`
Tokenizer.encode_batch
:obj:`Tokenizer.encodeBatch()`
Tokenizer.decode
:obj:`Tokenizer.decode()`
Tokenizer.decode_batch
:obj:`Tokenizer.decodeBatch()`
Tokenizer.token_to_id
:obj:`Tokenizer.tokenToId()`
Tokenizer.enable_padding
:obj:`Tokenizer.setPadding()`
Encoding
:obj:`Encoding`
TemplateProcessing
:obj:`TemplateProcessing`
Normalizer
:obj:`Normalizer`
normalizers.Sequence
:obj:`Sequence`
pre_tokenizers.Whitespace
:obj:`Whitespace`
PreTokenizer
:obj:`PreTokenizer`
models.BPE
:obj:`BPE`
models.Unigram
:obj:`Unigram`
models.WordLevel
:obj:`WordLevel`
models.WordPiece
:obj:`WordPiece`
Decoder
:obj:`Decoder`
| 0 |
hf_public_repos/tokenizers/docs | hf_public_repos/tokenizers/docs/source/conf.py | # Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
sys.path.insert(0, os.path.abspath("./_ext"))
sys.path.insert(0, os.path.abspath("."))
# -- Project information -----------------------------------------------------
project = "tokenizers"
copyright = "2020, huggingface"
author = "huggingface"
# The full version, including alpha/beta/rc tags
release = ""
# -- Custom information ------------------------------------------------------
# The possible values for languages (used by `_ext/entities`)
languages = ["node", "rust", "python"]
# This defines the version used to generate links to docs.rs
rust_version = "latest"
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = ["sphinx.ext.autodoc", "sphinx.ext.napoleon", "entities", "rust_doc", "toctree_tags"]
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = []
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = "sphinx_rtd_theme"
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
html_theme_options = {"analytics_id": "UA-83738774-2"}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]
def setup(app):
for language in languages:
if not tags.has(language):
exclude_patterns.append(f"tutorials/{language}/*")
app.add_css_file("css/huggingface.css")
app.add_css_file("css/code-snippets.css")
app.add_js_file("js/custom.js")
| 0 |
hf_public_repos/tokenizers/docs | hf_public_repos/tokenizers/docs/source/quicktour.rst | Quicktour
====================================================================================================
Let's have a quick look at the 🤗 Tokenizers library features. The library provides an
implementation of today's most used tokenizers that is both easy to use and blazing fast.
.. only:: python
It can be used to instantiate a :ref:`pretrained tokenizer <pretrained>` but we will start our
quicktour by building one from scratch and see how we can train it.
Build a tokenizer from scratch
----------------------------------------------------------------------------------------------------
To illustrate how fast the 🤗 Tokenizers library is, let's train a new tokenizer on `wikitext-103
<https://blog.einstein.ai/the-wikitext-long-term-dependency-language-modeling-dataset/>`__ (516M of
text) in just a few seconds. First things first, you will need to download this dataset and unzip it
with:
.. code-block:: bash
wget https://s3.amazonaws.com/research.metamind.io/wikitext/wikitext-103-raw-v1.zip
unzip wikitext-103-raw-v1.zip
Training the tokenizer
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. entities:: python
BpeTrainer
:class:`~tokenizers.trainers.BpeTrainer`
vocab_size
:obj:`vocab_size`
min_frequency
:obj:`min_frequency`
special_tokens
:obj:`special_tokens`
unk_token
:obj:`unk_token`
pad_token
:obj:`pad_token`
.. entities:: rust
BpeTrainer
:rust_struct:`~tokenizers::models::bpe::BpeTrainer`
vocab_size
:obj:`vocab_size`
min_frequency
:obj:`min_frequency`
special_tokens
:obj:`special_tokens`
unk_token
:obj:`unk_token`
pad_token
:obj:`pad_token`
.. entities:: node
BpeTrainer
BpeTrainer
vocab_size
:obj:`vocabSize`
min_frequency
:obj:`minFrequency`
special_tokens
:obj:`specialTokens`
unk_token
:obj:`unkToken`
pad_token
:obj:`padToken`
In this tour, we will build and train a Byte-Pair Encoding (BPE) tokenizer. For more information
about the different types of tokenizers, check out this `guide
<https://huggingface.co/docs/transformers/main/en/tokenizer_summary#summary-of-the-tokenizers>`__ in the 🤗 Transformers
documentation. Here, training the tokenizer means it will learn merge rules by:
- Start with all the characters present in the training corpus as tokens.
- Identify the most common pair of tokens and merge it into one token.
- Repeat until the vocabulary (i.e., the number of tokens) has reached the size we want.
The main API of the library is the :entity:`class` :entity:`Tokenizer`, here is how we instantiate
one with a BPE model:
.. only:: python
.. literalinclude:: ../../bindings/python/tests/documentation/test_quicktour.py
:language: python
:start-after: START init_tokenizer
:end-before: END init_tokenizer
:dedent: 8
.. only:: rust
.. literalinclude:: ../../tokenizers/tests/documentation.rs
:language: rust
:start-after: START quicktour_init_tokenizer
:end-before: END quicktour_init_tokenizer
:dedent: 4
.. only:: node
.. literalinclude:: ../../bindings/node/examples/documentation/quicktour.test.ts
:language: javascript
:start-after: START init_tokenizer
:end-before: END init_tokenizer
:dedent: 4
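If the referenced test files are not at hand, a minimal Python sketch of this step (assuming the
``tokenizers`` Python package is installed) could look like:
.. code-block:: python
    from tokenizers import Tokenizer
    from tokenizers.models import BPE
    # Build a Tokenizer around a BPE model; "[UNK]" will stand in for unknown tokens
    tokenizer = Tokenizer(BPE(unk_token="[UNK]"))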
To train our tokenizer on the wikitext files, we will need to instantiate a `trainer`, in this case
a :entity:`BpeTrainer`
.. only:: python
.. literalinclude:: ../../bindings/python/tests/documentation/test_quicktour.py
:language: python
:start-after: START init_trainer
:end-before: END init_trainer
:dedent: 8
.. only:: rust
.. literalinclude:: ../../tokenizers/tests/documentation.rs
:language: rust
:start-after: START quicktour_init_trainer
:end-before: END quicktour_init_trainer
:dedent: 4
.. only:: node
.. literalinclude:: ../../bindings/node/examples/documentation/quicktour.test.ts
:language: javascript
:start-after: START init_trainer
:end-before: END init_trainer
:dedent: 4
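As a rough Python illustration of instantiating such a trainer (the special tokens mirror the list
discussed just below):
.. code-block:: python
    from tokenizers.trainers import BpeTrainer
    # The order of the special tokens determines their IDs in the vocabulary
    trainer = BpeTrainer(special_tokens=["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]"])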
We can set the training arguments like :entity:`vocab_size` or :entity:`min_frequency` (here left at
their default values of 30,000 and 0) but the most important part is to give the
:entity:`special_tokens` we plan to use later on (they are not used at all during training) so that
they get inserted in the vocabulary.
.. note::
The order in which you write the special tokens list matters: here :obj:`"[UNK]"` will get the
ID 0, :obj:`"[CLS]"` will get the ID 1 and so forth.
We could train our tokenizer right now, but it wouldn't be optimal. Without a pre-tokenizer that
will split our inputs into words, we might get tokens that overlap several words: for instance we
could get an :obj:`"it is"` token since those two words often appear next to each other. Using a
pre-tokenizer will ensure no token is bigger than a word returned by the pre-tokenizer. Here we want
to train a subword BPE tokenizer, and we will use the easiest pre-tokenizer possible by splitting
on whitespace.
.. only:: python
.. literalinclude:: ../../bindings/python/tests/documentation/test_quicktour.py
:language: python
:start-after: START init_pretok
:end-before: END init_pretok
:dedent: 8
.. only:: rust
.. literalinclude:: ../../tokenizers/tests/documentation.rs
:language: rust
:start-after: START quicktour_init_pretok
:end-before: END quicktour_init_pretok
:dedent: 4
.. only:: node
.. literalinclude:: ../../bindings/node/examples/documentation/quicktour.test.ts
:language: javascript
:start-after: START init_pretok
:end-before: END init_pretok
:dedent: 4
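In the Python binding, attaching this pre-tokenizer is a one-line sketch:
.. code-block:: python
    from tokenizers.pre_tokenizers import Whitespace
    # Split the raw text on whitespace and punctuation before the model runs
    tokenizer.pre_tokenizer = Whitespace()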
Now, we can just call the :entity:`Tokenizer.train` method with any list of files we want
to use:
.. only:: python
.. literalinclude:: ../../bindings/python/tests/documentation/test_quicktour.py
:language: python
:start-after: START train
:end-before: END train
:dedent: 8
.. only:: rust
.. literalinclude:: ../../tokenizers/tests/documentation.rs
:language: rust
:start-after: START quicktour_train
:end-before: END quicktour_train
:dedent: 4
.. only:: node
.. literalinclude:: ../../bindings/node/examples/documentation/quicktour.test.ts
:language: javascript
:start-after: START train
:end-before: END train
:dedent: 4
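A possible Python sketch of this call; the file paths are only an assumption about where the
unzipped dataset lives:
.. code-block:: python
    # Adjust the paths to wherever wikitext-103-raw-v1.zip was extracted
    files = [f"wikitext-103-raw/wiki.{split}.raw" for split in ["test", "train", "valid"]]
    tokenizer.train(files, trainer)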
This should only take a few seconds to train our tokenizer on the full wikitext dataset!
To save the tokenizer in one file that contains all its configuration and vocabulary, just use the
:entity:`Tokenizer.save` method:
.. only:: python
.. literalinclude:: ../../bindings/python/tests/documentation/test_quicktour.py
:language: python
:start-after: START save
:end-before: END save
:dedent: 8
.. only:: rust
.. literalinclude:: ../../tokenizers/tests/documentation.rs
:language: rust
:start-after: START quicktour_save
:end-before: END quicktour_save
:dedent: 4
.. only:: node
.. literalinclude:: ../../bindings/node/examples/documentation/quicktour.test.ts
:language: javascript
:start-after: START save
:end-before: END save
:dedent: 4
and you can reload your tokenizer from that file with the :entity:`Tokenizer.from_file`
:entity:`classmethod`:
.. only:: python
.. literalinclude:: ../../bindings/python/tests/documentation/test_quicktour.py
:language: python
:start-after: START reload_tokenizer
:end-before: END reload_tokenizer
:dedent: 12
.. only:: rust
.. literalinclude:: ../../tokenizers/tests/documentation.rs
:language: rust
:start-after: START quicktour_reload_tokenizer
:end-before: END quicktour_reload_tokenizer
:dedent: 4
.. only:: node
.. literalinclude:: ../../bindings/node/examples/documentation/quicktour.test.ts
:language: javascript
:start-after: START reload_tokenizer
:end-before: END reload_tokenizer
:dedent: 4
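A compact Python sketch of saving and reloading; the file name is just an example:
.. code-block:: python
    # The JSON file stores the model, normalizer, pre-tokenizer and post-processor together
    tokenizer.save("tokenizer-wiki.json")
    # Later, restore the exact same tokenizer from that single file
    tokenizer = Tokenizer.from_file("tokenizer-wiki.json")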
Using the tokenizer
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Now that we have trained a tokenizer, we can use it on any text we want with the
:entity:`Tokenizer.encode` method:
.. only:: python
.. literalinclude:: ../../bindings/python/tests/documentation/test_quicktour.py
:language: python
:start-after: START encode
:end-before: END encode
:dedent: 8
.. only:: rust
.. literalinclude:: ../../tokenizers/tests/documentation.rs
:language: rust
:start-after: START quicktour_encode
:end-before: END quicktour_encode
:dedent: 4
.. only:: node
.. literalinclude:: ../../bindings/node/examples/documentation/quicktour.test.ts
:language: javascript
:start-after: START encode
:end-before: END encode
:dedent: 4
This applied the full pipeline of the tokenizer on the text, returning an
:entity:`Encoding` object. To learn more about this pipeline, and how to apply (or
customize) parts of it, check out :doc:`this page <pipeline>`.
This :entity:`Encoding` object then has all the attributes you need for your deep
learning model (or other). The :obj:`tokens` attribute contains the segmentation of your text in
tokens:
.. only:: python
.. literalinclude:: ../../bindings/python/tests/documentation/test_quicktour.py
:language: python
:start-after: START print_tokens
:end-before: END print_tokens
:dedent: 8
.. only:: rust
.. literalinclude:: ../../tokenizers/tests/documentation.rs
:language: rust
:start-after: START quicktour_print_tokens
:end-before: END quicktour_print_tokens
:dedent: 4
.. only:: node
.. literalinclude:: ../../bindings/node/examples/documentation/quicktour.test.ts
:language: javascript
:start-after: START print_tokens
:end-before: END print_tokens
:dedent: 4
Similarly, the :obj:`ids` attribute will contain the index of each of those tokens in the
tokenizer's vocabulary:
.. only:: python
.. literalinclude:: ../../bindings/python/tests/documentation/test_quicktour.py
:language: python
:start-after: START print_ids
:end-before: END print_ids
:dedent: 8
.. only:: rust
.. literalinclude:: ../../tokenizers/tests/documentation.rs
:language: rust
:start-after: START quicktour_print_ids
:end-before: END quicktour_print_ids
:dedent: 4
.. only:: node
.. literalinclude:: ../../bindings/node/examples/documentation/quicktour.test.ts
:language: javascript
:start-after: START print_ids
:end-before: END print_ids
:dedent: 4
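Putting these two attributes together, a Python sketch (the exact tokens and IDs depend on the
vocabulary you trained):
.. code-block:: python
    output = tokenizer.encode("Hello, y'all! How are you 😁 ?")
    # e.g. ["Hello", ",", "y", "'", "all", "!", "How", "are", "you", "[UNK]", "?"]
    print(output.tokens)
    # the matching indices in the learned vocabulary
    print(output.ids)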
An important feature of the 🤗 Tokenizers library is that it comes with full alignment tracking,
meaning you can always get the part of your original sentence that corresponds to a given token.
Those offsets are stored in the :obj:`offsets` attribute of our :entity:`Encoding` object. For
instance, let's assume we want to find what caused the :obj:`"[UNK]"` token to appear, which is
the token at index 9 in the list. We can just ask for the offset at that index:
.. only:: python
.. literalinclude:: ../../bindings/python/tests/documentation/test_quicktour.py
:language: python
:start-after: START print_offsets
:end-before: END print_offsets
:dedent: 8
.. only:: rust
.. literalinclude:: ../../tokenizers/tests/documentation.rs
:language: rust
:start-after: START quicktour_print_offsets
:end-before: END quicktour_print_offsets
:dedent: 4
.. only:: node
.. literalinclude:: ../../bindings/node/examples/documentation/quicktour.test.ts
:language: javascript
:start-after: START print_offsets
:end-before: END print_offsets
:dedent: 4
and those are the indices that correspond to the emoji in the original sentence:
.. only:: python
.. literalinclude:: ../../bindings/python/tests/documentation/test_quicktour.py
:language: python
:start-after: START use_offsets
:end-before: END use_offsets
:dedent: 8
.. only:: rust
.. literalinclude:: ../../tokenizers/tests/documentation.rs
:language: rust
:start-after: START quicktour_use_offsets
:end-before: END quicktour_use_offsets
:dedent: 4
.. only:: node
.. literalinclude:: ../../bindings/node/examples/documentation/quicktour.test.ts
:language: javascript
:start-after: START use_offsets
:end-before: END use_offsets
:dedent: 4
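A Python sketch of that lookup, assuming the same example sentence and tokenization as above:
.. code-block:: python
    # Index 9 is where the "[UNK]" token ended up in the example
    print(output.offsets[9])
    # e.g. (26, 27)
    sentence = "Hello, y'all! How are you 😁 ?"
    print(sentence[26:27])
    # "😁"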
Post-processing
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
We might want our tokenizer to automatically add special tokens, like :obj:`"[CLS]"` or
:obj:`"[SEP]"`. To do this, we use a post-processor. :entity:`TemplateProcessing` is the
most commonly used, you just have to specify a template for the processing of single sentences and
pairs of sentences, along with the special tokens and their IDs.
When we built our tokenizer, we set :obj:`"[CLS]"` and :obj:`"[SEP]"` in positions 1 and 2 of our
list of special tokens, so this should be their IDs. To double-check, we can use the
:entity:`Tokenizer.token_to_id` method:
.. only:: python
.. literalinclude:: ../../bindings/python/tests/documentation/test_quicktour.py
:language: python
:start-after: START check_sep
:end-before: END check_sep
:dedent: 8
.. only:: rust
.. literalinclude:: ../../tokenizers/tests/documentation.rs
:language: rust
:start-after: START quicktour_check_sep
:end-before: END quicktour_check_sep
:dedent: 4
.. only:: node
.. literalinclude:: ../../bindings/node/examples/documentation/quicktour.test.ts
:language: javascript
:start-after: START check_sep
:end-before: END check_sep
:dedent: 4
Here is how we can set the post-processing to give us the traditional BERT inputs:
.. only:: python
.. literalinclude:: ../../bindings/python/tests/documentation/test_quicktour.py
:language: python
:start-after: START init_template_processing
:end-before: END init_template_processing
:dedent: 8
.. only:: rust
.. literalinclude:: ../../tokenizers/tests/documentation.rs
:language: rust
:start-after: START quicktour_init_template_processing
:end-before: END quicktour_init_template_processing
:dedent: 4
.. only:: node
.. literalinclude:: ../../bindings/node/examples/documentation/quicktour.test.ts
:language: javascript
:start-after: START init_template_processing
:end-before: END init_template_processing
:dedent: 4
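For reference, a Python sketch of such a post-processor, looking the special-token IDs up rather
than hard-coding them:
.. code-block:: python
    from tokenizers.processors import TemplateProcessing
    tokenizer.post_processor = TemplateProcessing(
        single="[CLS] $A [SEP]",
        pair="[CLS] $A [SEP] $B:1 [SEP]:1",
        special_tokens=[
            ("[CLS]", tokenizer.token_to_id("[CLS]")),
            ("[SEP]", tokenizer.token_to_id("[SEP]")),
        ],
    )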
Let's go over this snippet of code in more detail. First, we specify the template for single
sentences: those should have the form :obj:`"[CLS] $A [SEP]"` where :obj:`$A` represents our
sentence.
Then, we specify the template for sentence pairs, which should have the form
:obj:`"[CLS] $A [SEP] $B [SEP]"` where :obj:`$A` represents the first sentence and :obj:`$B` the
second one. The :obj:`:1` added in the template represents the `type IDs` we want for each part of
our input: it defaults to 0 for everything (which is why we don't have :obj:`$A:0`) and here we set
it to 1 for the tokens of the second sentence and the last :obj:`"[SEP]"` token.
Lastly, we specify the special tokens we used and their IDs in our tokenizer's vocabulary.
To check that this worked properly, let's try to encode the same sentence as before:
.. only:: python
.. literalinclude:: ../../bindings/python/tests/documentation/test_quicktour.py
:language: python
:start-after: START print_special_tokens
:end-before: END print_special_tokens
:dedent: 8
.. only:: rust
.. literalinclude:: ../../tokenizers/tests/documentation.rs
:language: rust
:start-after: START quicktour_print_special_tokens
:end-before: END quicktour_print_special_tokens
:dedent: 4
.. only:: node
.. literalinclude:: ../../bindings/node/examples/documentation/quicktour.test.ts
:language: javascript
:start-after: START print_special_tokens
:end-before: END print_special_tokens
:dedent: 4
To check the results on a pair of sentences, we just pass the two sentences to
:entity:`Tokenizer.encode`:
.. only:: python
.. literalinclude:: ../../bindings/python/tests/documentation/test_quicktour.py
:language: python
:start-after: START print_special_tokens_pair
:end-before: END print_special_tokens_pair
:dedent: 8
.. only:: rust
.. literalinclude:: ../../tokenizers/tests/documentation.rs
:language: rust
:start-after: START quicktour_print_special_tokens_pair
:end-before: END quicktour_print_special_tokens_pair
:dedent: 4
.. only:: node
.. literalinclude:: ../../bindings/node/examples/documentation/quicktour.test.ts
:language: javascript
:start-after: START print_special_tokens_pair
:end-before: END print_special_tokens_pair
:dedent: 4
You can then check that the type IDs attributed to each token are correct with
.. only:: python
.. literalinclude:: ../../bindings/python/tests/documentation/test_quicktour.py
:language: python
:start-after: START print_type_ids
:end-before: END print_type_ids
:dedent: 8
.. only:: rust
.. literalinclude:: ../../tokenizers/tests/documentation.rs
:language: rust
:start-after: START quicktour_print_type_ids
:end-before: END quicktour_print_type_ids
:dedent: 4
.. only:: node
.. literalinclude:: ../../bindings/node/examples/documentation/quicktour.test.ts
:language: javascript
:start-after: START print_type_ids
:end-before: END print_type_ids
:dedent: 4
If you save your tokenizer with :entity:`Tokenizer.save`, the post-processor will be saved along.
Encoding multiple sentences in a batch
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
To get the full speed of the 🤗 Tokenizers library, it's best to process your texts by batches by
using the :entity:`Tokenizer.encode_batch` method:
.. only:: python
.. literalinclude:: ../../bindings/python/tests/documentation/test_quicktour.py
:language: python
:start-after: START encode_batch
:end-before: END encode_batch
:dedent: 8
.. only:: rust
.. literalinclude:: ../../tokenizers/tests/documentation.rs
:language: rust
:start-after: START quicktour_encode_batch
:end-before: END quicktour_encode_batch
:dedent: 4
.. only:: node
.. literalinclude:: ../../bindings/node/examples/documentation/quicktour.test.ts
:language: javascript
:start-after: START encode_batch
:end-before: END encode_batch
:dedent: 4
The output is then a list of :entity:`Encoding` objects like the ones we saw before. You
can process together as many texts as you like, as long as it fits in memory.
To process a batch of sentence pairs, pass two lists to the
:entity:`Tokenizer.encode_batch` method: the list of sentences A and the list of sentences
B:
.. only:: python
.. literalinclude:: ../../bindings/python/tests/documentation/test_quicktour.py
:language: python
:start-after: START encode_batch_pair
:end-before: END encode_batch_pair
:dedent: 8
.. only:: rust
.. literalinclude:: ../../tokenizers/tests/documentation.rs
:language: rust
:start-after: START quicktour_encode_batch_pair
:end-before: END quicktour_encode_batch_pair
:dedent: 4
.. only:: node
.. literalinclude:: ../../bindings/node/examples/documentation/quicktour.test.ts
:language: javascript
:start-after: START encode_batch_pair
:end-before: END encode_batch_pair
:dedent: 4
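A Python sketch of both batch calls; note that in the Python binding a batch of pairs is passed as
a list of two-element tuples (or lists):
.. code-block:: python
    # A batch of single sentences
    output = tokenizer.encode_batch(["Hello, y'all!", "How are you 😁 ?"])
    # A batch of sentence pairs: each item holds (sentence A, sentence B)
    output = tokenizer.encode_batch(
        [("Hello, y'all!", "How are you 😁 ?"), ("Hello to you too!", "I'm fine, thank you!")]
    )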
When encoding multiple sentences, you can automatically pad the outputs to the longest sentence
present by using :entity:`Tokenizer.enable_padding`, with the :entity:`pad_token` and its ID
(which we can double-check with :entity:`Tokenizer.token_to_id` like before):
.. only:: python
.. literalinclude:: ../../bindings/python/tests/documentation/test_quicktour.py
:language: python
:start-after: START enable_padding
:end-before: END enable_padding
:dedent: 8
.. only:: rust
.. literalinclude:: ../../tokenizers/tests/documentation.rs
:language: rust
:start-after: START quicktour_enable_padding
:end-before: END quicktour_enable_padding
:dedent: 4
.. only:: node
.. literalinclude:: ../../bindings/node/examples/documentation/quicktour.test.ts
:language: javascript
:start-after: START enable_padding
:end-before: END enable_padding
:dedent: 4
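A Python sketch of enabling padding; the ID 3 assumes :obj:`"[PAD]"` was the fourth special token
registered during training:
.. code-block:: python
    # Pad every sequence in a batch to the length of the longest one
    tokenizer.enable_padding(pad_id=3, pad_token="[PAD]")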
We can set the :obj:`direction` of the padding (defaults to the right) or a given :obj:`length` if
we want to pad every sample to that specific number (here we leave it unset to pad to the size of
the longest text).
.. only:: python
.. literalinclude:: ../../bindings/python/tests/documentation/test_quicktour.py
:language: python
:start-after: START print_batch_tokens
:end-before: END print_batch_tokens
:dedent: 8
.. only:: rust
.. literalinclude:: ../../tokenizers/tests/documentation.rs
:language: rust
:start-after: START quicktour_print_batch_tokens
:end-before: END quicktour_print_batch_tokens
:dedent: 4
.. only:: node
.. literalinclude:: ../../bindings/node/examples/documentation/quicktour.test.ts
:language: javascript
:start-after: START print_batch_tokens
:end-before: END print_batch_tokens
:dedent: 4
In this case, the `attention mask` generated by the tokenizer takes the padding into account:
.. only:: python
.. literalinclude:: ../../bindings/python/tests/documentation/test_quicktour.py
:language: python
:start-after: START print_attention_mask
:end-before: END print_attention_mask
:dedent: 8
.. only:: rust
.. literalinclude:: ../../tokenizers/tests/documentation.rs
:language: rust
:start-after: START quicktour_print_attention_mask
:end-before: END quicktour_print_attention_mask
:dedent: 4
.. only:: node
.. literalinclude:: ../../bindings/node/examples/documentation/quicktour.test.ts
:language: javascript
:start-after: START print_attention_mask
:end-before: END print_attention_mask
:dedent: 4
.. _pretrained:
.. only:: python
Using a pretrained tokenizer
------------------------------------------------------------------------------------------------
You can load any tokenizer from the Hugging Face Hub as long as a `tokenizer.json` file is
available in the repository.
.. code-block:: python
from tokenizers import Tokenizer
tokenizer = Tokenizer.from_pretrained("bert-base-uncased")
Importing a pretrained tokenizer from legacy vocabulary files
------------------------------------------------------------------------------------------------
You can also import a pretrained tokenizer directly, as long as you have its vocabulary file.
For instance, here is how to import the classic pretrained BERT tokenizer:
.. code-block:: python
from tokenizers import BertWordPieceTokenizer
tokenizer = BertWordPieceTokenizer("bert-base-uncased-vocab.txt", lowercase=True)
as long as you have downloaded the file `bert-base-uncased-vocab.txt` with
.. code-block:: bash
wget https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-uncased-vocab.txt
| 0 |
hf_public_repos/tokenizers/docs | hf_public_repos/tokenizers/docs/source/pipeline.rst | The tokenization pipeline
====================================================================================================
When calling :entity:`Tokenizer.encode` or :entity:`Tokenizer.encode_batch`, the input text(s) go
through the following pipeline:
- :ref:`normalization`
- :ref:`pre-tokenization`
- :ref:`model`
- :ref:`post-processing`
We'll see in detail what happens during each of those steps, as well as what happens when you want to
:ref:`decode <decoding>` some token ids, and how the 🤗 Tokenizers library allows you to customize
each of those steps to your needs. If you're already familiar with those steps and want to learn by
seeing some code, jump to :ref:`our BERT from scratch example <example>`.
For the examples that require a :entity:`Tokenizer`, we will use the tokenizer we trained
in the :doc:`quicktour`, which you can load with:
.. only:: python
.. literalinclude:: ../../bindings/python/tests/documentation/test_pipeline.py
:language: python
:start-after: START reload_tokenizer
:end-before: END reload_tokenizer
:dedent: 8
.. only:: rust
.. literalinclude:: ../../tokenizers/tests/documentation.rs
:language: rust
:start-after: START pipeline_reload_tokenizer
:end-before: END pipeline_reload_tokenizer
:dedent: 4
.. only:: node
.. literalinclude:: ../../bindings/node/examples/documentation/pipeline.test.ts
:language: javascript
:start-after: START reload_tokenizer
:end-before: END reload_tokenizer
:dedent: 8
.. _normalization:
Normalization
----------------------------------------------------------------------------------------------------
Normalization is, in a nutshell, a set of operations you apply to a raw string to make it less
random or "cleaner". Common operations include stripping whitespace, removing accented characters
or lowercasing all text. If you're familiar with `Unicode normalization
<https://unicode.org/reports/tr15>`__, it is also a very common normalization operation applied
in most tokenizers.
Each normalization operation is represented in the 🤗 Tokenizers library by a
:entity:`Normalizer`, and you can combine several of those by using a
:entity:`normalizers.Sequence`. Here is a normalizer applying NFD Unicode normalization
and removing accents as an example:
.. only:: python
.. literalinclude:: ../../bindings/python/tests/documentation/test_pipeline.py
:language: python
:start-after: START setup_normalizer
:end-before: END setup_normalizer
:dedent: 8
.. only:: rust
.. literalinclude:: ../../tokenizers/tests/documentation.rs
:language: rust
:start-after: START pipeline_setup_normalizer
:end-before: END pipeline_setup_normalizer
:dedent: 4
.. only:: node
.. literalinclude:: ../../bindings/node/examples/documentation/pipeline.test.ts
:language: javascript
:start-after: START setup_normalizer
:end-before: END setup_normalizer
:dedent: 8
You can manually test that normalizer by applying it to any string:
.. only:: python
.. literalinclude:: ../../bindings/python/tests/documentation/test_pipeline.py
:language: python
:start-after: START test_normalizer
:end-before: END test_normalizer
:dedent: 8
.. only:: rust
.. literalinclude:: ../../tokenizers/tests/documentation.rs
:language: rust
:start-after: START pipeline_test_normalizer
:end-before: END pipeline_test_normalizer
:dedent: 4
.. only:: node
.. literalinclude:: ../../bindings/node/examples/documentation/pipeline.test.ts
:language: javascript
:start-after: START test_normalizer
:end-before: END test_normalizer
:dedent: 8
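A small Python sketch of building and testing such a normalizer:
.. code-block:: python
    from tokenizers import normalizers
    from tokenizers.normalizers import NFD, StripAccents
    normalizer = normalizers.Sequence([NFD(), StripAccents()])
    # normalize_str applies the normalizer to a raw string, handy for quick checks
    print(normalizer.normalize_str("Héllò hôw are ü?"))
    # "Hello how are u?"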
When building a :entity:`Tokenizer`, you can customize its normalizer by just changing
the corresponding attribute:
.. only:: python
.. literalinclude:: ../../bindings/python/tests/documentation/test_pipeline.py
:language: python
:start-after: START replace_normalizer
:end-before: END replace_normalizer
:dedent: 8
.. only:: rust
.. literalinclude:: ../../tokenizers/tests/documentation.rs
:language: rust
:start-after: START pipeline_replace_normalizer
:end-before: END pipeline_replace_normalizer
:dedent: 4
.. only:: node
.. literalinclude:: ../../bindings/node/examples/documentation/pipeline.test.ts
:language: javascript
:start-after: START replace_normalizer
:end-before: END replace_normalizer
:dedent: 8
Of course, if you change the way a tokenizer applies normalization, you should probably retrain it
from scratch afterward.
.. _pre-tokenization:
Pre-Tokenization
----------------------------------------------------------------------------------------------------
Pre-tokenization is the act of splitting a text into smaller objects that give an upper bound to
what your tokens will be at the end of training. A good way to think of this is that the
pre-tokenizer will split your text into "words" and then, your final tokens will be parts of those
words.
An easy way to pre-tokenize inputs is to split on spaces and punctuations, which is done by the
:entity:`pre_tokenizers.Whitespace` pre-tokenizer:
.. only:: python
.. literalinclude:: ../../bindings/python/tests/documentation/test_pipeline.py
:language: python
:start-after: START setup_pre_tokenizer
:end-before: END setup_pre_tokenizer
:dedent: 8
.. only:: rust
.. literalinclude:: ../../tokenizers/tests/documentation.rs
:language: rust
:start-after: START pipeline_setup_pre_tokenizer
:end-before: END pipeline_setup_pre_tokenizer
:dedent: 4
.. only:: node
.. literalinclude:: ../../bindings/node/examples/documentation/pipeline.test.ts
:language: javascript
:start-after: START setup_pre_tokenizer
:end-before: END setup_pre_tokenizer
:dedent: 8
The output is a list of tuples, with each tuple containing one word and its span in the original
sentence (which is used to determine the final :obj:`offsets` of our :entity:`Encoding`).
Note that splitting on punctuation will split contractions like :obj:`"I'm"` in this example.
You can combine any :entity:`PreTokenizer` together. For
instance, here is a pre-tokenizer that will split on space, punctuation and digits, separating
numbers in their individual digits:
.. only:: python
.. literalinclude:: ../../bindings/python/tests/documentation/test_pipeline.py
:language: python
:start-after: START combine_pre_tokenizer
:end-before: END combine_pre_tokenizer
:dedent: 8
.. only:: rust
.. literalinclude:: ../../tokenizers/tests/documentation.rs
:language: rust
:start-after: START pipeline_combine_pre_tokenizer
:end-before: END pipeline_combine_pre_tokenizer
:dedent: 4
.. only:: node
.. literalinclude:: ../../bindings/node/examples/documentation/pipeline.test.ts
:language: javascript
:start-after: START combine_pre_tokenizer
:end-before: END combine_pre_tokenizer
:dedent: 8
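A Python sketch combining both ideas; the offsets in the comment are for the example string shown:
.. code-block:: python
    from tokenizers import pre_tokenizers
    from tokenizers.pre_tokenizers import Digits, Whitespace
    pre_tokenizer = pre_tokenizers.Sequence([Whitespace(), Digits(individual_digits=True)])
    print(pre_tokenizer.pre_tokenize_str("Call 911!"))
    # [("Call", (0, 4)), ("9", (5, 6)), ("1", (6, 7)), ("1", (7, 8)), ("!", (8, 9))]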
As we saw in the :doc:`quicktour`, you can customize the pre-tokenizer of a
:entity:`Tokenizer` by just changing the corresponding attribute:
.. only:: python
.. literalinclude:: ../../bindings/python/tests/documentation/test_pipeline.py
:language: python
:start-after: START replace_pre_tokenizer
:end-before: END replace_pre_tokenizer
:dedent: 8
.. only:: rust
.. literalinclude:: ../../tokenizers/tests/documentation.rs
:language: rust
:start-after: START pipeline_replace_pre_tokenizer
:end-before: END pipeline_replace_pre_tokenizer
:dedent: 4
.. only:: node
.. literalinclude:: ../../bindings/node/examples/documentation/pipeline.test.ts
:language: javascript
:start-after: START replace_pre_tokenizer
:end-before: END replace_pre_tokenizer
:dedent: 8
Of course, if you change the pre-tokenizer, you should probably retrain your tokenizer from
scratch afterward.
.. _model:
The Model
----------------------------------------------------------------------------------------------------
Once the input texts are normalized and pre-tokenized, the :entity:`Tokenizer` applies the model on
the pre-tokens. This is the part of the pipeline that needs training on your corpus (or that has
been trained if you are using a pretrained tokenizer).
The role of the model is to split your "words" into tokens, using the rules it has learned. It's
also responsible for mapping those tokens to their corresponding IDs in the vocabulary of the model.
This model is passed along when initializing the :entity:`Tokenizer`, so you already know
how to customize this part. Currently, the 🤗 Tokenizers library supports:
- :entity:`models.BPE`
- :entity:`models.Unigram`
- :entity:`models.WordLevel`
- :entity:`models.WordPiece`
For more details about each model and its behavior, you can check `here <components#models>`__.
.. _post-processing:
Post-Processing
----------------------------------------------------------------------------------------------------
Post-processing is the last step of the tokenization pipeline, performing any additional
transformation to the :entity:`Encoding` before it's returned, like adding potential
special tokens.
As we saw in the quick tour, we can customize the post processor of a :entity:`Tokenizer`
by setting the corresponding attribute. For instance, here is how we can post-process to make the
inputs suitable for the BERT model:
.. only:: python
.. literalinclude:: ../../bindings/python/tests/documentation/test_pipeline.py
:language: python
:start-after: START setup_processor
:end-before: END setup_processor
:dedent: 8
.. only:: rust
.. literalinclude:: ../../tokenizers/tests/documentation.rs
:language: rust
:start-after: START pipeline_setup_processor
:end-before: END pipeline_setup_processor
:dedent: 4
.. only:: node
.. literalinclude:: ../../bindings/node/examples/documentation/pipeline.test.ts
:language: javascript
:start-after: START setup_processor
:end-before: END setup_processor
:dedent: 8
Note that contrary to the pre-tokenizer or the normalizer, you don't need to retrain a tokenizer
after changing its post-processor.
.. _example:
All together: a BERT tokenizer from scratch
----------------------------------------------------------------------------------------------------
Let's put all those pieces together to build a BERT tokenizer. First, BERT relies on WordPiece, so
we instantiate a new :entity:`Tokenizer` with this model:
.. only:: python
.. literalinclude:: ../../bindings/python/tests/documentation/test_pipeline.py
:language: python
:start-after: START bert_setup_tokenizer
:end-before: END bert_setup_tokenizer
:dedent: 8
.. only:: rust
.. literalinclude:: ../../tokenizers/tests/documentation.rs
:language: rust
:start-after: START bert_setup_tokenizer
:end-before: END bert_setup_tokenizer
:dedent: 4
.. only:: node
.. literalinclude:: ../../bindings/node/examples/documentation/pipeline.test.ts
:language: javascript
:start-after: START bert_setup_tokenizer
:end-before: END bert_setup_tokenizer
:dedent: 8
Then we know that BERT preprocesses texts by removing accents and lowercasing. We also use a unicode
normalizer:
.. only:: python
.. literalinclude:: ../../bindings/python/tests/documentation/test_pipeline.py
:language: python
:start-after: START bert_setup_normalizer
:end-before: END bert_setup_normalizer
:dedent: 8
.. only:: rust
.. literalinclude:: ../../tokenizers/tests/documentation.rs
:language: rust
:start-after: START bert_setup_normalizer
:end-before: END bert_setup_normalizer
:dedent: 4
.. only:: node
.. literalinclude:: ../../bindings/node/examples/documentation/pipeline.test.ts
:language: javascript
:start-after: START bert_setup_normalizer
:end-before: END bert_setup_normalizer
:dedent: 8
The pre-tokenizer is just splitting on whitespace and punctuation:
.. only:: python
.. literalinclude:: ../../bindings/python/tests/documentation/test_pipeline.py
:language: python
:start-after: START bert_setup_pre_tokenizer
:end-before: END bert_setup_pre_tokenizer
:dedent: 8
.. only:: rust
.. literalinclude:: ../../tokenizers/tests/documentation.rs
:language: rust
:start-after: START bert_setup_pre_tokenizer
:end-before: END bert_setup_pre_tokenizer
:dedent: 4
.. only:: node
.. literalinclude:: ../../bindings/node/examples/documentation/pipeline.test.ts
:language: javascript
:start-after: START bert_setup_pre_tokenizer
:end-before: END bert_setup_pre_tokenizer
:dedent: 8
And the post-processing uses the template we saw in the previous section:
.. only:: python
.. literalinclude:: ../../bindings/python/tests/documentation/test_pipeline.py
:language: python
:start-after: START bert_setup_processor
:end-before: END bert_setup_processor
:dedent: 8
.. only:: rust
.. literalinclude:: ../../tokenizers/tests/documentation.rs
:language: rust
:start-after: START bert_setup_processor
:end-before: END bert_setup_processor
:dedent: 4
.. only:: node
.. literalinclude:: ../../bindings/node/examples/documentation/pipeline.test.ts
:language: javascript
:start-after: START bert_setup_processor
:end-before: END bert_setup_processor
:dedent: 8
We can use this tokenizer and train it on wikitext like in the :doc:`quicktour`:
.. only:: python
.. literalinclude:: ../../bindings/python/tests/documentation/test_pipeline.py
:language: python
:start-after: START bert_train_tokenizer
:end-before: END bert_train_tokenizer
:dedent: 8
.. only:: rust
.. literalinclude:: ../../tokenizers/tests/documentation.rs
:language: rust
:start-after: START bert_train_tokenizer
:end-before: END bert_train_tokenizer
:dedent: 4
.. only:: node
.. literalinclude:: ../../bindings/node/examples/documentation/pipeline.test.ts
:language: javascript
:start-after: START bert_train_tokenizer
:end-before: END bert_train_tokenizer
:dedent: 8
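Pulling the previous steps together, a rough end-to-end Python sketch; the vocabulary size,
special-token IDs and file paths are illustrative assumptions:
.. code-block:: python
    from tokenizers import Tokenizer, normalizers
    from tokenizers.models import WordPiece
    from tokenizers.normalizers import NFD, Lowercase, StripAccents
    from tokenizers.pre_tokenizers import Whitespace
    from tokenizers.processors import TemplateProcessing
    from tokenizers.trainers import WordPieceTrainer
    bert_tokenizer = Tokenizer(WordPiece(unk_token="[UNK]"))
    bert_tokenizer.normalizer = normalizers.Sequence([NFD(), Lowercase(), StripAccents()])
    bert_tokenizer.pre_tokenizer = Whitespace()
    bert_tokenizer.post_processor = TemplateProcessing(
        single="[CLS] $A [SEP]",
        pair="[CLS] $A [SEP] $B:1 [SEP]:1",
        special_tokens=[("[CLS]", 1), ("[SEP]", 2)],  # assumes the special-token order below
    )
    trainer = WordPieceTrainer(
        vocab_size=30522,
        special_tokens=["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]"],
    )
    files = [f"wikitext-103-raw/wiki.{split}.raw" for split in ["test", "train", "valid"]]
    bert_tokenizer.train(files, trainer)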
.. _decoding:
Decoding
----------------------------------------------------------------------------------------------------
.. entities:: python
bert_tokenizer
:obj:`bert_tokenizer`
.. entities:: rust
bert_tokenizer
:obj:`bert_tokenizer`
.. entities:: node
bert_tokenizer
:obj:`bertTokenizer`
On top of encoding the input texts, a :entity:`Tokenizer` also has an API for decoding,
that is converting IDs generated by your model back to a text. This is done by the methods
:entity:`Tokenizer.decode` (for one predicted text) and :entity:`Tokenizer.decode_batch` (for a
batch of predictions).
The `decoder` will first convert the IDs back to tokens (using the tokenizer's vocabulary) and
remove all special tokens, then join those tokens with spaces:
.. only:: python
.. literalinclude:: ../../bindings/python/tests/documentation/test_pipeline.py
:language: python
:start-after: START test_decoding
:end-before: END test_decoding
:dedent: 8
.. only:: rust
.. literalinclude:: ../../tokenizers/tests/documentation.rs
:language: rust
:start-after: START pipeline_test_decoding
:end-before: END pipeline_test_decoding
:dedent: 4
.. only:: node
.. literalinclude:: ../../bindings/node/examples/documentation/pipeline.test.ts
:language: javascript
:start-after: START test_decoding
:end-before: END test_decoding
:dedent: 8
If you used a model that added special characters to represent subtokens of a given "word" (like
the :obj:`"##"` in WordPiece) you will need to customize the `decoder` to treat them properly. If we
take our previous :entity:`bert_tokenizer` for instance the default decoding will give:
.. only:: python
.. literalinclude:: ../../bindings/python/tests/documentation/test_pipeline.py
:language: python
:start-after: START bert_test_decoding
:end-before: END bert_test_decoding
:dedent: 8
.. only:: rust
.. literalinclude:: ../../tokenizers/tests/documentation.rs
:language: rust
:start-after: START bert_test_decoding
:end-before: END bert_test_decoding
:dedent: 4
.. only:: node
.. literalinclude:: ../../bindings/node/examples/documentation/pipeline.test.ts
:language: javascript
:start-after: START bert_test_decoding
:end-before: END bert_test_decoding
:dedent: 8
But by changing it to a proper decoder, we get:
.. only:: python
.. literalinclude:: ../../bindings/python/tests/documentation/test_pipeline.py
:language: python
:start-after: START bert_proper_decoding
:end-before: END bert_proper_decoding
:dedent: 8
.. only:: rust
.. literalinclude:: ../../tokenizers/tests/documentation.rs
:language: rust
:start-after: START bert_proper_decoding
:end-before: END bert_proper_decoding
:dedent: 4
.. only:: node
.. literalinclude:: ../../bindings/node/examples/documentation/pipeline.test.ts
:language: javascript
:start-after: START bert_proper_decoding
:end-before: END bert_proper_decoding
:dedent: 8
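A short Python sketch of the difference; the exact subword splits depend on the trained vocabulary:
.. code-block:: python
    from tokenizers import decoders
    output = bert_tokenizer.encode("Welcome to the 🤗 Tokenizers library.")
    # Default decoding simply joins tokens with spaces and keeps the "##" markers,
    # e.g. "welcome to the tok ##eni ##zer ##s library ."
    print(bert_tokenizer.decode(output.ids))
    # A WordPiece decoder merges the "##" continuation tokens back into words
    bert_tokenizer.decoder = decoders.WordPiece()
    print(bert_tokenizer.decode(output.ids))
    # "welcome to the tokenizers library."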
| 0 |
hf_public_repos/tokenizers/docs | hf_public_repos/tokenizers/docs/source/components.rst | Components
====================================================================================================
When building a Tokenizer, you can attach various types of components to this Tokenizer in order
to customize its behavior. This page lists most provided components.
.. _normalizers:
.. entities:: python
BertNormalizer.clean_text
clean_text
BertNormalizer.handle_chinese_chars
handle_chinese_chars
BertNormalizer.strip_accents
strip_accents
BertNormalizer.lowercase
lowercase
Normalizer.Sequence
``Sequence([NFKC(), Lowercase()])``
PreTokenizer.Sequence
``Sequence([Punctuation(), WhitespaceSplit()])``
SplitDelimiterBehavior.removed
:obj:`removed`
SplitDelimiterBehavior.isolated
:obj:`isolated`
SplitDelimiterBehavior.merged_with_previous
:obj:`merged_with_previous`
SplitDelimiterBehavior.merged_with_next
:obj:`merged_with_next`
SplitDelimiterBehavior.contiguous
:obj:`contiguous`
.. entities:: rust
BertNormalizer.clean_text
clean_text
BertNormalizer.handle_chinese_chars
handle_chinese_chars
BertNormalizer.strip_accents
strip_accents
BertNormalizer.lowercase
lowercase
Normalizer.Sequence
``Sequence::new(vec![NFKC, Lowercase])``
PreTokenizer.Sequence
``Sequence::new(vec![Punctuation, WhitespaceSplit])``
SplitDelimiterBehavior.removed
:obj:`Removed`
SplitDelimiterBehavior.isolated
:obj:`Isolated`
SplitDelimiterBehavior.merged_with_previous
:obj:`MergedWithPrevious`
SplitDelimiterBehavior.merged_with_next
:obj:`MergedWithNext`
SplitDelimiterBehavior.contiguous
:obj:`Contiguous`
.. entities:: node
BertNormalizer.clean_text
cleanText
BertNormalizer.handle_chinese_chars
handleChineseChars
BertNormalizer.strip_accents
stripAccents
BertNormalizer.lowercase
lowercase
Normalizer.Sequence
..
PreTokenizer.Sequence
..
SplitDelimiterBehavior.removed
:obj:`removed`
SplitDelimiterBehavior.isolated
:obj:`isolated`
SplitDelimiterBehavior.merged_with_previous
:obj:`mergedWithPrevious`
SplitDelimiterBehavior.merged_with_next
:obj:`mergedWithNext`
SplitDelimiterBehavior.contiguous
:obj:`contiguous`
Normalizers
----------------------------------------------------------------------------------------------------
A ``Normalizer`` is in charge of pre-processing the input string in order to normalize it as
relevant for a given use case. Some common examples of normalization are the Unicode normalization
algorithms (NFD, NFKD, NFC & NFKC), lowercasing etc...
The specificity of ``tokenizers`` is that we keep track of the alignment while normalizing. This
is essential to allow mapping from the generated tokens back to the input text.
The ``Normalizer`` is optional.
.. list-table::
:header-rows: 1
* - Name
- Description
- Example
* - NFD
- NFD unicode normalization
-
* - NFKD
- NFKD unicode normalization
-
* - NFC
- NFC unicode normalization
-
* - NFKC
- NFKC unicode normalization
-
* - Lowercase
- Replaces all uppercase to lowercase
- Input: ``HELLO ὈΔΥΣΣΕΎΣ``
Output: ``hello ὀδυσσεύς``
* - Strip
- Removes all whitespace characters on the specified sides (left, right or both) of the input
- Input: ``" hi "``
Output: ``"hi"``
* - StripAccents
- Removes all accent symbols in unicode (to be used with NFD for consistency)
- Input: ``é``
Output: ``e``
* - Replace
- Replaces a custom string or regexp and changes it with given content
- ``Replace("a", "e")`` will behave like this:
Input: ``"banana"``
Ouput: ``"benene"``
* - BertNormalizer
- Provides an implementation of the Normalizer used in the original BERT. Options
that can be set are:
- :entity:`BertNormalizer.clean_text`
- :entity:`BertNormalizer.handle_chinese_chars`
- :entity:`BertNormalizer.strip_accents`
- :entity:`BertNormalizer.lowercase`
-
* - Sequence
- Composes multiple normalizers that will run in the provided order
- :entity:`Normalizer.Sequence`
.. _pre-tokenizers:
Pre tokenizers
----------------------------------------------------------------------------------------------------
The ``PreTokenizer`` takes care of splitting the input according to a set of rules. This
pre-processing lets you ensure that the underlying ``Model`` does not build tokens across multiple
"splits".
For example if you don't want to have whitespaces inside a token, then you can have a
``PreTokenizer`` that splits on these whitespaces.
You can easily combine multiple ``PreTokenizer`` together using a ``Sequence`` (see below).
The ``PreTokenizer`` is also allowed to modify the string, just like a ``Normalizer`` does. This
is necessary to allow some complicated algorithms that require splitting before normalizing
(e.g. the ByteLevel).
.. list-table::
:header-rows: 1
* - Name
- Description
- Example
* - ByteLevel
- Splits on whitespaces while remapping all the bytes to a set of visible characters. This
technique has been introduced by OpenAI with GPT-2 and has some more or less nice properties:
- Since it maps on bytes, a tokenizer using this only requires **256** characters as initial
alphabet (the number of values a byte can have), as opposed to the 130,000+ Unicode
characters.
- A consequence of the previous point is that it is absolutely unnecessary to have an
unknown token using this since we can represent anything with 256 tokens (Youhou!! 🎉🎉)
- For non-ASCII characters, it gets completely unreadable, but it works nonetheless!
- Input: ``"Hello my friend, how are you?"``
Ouput: ``"Hello", "Ġmy", Ġfriend", ",", "Ġhow", "Ġare", "Ġyou", "?"``
* - Whitespace
- Splits on word boundaries (using the following regular expression: ``\w+|[^\w\s]+``)
- Input: ``"Hello there!"``
Output: ``"Hello", "there", "!"``
* - WhitespaceSplit
- Splits on any whitespace character
- Input: ``"Hello there!"``
Output: ``"Hello", "there!"``
* - Punctuation
- Will isolate all punctuation characters
- Input: ``"Hello?"``
Ouput: ``"Hello", "?"``
* - Metaspace
- Splits on whitespaces and replaces them with a special char "▁" (U+2581)
- Input: ``"Hello there"``
Ouput: ``"Hello", "▁there"``
* - CharDelimiterSplit
- Splits on a given character
- Example with ``x``:
Input: ``"Helloxthere"``
Ouput: ``"Hello", "there"``
* - Digits
- Splits the numbers from any other characters.
- Input: ``"Hello123there"``
Output: ``"Hello", "123", "there"``
* - Split
- Versatile pre-tokenizer that splits on provided pattern and according to provided behavior.
The pattern can be inverted if necessary.
- pattern should be either a custom string or regexp.
- behavior should be one of:
* :entity:`SplitDelimiterBehavior.removed`
* :entity:`SplitDelimiterBehavior.isolated`
* :entity:`SplitDelimiterBehavior.merged_with_previous`
* :entity:`SplitDelimiterBehavior.merged_with_next`
* :entity:`SplitDelimiterBehavior.contiguous`
- invert should be a boolean flag.
- Example with `pattern` = :obj:`" "`, `behavior` = :obj:`"isolated"`, `invert` = :obj:`False`:
Input: ``"Hello, how are you?"``
Output: ``"Hello,", " ", "how", " ", "are", " ", "you?"`` (see the sketch after this table)
* - Sequence
- Lets you compose multiple ``PreTokenizer`` that will be run in the given order
- :entity:`PreTokenizer.Sequence`
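To make the ``Split`` row above concrete, a small Python sketch (argument names follow the Python
binding; treat them as an assumption if you use another binding):
.. code-block:: python
    from tokenizers.pre_tokenizers import Split
    # "isolated" keeps every match of the pattern as its own split
    pre_tokenizer = Split(pattern=" ", behavior="isolated", invert=False)
    print(pre_tokenizer.pre_tokenize_str("Hello, how are you?"))
    # [("Hello,", (0, 6)), (" ", (6, 7)), ("how", (7, 10)), (" ", (10, 11)), ...]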
.. _models:
Models
----------------------------------------------------------------------------------------------------
Models are the core algorithms used to actually tokenize, and therefore, they are the only mandatory
component of a Tokenizer.
.. list-table::
:header-rows: 1
* - Name
- Description
* - WordLevel
- This is the "classic" tokenization algorithm. It let's you simply map words to IDs
without anything fancy. This has the advantage of being really simple to use and
understand, but it requires extremely large vocabularies for a good coverage.
*Using this* ``Model`` *requires the use of a* ``PreTokenizer``. *No choice will be made by
this model directly, it simply maps input tokens to IDs*
* - BPE
- One of the most popular subword tokenization algorithm. The Byte-Pair-Encoding works by
starting with characters, while merging those that are the most frequently seen together,
thus creating new tokens. It then works iteratively to build new tokens out of the most
frequent pairs it sees in a corpus.
BPE is able to build words it has never seen by using multiple subword tokens, and thus
requires smaller vocabularies, with less chances of having "unk" (unknown) tokens.
* - WordPiece
- This is a subword tokenization algorithm quite similar to BPE, used mainly by Google in
models like BERT. It uses a greedy algorithm that tries to build long words first, splitting
into multiple tokens when entire words don't exist in the vocabulary. This is different from
BPE, which starts from characters and builds bigger tokens when possible.
It uses the famous ``##`` prefix to identify tokens that are part of a word (i.e. not starting
a word).
* - Unigram
- Unigram is also a subword tokenization algorithm, and works by trying to identify the best
set of subword tokens to maximize the probability for a given sentence. This is different
from BPE in that it is not based on a set of merge rules applied deterministically and
sequentially. Instead, Unigram is able to compute multiple ways of tokenizing, while
choosing the most probable one.
.. _post-processors:
PostProcessor
----------------------------------------------------------------------------------------------------
After the whole pipeline, we sometimes want to insert some special tokens before feeding
a tokenized string into a model, like "[CLS] My horse is amazing [SEP]". The ``PostProcessor``
is the component doing just that.
.. list-table::
:header-rows: 1
* - Name
- Description
- Example
* - TemplateProcessing
- Lets you easily template the post-processing, adding special tokens, and specifying
the ``type_id`` for each sequence/special token. The template is given two strings
representing the single sequence and the pair of sequences, as well as a set of
special tokens to use.
- Example, when specifying a template with these values:
- single: ``"[CLS] $A [SEP]"``
- pair: ``"[CLS] $A [SEP] $B [SEP]"``
- special tokens:
- ``"[CLS]"``
- ``"[SEP]"``
Input: ``("I like this", "but not this")``
Output: ``"[CLS] I like this [SEP] but not this [SEP]"``
.. _decoders:
Decoders
----------------------------------------------------------------------------------------------------
The Decoder knows how to go from the IDs used by the Tokenizer, back to a readable piece of text.
Some ``Normalizer`` and ``PreTokenizer`` use special characters or identifiers that need to be
reverted for example.
.. list-table::
:header-rows: 1
* - Name
- Description
* - ByteLevel
- Reverts the ByteLevel PreTokenizer. This PreTokenizer encodes at the byte-level, using
a set of visible Unicode characters to represent each byte, so we need a Decoder to
revert this process and get something readable again.
* - Metaspace
- Reverts the Metaspace PreTokenizer. This PreTokenizer uses a special identifier ``▁`` to
identify whitespaces, and so this Decoder helps with decoding these.
* - WordPiece
- Reverts the WordPiece Model. This model uses a special identifier ``##`` for continuing
subwords, and so this Decoder helps with decoding these.
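As a rough Python illustration (the file path is hypothetical, and the decoder must match
the model and pre-tokenizer that were used during training):

.. code-block:: python

    from tokenizers import Tokenizer, decoders

    tokenizer = Tokenizer.from_file("tokenizer.json")  # hypothetical path
    tokenizer.decoder = decoders.WordPiece()

    ids = tokenizer.encode("Decoding turns ids back into readable text.").ids
    print(tokenizer.decode(ids))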
| 0 |
hf_public_repos/tokenizers/docs/source/_static | hf_public_repos/tokenizers/docs/source/_static/css/huggingface.css | /* Our DOM objects */
/* Version control */
.selectors {
margin-bottom: 10px;
}
.dropdown-button {
display: inline-block;
width: 50%;
background-color: #6670FF;
color: white;
border: none;
padding: 5px;
font-size: 15px;
cursor: pointer;
}
.dropdown-button:hover, .dropdown-button:focus, .dropdown-button.active {
background-color: #A6B0FF;
}
.dropdown-button.active {
background-color: #7988FF;
}
.menu-dropdown {
display: none;
background-color: #7988FF;
min-width: 160px;
overflow: auto;
font-size: 15px;
padding: 10px 0;
}
.menu-dropdown a {
color: white;
padding: 3px 4px;
text-decoration: none;
display: block;
}
.menu-dropdown a:hover {
background-color: #A6B0FF;
}
.dropdown-link.active {
background-color: #A6B0FF;
}
.show {
display: block;
}
/* The literal code blocks */
.rst-content tt.literal, .rst-content code.literal {
color: #6670FF;
}
/* To keep the logo centered */
.wy-side-scroll {
width: auto;
font-size: 20px;
}
/* The div that holds the Hugging Face logo */
.HuggingFaceDiv {
width: 100%
}
/* The research field on top of the toc tree */
.wy-side-nav-search{
padding-top: 0;
background-color: #6670FF;
}
/* The toc tree */
.wy-nav-side{
background-color: #6670FF;
padding-bottom: 0;
}
/* The section headers in the toc tree */
.wy-menu-vertical p.caption{
background-color: #4d59ff;
line-height: 40px;
}
/* The selected items in the toc tree */
.wy-menu-vertical li.current{
background-color: #A6B0FF;
}
/* When a list item that does belong to the selected block from the toc tree is hovered */
.wy-menu-vertical li.current a:hover{
background-color: #B6C0FF;
}
/* When a list item that does NOT belong to the selected block from the toc tree is hovered. */
.wy-menu-vertical li a:hover{
background-color: #A7AFFB;
}
/* The text items on the toc tree */
.wy-menu-vertical a {
color: #FFFFDD;
font-family: Calibre-Light, sans-serif;
}
.wy-menu-vertical header, .wy-menu-vertical p.caption{
color: white;
font-family: Calibre-Light, sans-serif;
}
/* The color inside the selected toc tree block */
.wy-menu-vertical li.toctree-l2 a, .wy-menu-vertical li.toctree-l3 a, .wy-menu-vertical li.toctree-l4 a {
color: black;
}
/* Inside the depth-2 selected toc tree block */
.wy-menu-vertical li.toctree-l2.current>a {
background-color: #B6C0FF
}
.wy-menu-vertical li.toctree-l2.current li.toctree-l3>a {
background-color: #C6D0FF
}
/* Inside the depth-3 selected toc tree block */
.wy-menu-vertical li.toctree-l3.current li.toctree-l4>a{
background-color: #D6E0FF
}
/* Inside code snippets */
.rst-content dl:not(.docutils) dt{
font-size: 15px;
}
/* Links */
a {
color: #6670FF;
}
/* Content bars */
.rst-content dl:not(.docutils) dt {
background-color: rgba(251, 141, 104, 0.1);
border-right: solid 2px #FB8D68;
border-left: solid 2px #FB8D68;
color: #FB8D68;
font-family: Calibre-Light, sans-serif;
border-top: none;
font-style: normal !important;
}
/* Expand button */
.wy-menu-vertical li.toctree-l2 span.toctree-expand,
.wy-menu-vertical li.on a span.toctree-expand, .wy-menu-vertical li.current>a span.toctree-expand,
.wy-menu-vertical li.toctree-l3 span.toctree-expand{
color: black;
}
/* Max window size */
.wy-nav-content{
max-width: 1200px;
}
/* Mobile header */
.wy-nav-top{
background-color: #6670FF;
}
/* Source spans */
.rst-content .viewcode-link, .rst-content .viewcode-back{
color: #6670FF;
font-size: 110%;
letter-spacing: 2px;
text-transform: uppercase;
}
/* It would be better for table to be visible without horizontal scrolling */
.wy-table-responsive table td, .wy-table-responsive table th{
white-space: normal;
}
.footer {
margin-top: 20px;
}
.footer__Social {
display: flex;
flex-direction: row;
}
.footer__CustomImage {
margin: 2px 5px 0 0;
}
/* class and method names in doc */
.rst-content dl:not(.docutils) tt.descname, .rst-content dl:not(.docutils) tt.descclassname, .rst-content dl:not(.docutils) tt.descname, .rst-content dl:not(.docutils) code.descname, .rst-content dl:not(.docutils) tt.descclassname, .rst-content dl:not(.docutils) code.descclassname{
font-family: Calibre, sans-serif;
font-size: 20px !important;
}
/* class name in doc*/
.rst-content dl:not(.docutils) tt.descname, .rst-content dl:not(.docutils) tt.descname, .rst-content dl:not(.docutils) code.descname{
margin-right: 10px;
font-family: Calibre-Medium, sans-serif;
}
/* Method and class parameters */
.sig-param{
line-height: 23px;
}
/* Class introduction "class" string at beginning */
.rst-content dl:not(.docutils) .property{
font-size: 18px;
color: black;
}
/* FONTS */
body{
font-family: Calibre, sans-serif;
font-size: 16px;
}
h1 {
font-family: Calibre-Thin, sans-serif;
font-size: 70px;
}
h2, .rst-content .toctree-wrapper p.caption, h3, h4, h5, h6, legend{
font-family: Calibre-Medium, sans-serif;
}
@font-face {
font-family: Calibre-Medium;
src: url(./Calibre-Medium.otf);
font-weight:400;
}
@font-face {
font-family: Calibre;
src: url(./Calibre-Regular.otf);
font-weight:400;
}
@font-face {
font-family: Calibre-Light;
src: url(./Calibre-Light.ttf);
font-weight:400;
}
@font-face {
font-family: Calibre-Thin;
src: url(./Calibre-Thin.otf);
font-weight:400;
}
/**
* Nav Links to other parts of huggingface.co
*/
div.hf-menu {
position: absolute;
top: 0;
right: 0;
padding-top: 20px;
padding-right: 20px;
z-index: 1000;
}
div.hf-menu a {
font-size: 14px;
letter-spacing: 0.3px;
text-transform: uppercase;
color: white;
-webkit-font-smoothing: antialiased;
background: linear-gradient(0deg, #6671ffb8, #9a66ffb8 50%);
padding: 10px 16px 6px 16px;
border-radius: 3px;
margin-left: 12px;
position: relative;
}
div.hf-menu a:active {
top: 1px;
}
@media (min-width: 768px) and (max-width: 1860px) {
.wy-breadcrumbs {
margin-top: 32px;
}
}
@media (max-width: 768px) {
div.hf-menu {
display: none;
}
}
| 0 |
hf_public_repos/tokenizers/docs/source/_static | hf_public_repos/tokenizers/docs/source/_static/css/code-snippets.css |
.highlight .c1, .highlight .sd{
color: #999
}
.highlight .nn, .highlight .k, .highlight .s1, .highlight .nb, .highlight .bp, .highlight .kc, .highlight .kt {
color: #FB8D68;
}
.highlight .kn, .highlight .nv, .highlight .s2, .highlight .ow, .highlight .kd, .highlight .kr, .highlight .s {
color: #6670FF;
}
.highlight .gp {
color: #FB8D68;
}
| 0 |
hf_public_repos/tokenizers/docs/source/_static | hf_public_repos/tokenizers/docs/source/_static/js/custom.js | // These three variables below need to be updated at each release for the selectors.
const languages = [ "rust", "python", "node" ];
// Last stable version for each language
const stableVersion = {
"rust": "master",
"python": "v0.10.0",
"node": "master"
}
// Dictionary doc folder to Label for each language
const versionMapping = {
"rust": {
"master": "master",
},
"python": {
"master": "master",
"v0.9.4": "v0.9.4",
"v0.10.0": "v0.10.0",
},
"node": {
"master": "master",
}
};
// Dictionary language name to Label
const languageName = {
"rust": "Rust",
"python": "Python",
"node": "Node.js"
};
const defaultLanguage = "python";
function addIcon() {
const huggingFaceLogo =
"https://huggingface.co/landing/assets/transformers-docs/huggingface_logo.svg";
const image = document.createElement("img");
image.setAttribute("src", huggingFaceLogo);
const div = document.createElement("div");
div.appendChild(image);
div.style.textAlign = 'center';
div.style.paddingTop = '30px';
div.style.backgroundColor = '#6670FF';
const scrollDiv = document.querySelector(".wy-side-scroll");
scrollDiv.prepend(div);
}
function addCustomFooter() {
const customFooter = document.createElement("div");
const questionOrIssue = document.createElement("div");
questionOrIssue.innerHTML =
"Stuck? Read our <a href='https://medium.com/huggingface'>Blog posts</a>" +
" or <a href='https://github.com/huggingface/tokenizers'>Create an issue</a>";
customFooter.appendChild(questionOrIssue);
customFooter.classList.add("footer");
const social = document.createElement("div");
social.classList.add("footer__Social");
const imageDetails = [ {
link: "https://huggingface.co",
imageLink: "https://huggingface.co/landing/assets/transformers-docs/website.svg"
}, {
link: "https://twitter.com/huggingface",
imageLink: "https://huggingface.co/landing/assets/transformers-docs/twitter.svg"
}, {
link: "https://github.com/huggingface",
imageLink: "https://huggingface.co/landing/assets/transformers-docs/github.svg"
}, {
link: "https://www.linkedin.com/company/huggingface/",
imageLink: "https://huggingface.co/landing/assets/transformers-docs/linkedin.svg"
} ];
imageDetails.forEach(imageLinks => {
const link = document.createElement("a");
const image = document.createElement("img");
image.src = imageLinks.imageLink;
link.href = imageLinks.link;
image.style.width = "30px";
image.classList.add("footer__CustomImage");
link.appendChild(image);
social.appendChild(link);
});
customFooter.appendChild(social);
document.querySelector("footer").appendChild(customFooter);
}
function addGithubButton() {
const div = `
<div class="github-repo">
<a class="github-button"
href="https://github.com/huggingface/tokenizers"
data-size="large"
data-show-count="true"
aria-label="Star huggingface/tokenizers on GitHub">
Star
</a>
</div>
`;
document.querySelector(".wy-side-nav-search .icon-home").insertAdjacentHTML('afterend', div);
}
function addVersionControl() {
// Default language and version
let language = defaultLanguage;
// To grab the version currently in view, we parse the url
let parts = location.pathname.split('/');
const languageIndex = parts.findIndex((part) => languages.includes(part));
language = parts[languageIndex];
let version = stableVersion[language];
const versionIndex = languageIndex + 1;
// If a version is specified, update it
if (parts[versionIndex] != "" && !parts[versionIndex].endsWith(".html")) {
// If `latest`, let's keep the default (should be the explicit latest version)
if (parts[versionIndex] != "latest") {
version = parts[versionIndex];
}
// Otherwise redirect to the latest (if not opening locally)
} else if (!parts[parts.length - 1].endsWith(".html")) {
return window.location.pathname = [language, version, parts.splice(versionIndex)].join("/");
// Opening locally, just don't show the version/language selector
} else {
return
}
// Language Menu
const languageMenu = document.createElement("div");
languageMenu.classList.add("menu-dropdown");
languageMenu.innerHTML = languages.map((lang) => {
let isVersion = false;
let updatedParts = parts.map((l, i) => {
if (isVersion) {
isVersion = false;
return 'latest';
}
if (i == languageIndex) {
isVersion = true;
return lang;
} else {
return l;
}
});
return `
<a class="dropdown-link ${lang == language? 'active' : ''}"
href=${updatedParts.join("/")}>
${languageName[lang]}
</a>
`;
}).join("\n");
// Version Menu
const versionMenu = document.createElement("div");
versionMenu.classList.add("menu-dropdown");
versionMenu.innerHTML = Object.entries(versionMapping[language]).map(([key, value]) => {
let updatedParts = parts.map((v, i) => {
if (i == versionIndex) {
return key;
} else {
return v
}
});
return `
<a class="dropdown-link ${key == version ? 'active' : ''}"
href="${updatedParts.join('/')}">
${value}
</a>
`;
}).join("\n");
// Language button
const languageButton = document.createElement("div");
languageButton.classList.add("dropdown-button");
languageButton.innerText = languageName[language].concat(" ▼");
languageButton.addEventListener("click", () => {
versionMenu.classList.remove("show");
languageMenu.classList.toggle("show");
languageButton.classList.toggle("active");
});
// Button for version selection
const versionButton = document.createElement("div");
versionButton.classList.add("dropdown-button");
versionButton.innerText = version.concat(" ▼");
// Toggle the menu when we click on the button
versionButton.addEventListener("click", () => {
languageMenu.classList.remove("show");
versionMenu.classList.toggle("show");
versionButton.classList.toggle("active");
});
// Hide the menu when we click elsewhere
window.addEventListener("click", (event) => {
if (event.target != languageButton){
languageButton.classList.remove('active');
languageMenu.classList.remove('show');
}
if (event.target != versionButton){
versionButton.classList.remove('active');
versionMenu.classList.remove('show');
}
});
const buttonContainer = document.createElement("div");
buttonContainer.classList.add("button-container");
buttonContainer.appendChild(languageButton);
buttonContainer.appendChild(versionButton);
// Container
const div = document.createElement("div");
div.classList.add("selectors");
div.appendChild(buttonContainer);
div.appendChild(languageMenu);
div.appendChild(versionMenu);
div.style.paddingTop = '25px';
div.style.backgroundColor = '#6670FF';
div.style.display = 'block';
div.style.textAlign = 'center';
const scrollDiv = document.querySelector(".wy-side-scroll");
scrollDiv.insertBefore(div, scrollDiv.children[1]);
}
function addHfMenu() {
const div = `
<div class="hf-menu">
<a href="/welcome">🔥 Sign in</a>
<a href="/models">🚀 Models</a>
<a href="http://discuss.huggingface.co">💬 Forum</a>
</div>
`;
document.body.insertAdjacentHTML('afterbegin', div);
}
/*!
* github-buttons v2.2.10
* (c) 2019 なつき
* @license BSD-2-Clause
*/
/**
* modified to run programmatically
*/
function parseGithubButtons (){"use strict";var e=window.document,t=e.location,o=window.encodeURIComponent,r=window.decodeURIComponent,n=window.Math,a=window.HTMLElement,i=window.XMLHttpRequest,l="https://unpkg.com/github-buttons@2.2.10/dist/buttons.html",c=i&&i.prototype&&"withCredentials"in i.prototype,d=c&&a&&a.prototype.attachShadow&&!a.prototype.attachShadow.prototype,s=function(e,t,o){e.addEventListener?e.addEventListener(t,o):e.attachEvent("on"+t,o)},u=function(e,t,o){e.removeEventListener?e.removeEventListener(t,o):e.detachEvent("on"+t,o)},h=function(e,t,o){var r=function(n){return u(e,t,r),o(n)};s(e,t,r)},f=function(e,t,o){var r=function(n){if(t.test(e.readyState))return u(e,"readystatechange",r),o(n)};s(e,"readystatechange",r)},p=function(e){return function(t,o,r){var n=e.createElement(t);if(o)for(var a in o){var i=o[a];null!=i&&(null!=n[a]?n[a]=i:n.setAttribute(a,i))}if(r)for(var l=0,c=r.length;l<c;l++){var d=r[l];n.appendChild("string"==typeof d?e.createTextNode(d):d)}return n}},g=p(e),b=function(e){var t;return function(){t||(t=1,e.apply(this,arguments))}},m="body{margin:0}a{color:#24292e;text-decoration:none;outline:0}.octicon{display:inline-block;vertical-align:text-top;fill:currentColor}.widget{ display:inline-block;overflow:hidden;font-family:-apple-system, BlinkMacSystemFont, \"Segoe UI\", Helvetica, Arial, sans-serif;font-size:0;white-space:nowrap;-webkit-user-select:none;-moz-user-select:none;-ms-user-select:none;user-select:none}.btn,.social-count{display:inline-block;height:14px;padding:2px 5px;font-size:11px;font-weight:600;line-height:14px;vertical-align:bottom;cursor:pointer;border:1px solid #c5c9cc;border-radius:0.25em}.btn{background-color:#eff3f6;background-image:-webkit-linear-gradient(top, #fafbfc, #eff3f6 90%);background-image:-moz-linear-gradient(top, #fafbfc, #eff3f6 90%);background-image:linear-gradient(180deg, #fafbfc, #eff3f6 90%);background-position:-1px -1px;background-repeat:repeat-x;background-size:110% 110%;border-color:rgba(27,31,35,0.2);-ms-filter:\"progid:DXImageTransform.Microsoft.Gradient(startColorstr='#FFFAFBFC', endColorstr='#FFEEF2F5')\";*filter:progid:DXImageTransform.Microsoft.Gradient(startColorstr='#FFFAFBFC', endColorstr='#FFEEF2F5')}.btn:active{background-color:#e9ecef;background-image:none;border-color:#a5a9ac;border-color:rgba(27,31,35,0.35);box-shadow:inset 0 0.15em 0.3em rgba(27,31,35,0.15)}.btn:focus,.btn:hover{background-color:#e6ebf1;background-image:-webkit-linear-gradient(top, #f0f3f6, #e6ebf1 90%);background-image:-moz-linear-gradient(top, #f0f3f6, #e6ebf1 90%);background-image:linear-gradient(180deg, #f0f3f6, #e6ebf1 90%);border-color:#a5a9ac;border-color:rgba(27,31,35,0.35);-ms-filter:\"progid:DXImageTransform.Microsoft.Gradient(startColorstr='#FFF0F3F6', endColorstr='#FFE5EAF0')\";*filter:progid:DXImageTransform.Microsoft.Gradient(startColorstr='#FFF0F3F6', endColorstr='#FFE5EAF0')}.social-count{position:relative;margin-left:5px;background-color:#fff}.social-count:focus,.social-count:hover{color:#0366d6}.social-count b,.social-count i{position:absolute;top:50%;left:0;display:block;width:0;height:0;margin:-4px 0 0 -4px;border:solid transparent;border-width:4px 4px 4px 0;_line-height:0;_border-top-color:red !important;_border-bottom-color:red !important;_border-left-color:red !important;_filter:chroma(color=red)}.social-count b{border-right-color:#c5c9cc}.social-count i{margin-left:-3px;border-right-color:#fff}.lg .btn,.lg .social-count{height:16px;padding:5px 10px;font-size:12px;line-height:16px}.lg 
.social-count{margin-left:6px}.lg .social-count b,.lg .social-count i{margin:-5px 0 0 -5px;border-width:5px 5px 5px 0}.lg .social-count i{margin-left:-4px}\n",v={"mark-github":{width:16,height:16,path:'<path fill-rule="evenodd" d="M8 0C3.58 0 0 3.58 0 8c0 3.54 2.29 6.53 5.47 7.59.4.07.55-.17.55-.38 0-.19-.01-.82-.01-1.49-2.01.37-2.53-.49-2.69-.94-.09-.23-.48-.94-.82-1.13-.28-.15-.68-.52-.01-.53.63-.01 1.08.58 1.23.82.72 1.21 1.87.87 2.33.66.07-.52.28-.87.51-1.07-1.78-.2-3.64-.89-3.64-3.95 0-.87.31-1.59.82-2.15-.08-.2-.36-1.02.08-2.12 0 0 .67-.21 2.2.82.64-.18 1.32-.27 2-.27.68 0 1.36.09 2 .27 1.53-1.04 2.2-.82 2.2-.82.44 1.1.16 1.92.08 2.12.51.56.82 1.27.82 2.15 0 3.07-1.87 3.75-3.65 3.95.29.25.54.73.54 1.48 0 1.07-.01 1.93-.01 2.2 0 .21.15.46.55.38A8.013 8.013 0 0 0 16 8c0-4.42-3.58-8-8-8z"/>'},eye:{width:16,height:16,path:'<path fill-rule="evenodd" d="M8.06 2C3 2 0 8 0 8s3 6 8.06 6C13 14 16 8 16 8s-3-6-7.94-6zM8 12c-2.2 0-4-1.78-4-4 0-2.2 1.8-4 4-4 2.22 0 4 1.8 4 4 0 2.22-1.78 4-4 4zm2-4c0 1.11-.89 2-2 2-1.11 0-2-.89-2-2 0-1.11.89-2 2-2 1.11 0 2 .89 2 2z"/>'},star:{width:14,height:16,path:'<path fill-rule="evenodd" d="M14 6l-4.9-.64L7 1 4.9 5.36 0 6l3.6 3.26L2.67 14 7 11.67 11.33 14l-.93-4.74L14 6z"/>'},"repo-forked":{width:10,height:16,path:'<path fill-rule="evenodd" d="M8 1a1.993 1.993 0 0 0-1 3.72V6L5 8 3 6V4.72A1.993 1.993 0 0 0 2 1a1.993 1.993 0 0 0-1 3.72V6.5l3 3v1.78A1.993 1.993 0 0 0 5 15a1.993 1.993 0 0 0 1-3.72V9.5l3-3V4.72A1.993 1.993 0 0 0 8 1zM2 4.2C1.34 4.2.8 3.65.8 3c0-.65.55-1.2 1.2-1.2.65 0 1.2.55 1.2 1.2 0 .65-.55 1.2-1.2 1.2zm3 10c-.66 0-1.2-.55-1.2-1.2 0-.65.55-1.2 1.2-1.2.65 0 1.2.55 1.2 1.2 0 .65-.55 1.2-1.2 1.2zm3-10c-.66 0-1.2-.55-1.2-1.2 0-.65.55-1.2 1.2-1.2.65 0 1.2.55 1.2 1.2 0 .65-.55 1.2-1.2 1.2z"/>'},"issue-opened":{width:14,height:16,path:'<path fill-rule="evenodd" d="M7 2.3c3.14 0 5.7 2.56 5.7 5.7s-2.56 5.7-5.7 5.7A5.71 5.71 0 0 1 1.3 8c0-3.14 2.56-5.7 5.7-5.7zM7 1C3.14 1 0 4.14 0 8s3.14 7 7 7 7-3.14 7-7-3.14-7-7-7zm1 3H6v5h2V4zm0 6H6v2h2v-2z"/>'},"cloud-download":{width:16,height:16,path:'<path fill-rule="evenodd" d="M9 12h2l-3 3-3-3h2V7h2v5zm3-8c0-.44-.91-3-4.5-3C5.08 1 3 2.92 3 5 1.02 5 0 6.52 0 8c0 1.53 1 3 3 3h3V9.7H3C1.38 9.7 1.3 8.28 1.3 8c0-.17.05-1.7 1.7-1.7h1.3V5c0-1.39 1.56-2.7 3.2-2.7 2.55 0 3.13 1.55 3.2 1.8v1.2H12c.81 0 2.7.22 2.7 2.2 0 2.09-2.25 2.2-2.7 2.2h-2V11h2c2.08 0 4-1.16 4-3.5C16 5.06 14.08 4 12 4z"/>'}},w={},x=function(e,t,o){var r=p(e.ownerDocument),n=e.appendChild(r("style",{type:"text/css"}));n.styleSheet?n.styleSheet.cssText=m:n.appendChild(e.ownerDocument.createTextNode(m));var a,l,d=r("a",{className:"btn",href:t.href,target:"_blank",innerHTML:(a=t["data-icon"],l=/^large$/i.test(t["data-size"])?16:14,a=(""+a).toLowerCase().replace(/^octicon-/,""),{}.hasOwnProperty.call(v,a)||(a="mark-github"),'<svg version="1.1" width="'+l*v[a].width/v[a].height+'" height="'+l+'" viewBox="0 0 '+v[a].width+" "+v[a].height+'" class="octicon octicon-'+a+'" aria-hidden="true">'+v[a].path+"</svg>"),"aria-label":t["aria-label"]||void 0},[" ",r("span",{},[t["data-text"]||""])]);/\.github\.com$/.test("."+d.hostname)?/^https?:\/\/((gist\.)?github\.com\/[^\/?#]+\/[^\/?#]+\/archive\/|github\.com\/[^\/?#]+\/[^\/?#]+\/releases\/download\/|codeload\.github\.com\/)/.test(d.href)&&(d.target="_top"):(d.href="#",d.target="_self");var u,h,g,x,y=e.appendChild(r("div",{className:"widget"+(/^large$/i.test(t["data-size"])?" 
lg":"")},[d]));/^(true|1)$/i.test(t["data-show-count"])&&"github.com"===d.hostname&&(u=d.pathname.replace(/^(?!\/)/,"/").match(/^\/([^\/?#]+)(?:\/([^\/?#]+)(?:\/(?:(subscription)|(fork)|(issues)|([^\/?#]+)))?)?(?:[\/?#]|$)/))&&!u[6]?(u[2]?(h="/repos/"+u[1]+"/"+u[2],u[3]?(x="subscribers_count",g="watchers"):u[4]?(x="forks_count",g="network"):u[5]?(x="open_issues_count",g="issues"):(x="stargazers_count",g="stargazers")):(h="/users/"+u[1],g=x="followers"),function(e,t){var o=w[e]||(w[e]=[]);if(!(o.push(t)>1)){var r=b(function(){for(delete w[e];t=o.shift();)t.apply(null,arguments)});if(c){var n=new i;s(n,"abort",r),s(n,"error",r),s(n,"load",function(){var e;try{e=JSON.parse(n.responseText)}catch(e){return void r(e)}r(200!==n.status,e)}),n.open("GET",e),n.send()}else{var a=this||window;a._=function(e){a._=null,r(200!==e.meta.status,e.data)};var l=p(a.document)("script",{async:!0,src:e+(/\?/.test(e)?"&":"?")+"callback=_"}),d=function(){a._&&a._({meta:{}})};s(l,"load",d),s(l,"error",d),l.readyState&&f(l,/de|m/,d),a.document.getElementsByTagName("head")[0].appendChild(l)}}}.call(this,"https://api.github.com"+h,function(e,t){if(!e){var n=t[x];y.appendChild(r("a",{className:"social-count",href:t.html_url+"/"+g,target:"_blank","aria-label":n+" "+x.replace(/_count$/,"").replace("_"," ").slice(0,n<2?-1:void 0)+" on GitHub"},[r("b"),r("i"),r("span",{},[(""+n).replace(/\B(?=(\d{3})+(?!\d))/g,",")])]))}o&&o(y)})):o&&o(y)},y=window.devicePixelRatio||1,C=function(e){return(y>1?n.ceil(n.round(e*y)/y*2)/2:n.ceil(e))||0},F=function(e,t){e.style.width=t[0]+"px",e.style.height=t[1]+"px"},k=function(t,r){if(null!=t&&null!=r)if(t.getAttribute&&(t=function(e){for(var t={href:e.href,title:e.title,"aria-label":e.getAttribute("aria-label")},o=["icon","text","size","show-count"],r=0,n=o.length;r<n;r++){var a="data-"+o[r];t[a]=e.getAttribute(a)}return null==t["data-text"]&&(t["data-text"]=e.textContent||e.innerText),t}(t)),d){var a=g("span",{title:t.title||void 0});x(a.attachShadow({mode:"closed"}),t,function(){r(a)})}else{var i=g("iframe",{src:"javascript:0",title:t.title||void 0,allowtransparency:!0,scrolling:"no",frameBorder:0});F(i,[0,0]),i.style.border="none";var c=function(){var a,d=i.contentWindow;try{a=d.document.body}catch(t){return void e.body.appendChild(i.parentNode.removeChild(i))}u(i,"load",c),x.call(d,a,t,function(e){var a=function(e){var t=e.offsetWidth,o=e.offsetHeight;if(e.getBoundingClientRect){var r=e.getBoundingClientRect();t=n.max(t,C(r.width)),o=n.max(o,C(r.height))}return[t,o]}(e);i.parentNode.removeChild(i),h(i,"load",function(){F(i,a)}),i.src=l+"#"+(i.name=function(e){var t=[];for(var r in e){var n=e[r];null!=n&&t.push(o(r)+"="+o(n))}return t.join("&")}(t)),r(i)})};s(i,"load",c),e.body.appendChild(i)}};t.protocol+"//"+t.host+t.pathname===l?x(e.body,function(e){for(var t={},o=e.split("&"),n=0,a=o.length;n<a;n++){var i=o[n];if(""!==i){var l=i.split("=");t[r(l[0])]=null!=l[1]?r(l.slice(1).join("=")):void 0}}return t}(window.name||t.hash.replace(/^#/,""))):function(t){if(/m/.test(e.readyState)||!/g/.test(e.readyState)&&!e.documentElement.doScroll)setTimeout(t);else if(e.addEventListener){var o=b(t);h(e,"DOMContentLoaded",o),h(window,"load",o)}else f(e,/m/,t)}(function(){for(var t=e.querySelectorAll?e.querySelectorAll("a.github-button"):function(){for(var t=[],o=e.getElementsByTagName("a"),r=0,n=o.length;r<n;r++)~(" "+o[r].className+" ").replace(/[ \t\n\f\r]+/g," ").indexOf(" github-button ")&&t.push(o[r]);return 
t}(),o=0,r=t.length;o<r;o++)!function(e){k(e,function(t){e.parentNode.replaceChild(t,e)})}(t[o])})};
function onLoad() {
addIcon();
addVersionControl();
addCustomFooter();
addGithubButton();
parseGithubButtons();
addHfMenu();
}
window.addEventListener("load", onLoad);
| 0 |
hf_public_repos/tokenizers/docs/source | hf_public_repos/tokenizers/docs/source/installation/main.rst | Installation
====================================================================================================
.. only:: python
.. include:: python.inc
.. only:: rust
.. include:: rust.inc
.. only:: node
.. include:: node.inc
| 0 |
hf_public_repos/tokenizers/docs/source | hf_public_repos/tokenizers/docs/source/installation/rust.inc | Crates.io
----------------------------------------------------------------------------------------------------
🤗 Tokenizers is available on `crates.io <https://crates.io/crates/tokenizers>`__.
You just need to add it to your :obj:`Cargo.toml`::
tokenizers = "0.10"
| 0 |
hf_public_repos/tokenizers/docs/source | hf_public_repos/tokenizers/docs/source/installation/python.inc | 🤗 Tokenizers is tested on Python 3.5+.
You should install 🤗 Tokenizers in a
`virtual environment <https://docs.python.org/3/library/venv.html>`_. If you're unfamiliar with
Python virtual environments, check out the
`user guide <https://packaging.python.org/guides/installing-using-pip-and-virtual-environments/>`__.
Create a virtual environment with the version of Python you're going to use and activate it.
Installation with pip
----------------------------------------------------------------------------------------------------
🤗 Tokenizers can be installed using pip as follows::
pip install tokenizers
Installation from sources
----------------------------------------------------------------------------------------------------
To use this method, you need to have the Rust language installed. You can follow
`the official guide <https://www.rust-lang.org/learn/get-started>`__ for more information.
If you are using a Unix-based OS, the installation should be as simple as running::
curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh
Or you can easily update it with the following command::
rustup update
Once Rust is installed, we can start retrieving the sources for 🤗 Tokenizers::
git clone https://github.com/huggingface/tokenizers
Then we go into the python bindings folder::
cd tokenizers/bindings/python
At this point you should have your `virtual environment`_ already activated. In order to
compile 🤗 Tokenizers, you need to::
pip install -e .
| 0 |
hf_public_repos/tokenizers/docs/source | hf_public_repos/tokenizers/docs/source/installation/node.inc | Installation with npm
----------------------------------------------------------------------------------------------------
You can simply install 🤗 Tokenizers with npm using::
npm install tokenizers
| 0 |
hf_public_repos/tokenizers/docs/source | hf_public_repos/tokenizers/docs/source/_ext/rust_doc.py | from docutils import nodes
import sphinx
from sphinx.locale import _
from conf import rust_version
logger = sphinx.util.logging.getLogger(__name__)
class RustRef:
def __call__(self, name, rawtext, text, lineno, inliner, options={}, content=[]):
doctype = name.split("_")[1]
parts = text.split("::")
if text.startswith("~"):
title = parts[-1]
parts[0] = parts[0][1:]
else:
            title = text
link = self.base_link()
if doctype == "struct":
l, title = self.make_struct_link(parts, title)
if doctype == "func":
l, title = self.make_func_link(parts, title)
if doctype == "meth":
l, title = self.make_meth_link(parts, title)
if doctype == "trait":
l, title = self.make_trait_link(parts, title)
link += l
node = nodes.reference(internal=False, refuri=link, text=title)
wrapper = nodes.literal(classes=["xref"])
wrapper += node
return [wrapper], []
def base_link(self):
return f"https://docs.rs/tokenizers/{rust_version}"
def make_struct_link(self, parts, title):
link = ""
struct_name = parts[-1]
path = parts[:-1]
for p in path:
link += f"/{p}"
link += f"/struct.{struct_name}.html"
return link, title
def make_func_link(self, parts, title):
link = ""
fn_name = parts[-1]
path = parts[:-1]
for p in path:
link += f"/{p}"
link += f"/fn.{fn_name}.html"
return link, title
def make_meth_link(self, parts, title):
meth_name = parts[-1]
if meth_name.endswith("()"):
meth_name = meth_name[:-2]
link, title = self.make_struct_link(parts[:-1], title)
link += f"#method.{meth_name}"
if not title.endswith(")"):
title += "()"
return link, title
def make_trait_link(self, parts, title):
link = ""
trait_name = parts[-1]
path = parts[:-1]
for p in path:
link += f"/{p}"
link += f"/trait.{trait_name}.html"
return link, title
def setup(app):
app.add_role("rust_struct", RustRef())
app.add_role("rust_func", RustRef())
app.add_role("rust_meth", RustRef())
app.add_role("rust_trait", RustRef())
return {
"version": "0.1",
"parallel_read_safe": True,
"parallel_write_safe": True,
}
| 0 |
hf_public_repos/tokenizers/docs/source | hf_public_repos/tokenizers/docs/source/_ext/toctree_tags.py | import re
from sphinx.directives.other import TocTree
class TocTreeTags(TocTree):
    hasPat = re.compile(r"^\s*:(.+):(.+)$")
def filter_entries(self, entries):
filtered = []
for e in entries:
m = self.hasPat.match(e)
            if m is not None:
if self.env.app.tags.has(m.groups()[0]):
filtered.append(m.groups()[1])
else:
filtered.append(e)
return filtered
def run(self):
self.content = self.filter_entries(self.content)
return super().run()
def setup(app):
app.add_directive("toctree-tags", TocTreeTags)
return {
"version": "0.1",
}
| 0 |
hf_public_repos/tokenizers/docs/source | hf_public_repos/tokenizers/docs/source/_ext/entities.py | from collections import defaultdict, abc
from typing import cast
from docutils import nodes
from docutils.parsers.rst import Directive
import sphinx
from sphinx.locale import _
from sphinx.util.docutils import SphinxDirective
from sphinx.errors import ExtensionError, NoUri
from conf import languages as LANGUAGES
logger = sphinx.util.logging.getLogger(__name__)
GLOBALNAME = "$GLOBAL$"
def update(d, u):
for k, v in u.items():
if isinstance(v, abc.Mapping):
d[k] = update(d.get(k, {}), v)
else:
d[k] = v
return d
class EntityNode(nodes.General, nodes.Element):
pass
class EntitiesNode(nodes.General, nodes.Element):
pass
class AllEntities:
def __init__(self):
self.entities = defaultdict(dict)
@classmethod
def install(cls, env):
if not hasattr(env, "entity_all_entities"):
entities = cls()
env.entity_all_entities = entities
return env.entity_all_entities
def merge(self, other):
self.entities.update(other.entities)
def purge(self, docname):
for env_docname in [GLOBALNAME, docname]:
self.entities[env_docname] = dict(
[
(name, entity)
for name, entity in self.entities[env_docname].items()
if entity["docname"] != docname
]
)
def _extract_entities(self, nodes):
pass
def _extract_options(self, nodes):
pass
def _add_entities(self, entities, language, is_global, docname):
scope = GLOBALNAME if is_global else docname
for entity in entities:
name = f'{language}-{entity["name"]}'
content = entity["content"]
if name in self.entities[scope]:
logger.warning(
f'Entity "{name}" has already been defined{" globally" if is_global else ""}',
location=docname,
)
self.entities[scope][name] = {"docname": docname, "content": content}
def _extract_global(self, nodes):
for node in nodes:
if node.tagname != "field":
raise Exception(f"Expected a field, found {node.tagname}")
name, _ = node.children
if name.tagname != "field_name":
raise Exception(f"Expected a field name here, found {name_node.tagname}")
if str(name.children[0]) == "global":
return True
def _extract_entities(self, nodes):
entities = []
for node in nodes:
if node.tagname != "definition_list_item":
raise Exception(f"Expected a list item here, found {node.tagname}")
name_node, content_node = node.children
if name_node.tagname != "term":
raise Exception(f"Expected a term here, found {name_node.tagname}")
if content_node.tagname != "definition":
raise Exception(f"Expected a definition here, found {content_node.tagname}")
name = str(name_node.children[0])
if len(content_node.children) == 1 and content_node.children[0].tagname == "paragraph":
content = content_node.children[0].children[0]
else:
content = content_node
entities.append({"name": name, "content": content})
return entities
def extract(self, node, docname):
is_global = False
entities = []
language = None
for node in node.children:
if language is None and node.tagname != "paragraph":
raise Exception(f"Expected language name:\n.. entities:: <LANGUAGE>")
elif language is None and node.tagname == "paragraph":
language = str(node.children[0])
if language not in LANGUAGES:
raise Exception(
                        f'Unknown language "{language}". Might be missing a newline after language'
)
elif node.tagname == "field_list":
is_global = self._extract_global(node.children)
elif node.tagname == "definition_list":
entities.extend(self._extract_entities(node.children))
else:
raise Exception(f"Expected a list of terms/options, found {node.tagname}")
self._add_entities(entities, language, is_global, docname)
def resolve_pendings(self, app):
env = app.builder.env
updates = defaultdict(dict)
for env_docname in self.entities.keys():
for name, entity in self.entities[env_docname].items():
docname = entity["docname"]
node = entity["content"]
for node in node.traverse(sphinx.addnodes.pending_xref):
contnode = cast(nodes.TextElement, node[0].deepcopy())
newnode = None
typ = node["reftype"]
target = node["reftarget"]
refdoc = node.get("refdoc", docname)
domain = None
try:
if "refdomain" in node and node["refdomain"]:
# let the domain try to resolve the reference
try:
domain = env.domains[node["refdomain"]]
except KeyError as exc:
raise NoUri(target, typ) from exc
newnode = domain.resolve_xref(
env, refdoc, app.builder, typ, target, node, contnode
)
except NoUri:
newnode = contnode
updates[env_docname][name] = {
"docname": docname,
"content": newnode or contnode,
}
update(self.entities, updates)
def get(self, language, name, docname):
name = f"{language}-{name}"
if name in self.entities[docname]:
return self.entities[docname][name]
elif name in self.entities[GLOBALNAME]:
return self.entities[GLOBALNAME][name]
else:
return None
class EntitiesDirective(SphinxDirective):
has_content = True
def run(self):
content = nodes.definition_list()
self.state.nested_parse(self.content, self.content_offset, content)
try:
entities = AllEntities.install(self.env)
entities.extract(content, self.env.docname)
except Exception as err:
raise self.error(f'Malformed directive "entities": {err}')
return []
def entity_role(name, rawtext, text, lineno, inliner, options={}, content=[]):
node = EntityNode()
node.entity = text
return [node], []
def process_entity_nodes(app, doctree, docname):
""" Replace all the entities by their content """
env = app.builder.env
entities = AllEntities.install(env)
entities.resolve_pendings(app)
language = None
try:
language = next(l for l in LANGUAGES if l in app.tags)
except Exception:
logger.warning(f"No language tag specified, not resolving entities in {docname}")
for node in doctree.traverse(EntityNode):
if language is None:
node.replace_self(nodes.Text(_(node.entity), _(node.entity)))
else:
entity = entities.get(language, node.entity, docname)
if entity is None:
node.replace_self(nodes.Text(_(node.entity), _(node.entity)))
logger.warning(f'Entity "{node.entity}" has not been defined', location=node)
else:
node.replace_self(entity["content"])
def purge_entities(app, env, docname):
""" Purge any entity that comes from the given docname """
entities = AllEntities.install(env)
entities.purge(docname)
def merge_entities(app, env, docnames, other):
""" Merge multiple environment entities """
entities = AllEntities.install(env)
other_entities = AllEntities.install(other)
entities.merge(other_entities)
def setup(app):
app.add_node(EntityNode)
app.add_node(EntitiesNode)
app.add_directive("entities", EntitiesDirective)
app.add_role("entity", entity_role)
app.connect("doctree-resolved", process_entity_nodes)
app.connect("env-merge-info", merge_entities)
app.connect("env-purge-doc", purge_entities)
return {
"version": "0.1",
"parallel_read_safe": True,
"parallel_write_safe": True,
}
| 0 |
hf_public_repos/tokenizers/docs/source | hf_public_repos/tokenizers/docs/source/api/rust.inc | Documentation
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
The Rust API Reference is available directly on the `Docs.rs <https://docs.rs/tokenizers>`__
website.
| 0 |
hf_public_repos/tokenizers/docs/source | hf_public_repos/tokenizers/docs/source/api/reference.rst | .. only:: python
.. include:: python.inc
.. only:: rust
.. include:: rust.inc
.. only:: node
.. include:: node.inc
| 0 |
hf_public_repos/tokenizers/docs/source | hf_public_repos/tokenizers/docs/source/api/python.inc | Input sequences
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
These types represent all the different kinds of sequence that can be used as input of a Tokenizer.
Globally, any sequence can be either a string or a list of strings, according to the operating
mode of the tokenizer: ``raw text`` vs ``pre-tokenized``.
.. autodata:: tokenizers.TextInputSequence
.. autodata:: tokenizers.PreTokenizedInputSequence
.. autodata:: tokenizers.InputSequence
Encode inputs
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
These types represent all the different kinds of input that a :class:`~tokenizers.Tokenizer` accepts
when using :meth:`~tokenizers.Tokenizer.encode_batch`.
.. autodata:: tokenizers.TextEncodeInput
.. autodata:: tokenizers.PreTokenizedEncodeInput
.. autodata:: tokenizers.EncodeInput
Tokenizer
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. autoclass:: tokenizers.Tokenizer
:members:
Encoding
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. autoclass:: tokenizers.Encoding
:members:
Added Tokens
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. autoclass:: tokenizers.AddedToken
:members:
Models
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. automodule:: tokenizers.models
:members:
Normalizers
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. automodule:: tokenizers.normalizers
:members:
Pre-tokenizers
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. automodule:: tokenizers.pre_tokenizers
:members:
Post-processor
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. automodule:: tokenizers.processors
:members:
Trainers
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. automodule:: tokenizers.trainers
:members:
Decoders
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. automodule:: tokenizers.decoders
:members:
Visualizer
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. autoclass:: tokenizers.tools.Annotation
:members:
.. autoclass:: tokenizers.tools.EncodingVisualizer
:members: __call__
| 0 |
hf_public_repos/tokenizers/docs/source | hf_public_repos/tokenizers/docs/source/api/node.inc | Documentation
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
The node API has not been documented yet.
| 0 |
hf_public_repos/tokenizers/docs/source/tutorials | hf_public_repos/tokenizers/docs/source/tutorials/python/training_from_memory.rst | Training from memory
----------------------------------------------------------------------------------------------------
In the `Quicktour <quicktour>`__, we saw how to build and train a tokenizer using text files,
but we can actually use any Python Iterator. In this section we'll see a few different ways of
training our tokenizer.
For all the examples listed below, we'll use the same :class:`~tokenizers.Tokenizer` and
:class:`~tokenizers.trainers.Trainer`, built as follows:
.. literalinclude:: ../../../../bindings/python/tests/documentation/test_tutorial_train_from_iterators.py
:language: python
:start-after: START init_tokenizer_trainer
:end-before: END init_tokenizer_trainer
:dedent: 8
This tokenizer is based on the :class:`~tokenizers.models.Unigram` model. It takes care of
normalizing the input using the NFKC Unicode normalization method, and uses a
:class:`~tokenizers.pre_tokenizers.ByteLevel` pre-tokenizer with the corresponding decoder.
For more information on the components used here, you can check `here <components>`__.
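Concretely, that setup looks roughly like the sketch below; the vocabulary size and the
special tokens are assumptions, and the authoritative version is the snippet included above.

.. code-block:: python

    from tokenizers import Tokenizer, decoders, models, normalizers, pre_tokenizers, trainers

    tokenizer = Tokenizer(models.Unigram())
    tokenizer.normalizer = normalizers.NFKC()
    tokenizer.pre_tokenizer = pre_tokenizers.ByteLevel()
    tokenizer.decoder = decoders.ByteLevel()

    trainer = trainers.UnigramTrainer(
        vocab_size=20000,  # assumed value
        initial_alphabet=pre_tokenizers.ByteLevel.alphabet(),
        special_tokens=["<PAD>", "<BOS>", "<EOS>"],  # assumed tokens
    )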
The most basic way
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
As you probably guessed already, the easiest way to train our tokenizer is by using a :obj:`List`:
.. literalinclude:: ../../../../bindings/python/tests/documentation/test_tutorial_train_from_iterators.py
:language: python
:start-after: START train_basic
:end-before: END train_basic
:dedent: 8
Easy, right? You can use anything working as an iterator here, be it a :obj:`List`, :obj:`Tuple`,
or a :obj:`np.Array`. Anything works as long as it provides strings.
Using the 🤗 Datasets library
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
An awesome way to access one of the many datasets that exist out there is by using the 🤗 Datasets
library. For more information about it, you should check
`the official documentation here <https://huggingface.co/docs/datasets/>`__.
Let's start by loading our dataset:
.. literalinclude:: ../../../../bindings/python/tests/documentation/test_tutorial_train_from_iterators.py
:language: python
:start-after: START load_dataset
:end-before: END load_dataset
:dedent: 8
The next step is to build an iterator over this dataset. The easiest way to do this is probably by
using a generator:
.. literalinclude:: ../../../../bindings/python/tests/documentation/test_tutorial_train_from_iterators.py
:language: python
:start-after: START def_batch_iterator
:end-before: END def_batch_iterator
:dedent: 8
As you can see here, for improved efficiency we can actually provide a batch of examples used
to train, instead of iterating over them one by one. By doing so, we can expect performance very
similar to what we got while training directly from files.
With our iterator ready, we just need to launch the training. In order to improve the look of our
progress bars, we can specify the total length of the dataset:
.. literalinclude:: ../../../../bindings/python/tests/documentation/test_tutorial_train_from_iterators.py
:language: python
:start-after: START train_datasets
:end-before: END train_datasets
:dedent: 8
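Putting the pieces together, the whole 🤗 Datasets flow is roughly the following sketch
(the dataset name, batch size and ``"text"`` column are assumptions):

.. code-block:: python

    from datasets import load_dataset

    dataset = load_dataset("wikitext", "wikitext-103-raw-v1", split="train")

    def batch_iterator(batch_size=1000):
        # Yield lists of raw strings, one batch at a time
        for i in range(0, len(dataset), batch_size):
            yield dataset[i : i + batch_size]["text"]

    tokenizer.train_from_iterator(batch_iterator(), trainer=trainer, length=len(dataset))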
And that's it!
Using gzip files
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Since gzip files in Python can be used as iterators, it is extremely simple to train on such files:
.. literalinclude:: ../../../../bindings/python/tests/documentation/test_tutorial_train_from_iterators.py
:language: python
:start-after: START single_gzip
:end-before: END single_gzip
:dedent: 8
Now if we wanted to train from multiple gzip files, it wouldn't be much harder:
.. literalinclude:: ../../../../bindings/python/tests/documentation/test_tutorial_train_from_iterators.py
:language: python
:start-after: START multi_gzip
:end-before: END multi_gzip
:dedent: 8
And voilà!
| 0 |
hf_public_repos/tokenizers/docs | hf_public_repos/tokenizers/docs/source-doc-builder/index.mdx | <!-- DISABLE-FRONTMATTER-SECTIONS -->
# Tokenizers
Fast State-of-the-art tokenizers, optimized for both research and
production
[🤗 Tokenizers](https://github.com/huggingface/tokenizers) provides an
implementation of today's most used tokenizers, with a focus on
performance and versatility. These tokenizers are also used in [🤗 Transformers](https://github.com/huggingface/transformers).
# Main features:
- Train new vocabularies and tokenize, using today's most used tokenizers.
- Extremely fast (both training and tokenization), thanks to the Rust implementation. Takes less than 20 seconds to tokenize a GB of text on a server's CPU.
- Easy to use, but also extremely versatile.
- Designed for both research and production.
- Full alignment tracking. Even with destructive normalization, it's always possible to get the part of the original sentence that corresponds to any token.
- Does all the pre-processing: truncation, padding, and adding the special tokens your model needs.
| 0 |
hf_public_repos/tokenizers/docs | hf_public_repos/tokenizers/docs/source-doc-builder/_toctree.yml | - sections:
- local: index
title: 🤗 Tokenizers
- local: quicktour
title: Quicktour
- local: installation
title: Installation
- local: pipeline
title: The tokenization pipeline
- local: components
title: Components
- local: training_from_memory
title: Training from memory
title: Getting started
- sections:
- local: api/input-sequences
title: Input Sequences
- local: api/encode-inputs
title: Encode Inputs
- local: api/tokenizer
title: Tokenizer
- local: api/encoding
title: Encoding
- local: api/added-tokens
title: Added Tokens
- local: api/models
title: Models
- local: api/normalizers
title: Normalizers
- local: api/pre-tokenizers
title: Pre-tokenizers
- local: api/post-processors
title: Post-processors
- local: api/trainers
title: Trainers
- local: api/decoders
title: Decoders
- local: api/visualizer
title: Visualizer
title: API
| 0 |
hf_public_repos/tokenizers/docs | hf_public_repos/tokenizers/docs/source-doc-builder/training_from_memory.mdx | # Training from memory
In the [Quicktour](quicktour), we saw how to build and train a
tokenizer using text files, but we can actually use any Python Iterator.
In this section we'll see a few different ways of training our
tokenizer.
For all the examples listed below, we'll use the same [`~tokenizers.Tokenizer`] and
[`~tokenizers.trainers.Trainer`], built as follows:
<literalinclude>
{"path": "../../bindings/python/tests/documentation/test_tutorial_train_from_iterators.py",
"language": "python",
"start-after": "START init_tokenizer_trainer",
"end-before": "END init_tokenizer_trainer",
"dedent": 8}
</literalinclude>
This tokenizer is based on the [`~tokenizers.models.Unigram`] model. It
takes care of normalizing the input using the NFKC Unicode normalization
method, and uses a [`~tokenizers.pre_tokenizers.ByteLevel`] pre-tokenizer with the corresponding decoder.
For more information on the components used here, you can check
[here](components).
## The most basic way
As you probably guessed already, the easiest way to train our tokenizer
is by using a `List`:
<literalinclude>
{"path": "../../bindings/python/tests/documentation/test_tutorial_train_from_iterators.py",
"language": "python",
"start-after": "START train_basic",
"end-before": "END train_basic",
"dedent": 8}
</literalinclude>
Easy, right? You can use anything working as an iterator here, be it a
`List`, a `Tuple`, or a `np.Array`. Anything works as long as it provides
strings.
## Using the 🤗 Datasets library
An awesome way to access one of the many datasets that exist out there
is by using the 🤗 Datasets library. For more information about it, you
should check [the official documentation
here](https://huggingface.co/docs/datasets/).
Let's start by loading our dataset:
<literalinclude>
{"path": "../../bindings/python/tests/documentation/test_tutorial_train_from_iterators.py",
"language": "python",
"start-after": "START load_dataset",
"end-before": "END load_dataset",
"dedent": 8}
</literalinclude>
The next step is to build an iterator over this dataset. The easiest way
to do this is probably by using a generator:
<literalinclude>
{"path": "../../bindings/python/tests/documentation/test_tutorial_train_from_iterators.py",
"language": "python",
"start-after": "START def_batch_iterator",
"end-before": "END def_batch_iterator",
"dedent": 8}
</literalinclude>
As you can see here, for improved efficiency we can actually provide a
batch of examples used to train, instead of iterating over them one by
one. By doing so, we can expect performance very similar to what we
got while training directly from files.
With our iterator ready, we just need to launch the training. In order
to improve the look of our progress bars, we can specify the total
length of the dataset:
<literalinclude>
{"path": "../../bindings/python/tests/documentation/test_tutorial_train_from_iterators.py",
"language": "python",
"start-after": "START train_datasets",
"end-before": "END train_datasets",
"dedent": 8}
</literalinclude>
And that's it!
## Using gzip files
Since gzip files in Python can be used as iterators, it is extremely
simple to train on such files:
<literalinclude>
{"path": "../../bindings/python/tests/documentation/test_tutorial_train_from_iterators.py",
"language": "python",
"start-after": "START single_gzip",
"end-before": "END single_gzip",
"dedent": 8}
</literalinclude>
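As a rough sketch, this boils down to opening the archive in text mode and handing the file
object to the trainer; the path below is an assumption, and `tokenizer` and `trainer` are the
objects built at the top of this page:

```python
import gzip

with gzip.open("data/my-file.0.gz", "rt") as f:  # hypothetical path
    tokenizer.train_from_iterator(f, trainer=trainer)
```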
Now if we wanted to train from multiple gzip files, it wouldn't be much
harder:
<literalinclude>
{"path": "../../bindings/python/tests/documentation/test_tutorial_train_from_iterators.py",
"language": "python",
"start-after": "START multi_gzip",
"end-before": "END multi_gzip",
"dedent": 8}
</literalinclude>
And voilà!
| 0 |
hf_public_repos/tokenizers/docs | hf_public_repos/tokenizers/docs/source-doc-builder/pipeline.mdx | # The tokenization pipeline
When calling `Tokenizer.encode` or
`Tokenizer.encode_batch`, the input
text(s) go through the following pipeline:
- `normalization`
- `pre-tokenization`
- `model`
- `post-processing`
We'll see in detail what happens during each of those steps,
as well as when you want to `decode <decoding>` some token ids, and how the 🤗 Tokenizers library allows you
to customize each of those steps to your needs. If you're already
familiar with those steps and want to learn by seeing some code, jump to
`our BERT from scratch example <example>`.
For the examples that require a `Tokenizer` we will use the tokenizer we trained in the
`quicktour`, which you can load with:
<tokenizerslangcontent>
<python>
<literalinclude>
{"path": "../../bindings/python/tests/documentation/test_pipeline.py",
"language": "python",
"start-after": "START reload_tokenizer",
"end-before": "END reload_tokenizer",
"dedent": 12}
</literalinclude>
</python>
<rust>
<literalinclude>
{"path": "../../tokenizers/tests/documentation.rs",
"language": "rust",
"start-after": "START pipeline_reload_tokenizer",
"end-before": "END pipeline_reload_tokenizer",
"dedent": 4}
</literalinclude>
</rust>
<node>
<literalinclude>
{"path": "../../bindings/node/examples/documentation/pipeline.test.ts",
"language": "js",
"start-after": "START reload_tokenizer",
"end-before": "END reload_tokenizer",
"dedent": 8}
</literalinclude>
</node>
</tokenizerslangcontent>
## Normalization
Normalization is, in a nutshell, a set of operations you apply to a raw
string to make it less random or "cleaner". Common operations include
stripping whitespace, removing accented characters or lowercasing all
text. If you're familiar with [Unicode
normalization](https://unicode.org/reports/tr15), it is also a very
common normalization operation applied in most tokenizers.
Each normalization operation is represented in the 🤗 Tokenizers library
by a `Normalizer`, and you can combine
several of those by using a `normalizers.Sequence`. Here is a normalizer applying NFD Unicode normalization
and removing accents as an example:
<tokenizerslangcontent>
<python>
<literalinclude>
{"path": "../../bindings/python/tests/documentation/test_pipeline.py",
"language": "python",
"start-after": "START setup_normalizer",
"end-before": "END setup_normalizer",
"dedent": 8}
</literalinclude>
</python>
<rust>
<literalinclude>
{"path": "../../tokenizers/tests/documentation.rs",
"language": "rust",
"start-after": "START pipeline_setup_normalizer",
"end-before": "END pipeline_setup_normalizer",
"dedent": 4}
</literalinclude>
</rust>
<node>
<literalinclude>
{"path": "../../bindings/node/examples/documentation/pipeline.test.ts",
"language": "js",
"start-after": "START setup_normalizer",
"end-before": "END setup_normalizer",
"dedent": 8}
</literalinclude>
</node>
</tokenizerslangcontent>
You can manually test that normalizer by applying it to any string:
<tokenizerslangcontent>
<python>
<literalinclude>
{"path": "../../bindings/python/tests/documentation/test_pipeline.py",
"language": "python",
"start-after": "START test_normalizer",
"end-before": "END test_normalizer",
"dedent": 8}
</literalinclude>
</python>
<rust>
<literalinclude>
{"path": "../../tokenizers/tests/documentation.rs",
"language": "rust",
"start-after": "START pipeline_test_normalizer",
"end-before": "END pipeline_test_normalizer",
"dedent": 4}
</literalinclude>
</rust>
<node>
<literalinclude>
{"path": "../../bindings/node/examples/documentation/pipeline.test.ts",
"language": "js",
"start-after": "START test_normalizer",
"end-before": "END test_normalizer",
"dedent": 8}
</literalinclude>
</node>
</tokenizerslangcontent>
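In Python, for instance, that check is only a couple of lines (a minimal sketch; the input
string is arbitrary):

```python
from tokenizers import normalizers
from tokenizers.normalizers import NFD, StripAccents

normalizer = normalizers.Sequence([NFD(), StripAccents()])
# Should print something like: "Hello how are u?"
print(normalizer.normalize_str("Héllò hôw are ü?"))
```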
When building a `Tokenizer`, you can
customize its normalizer by just changing the corresponding attribute:
<tokenizerslangcontent>
<python>
<literalinclude>
{"path": "../../bindings/python/tests/documentation/test_pipeline.py",
"language": "python",
"start-after": "START replace_normalizer",
"end-before": "END replace_normalizer",
"dedent": 8}
</literalinclude>
</python>
<rust>
<literalinclude>
{"path": "../../tokenizers/tests/documentation.rs",
"language": "rust",
"start-after": "START pipeline_replace_normalizer",
"end-before": "END pipeline_replace_normalizer",
"dedent": 4}
</literalinclude>
</rust>
<node>
<literalinclude>
{"path": "../../bindings/node/examples/documentation/pipeline.test.ts",
"language": "js",
"start-after": "START replace_normalizer",
"end-before": "END replace_normalizer",
"dedent": 8}
</literalinclude>
</node>
</tokenizerslangcontent>
Of course, if you change the way a tokenizer applies normalization, you
should probably retrain it from scratch afterward.
## Pre-Tokenization
Pre-tokenization is the act of splitting a text into smaller objects
that give an upper bound to what your tokens will be at the end of
training. A good way to think of this is that the pre-tokenizer will
split your text into "words" and then, your final tokens will be parts
of those words.
An easy way to pre-tokenize inputs is to split on spaces and
punctuations, which is done by the
`pre_tokenizers.Whitespace`
pre-tokenizer:
<tokenizerslangcontent>
<python>
<literalinclude>
{"path": "../../bindings/python/tests/documentation/test_pipeline.py",
"language": "python",
"start-after": "START setup_pre_tokenizer",
"end-before": "END setup_pre_tokenizer",
"dedent": 8}
</literalinclude>
</python>
<rust>
<literalinclude>
{"path": "../../tokenizers/tests/documentation.rs",
"language": "rust",
"start-after": "START pipeline_setup_pre_tokenizer",
"end-before": "END pipeline_setup_pre_tokenizer",
"dedent": 4}
</literalinclude>
</rust>
<node>
<literalinclude>
{"path": "../../bindings/node/examples/documentation/pipeline.test.ts",
"language": "js",
"start-after": "START setup_pre_tokenizer",
"end-before": "END setup_pre_tokenizer",
"dedent": 8}
</literalinclude>
</node>
</tokenizerslangcontent>
The output is a list of tuples, with each tuple containing one word and
its span in the original sentence (which is used to determine the final
`offsets` of our `Encoding`). Note that splitting on
punctuation will split contractions like `"I'm"` in this example.
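As a minimal Python sketch, you can try a pre-tokenizer directly on a string:

```python
from tokenizers.pre_tokenizers import Whitespace

pre_tokenizer = Whitespace()
# Returns a list of (word, (start, end)) tuples, e.g. ("Hello", (0, 5)), ("!", (5, 6)), ...
print(pre_tokenizer.pre_tokenize_str("Hello! How are you? I'm fine, thank you."))
```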
You can combine any `PreTokenizer` together. For instance, here is a pre-tokenizer that will
split on space, punctuation and digits, separating numbers in their
individual digits:
<tokenizerslangcontent>
<python>
<literalinclude>
{"path": "../../bindings/python/tests/documentation/test_pipeline.py",
"language": "python",
"start-after": "START combine_pre_tokenizer",
"end-before": "END combine_pre_tokenizer",
"dedent": 8}
</literalinclude>
</python>
<rust>
<literalinclude>
{"path": "../../tokenizers/tests/documentation.rs",
"language": "rust",
"start-after": "START pipeline_combine_pre_tokenizer",
"end-before": "END pipeline_combine_pre_tokenizer",
"dedent": 4}
</literalinclude>
</rust>
<node>
<literalinclude>
{"path": "../../bindings/node/examples/documentation/pipeline.test.ts",
"language": "js",
"start-after": "START combine_pre_tokenizer",
"end-before": "END combine_pre_tokenizer",
"dedent": 8}
</literalinclude>
</node>
</tokenizerslangcontent>
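The snippet referenced above is the authoritative version, but as a rough sketch of what such a combination looks like in Python (the example string and printed output are illustrative):
```python
from tokenizers import pre_tokenizers
from tokenizers.pre_tokenizers import Digits, Whitespace

pre_tokenizer = pre_tokenizers.Sequence([Whitespace(), Digits(individual_digits=True)])
print(pre_tokenizer.pre_tokenize_str("Call 911 now"))
# The number is split into individual digits:
# [('Call', (0, 4)), ('9', (5, 6)), ('1', (6, 7)), ('1', (7, 8)), ('now', (9, 12))]
```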
As we saw in the `quicktour`, you can
customize the pre-tokenizer of a `Tokenizer` by just changing the corresponding attribute:
<tokenizerslangcontent>
<python>
<literalinclude>
{"path": "../../bindings/python/tests/documentation/test_pipeline.py",
"language": "python",
"start-after": "START replace_pre_tokenizer",
"end-before": "END replace_pre_tokenizer",
"dedent": 8}
</literalinclude>
</python>
<rust>
<literalinclude>
{"path": "../../tokenizers/tests/documentation.rs",
"language": "rust",
"start-after": "START pipeline_replace_pre_tokenizer",
"end-before": "END pipeline_replace_pre_tokenizer",
"dedent": 4}
</literalinclude>
</rust>
<node>
<literalinclude>
{"path": "../../bindings/node/examples/documentation/pipeline.test.ts",
"language": "js",
"start-after": "START replace_pre_tokenizer",
"end-before": "END replace_pre_tokenizer",
"dedent": 8}
</literalinclude>
</node>
</tokenizerslangcontent>
Of course, if you change the way the pre-tokenizer works, you should
probably retrain your tokenizer from scratch afterward.
## Model
Once the input texts are normalized and pre-tokenized, the
`Tokenizer` applies the model on the
pre-tokens. This is the part of the pipeline that needs training on your
corpus (or that has been trained if you are using a pretrained
tokenizer).
The role of the model is to split your "words" into tokens, using the
rules it has learned. It's also responsible for mapping those tokens to
their corresponding IDs in the vocabulary of the model.
This model is passed along when initializing the
`Tokenizer` so you already know how to
customize this part. Currently, the 🤗 Tokenizers library supports:
- `models.BPE`
- `models.Unigram`
- `models.WordLevel`
- `models.WordPiece`
For more details about each model and its behavior, you can check
[here](components#models)
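As a quick reference, each of these models is instantiated and handed to the `Tokenizer` constructor in the same way. A minimal sketch (the `unk_token` values here are illustrative choices, not requirements):
```python
from tokenizers import Tokenizer
from tokenizers.models import BPE, Unigram, WordLevel, WordPiece

# Any of these can serve as the core model of a Tokenizer
tokenizer = Tokenizer(BPE(unk_token="[UNK]"))
# tokenizer = Tokenizer(Unigram())
# tokenizer = Tokenizer(WordLevel(unk_token="[UNK]"))
# tokenizer = Tokenizer(WordPiece(unk_token="[UNK]"))
```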
## Post-Processing
Post-processing is the last step of the tokenization pipeline, to
perform any additional transformation to the
`Encoding` before it's returned, like
adding potential special tokens.
As we saw in the quick tour, we can customize the post processor of a
`Tokenizer` by setting the
corresponding attribute. For instance, here is how we can post-process
to make the inputs suitable for the BERT model:
<tokenizerslangcontent>
<python>
<literalinclude>
{"path": "../../bindings/python/tests/documentation/test_pipeline.py",
"language": "python",
"start-after": "START setup_processor",
"end-before": "END setup_processor",
"dedent": 8}
</literalinclude>
</python>
<rust>
<literalinclude>
{"path": "../../tokenizers/tests/documentation.rs",
"language": "rust",
"start-after": "START pipeline_setup_processor",
"end-before": "END pipeline_setup_processor",
"dedent": 4}
</literalinclude>
</rust>
<node>
<literalinclude>
{"path": "../../bindings/node/examples/documentation/pipeline.test.ts",
"language": "js",
"start-after": "START setup_processor",
"end-before": "END setup_processor",
"dedent": 8}
</literalinclude>
</node>
</tokenizerslangcontent>
Note that, contrary to the pre-tokenizer or the normalizer, you don't
need to retrain a tokenizer after changing its post-processor.
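For orientation, the BERT-style post-processor referenced above is built with `TemplateProcessing`. A minimal sketch, assuming `"[CLS]"` and `"[SEP]"` have IDs 1 and 2 in your vocabulary (verify with `Tokenizer.token_to_id` before relying on this):
```python
from tokenizers.processors import TemplateProcessing

tokenizer.post_processor = TemplateProcessing(
    single="[CLS] $A [SEP]",
    pair="[CLS] $A [SEP] $B:1 [SEP]:1",
    special_tokens=[("[CLS]", 1), ("[SEP]", 2)],  # (token, id) pairs; IDs assumed here
)
```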
## All together: a BERT tokenizer from scratch
Let's put all those pieces together to build a BERT tokenizer. First,
BERT relies on WordPiece, so we instantiate a new
`Tokenizer` with this model:
<tokenizerslangcontent>
<python>
<literalinclude>
{"path": "../../bindings/python/tests/documentation/test_pipeline.py",
"language": "python",
"start-after": "START bert_setup_tokenizer",
"end-before": "END bert_setup_tokenizer",
"dedent": 8}
</literalinclude>
</python>
<rust>
<literalinclude>
{"path": "../../tokenizers/tests/documentation.rs",
"language": "rust",
"start-after": "START bert_setup_tokenizer",
"end-before": "END bert_setup_tokenizer",
"dedent": 4}
</literalinclude>
</rust>
<node>
<literalinclude>
{"path": "../../bindings/node/examples/documentation/pipeline.test.ts",
"language": "js",
"start-after": "START bert_setup_tokenizer",
"end-before": "END bert_setup_tokenizer",
"dedent": 8}
</literalinclude>
</node>
</tokenizerslangcontent>
Then we know that BERT preprocesses texts by removing accents and
lowercasing. We also use a unicode normalizer:
<tokenizerslangcontent>
<python>
<literalinclude>
{"path": "../../bindings/python/tests/documentation/test_pipeline.py",
"language": "python",
"start-after": "START bert_setup_normalizer",
"end-before": "END bert_setup_normalizer",
"dedent": 8}
</literalinclude>
</python>
<rust>
<literalinclude>
{"path": "../../tokenizers/tests/documentation.rs",
"language": "rust",
"start-after": "START bert_setup_normalizer",
"end-before": "END bert_setup_normalizer",
"dedent": 4}
</literalinclude>
</rust>
<node>
<literalinclude>
{"path": "../../bindings/node/examples/documentation/pipeline.test.ts",
"language": "js",
"start-after": "START bert_setup_normalizer",
"end-before": "END bert_setup_normalizer",
"dedent": 8}
</literalinclude>
</node>
</tokenizerslangcontent>
The pre-tokenizer is just splitting on whitespace and punctuation:
<tokenizerslangcontent>
<python>
<literalinclude>
{"path": "../../bindings/python/tests/documentation/test_pipeline.py",
"language": "python",
"start-after": "START bert_setup_pre_tokenizer",
"end-before": "END bert_setup_pre_tokenizer",
"dedent": 8}
</literalinclude>
</python>
<rust>
<literalinclude>
{"path": "../../tokenizers/tests/documentation.rs",
"language": "rust",
"start-after": "START bert_setup_pre_tokenizer",
"end-before": "END bert_setup_pre_tokenizer",
"dedent": 4}
</literalinclude>
</rust>
<node>
<literalinclude>
{"path": "../../bindings/node/examples/documentation/pipeline.test.ts",
"language": "js",
"start-after": "START bert_setup_pre_tokenizer",
"end-before": "END bert_setup_pre_tokenizer",
"dedent": 8}
</literalinclude>
</node>
</tokenizerslangcontent>
And the post-processing uses the template we saw in the previous
section:
<tokenizerslangcontent>
<python>
<literalinclude>
{"path": "../../bindings/python/tests/documentation/test_pipeline.py",
"language": "python",
"start-after": "START bert_setup_processor",
"end-before": "END bert_setup_processor",
"dedent": 8}
</literalinclude>
</python>
<rust>
<literalinclude>
{"path": "../../tokenizers/tests/documentation.rs",
"language": "rust",
"start-after": "START bert_setup_processor",
"end-before": "END bert_setup_processor",
"dedent": 4}
</literalinclude>
</rust>
<node>
<literalinclude>
{"path": "../../bindings/node/examples/documentation/pipeline.test.ts",
"language": "js",
"start-after": "START bert_setup_processor",
"end-before": "END bert_setup_processor",
"dedent": 8}
</literalinclude>
</node>
</tokenizerslangcontent>
We can use this tokenizer and train it on wikitext like in the
`quicktour`:
<tokenizerslangcontent>
<python>
<literalinclude>
{"path": "../../bindings/python/tests/documentation/test_pipeline.py",
"language": "python",
"start-after": "START bert_train_tokenizer",
"end-before": "END bert_train_tokenizer",
"dedent": 8}
</literalinclude>
</python>
<rust>
<literalinclude>
{"path": "../../tokenizers/tests/documentation.rs",
"language": "rust",
"start-after": "START bert_train_tokenizer",
"end-before": "END bert_train_tokenizer",
"dedent": 4}
</literalinclude>
</rust>
<node>
<literalinclude>
{"path": "../../bindings/node/examples/documentation/pipeline.test.ts",
"language": "js",
"start-after": "START bert_train_tokenizer",
"end-before": "END bert_train_tokenizer",
"dedent": 8}
</literalinclude>
</node>
</tokenizerslangcontent>
## Decoding
On top of encoding the input texts, a `Tokenizer` also has an API for decoding, that is, converting IDs
generated by your model back to text. This is done by the methods
`Tokenizer.decode` (for one predicted text) and `Tokenizer.decode_batch` (for a batch of predictions).
The `decoder` will first convert the IDs back to tokens
(using the tokenizer's vocabulary) and remove all special tokens, then
join those tokens with spaces:
<tokenizerslangcontent>
<python>
<literalinclude>
{"path": "../../bindings/python/tests/documentation/test_pipeline.py",
"language": "python",
"start-after": "START test_decoding",
"end-before": "END test_decoding",
"dedent": 8}
</literalinclude>
</python>
<rust>
<literalinclude>
{"path": "../../tokenizers/tests/documentation.rs",
"language": "rust",
"start-after": "START pipeline_test_decoding",
"end-before": "END pipeline_test_decoding",
"dedent": 4}
</literalinclude>
</rust>
<node>
<literalinclude>
{"path": "../../bindings/node/examples/documentation/pipeline.test.ts",
"language": "js",
"start-after": "START test_decoding",
"end-before": "END test_decoding",
"dedent": 8}
</literalinclude>
</node>
</tokenizerslangcontent>
If you used a model that added special characters to represent subtokens
of a given "word" (like the `"##"` in
WordPiece), you will need to customize the `decoder` to treat
them properly. If we take our previous `bert_tokenizer` for instance, the
default decoding will give:
<tokenizerslangcontent>
<python>
<literalinclude>
{"path": "../../bindings/python/tests/documentation/test_pipeline.py",
"language": "python",
"start-after": "START bert_test_decoding",
"end-before": "END bert_test_decoding",
"dedent": 8}
</literalinclude>
</python>
<rust>
<literalinclude>
{"path": "../../tokenizers/tests/documentation.rs",
"language": "rust",
"start-after": "START bert_test_decoding",
"end-before": "END bert_test_decoding",
"dedent": 4}
</literalinclude>
</rust>
<node>
<literalinclude>
{"path": "../../bindings/node/examples/documentation/pipeline.test.ts",
"language": "js",
"start-after": "START bert_test_decoding",
"end-before": "END bert_test_decoding",
"dedent": 8}
</literalinclude>
</node>
</tokenizerslangcontent>
But by changing it to a proper decoder, we get:
<tokenizerslangcontent>
<python>
<literalinclude>
{"path": "../../bindings/python/tests/documentation/test_pipeline.py",
"language": "python",
"start-after": "START bert_proper_decoding",
"end-before": "END bert_proper_decoding",
"dedent": 8}
</literalinclude>
</python>
<rust>
<literalinclude>
{"path": "../../tokenizers/tests/documentation.rs",
"language": "rust",
"start-after": "START bert_proper_decoding",
"end-before": "END bert_proper_decoding",
"dedent": 4}
</literalinclude>
</rust>
<node>
<literalinclude>
{"path": "../../bindings/node/examples/documentation/pipeline.test.ts",
"language": "js",
"start-after": "START bert_proper_decoding",
"end-before": "END bert_proper_decoding",
"dedent": 8}
</literalinclude>
</node>
</tokenizerslangcontent>
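In Python, that switch is a one-liner. A sketch, assuming `bert_tokenizer` is the WordPiece-based tokenizer built above:
```python
from tokenizers import decoders

bert_tokenizer.decoder = decoders.WordPiece()
# "##"-prefixed subwords are now merged back onto the previous token when decoding
```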
| 0 |
hf_public_repos/tokenizers/docs | hf_public_repos/tokenizers/docs/source-doc-builder/quicktour.mdx | # Quicktour
Let's have a quick look at the 🤗 Tokenizers library features. The
library provides an implementation of today's most used tokenizers that
is both easy to use and blazing fast.
## Build a tokenizer from scratch
To illustrate how fast the 🤗 Tokenizers library is, let's train a new
tokenizer on [wikitext-103](https://blog.einstein.ai/the-wikitext-long-term-dependency-language-modeling-dataset/)
(516M of text) in just a few seconds. First things first, you will need
to download this dataset and unzip it with:
``` bash
wget https://s3.amazonaws.com/research.metamind.io/wikitext/wikitext-103-raw-v1.zip
unzip wikitext-103-raw-v1.zip
```
### Training the tokenizer
In this tour, we will build and train a Byte-Pair Encoding (BPE)
tokenizer. For more information about the different types of tokenizers,
check out this [guide](https://huggingface.co/transformers/tokenizer_summary.html) in
the 🤗 Transformers documentation. Here, training the tokenizer means it
will learn merge rules by:
- Start with all the characters present in the training corpus as
tokens.
- Identify the most common pair of tokens and merge it into one token.
- Repeat until the vocabulary (i.e., the number of tokens) has reached
  the size we want (see the toy sketch just below).
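This is only a toy illustration of those merge rules, not the library's implementation (real training also records the merge order, handles end-of-word markers, frequencies, and so on):
```python
from collections import Counter

# Toy corpus, already split into characters
words = [["h", "u", "g"], ["h", "u", "g"], ["p", "u", "g"], ["b", "u", "g"]]

def most_frequent_pair(words):
    pairs = Counter()
    for word in words:
        for a, b in zip(word, word[1:]):
            pairs[(a, b)] += 1
    return pairs.most_common(1)[0][0]

def merge(words, pair):
    merged, out = "".join(pair), []
    for word in words:
        new_word, i = [], 0
        while i < len(word):
            if i + 1 < len(word) and (word[i], word[i + 1]) == pair:
                new_word.append(merged)
                i += 2
            else:
                new_word.append(word[i])
                i += 1
        out.append(new_word)
    return out

pair = most_frequent_pair(words)  # ('u', 'g') is the most common pair
words = merge(words, pair)        # [['h', 'ug'], ['h', 'ug'], ['p', 'ug'], ['b', 'ug']]
```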
The main API of the library is the `Tokenizer` class; here is how
we instantiate one with a BPE model:
<tokenizerslangcontent>
<python>
<literalinclude>
{"path": "../../bindings/python/tests/documentation/test_quicktour.py",
"language": "python",
"start-after": "START init_tokenizer",
"end-before": "END init_tokenizer",
"dedent": 8}
</literalinclude>
</python>
<rust>
<literalinclude>
{"path": "../../tokenizers/tests/documentation.rs",
"language": "rust",
"start-after": "START quicktour_init_tokenizer",
"end-before": "END quicktour_init_tokenizer",
"dedent": 4}
</literalinclude>
</rust>
<node>
<literalinclude>
{"path": "../../bindings/node/examples/documentation/quicktour.test.ts",
"language": "js",
"start-after": "START init_tokenizer",
"end-before": "END init_tokenizer",
"dedent": 8}
</literalinclude>
</node>
</tokenizerslangcontent>
To train our tokenizer on the wikitext files, we will need to
instantiate a trainer, in this case a
`BpeTrainer`
<tokenizerslangcontent>
<python>
<literalinclude>
{"path": "../../bindings/python/tests/documentation/test_quicktour.py",
"language": "python",
"start-after": "START init_trainer",
"end-before": "END init_trainer",
"dedent": 8}
</literalinclude>
</python>
<rust>
<literalinclude>
{"path": "../../tokenizers/tests/documentation.rs",
"language": "rust",
"start-after": "START quicktour_init_trainer",
"end-before": "END quicktour_init_trainer",
"dedent": 4}
</literalinclude>
</rust>
<node>
<literalinclude>
{"path": "../../bindings/node/examples/documentation/quicktour.test.ts",
"language": "js",
"start-after": "START init_trainer",
"end-before": "END init_trainer",
"dedent": 8}
</literalinclude>
</node>
</tokenizerslangcontent>
We can set the training arguments like `vocab_size` or `min_frequency` (here
left at their default values of 30,000 and 0), but the most important
part is to give the `special_tokens` we
plan to use later on (they are not used at all during training) so that
they get inserted in the vocabulary.
<Tip>
The order in which you write the special tokens list matters: here `"[UNK]"` will get the ID 0,
`"[CLS]"` will get the ID 1 and so forth.
</Tip>
We could train our tokenizer right now, but it wouldn't be optimal.
Without a pre-tokenizer that will split our inputs into words, we might
get tokens that overlap several words: for instance we could get an
`"it is"` token since those two words
often appear next to each other. Using a pre-tokenizer will ensure no
token is bigger than a word returned by the pre-tokenizer. Here we want
to train a subword BPE tokenizer, and we will use the easiest
pre-tokenizer possible by splitting on whitespace.
<tokenizerslangcontent>
<python>
<literalinclude>
{"path": "../../bindings/python/tests/documentation/test_quicktour.py",
"language": "python",
"start-after": "START init_pretok",
"end-before": "END init_pretok",
"dedent": 8}
</literalinclude>
</python>
<rust>
<literalinclude>
{"path": "../../tokenizers/tests/documentation.rs",
"language": "rust",
"start-after": "START quicktour_init_pretok",
"end-before": "END quicktour_init_pretok",
"dedent": 4}
</literalinclude>
</rust>
<node>
<literalinclude>
{"path": "../../bindings/node/examples/documentation/quicktour.test.ts",
"language": "js",
"start-after": "START init_pretok",
"end-before": "END init_pretok",
"dedent": 8}
</literalinclude>
</node>
</tokenizerslangcontent>
Now, we can just call the `Tokenizer.train` method with any list of files we want to use:
<tokenizerslangcontent>
<python>
<literalinclude>
{"path": "../../bindings/python/tests/documentation/test_quicktour.py",
"language": "python",
"start-after": "START train",
"end-before": "END train",
"dedent": 8}
</literalinclude>
</python>
<rust>
<literalinclude>
{"path": "../../tokenizers/tests/documentation.rs",
"language": "rust",
"start-after": "START quicktour_train",
"end-before": "END quicktour_train",
"dedent": 4}
</literalinclude>
</rust>
<node>
<literalinclude>
{"path": "../../bindings/node/examples/documentation/quicktour.test.ts",
"language": "js",
"start-after": "START train",
"end-before": "END train",
"dedent": 8}
</literalinclude>
</node>
</tokenizerslangcontent>
This should only take a few seconds to train our tokenizer on the full
wikitext dataset! To save the tokenizer in one file that contains all
its configuration and vocabulary, just use the
`Tokenizer.save` method:
<tokenizerslangcontent>
<python>
<literalinclude>
{"path": "../../bindings/python/tests/documentation/test_quicktour.py",
"language": "python",
"start-after": "START save",
"end-before": "END save",
"dedent": 8}
</literalinclude>
</python>
<rust>
<literalinclude>
{"path": "../../tokenizers/tests/documentation.rs",
"language": "rust",
"start-after": "START quicktour_save",
"end-before": "END quicktour_save",
"dedent": 4}
</literalinclude>
</rust>
<node>
<literalinclude>
{"path": "../../bindings/node/examples/documentation/quicktour.test.ts",
"language": "js",
"start-after": "START save",
"end-before": "END save",
"dedent": 8}
</literalinclude>
</node>
</tokenizerslangcontent>
and you can reload your tokenizer from that file with the
`Tokenizer.from_file`
`classmethod`:
<tokenizerslangcontent>
<python>
<literalinclude>
{"path": "../../bindings/python/tests/documentation/test_quicktour.py",
"language": "python",
"start-after": "START reload_tokenizer",
"end-before": "END reload_tokenizer",
"dedent": 12}
</literalinclude>
</python>
<rust>
<literalinclude>
{"path": "../../tokenizers/tests/documentation.rs",
"language": "rust",
"start-after": "START quicktour_reload_tokenizer",
"end-before": "END quicktour_reload_tokenizer",
"dedent": 4}
</literalinclude>
</rust>
<node>
<literalinclude>
{"path": "../../bindings/node/examples/documentation/quicktour.test.ts",
"language": "js",
"start-after": "START reload_tokenizer",
"end-before": "END reload_tokenizer",
"dedent": 8}
</literalinclude>
</node>
</tokenizerslangcontent>
### Using the tokenizer
Now that we have trained a tokenizer, we can use it on any text we want
with the `Tokenizer.encode` method:
<tokenizerslangcontent>
<python>
<literalinclude>
{"path": "../../bindings/python/tests/documentation/test_quicktour.py",
"language": "python",
"start-after": "START encode",
"end-before": "END encode",
"dedent": 8}
</literalinclude>
</python>
<rust>
<literalinclude>
{"path": "../../tokenizers/tests/documentation.rs",
"language": "rust",
"start-after": "START quicktour_encode",
"end-before": "END quicktour_encode",
"dedent": 4}
</literalinclude>
</rust>
<node>
<literalinclude>
{"path": "../../bindings/node/examples/documentation/quicktour.test.ts",
"language": "js",
"start-after": "START encode",
"end-before": "END encode",
"dedent": 8}
</literalinclude>
</node>
</tokenizerslangcontent>
This applied the full pipeline of the tokenizer on the text, returning
an `Encoding` object. To learn more
about this pipeline, and how to apply (or customize) parts of it, check out [this page](pipeline).
This `Encoding` object then has all the
attributes you need for your deep learning model (or other). The
`tokens` attribute contains the
segmentation of your text in tokens:
<tokenizerslangcontent>
<python>
<literalinclude>
{"path": "../../bindings/python/tests/documentation/test_quicktour.py",
"language": "python",
"start-after": "START print_tokens",
"end-before": "END print_tokens",
"dedent": 8}
</literalinclude>
</python>
<rust>
<literalinclude>
{"path": "../../tokenizers/tests/documentation.rs",
"language": "rust",
"start-after": "START quicktour_print_tokens",
"end-before": "END quicktour_print_tokens",
"dedent": 4}
</literalinclude>
</rust>
<node>
<literalinclude>
{"path": "../../bindings/node/examples/documentation/quicktour.test.ts",
"language": "js",
"start-after": "START print_tokens",
"end-before": "END print_tokens",
"dedent": 8}
</literalinclude>
</node>
</tokenizerslangcontent>
Similarly, the `ids` attribute will
contain the index of each of those tokens in the tokenizer's
vocabulary:
<tokenizerslangcontent>
<python>
<literalinclude>
{"path": "../../bindings/python/tests/documentation/test_quicktour.py",
"language": "python",
"start-after": "START print_ids",
"end-before": "END print_ids",
"dedent": 8}
</literalinclude>
</python>
<rust>
<literalinclude>
{"path": "../../tokenizers/tests/documentation.rs",
"language": "rust",
"start-after": "START quicktour_print_ids",
"end-before": "END quicktour_print_ids",
"dedent": 4}
</literalinclude>
</rust>
<node>
<literalinclude>
{"path": "../../bindings/node/examples/documentation/quicktour.test.ts",
"language": "js",
"start-after": "START print_ids",
"end-before": "END print_ids",
"dedent": 8}
</literalinclude>
</node>
</tokenizerslangcontent>
An important feature of the 🤗 Tokenizers library is that it comes with
full alignment tracking, meaning you can always get the part of your
original sentence that corresponds to a given token. Those are stored in
the `offsets` attribute of our
`Encoding` object. For instance, let's
assume we want to find out what caused the
`"[UNK]"` token to appear. Since it is the
token at index 9 in the list, we can just ask for the offset at that
index:
<tokenizerslangcontent>
<python>
<literalinclude>
{"path": "../../bindings/python/tests/documentation/test_quicktour.py",
"language": "python",
"start-after": "START print_offsets",
"end-before": "END print_offsets",
"dedent": 8}
</literalinclude>
</python>
<rust>
<literalinclude>
{"path": "../../tokenizers/tests/documentation.rs",
"language": "rust",
"start-after": "START quicktour_print_offsets",
"end-before": "END quicktour_print_offsets",
"dedent": 4}
</literalinclude>
</rust>
<node>
<literalinclude>
{"path": "../../bindings/node/examples/documentation/quicktour.test.ts",
"language": "js",
"start-after": "START print_offsets",
"end-before": "END print_offsets",
"dedent": 8}
</literalinclude>
</node>
</tokenizerslangcontent>
and those are the indices that correspond to the emoji in the original
sentence:
<tokenizerslangcontent>
<python>
<literalinclude>
{"path": "../../bindings/python/tests/documentation/test_quicktour.py",
"language": "python",
"start-after": "START use_offsets",
"end-before": "END use_offsets",
"dedent": 8}
</literalinclude>
</python>
<rust>
<literalinclude>
{"path": "../../tokenizers/tests/documentation.rs",
"language": "rust",
"start-after": "START quicktour_use_offsets",
"end-before": "END quicktour_use_offsets",
"dedent": 4}
</literalinclude>
</rust>
<node>
<literalinclude>
{"path": "../../bindings/node/examples/documentation/quicktour.test.ts",
"language": "js",
"start-after": "START use_offsets",
"end-before": "END use_offsets",
"dedent": 8}
</literalinclude>
</node>
</tokenizerslangcontent>
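Put differently, the offsets let you slice the original input directly. A sketch of the idea, assuming `output` and `sentence` are the encoding and the input string from the snippets above:
```python
# Recover the original characters behind token index 9 (the "[UNK]" token)
start, end = output.offsets[9]
print(sentence[start:end])  # prints the emoji that was not in the vocabulary
```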
### Post-processing
We might want our tokenizer to automatically add special tokens, like
`"[CLS]"` or `"[SEP]"`. To do this, we use a post-processor.
`TemplateProcessing` is the most
commonly used, you just have to specify a template for the processing of
single sentences and pairs of sentences, along with the special tokens
and their IDs.
When we built our tokenizer, we set `"[CLS]"` and `"[SEP]"` in positions 1
and 2 of our list of special tokens, so this should be their IDs. To
double-check, we can use the `Tokenizer.token_to_id` method:
<tokenizerslangcontent>
<python>
<literalinclude>
{"path": "../../bindings/python/tests/documentation/test_quicktour.py",
"language": "python",
"start-after": "START check_sep",
"end-before": "END check_sep",
"dedent": 8}
</literalinclude>
</python>
<rust>
<literalinclude>
{"path": "../../tokenizers/tests/documentation.rs",
"language": "rust",
"start-after": "START quicktour_check_sep",
"end-before": "END quicktour_check_sep",
"dedent": 4}
</literalinclude>
</rust>
<node>
<literalinclude>
{"path": "../../bindings/node/examples/documentation/quicktour.test.ts",
"language": "js",
"start-after": "START check_sep",
"end-before": "END check_sep",
"dedent": 8}
</literalinclude>
</node>
</tokenizerslangcontent>
Here is how we can set the post-processing to give us the traditional
BERT inputs:
<tokenizerslangcontent>
<python>
<literalinclude>
{"path": "../../bindings/python/tests/documentation/test_quicktour.py",
"language": "python",
"start-after": "START init_template_processing",
"end-before": "END init_template_processing",
"dedent": 8}
</literalinclude>
</python>
<rust>
<literalinclude>
{"path": "../../tokenizers/tests/documentation.rs",
"language": "rust",
"start-after": "START quicktour_init_template_processing",
"end-before": "END quicktour_init_template_processing",
"dedent": 4}
</literalinclude>
</rust>
<node>
<literalinclude>
{"path": "../../bindings/node/examples/documentation/quicktour.test.ts",
"language": "js",
"start-after": "START init_template_processing",
"end-before": "END init_template_processing",
"dedent": 8}
</literalinclude>
</node>
</tokenizerslangcontent>
Let's go over this snippet of code in more detail. First we specify
the template for single sentences: those should have the form
`"[CLS] $A [SEP]"` where
`$A` represents our sentence.
Then, we specify the template for sentence pairs, which should have the
form `"[CLS] $A [SEP] $B [SEP]"` where
`$A` represents the first sentence and
`$B` the second one. The
`:1` added in the template represents the `type IDs` we want for each part of our input: it defaults
to 0 for everything (which is why we don't have
`$A:0`) and here we set it to 1 for the
tokens of the second sentence and the last `"[SEP]"` token.
Lastly, we specify the special tokens we used and their IDs in our
tokenizer's vocabulary.
To check that this worked properly, let's try to encode the same
sentence as before:
<tokenizerslangcontent>
<python>
<literalinclude>
{"path": "../../bindings/python/tests/documentation/test_quicktour.py",
"language": "python",
"start-after": "START print_special_tokens",
"end-before": "END print_special_tokens",
"dedent": 8}
</literalinclude>
</python>
<rust>
<literalinclude>
{"path": "../../tokenizers/tests/documentation.rs",
"language": "rust",
"start-after": "START quicktour_print_special_tokens",
"end-before": "END quicktour_print_special_tokens",
"dedent": 4}
</literalinclude>
</rust>
<node>
<literalinclude>
{"path": "../../bindings/node/examples/documentation/quicktour.test.ts",
"language": "js",
"start-after": "START print_special_tokens",
"end-before": "END print_special_tokens",
"dedent": 8}
</literalinclude>
</node>
</tokenizerslangcontent>
To check the results on a pair of sentences, we just pass the two
sentences to `Tokenizer.encode`:
<tokenizerslangcontent>
<python>
<literalinclude>
{"path": "../../bindings/python/tests/documentation/test_quicktour.py",
"language": "python",
"start-after": "START print_special_tokens_pair",
"end-before": "END print_special_tokens_pair",
"dedent": 8}
</literalinclude>
</python>
<rust>
<literalinclude>
{"path": "../../tokenizers/tests/documentation.rs",
"language": "rust",
"start-after": "START quicktour_print_special_tokens_pair",
"end-before": "END quicktour_print_special_tokens_pair",
"dedent": 4}
</literalinclude>
</rust>
<node>
<literalinclude>
{"path": "../../bindings/node/examples/documentation/quicktour.test.ts",
"language": "js",
"start-after": "START print_special_tokens_pair",
"end-before": "END print_special_tokens_pair",
"dedent": 8}
</literalinclude>
</node>
</tokenizerslangcontent>
You can then check that the type IDs attributed to each token are correct with
<tokenizerslangcontent>
<python>
<literalinclude>
{"path": "../../bindings/python/tests/documentation/test_quicktour.py",
"language": "python",
"start-after": "START print_type_ids",
"end-before": "END print_type_ids",
"dedent": 8}
</literalinclude>
</python>
<rust>
<literalinclude>
{"path": "../../tokenizers/tests/documentation.rs",
"language": "rust",
"start-after": "START quicktour_print_type_ids",
"end-before": "END quicktour_print_type_ids",
"dedent": 4}
</literalinclude>
</rust>
<node>
<literalinclude>
{"path": "../../bindings/node/examples/documentation/quicktour.test.ts",
"language": "js",
"start-after": "START print_type_ids",
"end-before": "END print_type_ids",
"dedent": 8}
</literalinclude>
</node>
</tokenizerslangcontent>
If you save your tokenizer with `Tokenizer.save`, the post-processor will be saved along with it.
### Encoding multiple sentences in a batch
To get the full speed of the 🤗 Tokenizers library, it's best to
process your texts in batches, using the
`Tokenizer.encode_batch` method:
<tokenizerslangcontent>
<python>
<literalinclude>
{"path": "../../bindings/python/tests/documentation/test_quicktour.py",
"language": "python",
"start-after": "START encode_batch",
"end-before": "END encode_batch",
"dedent": 8}
</literalinclude>
</python>
<rust>
<literalinclude>
{"path": "../../tokenizers/tests/documentation.rs",
"language": "rust",
"start-after": "START quicktour_encode_batch",
"end-before": "END quicktour_encode_batch",
"dedent": 4}
</literalinclude>
</rust>
<node>
<literalinclude>
{"path": "../../bindings/node/examples/documentation/quicktour.test.ts",
"language": "js",
"start-after": "START encode_batch",
"end-before": "END encode_batch",
"dedent": 8}
</literalinclude>
</node>
</tokenizerslangcontent>
The output is then a list of `Encoding`
objects like the ones we saw before. You can process together as many
texts as you like, as long as it fits in memory.
To process a batch of sentence pairs, pass two lists to the
`Tokenizer.encode_batch` method: the
list of sentences A and the list of sentences B:
<tokenizerslangcontent>
<python>
<literalinclude>
{"path": "../../bindings/python/tests/documentation/test_quicktour.py",
"language": "python",
"start-after": "START encode_batch_pair",
"end-before": "END encode_batch_pair",
"dedent": 8}
</literalinclude>
</python>
<rust>
<literalinclude>
{"path": "../../tokenizers/tests/documentation.rs",
"language": "rust",
"start-after": "START quicktour_encode_batch_pair",
"end-before": "END quicktour_encode_batch_pair",
"dedent": 4}
</literalinclude>
</rust>
<node>
<literalinclude>
{"path": "../../bindings/node/examples/documentation/quicktour.test.ts",
"language": "js",
"start-after": "START encode_batch_pair",
"end-before": "END encode_batch_pair",
"dedent": 8}
</literalinclude>
</node>
</tokenizerslangcontent>
When encoding multiple sentences, you can automatically pad the outputs
to the longest sentence present by using
`Tokenizer.enable_padding`, with the
`pad_token` and its ID (we can
double-check the ID of the padding token with
`Tokenizer.token_to_id` like before):
<tokenizerslangcontent>
<python>
<literalinclude>
{"path": "../../bindings/python/tests/documentation/test_quicktour.py",
"language": "python",
"start-after": "START enable_padding",
"end-before": "END enable_padding",
"dedent": 8}
</literalinclude>
</python>
<rust>
<literalinclude>
{"path": "../../tokenizers/tests/documentation.rs",
"language": "rust",
"start-after": "START quicktour_enable_padding",
"end-before": "END quicktour_enable_padding",
"dedent": 4}
</literalinclude>
</rust>
<node>
<literalinclude>
{"path": "../../bindings/node/examples/documentation/quicktour.test.ts",
"language": "js",
"start-after": "START enable_padding",
"end-before": "END enable_padding",
"dedent": 8}
</literalinclude>
</node>
</tokenizerslangcontent>
We can set the `direction` of the padding
(defaults to the right) or a given `length` if we want to pad every sample to that specific number (here
we leave it unset to pad to the size of the longest text).
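As a sketch of those two knobs (the `pad_id` of 3 is an assumption based on `"[PAD]"` being the fourth special token in our list; double-check it with `Tokenizer.token_to_id`):
```python
# Pad to the longest sequence in the batch (default), padding on the right
tokenizer.enable_padding(pad_id=3, pad_token="[PAD]")

# Or pad every sample to a fixed length, padding on the left instead
tokenizer.enable_padding(direction="left", pad_id=3, pad_token="[PAD]", length=16)
```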
<tokenizerslangcontent>
<python>
<literalinclude>
{"path": "../../bindings/python/tests/documentation/test_quicktour.py",
"language": "python",
"start-after": "START print_batch_tokens",
"end-before": "END print_batch_tokens",
"dedent": 8}
</literalinclude>
</python>
<rust>
<literalinclude>
{"path": "../../tokenizers/tests/documentation.rs",
"language": "rust",
"start-after": "START quicktour_print_batch_tokens",
"end-before": "END quicktour_print_batch_tokens",
"dedent": 4}
</literalinclude>
</rust>
<node>
<literalinclude>
{"path": "../../bindings/node/examples/documentation/quicktour.test.ts",
"language": "js",
"start-after": "START print_batch_tokens",
"end-before": "END print_batch_tokens",
"dedent": 8}
</literalinclude>
</node>
</tokenizerslangcontent>
In this case, the `attention mask` generated by the
tokenizer takes the padding into account:
<tokenizerslangcontent>
<python>
<literalinclude>
{"path": "../../bindings/python/tests/documentation/test_quicktour.py",
"language": "python",
"start-after": "START print_attention_mask",
"end-before": "END print_attention_mask",
"dedent": 8}
</literalinclude>
</python>
<rust>
<literalinclude>
{"path": "../../tokenizers/tests/documentation.rs",
"language": "rust",
"start-after": "START quicktour_print_attention_mask",
"end-before": "END quicktour_print_attention_mask",
"dedent": 4}
</literalinclude>
</rust>
<node>
<literalinclude>
{"path": "../../bindings/node/examples/documentation/quicktour.test.ts",
"language": "js",
"start-after": "START print_attention_mask",
"end-before": "END print_attention_mask",
"dedent": 8}
</literalinclude>
</node>
</tokenizerslangcontent>
## Pretrained
<tokenizerslangcontent>
<python>
### Using a pretrained tokenizer
You can load any tokenizer from the Hugging Face Hub as long as a
`tokenizer.json` file is available in the repository.
```python
from tokenizers import Tokenizer
tokenizer = Tokenizer.from_pretrained("bert-base-uncased")
```
### Importing a pretrained tokenizer from legacy vocabulary files
You can also import a pretrained tokenizer directly, as long as you
have its vocabulary file. For instance, here is how to import the
classic pretrained BERT tokenizer:
```python
from tokenizers import BertWordPieceTokenizer
tokenizer = BertWordPieceTokenizer("bert-base-uncased-vocab.txt", lowercase=True)
```
as long as you have downloaded the file `bert-base-uncased-vocab.txt` with
```bash
wget https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-uncased-vocab.txt
```
</python>
</tokenizerslangcontent> | 0 |
hf_public_repos/tokenizers/docs | hf_public_repos/tokenizers/docs/source-doc-builder/components.mdx | # Components
When building a Tokenizer, you can attach various types of components to
this Tokenizer in order to customize its behavior. This page lists most
provided components.
## Normalizers
A `Normalizer` is in charge of pre-processing the input string in order
to normalize it as relevant for a given use case. Some common examples
of normalization are the Unicode normalization algorithms (NFD, NFKD,
NFC & NFKC), lowercasing, etc. The specificity of `tokenizers` is that
we keep track of the alignment while normalizing. This is essential to
allow mapping from the generated tokens back to the input text.
The `Normalizer` is optional.
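As a quick sketch of how a normalizer is used on its own (standalone, via `normalize_str`; the composition and input string are just an example):
```python
from tokenizers import normalizers
from tokenizers.normalizers import NFD, Lowercase, StripAccents

normalizer = normalizers.Sequence([NFD(), StripAccents(), Lowercase()])
print(normalizer.normalize_str("Héllò hôw are ü?"))
# "hello how are u?"
```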
<tokenizerslangcontent>
<python>
| Name | Description | Example |
| :--- | :--- | :--- |
| NFD | NFD unicode normalization | |
| NFKD | NFKD unicode normalization | |
| NFC | NFC unicode normalization | |
| NFKC | NFKC unicode normalization | |
| Lowercase | Replaces all uppercase characters with lowercase | Input: `HELLO ὈΔΥΣΣΕΎΣ` <br> Output: `hello ὀδυσσεύς` |
| Strip | Removes all whitespace characters on the specified sides (left, right or both) of the input | Input: `" hi "` <br> Output: `"hi"` |
| StripAccents | Removes all accent symbols in unicode (to be used with NFD for consistency) | Input: `é` <br> Output: `e` |
| Replace | Replaces a custom string or regexp with the given content | `Replace("a", "e")` will behave like this: <br> Input: `"banana"` <br> Output: `"benene"` |
| BertNormalizer | Provides an implementation of the Normalizer used in the original BERT. Options that can be set are: <ul> <li>clean_text</li> <li>handle_chinese_chars</li> <li>strip_accents</li> <li>lowercase</li> </ul> | |
| Sequence | Composes multiple normalizers that will run in the provided order | `Sequence([NFKC(), Lowercase()])` |
</python>
<rust>
| Name | Description | Example |
| :--- | :--- | :--- |
| NFD | NFD unicode normalization | |
| NFKD | NFKD unicode normalization | |
| NFC | NFC unicode normalization | |
| NFKC | NFKC unicode normalization | |
| Lowercase | Replaces all uppercase characters with lowercase | Input: `HELLO ὈΔΥΣΣΕΎΣ` <br> Output: `hello ὀδυσσεύς` |
| Strip | Removes all whitespace characters on the specified sides (left, right or both) of the input | Input: `" hi "` <br> Output: `"hi"` |
| StripAccents | Removes all accent symbols in unicode (to be used with NFD for consistency) | Input: `é` <br> Output: `e` |
| Replace | Replaces a custom string or regexp with the given content | `Replace("a", "e")` will behave like this: <br> Input: `"banana"` <br> Output: `"benene"` |
| BertNormalizer | Provides an implementation of the Normalizer used in the original BERT. Options that can be set are: <ul> <li>clean_text</li> <li>handle_chinese_chars</li> <li>strip_accents</li> <li>lowercase</li> </ul> | |
| Sequence | Composes multiple normalizers that will run in the provided order | `Sequence::new(vec![NFKC, Lowercase])` |
</rust>
<node>
| Name | Description | Example |
| :--- | :--- | :--- |
| NFD | NFD unicode normalization | |
| NFKD | NFKD unicode normalization | |
| NFC | NFC unicode normalization | |
| NFKC | NFKC unicode normalization | |
| Lowercase | Replaces all uppercase characters with lowercase | Input: `HELLO ὈΔΥΣΣΕΎΣ` <br> Output: `hello ὀδυσσεύς` |
| Strip | Removes all whitespace characters on the specified sides (left, right or both) of the input | Input: `" hi "` <br> Output: `"hi"` |
| StripAccents | Removes all accent symbols in unicode (to be used with NFD for consistency) | Input: `é` <br> Output: `e` |
| Replace | Replaces a custom string or regexp with the given content | `Replace("a", "e")` will behave like this: <br> Input: `"banana"` <br> Output: `"benene"` |
| BertNormalizer | Provides an implementation of the Normalizer used in the original BERT. Options that can be set are: <ul> <li>cleanText</li> <li>handleChineseChars</li> <li>stripAccents</li> <li>lowercase</li> </ul> | |
| Sequence | Composes multiple normalizers that will run in the provided order | |
</node>
</tokenizerslangcontent>
## Pre-tokenizers
The `PreTokenizer` takes care of splitting the input according to a set
of rules. This pre-processing lets you ensure that the underlying
`Model` does not build tokens across multiple "splits". For example if
you don't want to have whitespaces inside a token, then you can have a
`PreTokenizer` that splits on these whitespaces.
You can easily combine multiple `PreTokenizer` together using a
`Sequence` (see below). The `PreTokenizer` is also allowed to modify the
string, just like a `Normalizer` does. This is necessary to allow some
complicated algorithms that require splitting before normalizing (e.g.
the ByteLevel).
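As a short sketch of the difference between two of the entries in the tables below, using the standalone `pre_tokenize_str` helper:
```python
from tokenizers.pre_tokenizers import Whitespace, WhitespaceSplit

print(Whitespace().pre_tokenize_str("Hello there!"))
# [('Hello', (0, 5)), ('there', (6, 11)), ('!', (11, 12))]
print(WhitespaceSplit().pre_tokenize_str("Hello there!"))
# [('Hello', (0, 5)), ('there!', (6, 12))]
```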
<tokenizerslangcontent>
<python>
| Name | Description | Example |
| :--- | :--- | :--- |
| ByteLevel | Splits on whitespaces while remapping all the bytes to a set of visible characters. This technique has been introduced by OpenAI with GPT-2 and has some more or less nice properties: <ul> <li>Since it maps on bytes, a tokenizer using this only requires **256** characters as initial alphabet (the number of values a byte can have), as opposed to the 130,000+ Unicode characters.</li> <li>A consequence of the previous point is that it is absolutely unnecessary to have an unknown token using this since we can represent anything with 256 tokens (Youhou!! 🎉🎉)</li> <li>For non-ASCII characters, it gets completely unreadable, but it works nonetheless!</li> </ul> | Input: `"Hello my friend, how are you?"` <br> Output: `"Hello", "Ġmy", "Ġfriend", ",", "Ġhow", "Ġare", "Ġyou", "?"` |
| Whitespace | Splits on word boundaries (using the following regular expression: `\w+|[^\w\s]+`) | Input: `"Hello there!"` <br> Output: `"Hello", "there", "!"` |
| WhitespaceSplit | Splits on any whitespace character | Input: `"Hello there!"` <br> Output: `"Hello", "there!"` |
| Punctuation | Will isolate all punctuation characters | Input: `"Hello?"` <br> Output: `"Hello", "?"` |
| Metaspace | Splits on whitespaces and replaces them with a special char “▁” (U+2581) | Input: `"Hello there"` <br> Output: `"Hello", "▁there"` |
| CharDelimiterSplit | Splits on a given character | Example with `x`: <br> Input: `"Helloxthere"` <br> Output: `"Hello", "there"` |
| Digits | Splits the numbers from any other characters. | Input: `"Hello123there"` <br> Output: ``"Hello", "123", "there"`` |
| Split | Versatile pre-tokenizer that splits on provided pattern and according to provided behavior. The pattern can be inverted if necessary. <ul> <li>pattern should be either a custom string or regexp.</li> <li>behavior should be one of: <ul><li>removed</li><li>isolated</li><li>merged_with_previous</li><li>merged_with_next</li><li>contiguous</li></ul></li> <li>invert should be a boolean flag.</li> </ul> | Example with pattern = ` `, behavior = `"isolated"`, invert = `False`: <br> Input: `"Hello, how are you?"` <br> Output: `"Hello,", " ", "how", " ", "are", " ", "you?"` |
| Sequence | Lets you compose multiple `PreTokenizer` that will be run in the given order | `Sequence([Punctuation(), WhitespaceSplit()])` |
</python>
<rust>
| Name | Description | Example |
| :--- | :--- | :--- |
| ByteLevel | Splits on whitespaces while remapping all the bytes to a set of visible characters. This technique has been introduced by OpenAI with GPT-2 and has some more or less nice properties: <ul> <li>Since it maps on bytes, a tokenizer using this only requires **256** characters as initial alphabet (the number of values a byte can have), as opposed to the 130,000+ Unicode characters.</li> <li>A consequence of the previous point is that it is absolutely unnecessary to have an unknown token using this since we can represent anything with 256 tokens (Youhou!! 🎉🎉)</li> <li>For non-ASCII characters, it gets completely unreadable, but it works nonetheless!</li> </ul> | Input: `"Hello my friend, how are you?"` <br> Output: `"Hello", "Ġmy", "Ġfriend", ",", "Ġhow", "Ġare", "Ġyou", "?"` |
| Whitespace | Splits on word boundaries (using the following regular expression: `\w+|[^\w\s]+`) | Input: `"Hello there!"` <br> Output: `"Hello", "there", "!"` |
| WhitespaceSplit | Splits on any whitespace character | Input: `"Hello there!"` <br> Output: `"Hello", "there!"` |
| Punctuation | Will isolate all punctuation characters | Input: `"Hello?"` <br> Output: `"Hello", "?"` |
| Metaspace | Splits on whitespaces and replaces them with a special char “▁” (U+2581) | Input: `"Hello there"` <br> Output: `"Hello", "▁there"` |
| CharDelimiterSplit | Splits on a given character | Example with `x`: <br> Input: `"Helloxthere"` <br> Output: `"Hello", "there"` |
| Digits | Splits the numbers from any other characters. | Input: `"Hello123there"` <br> Output: ``"Hello", "123", "there"`` |
| Split | Versatile pre-tokenizer that splits on provided pattern and according to provided behavior. The pattern can be inverted if necessary. <ul> <li>pattern should be either a custom string or regexp.</li> <li>behavior should be one of: <ul><li>Removed</li><li>Isolated</li><li>MergedWithPrevious</li><li>MergedWithNext</li><li>Contiguous</li></ul></li> <li>invert should be a boolean flag.</li> </ul> | Example with pattern = ` `, behavior = `"isolated"`, invert = `False`: <br> Input: `"Hello, how are you?"` <br> Output: `"Hello,", " ", "how", " ", "are", " ", "you?"` |
| Sequence | Lets you compose multiple `PreTokenizer` that will be run in the given order | `Sequence::new(vec![Punctuation, WhitespaceSplit])` |
</rust>
<node>
| Name | Description | Example |
| :--- | :--- | :--- |
| ByteLevel | Splits on whitespaces while remapping all the bytes to a set of visible characters. This technique has been introduced by OpenAI with GPT-2 and has some more or less nice properties: <ul> <li>Since it maps on bytes, a tokenizer using this only requires **256** characters as initial alphabet (the number of values a byte can have), as opposed to the 130,000+ Unicode characters.</li> <li>A consequence of the previous point is that it is absolutely unnecessary to have an unknown token using this since we can represent anything with 256 tokens (Youhou!! 🎉🎉)</li> <li>For non-ASCII characters, it gets completely unreadable, but it works nonetheless!</li> </ul> | Input: `"Hello my friend, how are you?"` <br> Output: `"Hello", "Ġmy", "Ġfriend", ",", "Ġhow", "Ġare", "Ġyou", "?"` |
| Whitespace | Splits on word boundaries (using the following regular expression: `\w+|[^\w\s]+`) | Input: `"Hello there!"` <br> Output: `"Hello", "there", "!"` |
| WhitespaceSplit | Splits on any whitespace character | Input: `"Hello there!"` <br> Output: `"Hello", "there!"` |
| Punctuation | Will isolate all punctuation characters | Input: `"Hello?"` <br> Output: `"Hello", "?"` |
| Metaspace | Splits on whitespaces and replaces them with a special char “▁” (U+2581) | Input: `"Hello there"` <br> Output: `"Hello", "▁there"` |
| CharDelimiterSplit | Splits on a given character | Example with `x`: <br> Input: `"Helloxthere"` <br> Output: `"Hello", "there"` |
| Digits | Splits the numbers from any other characters. | Input: `"Hello123there"` <br> Output: ``"Hello", "123", "there"`` |
| Split | Versatile pre-tokenizer that splits on provided pattern and according to provided behavior. The pattern can be inverted if necessary. <ul> <li>pattern should be either a custom string or regexp.</li> <li>behavior should be one of: <ul><li>removed</li><li>isolated</li><li>mergedWithPrevious</li><li>mergedWithNext</li><li>contiguous</li></ul></li> <li>invert should be a boolean flag.</li> </ul> | Example with pattern = ` `, behavior = `"isolated"`, invert = `False`: <br> Input: `"Hello, how are you?"` <br> Output: `"Hello,", " ", "how", " ", "are", " ", "you?"` |
| Sequence | Lets you compose multiple `PreTokenizer` that will be run in the given order | |
</node>
</tokenizerslangcontent>
## Models
Models are the core algorithms used to actually tokenize, and therefore,
they are the only mandatory component of a Tokenizer.
| Name | Description |
| :--- | :--- |
| WordLevel | This is the “classic” tokenization algorithm. It lets you simply map words to IDs without anything fancy. This has the advantage of being really simple to use and understand, but it requires extremely large vocabularies for good coverage. Using this `Model` requires the use of a `PreTokenizer`. No choice will be made by this model directly; it simply maps input tokens to IDs. |
| BPE | One of the most popular subword tokenization algorithms. The Byte-Pair-Encoding works by starting with characters and merging those that are most frequently seen together, thus creating new tokens. It then works iteratively to build new tokens out of the most frequent pairs it sees in a corpus. BPE is able to build words it has never seen by using multiple subword tokens, and thus requires smaller vocabularies, with fewer chances of having “unk” (unknown) tokens. |
| WordPiece | This is a subword tokenization algorithm quite similar to BPE, used mainly by Google in models like BERT. It uses a greedy algorithm that tries to build long words first, splitting into multiple tokens when entire words don't exist in the vocabulary. This is different from BPE, which starts from characters and builds the biggest tokens it can. It uses the famous `##` prefix to identify tokens that are part of a word (i.e. not starting a word). |
| Unigram | Unigram is also a subword tokenization algorithm, and works by trying to identify the best set of subword tokens to maximize the probability for a given sentence. This is different from BPE in that it is not deterministic based on a set of rules applied sequentially. Instead, Unigram will be able to compute multiple ways of tokenizing, while choosing the most probable one. |
## Post-Processors
After the whole pipeline, we sometimes want to insert some special
tokens before feeding a tokenized string into a model, like "[CLS] My
horse is amazing [SEP]". The `PostProcessor` is the component doing
just that.
| Name | Description | Example |
| :--- | :--- | :--- |
| TemplateProcessing | Lets you easily template the post-processing, adding special tokens, and specifying the `type_id` for each sequence/special token. The template is given two strings representing the single sequence and the pair of sequences, as well as a set of special tokens to use. | Example, when specifying a template with these values:<br> <ul> <li> single: `"[CLS] $A [SEP]"` </li> <li> pair: `"[CLS] $A [SEP] $B [SEP]"` </li> <li> special tokens: <ul> <li>`"[CLS]"`</li> <li>`"[SEP]"`</li> </ul> </li> </ul> <br> Input: `("I like this", "but not this")` <br> Output: `"[CLS] I like this [SEP] but not this [SEP]"` |
## Decoders
The Decoder knows how to go from the IDs used by the Tokenizer, back to
a readable piece of text. Some `Normalizer` and `PreTokenizer` use
special characters or identifiers that need to be reverted for example.
| Name | Description |
| :--- | :--- |
| ByteLevel | Reverts the ByteLevel PreTokenizer. This PreTokenizer encodes at the byte-level, using a set of visible Unicode characters to represent each byte, so we need a Decoder to revert this process and get something readable again. |
| Metaspace | Reverts the Metaspace PreTokenizer. This PreTokenizer uses a special identifier `▁` to identify whitespaces, and so this Decoder helps with decoding these. |
| WordPiece | Reverts the WordPiece Model. This model uses a special identifier `##` for continuing subwords, and so this Decoder helps with decoding these. |
| 0 |
hf_public_repos/tokenizers/docs | hf_public_repos/tokenizers/docs/source-doc-builder/installation.mdx | # Installation
<tokenizerslangcontent>
<python>
🤗 Tokenizers is tested on Python 3.5+.
You should install 🤗 Tokenizers in a [virtual environment](https://docs.python.org/3/library/venv.html). If you're
unfamiliar with Python virtual environments, check out the [user
guide](https://packaging.python.org/guides/installing-using-pip-and-virtual-environments/).
Create a virtual environment with the version of Python you're going to
use and activate it.
## Installation with pip
🤗 Tokenizers can be installed using pip as follows:
```bash
pip install tokenizers
```
## Installation from sources
To use this method, you need to have the Rust language installed. You
can follow [the official
guide](https://www.rust-lang.org/learn/get-started) for more
information.
If you are using a Unix-based OS, the installation should be as simple
as running:
```bash
curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh
```
Or you can easily update it with the following command:
```bash
rustup update
```
Once Rust is installed, we can start retrieving the sources for 🤗
Tokenizers:
```bash
git clone https://github.com/huggingface/tokenizers
```
Then we go into the python bindings folder:
```bash
cd tokenizers/bindings/python
```
At this point you should have your virtual environment already
activated. In order to compile 🤗 Tokenizers, you need to run:
```bash
pip install -e .
```
</python>
<rust>
## Crates.io
🤗 Tokenizers is available on [crates.io](https://crates.io/crates/tokenizers).
You just need to add it to your `Cargo.toml`:
```bash
cargo add tokenizers
```
</rust>
<node>
## Installation with npm
You can simply install 🤗 Tokenizers with npm using:
```bash
npm install tokenizers
```
</node>
</tokenizerslangcontent>
| 0 |
hf_public_repos/tokenizers/docs/source-doc-builder | hf_public_repos/tokenizers/docs/source-doc-builder/api/pre-tokenizers.mdx | # Pre-tokenizers
<tokenizerslangcontent>
<python>
## BertPreTokenizer
[[autodoc]] tokenizers.pre_tokenizers.BertPreTokenizer
## ByteLevel
[[autodoc]] tokenizers.pre_tokenizers.ByteLevel
## CharDelimiterSplit
[[autodoc]] tokenizers.pre_tokenizers.CharDelimiterSplit
## Digits
[[autodoc]] tokenizers.pre_tokenizers.Digits
## Metaspace
[[autodoc]] tokenizers.pre_tokenizers.Metaspace
## PreTokenizer
[[autodoc]] tokenizers.pre_tokenizers.PreTokenizer
## Punctuation
[[autodoc]] tokenizers.pre_tokenizers.Punctuation
## Sequence
[[autodoc]] tokenizers.pre_tokenizers.Sequence
## Split
[[autodoc]] tokenizers.pre_tokenizers.Split
## UnicodeScripts
[[autodoc]] tokenizers.pre_tokenizers.UnicodeScripts
## Whitespace
[[autodoc]] tokenizers.pre_tokenizers.Whitespace
## WhitespaceSplit
[[autodoc]] tokenizers.pre_tokenizers.WhitespaceSplit
</python>
<rust>
The Rust API Reference is available directly on the [Docs.rs](https://docs.rs/tokenizers/latest/tokenizers/) website.
</rust>
<node>
The node API has not been documented yet.
</node>
</tokenizerslangcontent> | 0 |
hf_public_repos/tokenizers/docs/source-doc-builder | hf_public_repos/tokenizers/docs/source-doc-builder/api/input-sequences.mdx | # Input Sequences
<tokenizerslangcontent>
<python>
These types represent all the different kinds of sequence that can be used as input of a Tokenizer.
Globally, any sequence can be either a string or a list of strings, according to the operating
mode of the tokenizer: `raw text` vs `pre-tokenized`.
## TextInputSequence[[tokenizers.TextInputSequence]]
<code>tokenizers.TextInputSequence</code>
A `str` that represents an input sequence
## PreTokenizedInputSequence[[tokenizers.PreTokenizedInputSequence]]
<code>tokenizers.PreTokenizedInputSequence</code>
A pre-tokenized input sequence. Can be one of:
- A `List` of `str`
- A `Tuple` of `str`
alias of `Union[List[str], Tuple[str]]`.
## InputSequence[[tokenizers.InputSequence]]
<code>tokenizers.InputSequence</code>
Represents all the possible types of input sequences for encoding. Can be:
- When `is_pretokenized=False`: [TextInputSequence](#tokenizers.TextInputSequence)
- When `is_pretokenized=True`: [PreTokenizedInputSequence](#tokenizers.PreTokenizedInputSequence)
alias of `Union[str, List[str], Tuple[str]]`.
</python>
<rust>
The Rust API Reference is available directly on the [Docs.rs](https://docs.rs/tokenizers/latest/tokenizers/) website.
</rust>
<node>
The node API has not been documented yet.
</node>
</tokenizerslangcontent> | 0 |
hf_public_repos/tokenizers/docs/source-doc-builder | hf_public_repos/tokenizers/docs/source-doc-builder/api/normalizers.mdx | # Normalizers
<tokenizerslangcontent>
<python>
## BertNormalizer
[[autodoc]] tokenizers.normalizers.BertNormalizer
## Lowercase
[[autodoc]] tokenizers.normalizers.Lowercase
## NFC
[[autodoc]] tokenizers.normalizers.NFC
## NFD
[[autodoc]] tokenizers.normalizers.NFD
## NFKC
[[autodoc]] tokenizers.normalizers.NFKC
## NFKD
[[autodoc]] tokenizers.normalizers.NFKD
## Nmt
[[autodoc]] tokenizers.normalizers.Nmt
## Normalizer
[[autodoc]] tokenizers.normalizers.Normalizer
## Precompiled
[[autodoc]] tokenizers.normalizers.Precompiled
## Replace
[[autodoc]] tokenizers.normalizers.Replace
## Sequence
[[autodoc]] tokenizers.normalizers.Sequence
## Strip
[[autodoc]] tokenizers.normalizers.Strip
## StripAccents
[[autodoc]] tokenizers.normalizers.StripAccents
</python>
<rust>
The Rust API Reference is available directly on the [Docs.rs](https://docs.rs/tokenizers/latest/tokenizers/) website.
</rust>
<node>
The node API has not been documented yet.
</node>
</tokenizerslangcontent> | 0 |
hf_public_repos/tokenizers/docs/source-doc-builder | hf_public_repos/tokenizers/docs/source-doc-builder/api/encoding.mdx | # Encoding
<tokenizerslangcontent>
<python>
## Encoding
[[autodoc]] tokenizers.Encoding
- all
- attention_mask
- ids
- n_sequences
- offsets
- overflowing
- sequence_ids
- special_tokens_mask
- tokens
- type_ids
- word_ids
- words
</python>
<rust>
The Rust API Reference is available directly on the [Docs.rs](https://docs.rs/tokenizers/latest/tokenizers/) website.
</rust>
<node>
The node API has not been documented yet.
</node>
</tokenizerslangcontent> | 0 |
hf_public_repos/tokenizers/docs/source-doc-builder | hf_public_repos/tokenizers/docs/source-doc-builder/api/added-tokens.mdx | # Added Tokens
<tokenizerslangcontent>
<python>
## AddedToken
[[autodoc]] tokenizers.AddedToken
- content
- lstrip
- normalized
- rstrip
- single_word
</python>
<rust>
The Rust API Reference is available directly on the [Docs.rs](https://docs.rs/tokenizers/latest/tokenizers/) website.
</rust>
<node>
The node API has not been documented yet.
</node>
</tokenizerslangcontent> | 0 |
hf_public_repos/tokenizers/docs/source-doc-builder | hf_public_repos/tokenizers/docs/source-doc-builder/api/models.mdx | # Models
<tokenizerslangcontent>
<python>
## BPE
[[autodoc]] tokenizers.models.BPE
## Model
[[autodoc]] tokenizers.models.Model
## Unigram
[[autodoc]] tokenizers.models.Unigram
## WordLevel
[[autodoc]] tokenizers.models.WordLevel
## WordPiece
[[autodoc]] tokenizers.models.WordPiece
</python>
<rust>
The Rust API Reference is available directly on the [Docs.rs](https://docs.rs/tokenizers/latest/tokenizers/) website.
</rust>
<node>
The node API has not been documented yet.
</node>
</tokenizerslangcontent> | 0 |
hf_public_repos/tokenizers/docs/source-doc-builder | hf_public_repos/tokenizers/docs/source-doc-builder/api/visualizer.mdx | # Visualizer
<tokenizerslangcontent>
<python>
## Annotation
[[autodoc]] tokenizers.tools.Annotation
## EncodingVisualizer
[[autodoc]] tokenizers.tools.EncodingVisualizer
- __call__
</python>
<rust>
The Rust API Reference is available directly on the [Docs.rs](https://docs.rs/tokenizers/latest/tokenizers/) website.
</rust>
<node>
The node API has not been documented yet.
</node>
</tokenizerslangcontent> | 0 |
hf_public_repos/tokenizers/docs/source-doc-builder | hf_public_repos/tokenizers/docs/source-doc-builder/api/encode-inputs.mdx | # Encode Inputs
<tokenizerslangcontent>
<python>
These types represent all the different kinds of input that a [`~tokenizers.Tokenizer`] accepts
when using [`~tokenizers.Tokenizer.encode_batch`].
## TextEncodeInput[[tokenizers.TextEncodeInput]]
<code>tokenizers.TextEncodeInput</code>
Represents a textual input for encoding. Can be either:
- A single sequence: [TextInputSequence](/docs/tokenizers/api/input-sequences#tokenizers.TextInputSequence)
- A pair of sequences:
- A Tuple of [TextInputSequence](/docs/tokenizers/api/input-sequences#tokenizers.TextInputSequence)
- Or a List of [TextInputSequence](/docs/tokenizers/api/input-sequences#tokenizers.TextInputSequence) of size 2
alias of `Union[str, Tuple[str, str], List[str]]`.
## PreTokenizedEncodeInput[[tokenizers.PreTokenizedEncodeInput]]
<code>tokenizers.PreTokenizedEncodeInput</code>
Represents a pre-tokenized input for encoding. Can be either:
- A single sequence: [PreTokenizedInputSequence](/docs/tokenizers/api/input-sequences#tokenizers.PreTokenizedInputSequence)
- A pair of sequences:
- A Tuple of [PreTokenizedInputSequence](/docs/tokenizers/api/input-sequences#tokenizers.PreTokenizedInputSequence)
- Or a List of [PreTokenizedInputSequence](/docs/tokenizers/api/input-sequences#tokenizers.PreTokenizedInputSequence) of size 2
alias of `Union[List[str], Tuple[str], Tuple[Union[List[str], Tuple[str]], Union[List[str], Tuple[str]]], List[Union[List[str], Tuple[str]]]]`.
## EncodeInput[[tokenizers.EncodeInput]]
<code>tokenizers.EncodeInput</code>
Represents all the possible types of input for encoding. Can be:
- When `is_pretokenized=False`: [TextEncodeInput](#tokenizers.TextEncodeInput)
- When `is_pretokenized=True`: [PreTokenizedEncodeInput](#tokenizers.PreTokenizedEncodeInput)
alias of `Union[str, Tuple[str, str], List[str], Tuple[str], Tuple[Union[List[str], Tuple[str]], Union[List[str], Tuple[str]]], List[Union[List[str], Tuple[str]]]]`.
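As an illustration, here is a minimal sketch of how these types map onto `Tokenizer.encode_batch` (the tokenizer file path below is only a placeholder):
```python
from tokenizers import Tokenizer

# Placeholder path: any saved tokenizer JSON file works here
tokenizer = Tokenizer.from_file("tokenizer.json")

# TextEncodeInput: single sequences and pairs of raw strings can be mixed in a batch
tokenizer.encode_batch(["A single sequence", ("First of a pair", "Second of a pair")])

# PreTokenizedEncodeInput: the same shapes, built from pre-tokenized words
tokenizer.encode_batch([["A", "single", "sequence"]], is_pretokenized=True)
```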
</python>
<rust>
The Rust API Reference is available directly on the [Docs.rs](https://docs.rs/tokenizers/latest/tokenizers/) website.
</rust>
<node>
The node API has not been documented yet.
</node>
</tokenizerslangcontent> | 0 |
hf_public_repos/tokenizers/docs/source-doc-builder | hf_public_repos/tokenizers/docs/source-doc-builder/api/post-processors.mdx | # Post-processors
<tokenizerslangcontent>
<python>
## BertProcessing
[[autodoc]] tokenizers.processors.BertProcessing
## ByteLevel
[[autodoc]] tokenizers.processors.ByteLevel
## RobertaProcessing
[[autodoc]] tokenizers.processors.RobertaProcessing
## TemplateProcessing
[[autodoc]] tokenizers.processors.TemplateProcessing
</python>
<rust>
The Rust API Reference is available directly on the [Docs.rs](https://docs.rs/tokenizers/latest/tokenizers/) website.
</rust>
<node>
The node API has not been documented yet.
</node>
</tokenizerslangcontent> | 0 |
hf_public_repos/tokenizers/docs/source-doc-builder | hf_public_repos/tokenizers/docs/source-doc-builder/api/decoders.mdx | # Decoders
<tokenizerslangcontent>
<python>
## BPEDecoder
[[autodoc]] tokenizers.decoders.BPEDecoder
## ByteLevel
[[autodoc]] tokenizers.decoders.ByteLevel
## CTC
[[autodoc]] tokenizers.decoders.CTC
## Metaspace
[[autodoc]] tokenizers.decoders.Metaspace
## WordPiece
[[autodoc]] tokenizers.decoders.WordPiece
</python>
<rust>
The Rust API Reference is available directly on the [Docs.rs](https://docs.rs/tokenizers/latest/tokenizers/) website.
</rust>
<node>
The node API has not been documented yet.
</node>
</tokenizerslangcontent> | 0 |
hf_public_repos/tokenizers/docs/source-doc-builder | hf_public_repos/tokenizers/docs/source-doc-builder/api/tokenizer.mdx | # Tokenizer
<tokenizerslangcontent>
<python>
## Tokenizer
[[autodoc]] tokenizers.Tokenizer
- all
- decoder
- model
- normalizer
- padding
- post_processor
- pre_tokenizer
- truncation
</python>
<rust>
The Rust API Reference is available directly on the [Docs.rs](https://docs.rs/tokenizers/latest/tokenizers/) website.
</rust>
<node>
The node API has not been documented yet.
</node>
</tokenizerslangcontent> | 0 |
hf_public_repos/tokenizers/docs/source-doc-builder | hf_public_repos/tokenizers/docs/source-doc-builder/api/trainers.mdx | # Trainers
<tokenizerslangcontent>
<python>
## BpeTrainer
[[autodoc]] tokenizers.trainers.BpeTrainer
## UnigramTrainer
[[autodoc]] tokenizers.trainers.UnigramTrainer
## WordLevelTrainer
[[autodoc]] tokenizers.trainers.WordLevelTrainer
## WordPieceTrainer
[[autodoc]] tokenizers.trainers.WordPieceTrainer
</python>
<rust>
The Rust API Reference is available directly on the [Docs.rs](https://docs.rs/tokenizers/latest/tokenizers/) website.
</rust>
<node>
The node API has not been documented yet.
</node>
</tokenizerslangcontent> | 0 |
hf_public_repos/tokenizers/bindings | hf_public_repos/tokenizers/bindings/python/Cargo.toml | [package]
name = "tokenizers-python"
version = "0.15.1-dev.0"
authors = ["Anthony MOI <m.anthony.moi@gmail.com>"]
edition = "2021"
[lib]
name = "tokenizers"
crate-type = ["cdylib"]
[dependencies]
rayon = "1.8"
serde = { version = "1.0", features = [ "rc", "derive" ]}
serde_json = "1.0"
libc = "0.2"
env_logger = "0.10.0"
pyo3 = { version = "0.19" }
numpy = "0.19.0"
ndarray = "0.15"
onig = { version = "6.4", default-features = false }
itertools = "0.11"
[dependencies.tokenizers]
version = "0.15.1-dev.0"
path = "../../tokenizers"
[dev-dependencies]
tempfile = "3.8"
pyo3 = { version = "0.19", features = ["auto-initialize"] }
[features]
default = ["pyo3/extension-module"]
| 0 |
hf_public_repos/tokenizers/bindings | hf_public_repos/tokenizers/bindings/python/conftest.py | import pytest
def pytest_addoption(parser):
parser.addoption("--runslow", action="store_true", default=False, help="run slow tests")
def pytest_configure(config):
config.addinivalue_line("markers", "slow: mark test as slow to run")
def pytest_collection_modifyitems(config, items):
if config.getoption("--runslow"):
# --runslow given in cli: do not skip slow tests
return
skip_slow = pytest.mark.skip(reason="need --runslow option to run")
for item in items:
if "slow" in item.keywords:
item.add_marker(skip_slow)
| 0 |
hf_public_repos/tokenizers/bindings | hf_public_repos/tokenizers/bindings/python/rust-toolchain | stable
| 0 |
hf_public_repos/tokenizers/bindings | hf_public_repos/tokenizers/bindings/python/CHANGELOG.md | # Changelog
All notable changes to this project will be documented in this file.
The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
## [0.13.2]
- [#1096] Python 3.11 support
## [0.13.1]
- [#1072] Fixing Roberta type ids.
## [0.13.0]
- [#956] PyO3 version upgrade
- [#1055] M1 automated builds
- [#1008] `Decoder` is now a composable trait, but without being backward incompatible
- [#1047, #1051, #1052] `Processor` is now a composable trait, but without being backward incompatible
Both trait changes warrant a "major" number since, despite best efforts to not break backward
compatibility, the code is different enough that we cannot be exactly sure.
## [0.12.1]
- [#938] **Reverted breaking change**. https://github.com/huggingface/transformers/issues/16520
## [0.12.0] YANKED
Bump minor version because of a breaking change.
- [#938] [REVERTED IN 0.12.1] **Breaking change**. Decoder trait is modified to be composable. This is only breaking if you are using decoders on their own. tokenizers should be error free.
- [#939] Making the regex in `ByteLevel` pre_tokenizer optional (necessary for BigScience)
- [#952] Fixed the vocabulary size of UnigramTrainer output (to respect added tokens)
- [#954] Fixed not being able to save vocabularies with holes in vocab (ConvBert). Yell warnings instead, but stop panicking.
- [#962] Fix tests for python 3.10
- [#961] Added link for Ruby port of `tokenizers`
## [0.11.6]
- [#919] Fixing single_word AddedToken. (regression from 0.11.2)
- [#916] Deserializing faster `added_tokens` by loading them in batch.
## [0.11.5]
- [#895] Build `python 3.10` wheels.
## [0.11.4]
- [#884] Fixing bad deserialization following inclusion of a default for Punctuation
## [0.11.3]
- [#882] Fixing Punctuation deserialize without argument.
- [#868] Fixing missing direction in TruncationParams
- [#860] Adding TruncationSide to TruncationParams
## [0.11.0]
### Fixed
- [#585] Conda version should now work on old CentOS
- [#844] Fixing interaction between `is_pretokenized` and `trim_offsets`.
- [#851] Doc links
### Added
- [#657]: Add SplitDelimiterBehavior customization to Punctuation constructor
- [#845]: Documentation for `Decoders`.
### Changed
- [#850]: Added a feature gate to enable disabling `http` features
- [#718]: Fix `WordLevel` tokenizer determinism during training
- [#762]: Add a way to specify the unknown token in `SentencePieceUnigramTokenizer`
- [#770]: Improved documentation for `UnigramTrainer`
- [#780]: Add `Tokenizer.from_pretrained` to load tokenizers from the Hugging Face Hub
- [#793]: Saving a pretty JSON file by default when saving a tokenizer
## [0.10.3]
### Fixed
- [#686]: Fix SPM conversion process for whitespace deduplication
- [#707]: Fix stripping strings containing Unicode characters
### Added
- [#693]: Add a CTC Decoder for Wave2Vec models
### Removed
- [#714]: Removed support for Python 3.5
## [0.10.2]
### Fixed
- [#652]: Fix offsets for `Precompiled` corner case
- [#656]: Fix BPE `continuing_subword_prefix`
- [#674]: Fix `Metaspace` serialization problems
## [0.10.1]
### Fixed
- [#616]: Fix SentencePiece tokenizers conversion
- [#617]: Fix offsets produced by Precompiled Normalizer (used by tokenizers converted from SPM)
- [#618]: Fix Normalizer.normalize with `PyNormalizedStringRefMut`
- [#620]: Fix serialization/deserialization for overlapping models
- [#621]: Fix `ByteLevel` instantiation from a previously saved state (using `__getstate__()`)
## [0.10.0]
### Added
- [#508]: Add a Visualizer for notebooks to help understand how the tokenizers work
- [#519]: Add a `WordLevelTrainer` used to train a `WordLevel` model
- [#533]: Add support for conda builds
- [#542]: Add Split pre-tokenizer to easily split using a pattern
- [#544]: Ability to train from memory. This also improves the integration with `datasets`
- [#590]: Add getters/setters for components on BaseTokenizer
- [#574]: Add `fuse_unk` option to SentencePieceBPETokenizer
### Changed
- [#509]: Automatically stubbing the `.pyi` files
- [#519]: Each `Model` can return its associated `Trainer` with `get_trainer()`
- [#530]: The various attributes on each component can be get/set (ie.
`tokenizer.model.dropout = 0.1`)
- [#538]: The API Reference has been improved and is now up-to-date.
### Fixed
- [#519]: During training, the `Model` is now trained in-place. This fixes several bugs that were
forcing to reload the `Model` after a training.
- [#539]: Fix `BaseTokenizer` enable_truncation docstring
## [0.9.4]
### Fixed
- [#492]: Fix `from_file` on `BertWordPieceTokenizer`
- [#498]: Fix the link to download `sentencepiece_model_pb2.py`
- [#500]: Fix a typo in the docs quicktour
### Changed
- [#506]: Improve Encoding mappings for pairs of sequence
## [0.9.3]
### Fixed
- [#470]: Fix hanging error when training with custom component
- [#476]: TemplateProcessing serialization is now deterministic
- [#481]: Fix SentencePieceBPETokenizer.from_files
### Added
- [#477]: UnicodeScripts PreTokenizer to avoid merges between various scripts
- [#480]: Unigram now accepts an `initial_alphabet` and handles `special_tokens` correctly
## [0.9.2]
### Fixed
- [#464]: Fix a problem with RobertaProcessing being deserialized as BertProcessing
## [0.9.1]
### Fixed
- [#459]: Fix a problem with deserialization
## [0.9.0]
### Fixed
- [#362]: Fix training deadlock with Python components.
- [#363]: Fix a crash when calling `.train` with some non-existent files
- [#355]: Remove a lot of possible crashes
- [#389]: Improve truncation (crash and consistency)
### Added
- [#379]: Add the ability to call `encode`/`encode_batch` with numpy arrays
- [#292]: Support for the Unigram algorithm
- [#378], [#394], [#416], [#417]: Many new Normalizer and PreTokenizer
- [#403]: Add `TemplateProcessing` `PostProcessor`.
- [#420]: Ability to fuse the "unk" token in BPE.
### Changed
- [#360]: Lots of improvements related to words/alignment tracking
- [#426]: Improvements on error messages thanks to PyO3 0.12
## [0.8.1]
### Fixed
- [#333]: Fix deserialization of `AddedToken`, where the content was not restored properly
### Changed
- [#329]: Improved warning and behavior when we detect a fork
- [#330]: BertNormalizer now keeps the same behavior than the original implementation when
`strip_accents` is not specified.
## [0.8.0]
### Highlights of this release
- We can now encode both pre-tokenized inputs and raw strings. This is especially useful when
processing datasets that are already pre-tokenized, like for NER (Named Entity Recognition), and helps
while applying labels to each word.
- Full tokenizer serialization. It is now easy to save a tokenizer to a single JSON file, to later
load it back with just one line of code. That's what sharing a Tokenizer means now: 1 line of code.
- With the serialization comes the compatibility with `Pickle`! The Tokenizer, all of its components,
Encodings, everything can be pickled!
- Training a tokenizer is now even faster (up to 5-10x) than before!
- Compatibility with `multiprocessing`, even when using the `fork` start method. Since this library
makes heavy use of the multithreading capacities of our computers to allow very fast tokenization,
this led to problems (deadlocks) when used with `multiprocessing`. This version now allows disabling
the parallelism, and will warn you if this is necessary.
- And a lot of other improvements, and fixes.
### Fixed
- [#286]: Fix various crash when training a BPE model
- [#309]: Fixed a few bugs related to additional vocabulary/tokens
### Added
- [#272]: Serialization of the `Tokenizer` and all the parts (`PreTokenizer`, `Normalizer`, ...).
This adds some methods to easily save/load an entire tokenizer (`from_str`, `from_file`).
- [#273]: `Tokenizer` and its parts are now pickable
- [#289]: Ability to pad to a multiple of a specified value. This is especially useful to ensure
activation of the Tensor Cores, while ensuring padding to a multiple of 8. Use with
`enable_padding(pad_to_multiple_of=8)` for example.
- [#298]: Ability to get the currently set truncation/padding params
- [#311]: Ability to enable/disable the parallelism using the `TOKENIZERS_PARALLELISM` environment
variable. This is especially useful when using `multiprocessing` capabilities, with the `fork`
start method, which happens to be the default on Linux systems. Without disabling the parallelism,
the process dead-locks while encoding. (Cf [#187] for more information; see the snippet after this list)
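A minimal sketch of disabling the parallelism through that environment variable (set it before any encoding happens in the parent process):
```python
import os

# Disable the Rust-side parallelism before any encoding happens,
# so that a later `fork` (e.g. via `multiprocessing`) cannot dead-lock.
os.environ["TOKENIZERS_PARALLELISM"] = "false"
```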
### Changed
- Improved errors generated during truncation: cases where the provided max length is too low are
now handled properly.
- [#249] `encode` and `encode_batch` now accept pre-tokenized inputs. When the input is pre-tokenized,
the argument `is_pretokenized=True` must be specified.
- [#276]: Improve BPE training speeds, by reading files sequentially, but parallelizing the
processing of each file
- [#280]: Use `onig` for byte-level pre-tokenization to remove all the differences with the original
implementation from GPT-2
- [#309]: Improved the management of the additional vocabulary. This introduces an option
`normalized`, controlling whether a token should be extracted from the normalized version of the
input text.
## [0.7.0]
### Changed
- Only one progress bar while reading files during training. This is better for use-cases with
a high number of files as it avoids having too many progress bars on screen. Also avoids reading the
size of each file before starting to actually read these files, as this process could take a really
long time.
- [#193]: `encode` and `encode_batch` now take a new optional argument, specifying whether we
should add the special tokens. This is activated by default.
- [#197]: `original_str` and `normalized_str` have been removed from the `Encoding` returned by
`encode` and `encode_batch`. This brings a reduction of 70% of the memory footprint.
- [#197]: The offsets provided on `Encoding` are now relative to the original string, and not the
normalized one anymore.
- The added token given to `add_special_tokens` or `add_tokens` on a `Tokenizer`, or while using
`train(special_tokens=...)` can now be instances of `AddedToken` to provide more control over these
tokens.
- [#136]: Updated Pyo3 version
- [#136]: Static methods `Model.from_files` and `Model.empty` are removed in favor of using
constructors.
- [#239]: `CharBPETokenizer` now corresponds to OpenAI GPT BPE implementation by default.
### Added
- [#188]: `ByteLevel` is also a `PostProcessor` now and handles trimming the offsets if activated.
This avoids the unintuitive inclusion of the whitespaces in the produced offsets, even if these
whitespaces are part of the actual token.
It has been added to `ByteLevelBPETokenizer` but it is off by default (`trim_offsets=False`).
- [#236]: `RobertaProcessing` also handles trimming the offsets.
- [#234]: New alignment mappings on the `Encoding`. Provide methods to easily convert between `char`
or `word` (input space) and `token` (output space).
- `post_process` can be called on the `Tokenizer`
- [#208]: Ability to retrieve the vocabulary from the `Tokenizer` with
`get_vocab(with_added_tokens: bool)`
- [#136] Models can now be instantiated through object constructors.
### Fixed
- [#193]: Fix some issues with the offsets being wrong with the `ByteLevel` BPE:
- when `add_prefix_space=True`
- [#156]: when a Unicode character gets split-up in multiple byte-level characters
- Fix a bug where offsets were wrong when there was any added tokens in the sequence being encoded.
- [#175]: Fix a bug that prevented the addition of more than a certain number of tokens (even if
not advised, but that's beside the point).
- [#205]: Trim the decoded string in `BPEDecoder` used by `CharBPETokenizer`
### How to migrate
- Add the `ByteLevel` `PostProcessor` to your byte-level BPE tokenizers if relevant. If you are
using `ByteLevelBPETokenizer`, this option is disabled by default (`trim_offsets=False`).
- `BertWordPieceTokenizer` option to `add_special_tokens` must now be given to `encode` or
`encode_batch`
- Access to the `original_str` on the `Encoding` has been removed. The original string is the input
of `encode` so it didn't make sense to keep it here.
- No need to call `original_str.offsets(offsets[N])` to convert offsets to the original string. They
are now relative to the original string by default.
- Access to the `normalized_str` on the `Encoding` has been removed. Can be retrieved by calling
`normalize(sequence)` on the `Tokenizer`
- Change `Model.from_files` and `Model.empty` to use constructors. The model constructor should take
the same arguments as the old methods (ie `BPE(vocab, merges)` or `BPE()`); see the sketch after this list.
- If you were using the `CharBPETokenizer` and want to keep the same behavior as before, set
`bert_normalizer=False` and `split_on_whitespace_only=True`.
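As a rough sketch of the `Model.from_files` change mentioned above (file paths are placeholders, and the constructor arguments are the ones described in this release):
```python
from tokenizers import models

# Before 0.7.0 (removed):
# bpe = models.BPE.from_files("vocab.json", "merges.txt")
# empty_bpe = models.BPE.empty()

# From 0.7.0 on, use the constructors directly with the same arguments:
bpe = models.BPE("vocab.json", "merges.txt")
empty_bpe = models.BPE()
```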
## [0.6.0]
### Changed
- [#165]: Big improvements in speed for BPE (Both training and tokenization)
### Fixed
- [#160]: Some default tokens were missing from `BertWordPieceTokenizer`
- [#156]: There was a bug in ByteLevel PreTokenizer that caused offsets to be wrong if a char got
split up in multiple bytes.
- [#174]: The `longest_first` truncation strategy had a bug
## [0.5.2]
- [#163]: Do not open all files directly while training
### Fixed
- We introduced a bug related to the saving of the WordPiece model in 0.5.1: The `vocab.txt` file
was named `vocab.json`. This is now fixed.
- The `WordLevel` model was also saving its vocabulary to the wrong format.
## [0.5.1]
### Changed
- `name` argument is now optional when saving a `Model`'s vocabulary. When the name is not
specified, the files get a more generic naming, like `vocab.json` or `merges.txt`.
## [0.5.0]
### Changed
- [#145]: `BertWordPieceTokenizer` now cleans up some tokenization artifacts while decoding
- [#149]: `ByteLevelBPETokenizer` now has `dropout`.
- `do_lowercase` has been changed to `lowercase` for consistency between the different tokenizers.
(Especially `ByteLevelBPETokenizer` and `CharBPETokenizer`)
- [#139]: Expose `__len__` on `Encoding`
- Improved padding performances.
### Added
- Added a new `Strip` normalizer
### Fixed
- [#145]: Decoding was buggy on `BertWordPieceTokenizer`.
- [#152]: Some documentation and examples were still using the old `BPETokenizer`
### How to migrate
- Use `lowercase` when initializing `ByteLevelBPETokenizer` or `CharBPETokenizer` instead of
`do_lowercase`.
## [0.4.2]
### Fixed
- [#137]: Fix a bug in the class `WordPieceTrainer` that prevented `BertWordPieceTokenizer` from
being trained.
## [0.4.1]
### Fixed
- [#134]: Fix a bug related to the punctuation in BertWordPieceTokenizer
## [0.4.0]
### Changed
- [#131]: Replaced all .new() class methods by a proper __new__ implementation
- Improved typings
### How to migrate
- Remove all `.new` on all class instantiations
## [0.3.0]
### Changed
- BPETokenizer has been renamed to CharBPETokenizer for clarity.
- Improve truncation/padding and the handling of overflowing tokens. Now when a sequence gets
truncated, we provide a list of overflowing `Encoding` that are ready to be processed by a language
model, just as the main `Encoding`.
- Provide mapping to the original string offsets using:
```
output = tokenizer.encode(...)
print(output.original_str.offsets(output.offsets[3]))
```
- [#99]: Exposed the vocabulary size on all tokenizers
### Added
- Added `CharDelimiterSplit`: a new `PreTokenizer` that allows splitting sequences on the given
delimiter (Works like `.split(delimiter)`)
- Added `WordLevel`: a new model that simply maps `tokens` to their `ids`.
### Fixed
- Fix a bug with IndexableString
- Fix a bug with truncation
### How to migrate
- Rename `BPETokenizer` to `CharBPETokenizer`
- `Encoding.overflowing` is now a List instead of a `Optional[Encoding]`
## [0.2.1]
### Fixed
- Fix a bug with the IDs associated with added tokens.
- Fix a bug that was causing crashes in Python 3.5
[#1096]: https://github.com/huggingface/tokenizers/pull/1096
[#1072]: https://github.com/huggingface/tokenizers/pull/1072
[#956]: https://github.com/huggingface/tokenizers/pull/956
[#1008]: https://github.com/huggingface/tokenizers/pull/1008
[#1009]: https://github.com/huggingface/tokenizers/pull/1009
[#1047]: https://github.com/huggingface/tokenizers/pull/1047
[#1055]: https://github.com/huggingface/tokenizers/pull/1055
[#1051]: https://github.com/huggingface/tokenizers/pull/1051
[#1052]: https://github.com/huggingface/tokenizers/pull/1052
[#938]: https://github.com/huggingface/tokenizers/pull/938
[#939]: https://github.com/huggingface/tokenizers/pull/939
[#952]: https://github.com/huggingface/tokenizers/pull/952
[#954]: https://github.com/huggingface/tokenizers/pull/954
[#962]: https://github.com/huggingface/tokenizers/pull/962
[#961]: https://github.com/huggingface/tokenizers/pull/961
[#960]: https://github.com/huggingface/tokenizers/pull/960
[#919]: https://github.com/huggingface/tokenizers/pull/919
[#916]: https://github.com/huggingface/tokenizers/pull/916
[#895]: https://github.com/huggingface/tokenizers/pull/895
[#884]: https://github.com/huggingface/tokenizers/pull/884
[#882]: https://github.com/huggingface/tokenizers/pull/882
[#868]: https://github.com/huggingface/tokenizers/pull/868
[#860]: https://github.com/huggingface/tokenizers/pull/860
[#850]: https://github.com/huggingface/tokenizers/pull/850
[#844]: https://github.com/huggingface/tokenizers/pull/844
[#845]: https://github.com/huggingface/tokenizers/pull/845
[#851]: https://github.com/huggingface/tokenizers/pull/851
[#585]: https://github.com/huggingface/tokenizers/pull/585
[#793]: https://github.com/huggingface/tokenizers/pull/793
[#780]: https://github.com/huggingface/tokenizers/pull/780
[#770]: https://github.com/huggingface/tokenizers/pull/770
[#762]: https://github.com/huggingface/tokenizers/pull/762
[#718]: https://github.com/huggingface/tokenizers/pull/718
[#714]: https://github.com/huggingface/tokenizers/pull/714
[#707]: https://github.com/huggingface/tokenizers/pull/707
[#693]: https://github.com/huggingface/tokenizers/pull/693
[#686]: https://github.com/huggingface/tokenizers/pull/686
[#674]: https://github.com/huggingface/tokenizers/pull/674
[#657]: https://github.com/huggingface/tokenizers/pull/657
[#656]: https://github.com/huggingface/tokenizers/pull/656
[#652]: https://github.com/huggingface/tokenizers/pull/652
[#621]: https://github.com/huggingface/tokenizers/pull/621
[#620]: https://github.com/huggingface/tokenizers/pull/620
[#618]: https://github.com/huggingface/tokenizers/pull/618
[#617]: https://github.com/huggingface/tokenizers/pull/617
[#616]: https://github.com/huggingface/tokenizers/pull/616
[#590]: https://github.com/huggingface/tokenizers/pull/590
[#574]: https://github.com/huggingface/tokenizers/pull/574
[#544]: https://github.com/huggingface/tokenizers/pull/544
[#542]: https://github.com/huggingface/tokenizers/pull/542
[#539]: https://github.com/huggingface/tokenizers/pull/539
[#538]: https://github.com/huggingface/tokenizers/pull/538
[#533]: https://github.com/huggingface/tokenizers/pull/533
[#530]: https://github.com/huggingface/tokenizers/pull/530
[#519]: https://github.com/huggingface/tokenizers/pull/519
[#509]: https://github.com/huggingface/tokenizers/pull/509
[#508]: https://github.com/huggingface/tokenizers/pull/508
[#506]: https://github.com/huggingface/tokenizers/pull/506
[#500]: https://github.com/huggingface/tokenizers/pull/500
[#498]: https://github.com/huggingface/tokenizers/pull/498
[#492]: https://github.com/huggingface/tokenizers/pull/492
[#481]: https://github.com/huggingface/tokenizers/pull/481
[#480]: https://github.com/huggingface/tokenizers/pull/480
[#477]: https://github.com/huggingface/tokenizers/pull/477
[#476]: https://github.com/huggingface/tokenizers/pull/476
[#470]: https://github.com/huggingface/tokenizers/pull/470
[#464]: https://github.com/huggingface/tokenizers/pull/464
[#459]: https://github.com/huggingface/tokenizers/pull/459
[#420]: https://github.com/huggingface/tokenizers/pull/420
[#417]: https://github.com/huggingface/tokenizers/pull/417
[#416]: https://github.com/huggingface/tokenizers/pull/416
[#403]: https://github.com/huggingface/tokenizers/pull/403
[#394]: https://github.com/huggingface/tokenizers/pull/394
[#389]: https://github.com/huggingface/tokenizers/pull/389
[#379]: https://github.com/huggingface/tokenizers/pull/379
[#378]: https://github.com/huggingface/tokenizers/pull/378
[#363]: https://github.com/huggingface/tokenizers/pull/363
[#362]: https://github.com/huggingface/tokenizers/pull/362
[#360]: https://github.com/huggingface/tokenizers/pull/360
[#355]: https://github.com/huggingface/tokenizers/pull/355
[#333]: https://github.com/huggingface/tokenizers/pull/333
[#330]: https://github.com/huggingface/tokenizers/pull/330
[#329]: https://github.com/huggingface/tokenizers/pull/329
[#311]: https://github.com/huggingface/tokenizers/pull/311
[#309]: https://github.com/huggingface/tokenizers/pull/309
[#292]: https://github.com/huggingface/tokenizers/pull/292
[#289]: https://github.com/huggingface/tokenizers/pull/289
[#286]: https://github.com/huggingface/tokenizers/pull/286
[#280]: https://github.com/huggingface/tokenizers/pull/280
[#276]: https://github.com/huggingface/tokenizers/pull/276
[#273]: https://github.com/huggingface/tokenizers/pull/273
[#272]: https://github.com/huggingface/tokenizers/pull/272
[#249]: https://github.com/huggingface/tokenizers/pull/249
[#239]: https://github.com/huggingface/tokenizers/pull/239
[#236]: https://github.com/huggingface/tokenizers/pull/236
[#234]: https://github.com/huggingface/tokenizers/pull/234
[#208]: https://github.com/huggingface/tokenizers/pull/208
[#205]: https://github.com/huggingface/tokenizers/issues/205
[#197]: https://github.com/huggingface/tokenizers/pull/197
[#193]: https://github.com/huggingface/tokenizers/pull/193
[#190]: https://github.com/huggingface/tokenizers/pull/190
[#188]: https://github.com/huggingface/tokenizers/pull/188
[#187]: https://github.com/huggingface/tokenizers/issues/187
[#175]: https://github.com/huggingface/tokenizers/issues/175
[#174]: https://github.com/huggingface/tokenizers/issues/174
[#165]: https://github.com/huggingface/tokenizers/pull/165
[#163]: https://github.com/huggingface/tokenizers/issues/163
[#160]: https://github.com/huggingface/tokenizers/issues/160
[#156]: https://github.com/huggingface/tokenizers/pull/156
[#152]: https://github.com/huggingface/tokenizers/issues/152
[#149]: https://github.com/huggingface/tokenizers/issues/149
[#145]: https://github.com/huggingface/tokenizers/issues/145
[#139]: https://github.com/huggingface/tokenizers/issues/139
[#137]: https://github.com/huggingface/tokenizers/issues/137
[#134]: https://github.com/huggingface/tokenizers/issues/134
[#131]: https://github.com/huggingface/tokenizers/issues/131
[#99]: https://github.com/huggingface/tokenizers/pull/99
| 0 |
hf_public_repos/tokenizers/bindings | hf_public_repos/tokenizers/bindings/python/Makefile | .PHONY: style check-style test
DATA_DIR = data
dir_guard=@mkdir -p $(@D)
check_dirs := examples py_src/tokenizers tests
# Format source code automatically
style:
python stub.py
black --line-length 119 --target-version py35 $(check_dirs)
# Check the source code is formatted correctly
check-style:
python stub.py --check
black --check --line-length 119 --target-version py35 examples py_src/tokenizers tests
TESTS_RESOURCES = $(DATA_DIR)/small.txt $(DATA_DIR)/roberta.json
# Launch the test suite
test: $(TESTS_RESOURCES)
pip install pytest requests setuptools_rust numpy pyarrow datasets
python -m pytest -s -v tests
cargo test --no-default-features
$(DATA_DIR)/big.txt :
$(dir_guard)
wget https://norvig.com/big.txt -O $@
$(DATA_DIR)/small.txt : $(DATA_DIR)/big.txt
head -100 $(DATA_DIR)/big.txt > $@
$(DATA_DIR)/roberta.json :
$(dir_guard)
wget https://huggingface.co/roberta-large/raw/main/tokenizer.json -O $@
| 0 |
hf_public_repos/tokenizers/bindings | hf_public_repos/tokenizers/bindings/python/pyproject.toml | [project]
name = 'tokenizers'
requires-python = '>=3.7'
authors = [
{name = 'Nicolas Patry', email = 'patry.nicolas@protonmail.com'},
{name = 'Anthony Moi', email = 'anthony@huggingface.co'}
]
classifiers = [
"Development Status :: 5 - Production/Stable",
"Intended Audience :: Developers",
"Intended Audience :: Education",
"Intended Audience :: Science/Research",
"License :: OSI Approved :: Apache Software License",
"Operating System :: OS Independent",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
"Programming Language :: Python :: 3.10",
"Programming Language :: Python :: 3.11",
"Topic :: Scientific/Engineering :: Artificial Intelligence",
]
keywords = ["NLP", "tokenizer", "BPE", "transformer", "deep learning"]
dynamic = [
'description',
'license',
'readme',
]
dependencies = ["huggingface_hub>=0.16.4,<1.0"]
[project.urls]
Homepage = 'https://github.com/huggingface/tokenizers'
Source = 'https://github.com/huggingface/tokenizers'
[project.optional-dependencies]
testing = ["pytest", "requests", "numpy", "datasets", "black==22.3"]
docs = ["sphinx", "sphinx_rtd_theme", "setuptools_rust"]
dev = ["tokenizers[testing]"]
[build-system]
requires = ["maturin>=1.0,<2.0"]
build-backend = "maturin"
[tool.maturin]
python-source = "py_src"
module-name = "tokenizers.tokenizers"
bindings = 'pyo3'
features = ["pyo3/extension-module"]
[tool.black]
line-length = 119
target-version = ['py35']
| 0 |
hf_public_repos/tokenizers/bindings | hf_public_repos/tokenizers/bindings/python/MANIFEST.in | include Cargo.toml
include pyproject.toml
include rust-toolchain
include ../../LICENSE
recursive-include src *
recursive-include tokenizers-lib *
recursive-exclude tokenizers-lib/target *
| 0 |
hf_public_repos/tokenizers/bindings | hf_public_repos/tokenizers/bindings/python/README.md | <p align="center">
<br>
<img src="https://huggingface.co/landing/assets/tokenizers/tokenizers-logo.png" width="600"/>
<br>
<p>
<p align="center">
<a href="https://badge.fury.io/py/tokenizers">
<img alt="Build" src="https://badge.fury.io/py/tokenizers.svg">
</a>
<a href="https://github.com/huggingface/tokenizers/blob/master/LICENSE">
<img alt="GitHub" src="https://img.shields.io/github/license/huggingface/tokenizers.svg?color=blue">
</a>
</p>
<br>
# Tokenizers
Provides an implementation of today's most used tokenizers, with a focus on performance and
versatility.
Bindings over the [Rust](https://github.com/huggingface/tokenizers/tree/master/tokenizers) implementation.
If you are interested in the High-level design, you can go check it there.
Otherwise, let's dive in!
## Main features:
- Train new vocabularies and tokenize using 4 pre-made tokenizers (Bert WordPiece and the 3
most common BPE versions).
- Extremely fast (both training and tokenization), thanks to the Rust implementation. Takes
less than 20 seconds to tokenize a GB of text on a server's CPU.
- Easy to use, but also extremely versatile.
- Designed for research and production.
- Normalization comes with alignment tracking. It's always possible to get the part of the
original sentence that corresponds to a given token (see the short example after this list).
- Does all the pre-processing: Truncate, Pad, add the special tokens your model needs.
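As a small illustration of the alignment tracking mentioned in the list above (a sketch, assuming the `bert-base-cased` tokenizer is reachable on the Hub):
```python
from tokenizers import Tokenizer

tokenizer = Tokenizer.from_pretrained("bert-base-cased")
sentence = "Welcome to the Tokenizers library."
output = tokenizer.encode(sentence)

# Each token keeps the span of the original sentence it was produced from
for token, (start, stop) in zip(output.tokens, output.offsets):
    print(token, "->", repr(sentence[start:stop]))
```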
### Installation
#### With pip:
```bash
pip install tokenizers
```
#### From sources:
To use this method, you need to have the Rust installed:
```bash
# Install with:
curl https://sh.rustup.rs -sSf | sh -s -- -y
export PATH="$HOME/.cargo/bin:$PATH"
```
Once Rust is installed, you can compile by doing the following
```bash
git clone https://github.com/huggingface/tokenizers
cd tokenizers/bindings/python
# Create a virtual env (you can use yours as well)
python -m venv .env
source .env/bin/activate
# Install `tokenizers` in the current virtual env
pip install -e .
```
### Load a pretrained tokenizer from the Hub
```python
from tokenizers import Tokenizer
tokenizer = Tokenizer.from_pretrained("bert-base-cased")
```
### Using the provided Tokenizers
We provide some pre-built tokenizers to cover the most common cases. You can easily load one of
these using some `vocab.json` and `merges.txt` files:
```python
from tokenizers import CharBPETokenizer
# Initialize a tokenizer
vocab = "./path/to/vocab.json"
merges = "./path/to/merges.txt"
tokenizer = CharBPETokenizer(vocab, merges)
# And then encode:
encoded = tokenizer.encode("I can feel the magic, can you?")
print(encoded.ids)
print(encoded.tokens)
```
And you can train them just as simply:
```python
from tokenizers import CharBPETokenizer
# Initialize a tokenizer
tokenizer = CharBPETokenizer()
# Then train it!
tokenizer.train([ "./path/to/files/1.txt", "./path/to/files/2.txt" ])
# Now, let's use it:
encoded = tokenizer.encode("I can feel the magic, can you?")
# And finally save it somewhere
tokenizer.save("./path/to/directory/my-bpe.tokenizer.json")
```
#### Provided Tokenizers
- `CharBPETokenizer`: The original BPE
- `ByteLevelBPETokenizer`: The byte level version of the BPE
- `SentencePieceBPETokenizer`: A BPE implementation compatible with the one used by SentencePiece
- `BertWordPieceTokenizer`: The famous Bert tokenizer, using WordPiece
All of these can be used and trained as explained above!
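Each of the variants listed above follows the same pattern; for instance, here is a short sketch with the byte-level BPE (file paths are placeholders):
```python
from tokenizers import ByteLevelBPETokenizer

# Initialize, train and use the byte-level BPE the same way
tokenizer = ByteLevelBPETokenizer()
tokenizer.train(["./path/to/files/1.txt", "./path/to/files/2.txt"], vocab_size=20000, min_frequency=2)
encoded = tokenizer.encode("I can feel the magic, can you?")
print(encoded.tokens)
```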
### Build your own
Whenever these provided tokenizers don't give you enough freedom, you can build your own tokenizer,
by putting all the different parts you need together.
You can check how we implemented the [provided tokenizers](https://github.com/huggingface/tokenizers/tree/master/bindings/python/py_src/tokenizers/implementations) and adapt them easily to your own needs.
#### Building a byte-level BPE
Here is an example showing how to build your own byte-level BPE by putting all the different pieces
together, and then saving it to a single file:
```python
from tokenizers import Tokenizer, models, pre_tokenizers, decoders, trainers, processors
# Initialize a tokenizer
tokenizer = Tokenizer(models.BPE())
# Customize pre-tokenization and decoding
tokenizer.pre_tokenizer = pre_tokenizers.ByteLevel(add_prefix_space=True)
tokenizer.decoder = decoders.ByteLevel()
tokenizer.post_processor = processors.ByteLevel(trim_offsets=True)
# And then train
trainer = trainers.BpeTrainer(
vocab_size=20000,
min_frequency=2,
initial_alphabet=pre_tokenizers.ByteLevel.alphabet()
)
tokenizer.train([
"./path/to/dataset/1.txt",
"./path/to/dataset/2.txt",
"./path/to/dataset/3.txt"
], trainer=trainer)
# And Save it
tokenizer.save("byte-level-bpe.tokenizer.json", pretty=True)
```
Now, when you want to use this tokenizer, this is as simple as:
```python
from tokenizers import Tokenizer
tokenizer = Tokenizer.from_file("byte-level-bpe.tokenizer.json")
encoded = tokenizer.encode("I can feel the magic, can you?")
```
| 0 |
hf_public_repos/tokenizers/bindings | hf_public_repos/tokenizers/bindings/python/setup.cfg | [isort]
default_section = FIRSTPARTY
ensure_newline_before_comments = True
force_grid_wrap = 0
include_trailing_comma = True
known_first_party = transformers
known_third_party =
absl
conllu
datasets
elasticsearch
fairseq
faiss-cpu
fastprogress
fire
fugashi
git
h5py
matplotlib
nltk
numpy
packaging
pandas
PIL
psutil
pytest
pytorch_lightning
rouge_score
sacrebleu
seqeval
sklearn
streamlit
tensorboardX
tensorflow
tensorflow_datasets
timeout_decorator
torch
torchaudio
torchtext
torchvision
torch_xla
tqdm
line_length = 119
lines_after_imports = 2
multi_line_output = 3
use_parentheses = True
[flake8]
ignore = E203, E501, E741, W503, W605
max-line-length = 119
[tool:pytest]
doctest_optionflags=NUMBER NORMALIZE_WHITESPACE ELLIPSIS
| 0 |
hf_public_repos/tokenizers/bindings | hf_public_repos/tokenizers/bindings/python/test.txt | <DOCUMENT> \test{bla} thisisatest </DOCUMENT>
<DOCUMENT> \test{bla} thisisatest </DOCUMENT>
<DOCUMENT> \test{bla} thisisatest </DOCUMENT>
<DOCUMENT> \test{bla} thisisatest </DOCUMENT>
<DOCUMENT> \test{bla} thisisatest </DOCUMENT>
<DOCUMENT> \test{bla} thisisatest </DOCUMENT>
<DOCUMENT> \test{bla} thisisatest </DOCUMENT>
<DOCUMENT> \test{bla} thisisatest </DOCUMENT>
<DOCUMENT> \test{bla} thisisatest </DOCUMENT>
<DOCUMENT> \test{bla} thisisatest </DOCUMENT>
<DOCUMENT> \test{bla} thisisatest </DOCUMENT>
<DOCUMENT> \test{bla} thisisatest </DOCUMENT>
<DOCUMENT> \test{bla} thisisatest </DOCUMENT>
<DOCUMENT> \test{bla} thisisatest </DOCUMENT>
<DOCUMENT> \test{bla} thisisatest </DOCUMENT>
<DOCUMENT> \test{bla} thisisatest </DOCUMENT>
<DOCUMENT> \test{bla} thisisatest </DOCUMENT>
<DOCUMENT> \test{bla} thisisatest </DOCUMENT>
<DOCUMENT> \test{bla} thisisatest </DOCUMENT>
<DOCUMENT> \test{bla} thisisatest </DOCUMENT>
<DOCUMENT> \test{bla} thisisatest </DOCUMENT>
<DOCUMENT> \test{bla} thisisatest </DOCUMENT>
<DOCUMENT> \test{bla} thisisatest </DOCUMENT>
<DOCUMENT> \test{bla} thisisatest </DOCUMENT>
<DOCUMENT> \test{bla} thisisatest </DOCUMENT>
<DOCUMENT> \test{bla} thisisatest </DOCUMENT>
<DOCUMENT> \test{bla} thisisatest </DOCUMENT>
<DOCUMENT> \test{bla} thisisatest </DOCUMENT>
<DOCUMENT> \test{bla} thisisatest </DOCUMENT>
<DOCUMENT> \test{bla} thisisatest </DOCUMENT>
<DOCUMENT> \test{bla} thisisatest </DOCUMENT>
<DOCUMENT> \test{bla} thisisatest </DOCUMENT>
<DOCUMENT> \test{bla} thisisatest </DOCUMENT>
<DOCUMENT> \test{bla} thisisatest </DOCUMENT>
<DOCUMENT> \test{bla} thisisatest </DOCUMENT>
<DOCUMENT> \test{bla} thisisatest </DOCUMENT>
| 0 |
hf_public_repos/tokenizers/bindings | hf_public_repos/tokenizers/bindings/python/stub.py | import argparse
import inspect
import os
from pathlib import Path
import black
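# This script introspects the compiled `tokenizers` extension module and generates the
# `.pyi` stubs (plus the `__init__.py` re-exports) under `py_src/tokenizers/`, so that
# type checkers and IDEs can see the signatures and docstrings of the Rust-backed API.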
INDENT = " " * 4
GENERATED_COMMENT = "# Generated content DO NOT EDIT\n"
def do_indent(text: str, indent: str):
return text.replace("\n", f"\n{indent}")
def function(obj, indent, text_signature=None):
if text_signature is None:
text_signature = obj.__text_signature__
string = ""
string += f"{indent}def {obj.__name__}{text_signature}:\n"
indent += INDENT
string += f'{indent}"""\n'
string += f"{indent}{do_indent(obj.__doc__, indent)}\n"
string += f'{indent}"""\n'
string += f"{indent}pass\n"
string += "\n"
string += "\n"
return string
def member_sort(member):
if inspect.isclass(member):
value = 10 + len(inspect.getmro(member))
else:
value = 1
return value
def fn_predicate(obj):
value = inspect.ismethoddescriptor(obj) or inspect.isbuiltin(obj)
if value:
return obj.__doc__ and obj.__text_signature__ and not obj.__name__.startswith("_")
if inspect.isgetsetdescriptor(obj):
return obj.__doc__ and not obj.__name__.startswith("_")
return False
def get_module_members(module):
members = [
member
for name, member in inspect.getmembers(module)
if not name.startswith("_") and not inspect.ismodule(member)
]
members.sort(key=member_sort)
return members
def pyi_file(obj, indent=""):
string = ""
if inspect.ismodule(obj):
string += GENERATED_COMMENT
members = get_module_members(obj)
for member in members:
string += pyi_file(member, indent)
elif inspect.isclass(obj):
indent += INDENT
mro = inspect.getmro(obj)
if len(mro) > 2:
inherit = f"({mro[1].__name__})"
else:
inherit = ""
string += f"class {obj.__name__}{inherit}:\n"
body = ""
if obj.__doc__:
body += f'{indent}"""\n{indent}{do_indent(obj.__doc__, indent)}\n{indent}"""\n'
fns = inspect.getmembers(obj, fn_predicate)
# Init
if obj.__text_signature__:
body += f"{indent}def __init__{obj.__text_signature__}:\n"
body += f"{indent+INDENT}pass\n"
body += "\n"
for (name, fn) in fns:
body += pyi_file(fn, indent=indent)
if not body:
body += f"{indent}pass\n"
string += body
string += "\n\n"
elif inspect.isbuiltin(obj):
string += f"{indent}@staticmethod\n"
string += function(obj, indent)
elif inspect.ismethoddescriptor(obj):
string += function(obj, indent)
elif inspect.isgetsetdescriptor(obj):
# TODO it would be interesting to add the setter maybe ?
string += f"{indent}@property\n"
string += function(obj, indent, text_signature="(self)")
else:
raise Exception(f"Object {obj} is not supported")
return string
def py_file(module, origin):
members = get_module_members(module)
string = GENERATED_COMMENT
string += f"from .. import {origin}\n"
string += "\n"
for member in members:
name = member.__name__
string += f"{name} = {origin}.{name}\n"
return string
def do_black(content, is_pyi):
mode = black.Mode(
target_versions={black.TargetVersion.PY35},
line_length=119,
is_pyi=is_pyi,
string_normalization=True,
experimental_string_processing=False,
)
try:
return black.format_file_contents(content, fast=True, mode=mode)
except black.NothingChanged:
return content
def write(module, directory, origin, check=False):
submodules = [(name, member) for name, member in inspect.getmembers(module) if inspect.ismodule(member)]
filename = os.path.join(directory, "__init__.pyi")
pyi_content = pyi_file(module)
pyi_content = do_black(pyi_content, is_pyi=True)
os.makedirs(directory, exist_ok=True)
if check:
with open(filename, "r") as f:
data = f.read()
assert data == pyi_content, f"The content of {filename} seems outdated, please run `python stub.py`"
else:
with open(filename, "w") as f:
f.write(pyi_content)
filename = os.path.join(directory, "__init__.py")
py_content = py_file(module, origin)
py_content = do_black(py_content, is_pyi=False)
os.makedirs(directory, exist_ok=True)
is_auto = False
if not os.path.exists(filename):
is_auto = True
else:
with open(filename, "r") as f:
line = f.readline()
if line == GENERATED_COMMENT:
is_auto = True
if is_auto:
if check:
with open(filename, "r") as f:
data = f.read()
assert data == py_content, f"The content of {filename} seems outdated, please run `python stub.py`"
else:
with open(filename, "w") as f:
f.write(py_content)
for name, submodule in submodules:
write(submodule, os.path.join(directory, name), f"{name}", check=check)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--check", action="store_true")
args = parser.parse_args()
import tokenizers
write(tokenizers.tokenizers, "py_src/tokenizers/", "tokenizers", check=args.check)
| 0 |
hf_public_repos/tokenizers/bindings/python/py_src | hf_public_repos/tokenizers/bindings/python/py_src/tokenizers/__init__.pyi | # Generated content DO NOT EDIT
class AddedToken:
"""
Represents a token that can be added to a :class:`~tokenizers.Tokenizer`.
It can have special options that defines the way it should behave.
Args:
content (:obj:`str`): The content of the token
single_word (:obj:`bool`, defaults to :obj:`False`):
Defines whether this token should only match single words. If :obj:`True`, this
token will never match inside of a word. For example the token ``ing`` would match
on ``tokenizing`` if this option is :obj:`False`, but not if it is :obj:`True`.
The notion of "`inside of a word`" is defined by the word boundaries pattern in
regular expressions (ie. the token should start and end with word boundaries).
lstrip (:obj:`bool`, defaults to :obj:`False`):
Defines whether this token should strip all potential whitespaces on its left side.
If :obj:`True`, this token will greedily match any whitespace on its left. For
example if we try to match the token ``[MASK]`` with ``lstrip=True``, in the text
``"I saw a [MASK]"``, we would match on ``" [MASK]"``. (Note the space on the left).
rstrip (:obj:`bool`, defaults to :obj:`False`):
Defines whether this token should strip all potential whitespaces on its right
side. If :obj:`True`, this token will greedily match any whitespace on its right.
It works just like :obj:`lstrip` but on the right.
normalized (:obj:`bool`, defaults to :obj:`True` with :meth:`~tokenizers.Tokenizer.add_tokens` and :obj:`False` with :meth:`~tokenizers.Tokenizer.add_special_tokens`):
Defines whether this token should match against the normalized version of the input
text. For example, with the added token ``"yesterday"``, and a normalizer in charge of
lowercasing the text, the token could be extract from the input ``"I saw a lion
Yesterday"``.
special (:obj:`bool`, defaults to :obj:`False` with :meth:`~tokenizers.Tokenizer.add_tokens` and :obj:`False` with :meth:`~tokenizers.Tokenizer.add_special_tokens`):
Defines whether this token should be skipped when decoding.
"""
def __init__(self, content, single_word=False, lstrip=False, rstrip=False, normalized=True, special=False):
pass
@property
def content(self):
"""
Get the content of this :obj:`AddedToken`
"""
pass
@property
def lstrip(self):
"""
Get the value of the :obj:`lstrip` option
"""
pass
@property
def normalized(self):
"""
Get the value of the :obj:`normalized` option
"""
pass
@property
def rstrip(self):
"""
Get the value of the :obj:`rstrip` option
"""
pass
@property
def single_word(self):
"""
Get the value of the :obj:`single_word` option
"""
pass
@property
def special(self):
"""
Get the value of the :obj:`special` option
"""
pass
class Encoding:
"""
The :class:`~tokenizers.Encoding` represents the output of a :class:`~tokenizers.Tokenizer`.
"""
@property
def attention_mask(self):
"""
The attention mask
This indicates to the LM which tokens should be attended to, and which should not.
This is especially important when batching sequences, where we need to applying
padding.
Returns:
:obj:`List[int]`: The attention mask
"""
pass
def char_to_token(self, char_pos, sequence_index=0):
"""
Get the token that contains the char at the given position in the input sequence.
Args:
char_pos (:obj:`int`):
The position of a char in the input string
sequence_index (:obj:`int`, defaults to :obj:`0`):
The index of the sequence that contains the target char
Returns:
:obj:`int`: The index of the token that contains this char in the encoded sequence
"""
pass
def char_to_word(self, char_pos, sequence_index=0):
"""
Get the word that contains the char at the given position in the input sequence.
Args:
char_pos (:obj:`int`):
The position of a char in the input string
sequence_index (:obj:`int`, defaults to :obj:`0`):
The index of the sequence that contains the target char
Returns:
:obj:`int`: The index of the word that contains this char in the input sequence
"""
pass
@property
def ids(self):
"""
The generated IDs
The IDs are the main input to a Language Model. They are the token indices,
the numerical representations that an LM understands.
Returns:
:obj:`List[int]`: The list of IDs
"""
pass
@staticmethod
def merge(encodings, growing_offsets=True):
"""
Merge the list of encodings into one final :class:`~tokenizers.Encoding`
Args:
encodings (A :obj:`List` of :class:`~tokenizers.Encoding`):
The list of encodings that should be merged in one
growing_offsets (:obj:`bool`, defaults to :obj:`True`):
Whether the offsets should accumulate while merging
Returns:
:class:`~tokenizers.Encoding`: The resulting Encoding
"""
pass
@property
def n_sequences(self):
"""
The number of sequences represented
Returns:
:obj:`int`: The number of sequences in this :class:`~tokenizers.Encoding`
"""
pass
@property
def offsets(self):
"""
The offsets associated to each token
These offsets let you slice the input string, and thus retrieve the original
part that led to producing the corresponding token.
Returns:
A :obj:`List` of :obj:`Tuple[int, int]`: The list of offsets
"""
pass
@property
def overflowing(self):
"""
A :obj:`List` of overflowing :class:`~tokenizers.Encoding`
When using truncation, the :class:`~tokenizers.Tokenizer` takes care of splitting
the output into as many pieces as required to match the specified maximum length.
This field lets you retrieve all the subsequent pieces.
When you use pairs of sequences, the overflowing pieces will contain enough
variations to cover all the possible combinations, while respecting the provided
maximum length.
"""
pass
def pad(self, length, direction="right", pad_id=0, pad_type_id=0, pad_token="[PAD]"):
"""
Pad the :class:`~tokenizers.Encoding` at the given length
Args:
length (:obj:`int`):
The desired length
direction: (:obj:`str`, defaults to :obj:`right`):
The expected padding direction. Can be either :obj:`right` or :obj:`left`
pad_id (:obj:`int`, defaults to :obj:`0`):
The ID corresponding to the padding token
pad_type_id (:obj:`int`, defaults to :obj:`0`):
The type ID corresponding to the padding token
pad_token (:obj:`str`, defaults to `[PAD]`):
The pad token to use
"""
pass
@property
def sequence_ids(self):
"""
The generated sequence indices.
They represent the index of the input sequence associated to each token.
The sequence id can be None if the token is not related to any input sequence,
like for example with special tokens.
Returns:
A :obj:`List` of :obj:`Optional[int]`: A list of optional sequence index.
"""
pass
def set_sequence_id(self, sequence_id):
"""
Set the given sequence index
Set the given sequence index for the whole range of tokens contained in this
:class:`~tokenizers.Encoding`.
"""
pass
@property
def special_tokens_mask(self):
"""
The special token mask
This indicates which tokens are special tokens, and which are not.
Returns:
:obj:`List[int]`: The special tokens mask
"""
pass
def token_to_chars(self, token_index):
"""
Get the offsets of the token at the given index.
The returned offsets are related to the input sequence that contains the
token. In order to determine in which input sequence it belongs, you
must call :meth:`~tokenizers.Encoding.token_to_sequence()`.
Args:
token_index (:obj:`int`):
The index of a token in the encoded sequence.
Returns:
:obj:`Tuple[int, int]`: The token offsets :obj:`(first, last + 1)`
"""
pass
def token_to_sequence(self, token_index):
"""
Get the index of the sequence represented by the given token.
In the general use case, this method returns :obj:`0` for a single sequence or
the first sequence of a pair, and :obj:`1` for the second sequence of a pair
Args:
token_index (:obj:`int`):
The index of a token in the encoded sequence.
Returns:
:obj:`int`: The sequence id of the given token
"""
pass
def token_to_word(self, token_index):
"""
Get the index of the word that contains the token in one of the input sequences.
The returned word index is related to the input sequence that contains
the token. In order to determine in which input sequence it belongs, you
must call :meth:`~tokenizers.Encoding.token_to_sequence()`.
Args:
token_index (:obj:`int`):
The index of a token in the encoded sequence.
Returns:
:obj:`int`: The index of the word in the relevant input sequence.
"""
pass
@property
def tokens(self):
"""
The generated tokens
They are the string representation of the IDs.
Returns:
:obj:`List[str]`: The list of tokens
"""
pass
def truncate(self, max_length, stride=0, direction="right"):
"""
Truncate the :class:`~tokenizers.Encoding` at the given length
If this :class:`~tokenizers.Encoding` represents multiple sequences, when truncating
this information is lost. It will be considered as representing a single sequence.
Args:
max_length (:obj:`int`):
The desired length
stride (:obj:`int`, defaults to :obj:`0`):
The length of previous content to be included in each overflowing piece
direction (:obj:`str`, defaults to :obj:`right`):
Truncate direction
"""
pass
@property
def type_ids(self):
"""
The generated type IDs
Generally used for tasks like sequence classification or question answering,
these tokens let the LM know which input sequence corresponds to each token.
Returns:
:obj:`List[int]`: The list of type ids
"""
pass
@property
def word_ids(self):
"""
The generated word indices.
They represent the index of the word associated to each token.
When the input is pre-tokenized, they correspond to the ID of the given input label,
otherwise they correspond to the words indices as defined by the
:class:`~tokenizers.pre_tokenizers.PreTokenizer` that was used.
For special tokens and such (any token that was generated from something that was
not part of the input), the output is :obj:`None`
Returns:
A :obj:`List` of :obj:`Optional[int]`: A list of optional word index.
"""
pass
def word_to_chars(self, word_index, sequence_index=0):
"""
Get the offsets of the word at the given index in one of the input sequences.
Args:
word_index (:obj:`int`):
The index of a word in one of the input sequences.
sequence_index (:obj:`int`, defaults to :obj:`0`):
The index of the sequence that contains the target word
Returns:
:obj:`Tuple[int, int]`: The range of characters (span) :obj:`(first, last + 1)`
"""
pass
def word_to_tokens(self, word_index, sequence_index=0):
"""
Get the encoded tokens corresponding to the word at the given index
in one of the input sequences.
Args:
word_index (:obj:`int`):
The index of a word in one of the input sequences.
sequence_index (:obj:`int`, defaults to :obj:`0`):
The index of the sequence that contains the target word
Returns:
:obj:`Tuple[int, int]`: The range of tokens: :obj:`(first, last + 1)`
"""
pass
@property
def words(self):
"""
The generated word indices.
.. warning::
This is deprecated and will be removed in a future version.
Please use :obj:`~tokenizers.Encoding.word_ids` instead.
They represent the index of the word associated to each token.
When the input is pre-tokenized, they correspond to the ID of the given input label,
otherwise they correspond to the words indices as defined by the
:class:`~tokenizers.pre_tokenizers.PreTokenizer` that was used.
For special tokens and such (any token that was generated from something that was
not part of the input), the output is :obj:`None`
Returns:
A :obj:`List` of :obj:`Optional[int]`: A list of optional word index.
"""
pass
class NormalizedString:
"""
NormalizedString
A NormalizedString takes care of modifying an "original" string, to obtain a "normalized" one.
While making all the requested modifications, it keeps track of the alignment information
between the two versions of the string.
Args:
sequence: str:
The string sequence used to initialize this NormalizedString
"""
def append(self, s):
"""
Append the given sequence to the string
"""
pass
def clear(self):
"""
Clears the string
"""
pass
def filter(self, func):
"""
Filter each character of the string using the given func
"""
pass
def for_each(self, func):
"""
Calls the given function for each character of the string
"""
pass
def lowercase(self):
"""
Lowercase the string
"""
pass
def lstrip(self):
"""
Strip the left of the string
"""
pass
def map(self, func):
"""
Calls the given function for each character of the string
Replaces each character of the string using the returned value. Each
returned value **must** be a str of length 1 (ie a character).
"""
pass
def nfc(self):
"""
Runs the NFC normalization
"""
pass
def nfd(self):
"""
Runs the NFD normalization
"""
pass
def nfkc(self):
"""
Runs the NFKC normalization
"""
pass
def nfkd(self):
"""
Runs the NFKD normalization
"""
pass
@property
def normalized(self):
"""
The normalized part of the string
"""
pass
def prepend(self, s):
"""
Prepend the given sequence to the string
"""
pass
def replace(self, pattern, content):
"""
Replace the content of the given pattern with the provided content
Args:
pattern: Pattern:
A pattern used to match the string. Usually a string or a Regex
content: str:
The content to be used as replacement
"""
pass
def rstrip(self):
"""
Strip the right of the string
"""
pass
def slice(self, range):
"""
Slice the string using the given range
"""
pass
def split(self, pattern, behavior):
"""
Split the NormalizedString using the given pattern and the specified behavior
Args:
pattern: Pattern:
A pattern used to split the string. Usually a string or a regex built with `tokenizers.Regex`
behavior: SplitDelimiterBehavior:
The behavior to use when splitting.
Choices: "removed", "isolated", "merged_with_previous", "merged_with_next",
"contiguous"
Returns:
A list of NormalizedString, representing each split
"""
pass
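# A minimal sketch of how `behavior` affects the result (the outputs described in the
# comments are the expected contents, shown for illustration):
#
#     from tokenizers import NormalizedString, SplitDelimiterBehavior
#
#     parts = NormalizedString("pre-tokenize").split("-", SplitDelimiterBehavior.ISOLATED)
#     # -> three NormalizedStrings: "pre", "-", "tokenize"
#     # "removed" would drop the "-", "merged_with_previous" would give "pre-" / "tokenize",
#     # "merged_with_next" would give "pre" / "-tokenize", and "contiguous" groups runs of
#     # matching characters into a single split.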
def strip(self):
"""
Strip both ends of the string
"""
pass
def uppercase(self):
"""
Uppercase the string
"""
pass
class PreTokenizedString:
"""
PreTokenizedString
Wrapper over a string, that provides a way to normalize, pre-tokenize, tokenize the
underlying string, while keeping track of the alignment information (offsets).
The PreTokenizedString manages what we call `splits`. Each split represents a substring
which is a subpart of the original string, with the relevant offsets and tokens.
When calling one of the methods used to modify the PreTokenizedString (namely one of
`split`, `normalize` or `tokenize`), only the `splits` that don't have any associated
tokens will get modified.
Args:
sequence: str:
The string sequence used to initialize this PreTokenizedString
"""
def __init__(self, sequence):
pass
def get_splits(self, offset_referential="original", offset_type="char"):
"""
Get the splits currently managed by the PreTokenizedString
Args:
offset_referential: :obj:`str`
Whether the returned splits should have offsets expressed relative
to the original string, or the normalized one. choices: "original", "normalized".
offset_type: :obj:`str`
Whether the returned splits should have offsets expressed in bytes or chars.
When slicing a str, we usually want to use chars, which is the default value.
In some cases it can be useful to have these offsets expressed in bytes instead,
so it is possible to change that here.
choices: "char", "bytes"
Returns:
A list of splits
"""
pass
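# A minimal sketch (the Whitespace pre-tokenizer and the offsets shown are illustrative):
#
#     from tokenizers import PreTokenizedString
#     from tokenizers.pre_tokenizers import Whitespace
#
#     pretok = PreTokenizedString("Hello there!")
#     Whitespace().pre_tokenize(pretok)
#     splits = pretok.get_splits()
#     # each split carries its text and (start, end) offsets,
#     # e.g. "Hello" -> (0, 5), "there" -> (6, 11), "!" -> (11, 12)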
def normalize(self, func):
"""
Normalize each split of the `PreTokenizedString` using the given `func`
Args:
func: Callable[[NormalizedString], None]:
The function used to normalize each underlying split. This function
does not need to return anything; simply calling the methods on the provided
NormalizedString is enough to modify it in place.
"""
pass
def split(self, func):
"""
Split the PreTokenizedString using the given `func`
Args:
func: Callable[[int, NormalizedString], List[NormalizedString]]:
The function used to split each underlying split.
It is expected to return a list of `NormalizedString`, that represent the new
splits. If the given `NormalizedString` does not need any splitting, we can
just return it directly.
In order for the offsets to be tracked accurately, any returned `NormalizedString`
should come from calling either `.split` or `.slice` on the received one.
"""
pass
def to_encoding(self, type_id=0, word_idx=None):
"""
Return an Encoding generated from this PreTokenizedString
Args:
type_id: int = 0:
The type_id to be used on the generated Encoding.
word_idx: Optional[int] = None:
An optional word index to be used for each token of this Encoding. If provided,
all the word indices in the generated Encoding will use this value, instead
of the one automatically tracked during pre-tokenization.
Returns:
An Encoding
"""
pass
def tokenize(self, func):
"""
Tokenize each split of the `PreTokenizedString` using the given `func`
Args:
func: Callable[[str], List[Token]]:
The function used to tokenize each underlying split. This function must return
a list of Token generated from the input str.
"""
pass
class Regex:
"""
Instantiate a new Regex with the given pattern
"""
def __init__(self, pattern):
pass
class Token:
pass
class Tokenizer:
"""
A :obj:`Tokenizer` works as a pipeline. It processes some raw text as input
and outputs an :class:`~tokenizers.Encoding`.
Args:
model (:class:`~tokenizers.models.Model`):
The core algorithm that this :obj:`Tokenizer` should be using.
"""
def __init__(self, model):
pass
def add_special_tokens(self, tokens):
"""
Add the given special tokens to the Tokenizer.
If these tokens are already part of the vocabulary, it just lets the Tokenizer know about
them. If they don't exist, the Tokenizer creates them, giving them a new id.
These special tokens will never be processed by the model (ie won't be split into
multiple tokens), and they can be removed from the output when decoding.
Args:
tokens (A :obj:`List` of :class:`~tokenizers.AddedToken` or :obj:`str`):
The list of special tokens we want to add to the vocabulary. Each token can either
be a string or an instance of :class:`~tokenizers.AddedToken` for more
customization.
Returns:
:obj:`int`: The number of tokens that were created in the vocabulary
"""
pass
def add_tokens(self, tokens):
"""
Add the given tokens to the vocabulary
The given tokens are added only if they don't already exist in the vocabulary.
Each token is then assigned a new id.
Args:
tokens (A :obj:`List` of :class:`~tokenizers.AddedToken` or :obj:`str`):
The list of tokens we want to add to the vocabulary. Each token can be either a
string or an instance of :class:`~tokenizers.AddedToken` for more customization.
Returns:
:obj:`int`: The number of tokens that were created in the vocabulary
"""
pass
def decode(self, ids, skip_special_tokens=True):
"""
Decode the given list of ids back to a string
This is used to decode anything coming back from a Language Model
Args:
ids (A :obj:`List/Tuple` of :obj:`int`):
The list of ids that we want to decode
skip_special_tokens (:obj:`bool`, defaults to :obj:`True`):
Whether the special tokens should be removed from the decoded string
Returns:
:obj:`str`: The decoded string
"""
pass
def decode_batch(self, sequences, skip_special_tokens=True):
"""
Decode a batch of ids back to their corresponding string
Args:
sequences (:obj:`List` of :obj:`List[int]`):
The batch of sequences we want to decode
skip_special_tokens (:obj:`bool`, defaults to :obj:`True`):
Whether the special tokens should be removed from the decoded strings
Returns:
:obj:`List[str]`: A list of decoded strings
"""
pass
@property
def decoder(self):
"""
The `optional` :class:`~tokenizers.decoders.Decoder` in use by the Tokenizer
"""
pass
def enable_padding(
self, direction="right", pad_id=0, pad_type_id=0, pad_token="[PAD]", length=None, pad_to_multiple_of=None
):
"""
Enable the padding
Args:
direction (:obj:`str`, `optional`, defaults to :obj:`right`):
The direction in which to pad. Can be either ``right`` or ``left``
pad_to_multiple_of (:obj:`int`, `optional`):
If specified, the padding length should always snap to the next multiple of the
given value. For example, if we were going to pad with a length of 250 but
``pad_to_multiple_of=8`` then we will pad to 256.
pad_id (:obj:`int`, defaults to 0):
The id to be used when padding
pad_type_id (:obj:`int`, defaults to 0):
The type id to be used when padding
pad_token (:obj:`str`, defaults to :obj:`[PAD]`):
The pad token to be used when padding
length (:obj:`int`, `optional`):
If specified, the length at which to pad. If not specified we pad using the size of
the longest sequence in a batch.
"""
pass
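# A minimal sketch (assumes "[PAD]" exists in the vocabulary of `tokenizer`):
#
#     tokenizer.enable_padding(
#         pad_id=tokenizer.token_to_id("[PAD]"),
#         pad_token="[PAD]",
#         pad_to_multiple_of=8,
#     )
#     # without `length`, each batch is padded to its longest sequence,
#     # rounded up to the next multiple of 8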
def enable_truncation(self, max_length, stride=0, strategy="longest_first", direction="right"):
"""
Enable truncation
Args:
max_length (:obj:`int`):
The max length at which to truncate
stride (:obj:`int`, `optional`):
The length of the previous first sequence to be included in the overflowing
sequence
strategy (:obj:`str`, `optional`, defaults to :obj:`longest_first`):
The strategy used for truncation. Can be one of ``longest_first``, ``only_first`` or
``only_second``.
direction (:obj:`str`, defaults to :obj:`right`):
The direction in which to truncate. Can be either ``right`` or ``left``
"""
pass
def encode(self, sequence, pair=None, is_pretokenized=False, add_special_tokens=True):
"""
Encode the given sequence and pair. This method can process raw text sequences
as well as already pre-tokenized sequences.
Example:
Here are some examples of the inputs that are accepted::
encode("A single sequence")`
encode("A sequence", "And its pair")`
encode([ "A", "pre", "tokenized", "sequence" ], is_pretokenized=True)`
encode(
[ "A", "pre", "tokenized", "sequence" ], [ "And", "its", "pair" ],
is_pretokenized=True
)
Args:
sequence (:obj:`~tokenizers.InputSequence`):
The main input sequence we want to encode. This sequence can be either raw
text or pre-tokenized, according to the ``is_pretokenized`` argument:
- If ``is_pretokenized=False``: :class:`~tokenizers.TextInputSequence`
- If ``is_pretokenized=True``: :class:`~tokenizers.PreTokenizedInputSequence`
pair (:obj:`~tokenizers.InputSequence`, `optional`):
An optional input sequence. The expected format is the same as for ``sequence``.
is_pretokenized (:obj:`bool`, defaults to :obj:`False`):
Whether the input is already pre-tokenized
add_special_tokens (:obj:`bool`, defaults to :obj:`True`):
Whether to add the special tokens
Returns:
:class:`~tokenizers.Encoding`: The encoded result
"""
pass
def encode_batch(self, input, is_pretokenized=False, add_special_tokens=True):
"""
Encode the given batch of inputs. This method accepts both raw text sequences
as well as already pre-tokenized sequences.
Example:
Here are some examples of the inputs that are accepted::
encode_batch([
"A single sequence",
("A tuple with a sequence", "And its pair"),
[ "A", "pre", "tokenized", "sequence" ],
([ "A", "pre", "tokenized", "sequence" ], "And its pair")
])
Args:
input (A :obj:`List`/:obj:`Tuple` of :obj:`~tokenizers.EncodeInput`):
A list of single sequences or pair sequences to encode. Each sequence
can be either raw text or pre-tokenized, according to the ``is_pretokenized``
argument:
- If ``is_pretokenized=False``: :class:`~tokenizers.TextEncodeInput`
- If ``is_pretokenized=True``: :class:`~tokenizers.PreTokenizedEncodeInput`
is_pretokenized (:obj:`bool`, defaults to :obj:`False`):
Whether the input is already pre-tokenized
add_special_tokens (:obj:`bool`, defaults to :obj:`True`):
Whether to add the special tokens
Returns:
A :obj:`List` of :class:`~tokenizers.Encoding`: The encoded batch
"""
pass
@staticmethod
def from_buffer(buffer):
"""
Instantiate a new :class:`~tokenizers.Tokenizer` from the given buffer.
Args:
buffer (:obj:`bytes`):
A buffer containing a previously serialized :class:`~tokenizers.Tokenizer`
Returns:
:class:`~tokenizers.Tokenizer`: The new tokenizer
"""
pass
@staticmethod
def from_file(path):
"""
Instantiate a new :class:`~tokenizers.Tokenizer` from the file at the given path.
Args:
path (:obj:`str`):
A path to a local JSON file representing a previously serialized
:class:`~tokenizers.Tokenizer`
Returns:
:class:`~tokenizers.Tokenizer`: The new tokenizer
"""
pass
@staticmethod
def from_pretrained(identifier, revision="main", auth_token=None):
"""
Instantiate a new :class:`~tokenizers.Tokenizer` from an existing file on the
Hugging Face Hub.
Args:
identifier (:obj:`str`):
The identifier of a Model on the Hugging Face Hub, that contains
a tokenizer.json file
revision (:obj:`str`, defaults to `main`):
A branch or commit id
auth_token (:obj:`str`, `optional`, defaults to `None`):
An optional auth token used to access private repositories on the
Hugging Face Hub
Returns:
:class:`~tokenizers.Tokenizer`: The new tokenizer
"""
pass
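# A minimal end-to-end sketch (downloading from the Hub and the identifier used
# are assumptions, not requirements):
#
#     tokenizer = Tokenizer.from_pretrained("bert-base-uncased")
#     encoding = tokenizer.encode("Hello world")
#     encoding.tokens                 # the produced tokens
#     tokenizer.decode(encoding.ids)  # back to a string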
@staticmethod
def from_str(json):
"""
Instantiate a new :class:`~tokenizers.Tokenizer` from the given JSON string.
Args:
json (:obj:`str`):
A valid JSON string representing a previously serialized
:class:`~tokenizers.Tokenizer`
Returns:
:class:`~tokenizers.Tokenizer`: The new tokenizer
"""
pass
def get_added_tokens_decoder(self):
"""
Get the added tokens, as a mapping from their id to the corresponding :class:`~tokenizers.AddedToken`
Returns:
:obj:`Dict[int, AddedToken]`: The added tokens, indexed by their id
"""
pass
def get_vocab(self, with_added_tokens=True):
"""
Get the underlying vocabulary
Args:
with_added_tokens (:obj:`bool`, defaults to :obj:`True`):
Whether to include the added tokens
Returns:
:obj:`Dict[str, int]`: The vocabulary
"""
pass
def get_vocab_size(self, with_added_tokens=True):
"""
Get the size of the underlying vocabulary
Args:
with_added_tokens (:obj:`bool`, defaults to :obj:`True`):
Whether to include the added tokens
Returns:
:obj:`int`: The size of the vocabulary
"""
pass
def id_to_token(self, id):
"""
Convert the given id to its corresponding token if it exists
Args:
id (:obj:`int`):
The id to convert
Returns:
:obj:`Optional[str]`: An optional token, :obj:`None` if out of vocabulary
"""
pass
@property
def model(self):
"""
The :class:`~tokenizers.models.Model` in use by the Tokenizer
"""
pass
def no_padding(self):
"""
Disable padding
"""
pass
def no_truncation(self):
"""
Disable truncation
"""
pass
@property
def normalizer(self):
"""
The `optional` :class:`~tokenizers.normalizers.Normalizer` in use by the Tokenizer
"""
pass
def num_special_tokens_to_add(self, is_pair):
"""
Return the number of special tokens that would be added for single/pair sentences.
Args:
is_pair (:obj:`bool`):
Whether the input is a pair of sequences rather than a single sequence
Returns:
:obj:`int`: The number of special tokens that would be added
"""
pass
@property
def padding(self):
"""
Get the current padding parameters
`Cannot be set, use` :meth:`~tokenizers.Tokenizer.enable_padding` `instead`
Returns:
(:obj:`dict`, `optional`):
A dict with the current padding parameters if padding is enabled
"""
pass
def post_process(self, encoding, pair=None, add_special_tokens=True):
"""
Apply all the post-processing steps to the given encodings.
The various steps are:
1. Truncate according to the set truncation params (provided with
:meth:`~tokenizers.Tokenizer.enable_truncation`)
2. Apply the :class:`~tokenizers.processors.PostProcessor`
3. Pad according to the set padding params (provided with
:meth:`~tokenizers.Tokenizer.enable_padding`)
Args:
encoding (:class:`~tokenizers.Encoding`):
The :class:`~tokenizers.Encoding` corresponding to the main sequence.
pair (:class:`~tokenizers.Encoding`, `optional`):
An optional :class:`~tokenizers.Encoding` corresponding to the pair sequence.
add_special_tokens (:obj:`bool`):
Whether to add the special tokens
Returns:
:class:`~tokenizers.Encoding`: The final post-processed encoding
"""
pass
@property
def post_processor(self):
"""
The `optional` :class:`~tokenizers.processors.PostProcessor` in use by the Tokenizer
"""
pass
@property
def pre_tokenizer(self):
"""
The `optional` :class:`~tokenizers.pre_tokenizers.PreTokenizer` in use by the Tokenizer
"""
pass
def save(self, path, pretty=True):
"""
Save the :class:`~tokenizers.Tokenizer` to the file at the given path.
Args:
path (:obj:`str`):
A path to a file in which to save the serialized tokenizer.
pretty (:obj:`bool`, defaults to :obj:`True`):
Whether the JSON file should be pretty formatted.
"""
pass
def to_str(self, pretty=False):
"""
Gets a serialized string representing this :class:`~tokenizers.Tokenizer`.
Args:
pretty (:obj:`bool`, defaults to :obj:`False`):
Whether the JSON string should be pretty formatted.
Returns:
:obj:`str`: A string representing the serialized Tokenizer
"""
pass
def token_to_id(self, token):
"""
Convert the given token to its corresponding id if it exists
Args:
token (:obj:`str`):
The token to convert
Returns:
:obj:`Optional[int]`: An optional id, :obj:`None` if out of vocabulary
"""
pass
def train(self, files, trainer=None):
"""
Train the Tokenizer using the given files.
Reads the files line by line, while keeping all the whitespace, even new lines.
If you want to train from data stored in memory, you can check
:meth:`~tokenizers.Tokenizer.train_from_iterator`
Args:
files (:obj:`List[str]`):
A list of paths to the files that we should use for training
trainer (:obj:`~tokenizers.trainers.Trainer`, `optional`):
An optional trainer that should be used to train our Model
"""
pass
def train_from_iterator(self, iterator, trainer=None, length=None):
"""
Train the Tokenizer using the provided iterator.
You can provide anything that is a Python Iterator
* A list of sequences :obj:`List[str]`
* A generator that yields :obj:`str` or :obj:`List[str]`
* A Numpy array of strings
* ...
Args:
iterator (:obj:`Iterator`):
Any iterator over strings or list of strings
trainer (:obj:`~tokenizers.trainers.Trainer`, `optional`):
An optional trainer that should be used to train our Model
length (:obj:`int`, `optional`):
The total number of sequences in the iterator. This is used to
provide meaningful progress tracking
"""
pass
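# A minimal training sketch (the BPE model and trainer settings are illustrative choices):
#
#     from tokenizers import Tokenizer
#     from tokenizers.models import BPE
#     from tokenizers.trainers import BpeTrainer
#
#     data = ["A first sentence", "Another sentence", "And a last one"]
#     tokenizer = Tokenizer(BPE(unk_token="[UNK]"))
#     trainer = BpeTrainer(special_tokens=["[UNK]", "[PAD]"])
#     tokenizer.train_from_iterator(data, trainer=trainer, length=len(data))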
@property
def truncation(self):
"""
Get the currently set truncation parameters
`Cannot be set, use` :meth:`~tokenizers.Tokenizer.enable_truncation` `instead`
Returns:
(:obj:`dict`, `optional`):
A dict with the current truncation parameters if truncation is enabled
"""
pass
| 0 |
hf_public_repos/tokenizers/bindings/python/py_src | hf_public_repos/tokenizers/bindings/python/py_src/tokenizers/__init__.py | from enum import Enum
from typing import List, Tuple, Union
Offsets = Tuple[int, int]
TextInputSequence = str
"""A :obj:`str` that represents an input sequence """
PreTokenizedInputSequence = Union[List[str], Tuple[str]]
"""A pre-tokenized input sequence. Can be one of:
- A :obj:`List` of :obj:`str`
- A :obj:`Tuple` of :obj:`str`
"""
TextEncodeInput = Union[
TextInputSequence,
Tuple[TextInputSequence, TextInputSequence],
List[TextInputSequence],
]
"""Represents a textual input for encoding. Can be either:
- A single sequence: :data:`~tokenizers.TextInputSequence`
- A pair of sequences:
- A :obj:`Tuple` of :data:`~tokenizers.TextInputSequence`
- Or a :obj:`List` of :data:`~tokenizers.TextInputSequence` of size 2
"""
PreTokenizedEncodeInput = Union[
PreTokenizedInputSequence,
Tuple[PreTokenizedInputSequence, PreTokenizedInputSequence],
List[PreTokenizedInputSequence],
]
"""Represents a pre-tokenized input for encoding. Can be either:
- A single sequence: :data:`~tokenizers.PreTokenizedInputSequence`
- A pair of sequences:
- A :obj:`Tuple` of :data:`~tokenizers.PreTokenizedInputSequence`
- Or a :obj:`List` of :data:`~tokenizers.PreTokenizedInputSequence` of size 2
"""
InputSequence = Union[TextInputSequence, PreTokenizedInputSequence]
"""Represents all the possible types of input sequences for encoding. Can be:
- When ``is_pretokenized=False``: :data:`~TextInputSequence`
- When ``is_pretokenized=True``: :data:`~PreTokenizedInputSequence`
"""
EncodeInput = Union[TextEncodeInput, PreTokenizedEncodeInput]
"""Represents all the possible types of input for encoding. Can be:
- When ``is_pretokenized=False``: :data:`~TextEncodeInput`
- When ``is_pretokenized=True``: :data:`~PreTokenizedEncodeInput`
"""
class OffsetReferential(Enum):
ORIGINAL = "original"
NORMALIZED = "normalized"
class OffsetType(Enum):
BYTE = "byte"
CHAR = "char"
class SplitDelimiterBehavior(Enum):
REMOVED = "removed"
ISOLATED = "isolated"
MERGED_WITH_PREVIOUS = "merged_with_previous"
MERGED_WITH_NEXT = "merged_with_next"
CONTIGUOUS = "contiguous"
from .tokenizers import (
AddedToken,
Encoding,
NormalizedString,
PreTokenizedString,
Regex,
Token,
Tokenizer,
decoders,
models,
normalizers,
pre_tokenizers,
processors,
trainers,
__version__,
)
from .implementations import (
BertWordPieceTokenizer,
ByteLevelBPETokenizer,
CharBPETokenizer,
SentencePieceBPETokenizer,
SentencePieceUnigramTokenizer,
)
| 0 |
hf_public_repos/tokenizers/bindings/python/py_src/tokenizers | hf_public_repos/tokenizers/bindings/python/py_src/tokenizers/normalizers/__init__.pyi | # Generated content DO NOT EDIT
class Normalizer:
"""
Base class for all normalizers
This class is not supposed to be instantiated directly. Instead, any implementation of a
Normalizer will return an instance of this class when instantiated.
"""
def normalize(self, normalized):
"""
Normalize a :class:`~tokenizers.NormalizedString` in-place
This method allows you to modify a :class:`~tokenizers.NormalizedString` to
keep track of the alignment information. If you just want to see the result
of the normalization on a raw string, you can use
:meth:`~tokenizers.normalizers.Normalizer.normalize_str`
Args:
normalized (:class:`~tokenizers.NormalizedString`):
The normalized string on which to apply this
:class:`~tokenizers.normalizers.Normalizer`
"""
pass
def normalize_str(self, sequence):
"""
Normalize the given string
This method provides a way to visualize the effect of a
:class:`~tokenizers.normalizers.Normalizer` but it does not keep track of the alignment
information. If you need to get/convert offsets, you can use
:meth:`~tokenizers.normalizers.Normalizer.normalize`
Args:
sequence (:obj:`str`):
A string to normalize
Returns:
:obj:`str`: A string after normalization
"""
pass
class BertNormalizer(Normalizer):
"""
BertNormalizer
Takes care of normalizing raw text before giving it to a Bert model.
This includes cleaning the text, handling accents, Chinese chars and lowercasing
Args:
clean_text (:obj:`bool`, `optional`, defaults to :obj:`True`):
Whether to clean the text, by removing any control characters
and replacing all whitespaces by the classic one.
handle_chinese_chars (:obj:`bool`, `optional`, defaults to :obj:`True`):
Whether to handle Chinese chars by putting spaces around them.
strip_accents (:obj:`bool`, `optional`):
Whether to strip all accents. If this option is not specified (ie == None),
then it will be determined by the value for `lowercase` (as in the original Bert).
lowercase (:obj:`bool`, `optional`, defaults to :obj:`True`):
Whether to lowercase.
"""
def __init__(self, clean_text=True, handle_chinese_chars=True, strip_accents=None, lowercase=True):
pass
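# A minimal usage sketch (the exact output depends on the options; with the defaults,
# accents are stripped and the text is lowercased):
#
#     normalizer = BertNormalizer()
#     normalizer.normalize_str("Héllo, WORLD!")   # expected: "hello, world!"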
def normalize(self, normalized):
"""
Normalize a :class:`~tokenizers.NormalizedString` in-place
This method allows you to modify a :class:`~tokenizers.NormalizedString` to
keep track of the alignment information. If you just want to see the result
of the normalization on a raw string, you can use
:meth:`~tokenizers.normalizers.Normalizer.normalize_str`
Args:
normalized (:class:`~tokenizers.NormalizedString`):
The normalized string on which to apply this
:class:`~tokenizers.normalizers.Normalizer`
"""
pass
def normalize_str(self, sequence):
"""
Normalize the given string
This method provides a way to visualize the effect of a
:class:`~tokenizers.normalizers.Normalizer` but it does not keep track of the alignment
information. If you need to get/convert offsets, you can use
:meth:`~tokenizers.normalizers.Normalizer.normalize`
Args:
sequence (:obj:`str`):
A string to normalize
Returns:
:obj:`str`: A string after normalization
"""
pass
class Lowercase(Normalizer):
"""
Lowercase Normalizer
"""
def __init__(self):
pass
def normalize(self, normalized):
"""
Normalize a :class:`~tokenizers.NormalizedString` in-place
This method allows you to modify a :class:`~tokenizers.NormalizedString` to
keep track of the alignment information. If you just want to see the result
of the normalization on a raw string, you can use
:meth:`~tokenizers.normalizers.Normalizer.normalize_str`
Args:
normalized (:class:`~tokenizers.NormalizedString`):
The normalized string on which to apply this
:class:`~tokenizers.normalizers.Normalizer`
"""
pass
def normalize_str(self, sequence):
"""
Normalize the given string
This method provides a way to visualize the effect of a
:class:`~tokenizers.normalizers.Normalizer` but it does not keep track of the alignment
information. If you need to get/convert offsets, you can use
:meth:`~tokenizers.normalizers.Normalizer.normalize`
Args:
sequence (:obj:`str`):
A string to normalize
Returns:
:obj:`str`: A string after normalization
"""
pass
class NFC(Normalizer):
"""
NFC Unicode Normalizer
"""
def __init__(self):
pass
def normalize(self, normalized):
"""
Normalize a :class:`~tokenizers.NormalizedString` in-place
This method allows you to modify a :class:`~tokenizers.NormalizedString` to
keep track of the alignment information. If you just want to see the result
of the normalization on a raw string, you can use
:meth:`~tokenizers.normalizers.Normalizer.normalize_str`
Args:
normalized (:class:`~tokenizers.NormalizedString`):
The normalized string on which to apply this
:class:`~tokenizers.normalizers.Normalizer`
"""
pass
def normalize_str(self, sequence):
"""
Normalize the given string
This method provides a way to visualize the effect of a
:class:`~tokenizers.normalizers.Normalizer` but it does not keep track of the alignment
information. If you need to get/convert offsets, you can use
:meth:`~tokenizers.normalizers.Normalizer.normalize`
Args:
sequence (:obj:`str`):
A string to normalize
Returns:
:obj:`str`: A string after normalization
"""
pass
class NFD(Normalizer):
"""
NFD Unicode Normalizer
"""
def __init__(self):
pass
def normalize(self, normalized):
"""
Normalize a :class:`~tokenizers.NormalizedString` in-place
This method allows you to modify a :class:`~tokenizers.NormalizedString` to
keep track of the alignment information. If you just want to see the result
of the normalization on a raw string, you can use
:meth:`~tokenizers.normalizers.Normalizer.normalize_str`
Args:
normalized (:class:`~tokenizers.NormalizedString`):
The normalized string on which to apply this
:class:`~tokenizers.normalizers.Normalizer`
"""
pass
def normalize_str(self, sequence):
"""
Normalize the given string
This method provides a way to visualize the effect of a
:class:`~tokenizers.normalizers.Normalizer` but it does not keep track of the alignment
information. If you need to get/convert offsets, you can use
:meth:`~tokenizers.normalizers.Normalizer.normalize`
Args:
sequence (:obj:`str`):
A string to normalize
Returns:
:obj:`str`: A string after normalization
"""
pass
class NFKC(Normalizer):
"""
NFKC Unicode Normalizer
"""
def __init__(self):
pass
def normalize(self, normalized):
"""
Normalize a :class:`~tokenizers.NormalizedString` in-place
This method allows you to modify a :class:`~tokenizers.NormalizedString` to
keep track of the alignment information. If you just want to see the result
of the normalization on a raw string, you can use
:meth:`~tokenizers.normalizers.Normalizer.normalize_str`
Args:
normalized (:class:`~tokenizers.NormalizedString`):
The normalized string on which to apply this
:class:`~tokenizers.normalizers.Normalizer`
"""
pass
def normalize_str(self, sequence):
"""
Normalize the given string
This method provides a way to visualize the effect of a
:class:`~tokenizers.normalizers.Normalizer` but it does not keep track of the alignment
information. If you need to get/convert offsets, you can use
:meth:`~tokenizers.normalizers.Normalizer.normalize`
Args:
sequence (:obj:`str`):
A string to normalize
Returns:
:obj:`str`: A string after normalization
"""
pass
class NFKD(Normalizer):
"""
NFKD Unicode Normalizer
"""
def __init__(self):
pass
def normalize(self, normalized):
"""
Normalize a :class:`~tokenizers.NormalizedString` in-place
This method allows you to modify a :class:`~tokenizers.NormalizedString` to
keep track of the alignment information. If you just want to see the result
of the normalization on a raw string, you can use
:meth:`~tokenizers.normalizers.Normalizer.normalize_str`
Args:
normalized (:class:`~tokenizers.NormalizedString`):
The normalized string on which to apply this
:class:`~tokenizers.normalizers.Normalizer`
"""
pass
def normalize_str(self, sequence):
"""
Normalize the given string
This method provides a way to visualize the effect of a
:class:`~tokenizers.normalizers.Normalizer` but it does not keep track of the alignment
information. If you need to get/convert offsets, you can use
:meth:`~tokenizers.normalizers.Normalizer.normalize`
Args:
sequence (:obj:`str`):
A string to normalize
Returns:
:obj:`str`: A string after normalization
"""
pass
class Nmt(Normalizer):
"""
Nmt normalizer
"""
def __init__(self):
pass
def normalize(self, normalized):
"""
Normalize a :class:`~tokenizers.NormalizedString` in-place
This method allows you to modify a :class:`~tokenizers.NormalizedString` to
keep track of the alignment information. If you just want to see the result
of the normalization on a raw string, you can use
:meth:`~tokenizers.normalizers.Normalizer.normalize_str`
Args:
normalized (:class:`~tokenizers.NormalizedString`):
The normalized string on which to apply this
:class:`~tokenizers.normalizers.Normalizer`
"""
pass
def normalize_str(self, sequence):
"""
Normalize the given string
This method provides a way to visualize the effect of a
:class:`~tokenizers.normalizers.Normalizer` but it does not keep track of the alignment
information. If you need to get/convert offsets, you can use
:meth:`~tokenizers.normalizers.Normalizer.normalize`
Args:
sequence (:obj:`str`):
A string to normalize
Returns:
:obj:`str`: A string after normalization
"""
pass
class Precompiled(Normalizer):
"""
Precompiled normalizer
Don't use it manually; it is used for compatibility with SentencePiece.
"""
def __init__(self, precompiled_charsmap):
pass
def normalize(self, normalized):
"""
Normalize a :class:`~tokenizers.NormalizedString` in-place
This method allows you to modify a :class:`~tokenizers.NormalizedString` to
keep track of the alignment information. If you just want to see the result
of the normalization on a raw string, you can use
:meth:`~tokenizers.normalizers.Normalizer.normalize_str`
Args:
normalized (:class:`~tokenizers.NormalizedString`):
The normalized string on which to apply this
:class:`~tokenizers.normalizers.Normalizer`
"""
pass
def normalize_str(self, sequence):
"""
Normalize the given string
This method provides a way to visualize the effect of a
:class:`~tokenizers.normalizers.Normalizer` but it does not keep track of the alignment
information. If you need to get/convert offsets, you can use
:meth:`~tokenizers.normalizers.Normalizer.normalize`
Args:
sequence (:obj:`str`):
A string to normalize
Returns:
:obj:`str`: A string after normalization
"""
pass
class Prepend(Normalizer):
"""
Prepend normalizer
"""
def __init__(self, prepend):
pass
def normalize(self, normalized):
"""
Normalize a :class:`~tokenizers.NormalizedString` in-place
This method allows you to modify a :class:`~tokenizers.NormalizedString` to
keep track of the alignment information. If you just want to see the result
of the normalization on a raw string, you can use
:meth:`~tokenizers.normalizers.Normalizer.normalize_str`
Args:
normalized (:class:`~tokenizers.NormalizedString`):
The normalized string on which to apply this
:class:`~tokenizers.normalizers.Normalizer`
"""
pass
def normalize_str(self, sequence):
"""
Normalize the given string
This method provides a way to visualize the effect of a
:class:`~tokenizers.normalizers.Normalizer` but it does not keep track of the alignment
information. If you need to get/convert offsets, you can use
:meth:`~tokenizers.normalizers.Normalizer.normalize`
Args:
sequence (:obj:`str`):
A string to normalize
Returns:
:obj:`str`: A string after normalization
"""
pass
class Replace(Normalizer):
"""
Replace normalizer
"""
def __init__(self, pattern, content):
pass
def normalize(self, normalized):
"""
Normalize a :class:`~tokenizers.NormalizedString` in-place
This method allows you to modify a :class:`~tokenizers.NormalizedString` to
keep track of the alignment information. If you just want to see the result
of the normalization on a raw string, you can use
:meth:`~tokenizers.normalizers.Normalizer.normalize_str`
Args:
normalized (:class:`~tokenizers.NormalizedString`):
The normalized string on which to apply this
:class:`~tokenizers.normalizers.Normalizer`
"""
pass
def normalize_str(self, sequence):
"""
Normalize the given string
This method provides a way to visualize the effect of a
:class:`~tokenizers.normalizers.Normalizer` but it does not keep track of the alignment
information. If you need to get/convert offsets, you can use
:meth:`~tokenizers.normalizers.Normalizer.normalize`
Args:
sequence (:obj:`str`):
A string to normalize
Returns:
:obj:`str`: A string after normalization
"""
pass
class Sequence(Normalizer):
"""
Allows concatenating multiple other Normalizer as a Sequence.
All the normalizers run in sequence in the given order
Args:
normalizers (:obj:`List[Normalizer]`):
A list of Normalizer to be run as a sequence
"""
def normalize(self, normalized):
"""
Normalize a :class:`~tokenizers.NormalizedString` in-place
This method allows you to modify a :class:`~tokenizers.NormalizedString` to
keep track of the alignment information. If you just want to see the result
of the normalization on a raw string, you can use
:meth:`~tokenizers.normalizers.Normalizer.normalize_str`
Args:
normalized (:class:`~tokenizers.NormalizedString`):
The normalized string on which to apply this
:class:`~tokenizers.normalizers.Normalizer`
"""
pass
def normalize_str(self, sequence):
"""
Normalize the given string
This method provides a way to visualize the effect of a
:class:`~tokenizers.normalizers.Normalizer` but it does not keep track of the alignment
information. If you need to get/convert offsets, you can use
:meth:`~tokenizers.normalizers.Normalizer.normalize`
Args:
sequence (:obj:`str`):
A string to normalize
Returns:
:obj:`str`: A string after normalization
"""
pass
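# A minimal usage sketch (the composition order is illustrative):
#
#     from tokenizers import normalizers
#     from tokenizers.normalizers import NFD, StripAccents, Lowercase
#
#     normalizer = normalizers.Sequence([NFD(), StripAccents(), Lowercase()])
#     normalizer.normalize_str("Héllò hôw are ü?")   # expected: "hello how are u?"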
class Strip(Normalizer):
"""
Strip normalizer
"""
def __init__(self, left=True, right=True):
pass
def normalize(self, normalized):
"""
Normalize a :class:`~tokenizers.NormalizedString` in-place
This method allows you to modify a :class:`~tokenizers.NormalizedString` to
keep track of the alignment information. If you just want to see the result
of the normalization on a raw string, you can use
:meth:`~tokenizers.normalizers.Normalizer.normalize_str`
Args:
normalized (:class:`~tokenizers.NormalizedString`):
The normalized string on which to apply this
:class:`~tokenizers.normalizers.Normalizer`
"""
pass
def normalize_str(self, sequence):
"""
Normalize the given string
This method provides a way to visualize the effect of a
:class:`~tokenizers.normalizers.Normalizer` but it does not keep track of the alignment
information. If you need to get/convert offsets, you can use
:meth:`~tokenizers.normalizers.Normalizer.normalize`
Args:
sequence (:obj:`str`):
A string to normalize
Returns:
:obj:`str`: A string after normalization
"""
pass
class StripAccents(Normalizer):
"""
StripAccents normalizer
"""
def __init__(self):
pass
def normalize(self, normalized):
"""
Normalize a :class:`~tokenizers.NormalizedString` in-place
This method allows you to modify a :class:`~tokenizers.NormalizedString` to
keep track of the alignment information. If you just want to see the result
of the normalization on a raw string, you can use
:meth:`~tokenizers.normalizers.Normalizer.normalize_str`
Args:
normalized (:class:`~tokenizers.NormalizedString`):
The normalized string on which to apply this
:class:`~tokenizers.normalizers.Normalizer`
"""
pass
def normalize_str(self, sequence):
"""
Normalize the given string
This method provides a way to visualize the effect of a
:class:`~tokenizers.normalizers.Normalizer` but it does not keep track of the alignment
information. If you need to get/convert offsets, you can use
:meth:`~tokenizers.normalizers.Normalizer.normalize`
Args:
sequence (:obj:`str`):
A string to normalize
Returns:
:obj:`str`: A string after normalization
"""
pass
| 0 |
hf_public_repos/tokenizers/bindings/python/py_src/tokenizers | hf_public_repos/tokenizers/bindings/python/py_src/tokenizers/normalizers/__init__.py | from .. import normalizers
Normalizer = normalizers.Normalizer
BertNormalizer = normalizers.BertNormalizer
NFD = normalizers.NFD
NFKD = normalizers.NFKD
NFC = normalizers.NFC
NFKC = normalizers.NFKC
Sequence = normalizers.Sequence
Lowercase = normalizers.Lowercase
Prepend = normalizers.Prepend
Strip = normalizers.Strip
StripAccents = normalizers.StripAccents
Nmt = normalizers.Nmt
Precompiled = normalizers.Precompiled
Replace = normalizers.Replace
NORMALIZERS = {"nfc": NFC, "nfd": NFD, "nfkc": NFKC, "nfkd": NFKD}
def unicode_normalizer_from_str(normalizer: str) -> Normalizer:
if normalizer not in NORMALIZERS:
raise ValueError(
"{} is not a known unicode normalizer. Available are {}".format(normalizer, NORMALIZERS.keys())
)
return NORMALIZERS[normalizer]()
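# A minimal usage sketch:
#
#     normalizer = unicode_normalizer_from_str("nfkc")
#     normalizer.normalize_str("①")   # NFKC folds compatibility characters, e.g. "①" -> "1"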
| 0 |
hf_public_repos/tokenizers/bindings/python/py_src/tokenizers | hf_public_repos/tokenizers/bindings/python/py_src/tokenizers/tools/visualizer-styles.css | .tokenized-text {
width:100%;
padding:2rem;
max-height: 400px;
overflow-y: auto;
box-sizing:border-box;
line-height:4rem; /* Lots of space between lines */
font-family: "Roboto Light", "Ubuntu Light", "Ubuntu", monospace;
box-shadow: 2px 2px 2px rgba(0,0,0,0.2);
background-color: rgba(0,0,0,0.01);
letter-spacing:2px; /* Give some extra separation between chars */
}
.non-token{
/* White space and other things the tokenizer ignores*/
white-space: pre;
letter-spacing:4px;
border-top:1px solid #A0A0A0; /* A gentle border on top and bottom makes tabs more obvious*/
border-bottom:1px solid #A0A0A0;
line-height: 1rem;
height: calc(100% - 2px);
}
.token {
white-space: pre;
position:relative;
color:black;
letter-spacing:2px;
}
.annotation{
white-space:nowrap; /* Important - ensures that annotations appear even if the annotated text wraps a line */
border-radius:4px;
position:relative;
width:fit-content;
}
.annotation:before {
/*The before holds the text and the after holds the background*/
z-index:1000; /* Make sure this is above the background */
content:attr(data-label); /* The annotations label is on a data attribute */
color:white;
position:absolute;
font-size:1rem;
text-align:center;
font-weight:bold;
top:1.75rem;
line-height:0;
left:0;
width:100%;
padding:0.5rem 0;
/* These make it so an annotation doesn't stretch beyond the annotated text if the label is longer*/
overflow: hidden;
white-space: nowrap;
text-overflow:ellipsis;
}
.annotation:after {
content:attr(data-label); /* The content defines the width of the annotation*/
position:absolute;
font-size:0.75rem;
text-align:center;
font-weight:bold;
text-overflow:ellipsis;
top:1.75rem;
line-height:0;
overflow: hidden;
white-space: nowrap;
left:0;
width:100%; /* 100% of the parent, which is the annotation whose width is the tokens inside it*/
padding:0.5rem 0;
/* Nasty hack below:
We set the annotations color in code because we don't know the colors at css time.
But you can't pass a color as a data attribute to get it into the pseudo element (this thing)
So to get around that, annotations have the color set on them with a style attribute and then we
can get the color with currentColor.
Annotations wrap tokens and tokens set the color back to black
*/
background-color: currentColor;
}
.annotation:hover::after, .annotation:hover::before{
/* When the user hovers over an annotation expand the label to display in full
*/
min-width: fit-content;
}
.annotation:hover{
/* Emphasize the annotation start end with a border on hover*/
border-color: currentColor;
border: 2px solid;
}
.special-token:not(:empty){
/*
A non-empty special token is like UNK (as opposed to CLS, which has no representation in the text)
*/
position:relative;
}
.special-token:empty::before{
/* Special tokens that don't have text are displayed as pseudo elements so we don't select them with the mouse*/
content:attr(data-stok);
background:#202020;
font-size:0.75rem;
color:white;
margin: 0 0.25rem;
padding: 0.25rem;
border-radius:4px
}
.special-token:not(:empty):before {
/* Special tokens that have text (UNK) are displayed above the actual text*/
content:attr(data-stok);
position:absolute;
bottom:1.75rem;
min-width:100%;
width:100%;
height:1rem;
line-height:1rem;
font-size:1rem;
text-align:center;
color:white;
font-weight:bold;
background:#202020;
border-radius:10%;
}
/*
We want to alternate the color of tokens, but we can't use nth child because tokens might be broken up by annotations
instead we apply even and odd class at generation time and color them that way
*/
.even-token{
background:#DCDCDC ;
border: 1px solid #DCDCDC;
}
.odd-token{
background:#A0A0A0;
border: 1px solid #A0A0A0;
}
.even-token.multi-token,.odd-token.multi-token{
background: repeating-linear-gradient(
45deg,
transparent,
transparent 1px,
#ccc 1px,
#ccc 1px
),
/* on "bottom" */
linear-gradient(
to bottom,
#FFB6C1,
#999
);
}
.multi-token:hover::after {
content:"This char has more than 1 token"; /* The content defines the width of the annotation*/
color:white;
background-color: black;
position:absolute;
font-size:0.75rem;
text-align:center;
font-weight:bold;
text-overflow:ellipsis;
top:1.75rem;
line-height:0;
overflow: hidden;
white-space: nowrap;
left:0;
width:fit-content; /* 100% of the parent, which is the annotation whose width is the tokens inside it*/
padding:0.5rem 0;
}
| 0 |
hf_public_repos/tokenizers/bindings/python/py_src/tokenizers | hf_public_repos/tokenizers/bindings/python/py_src/tokenizers/tools/__init__.py | from .visualizer import Annotation, EncodingVisualizer
| 0 |
hf_public_repos/tokenizers/bindings/python/py_src/tokenizers | hf_public_repos/tokenizers/bindings/python/py_src/tokenizers/tools/visualizer.py | import itertools
import os
import re
from string import Template
from typing import Any, Callable, Dict, List, NamedTuple, Optional, Tuple
from tokenizers import Encoding, Tokenizer
dirname = os.path.dirname(__file__)
css_filename = os.path.join(dirname, "visualizer-styles.css")
with open(css_filename) as f:
css = f.read()
class Annotation:
start: int
end: int
label: str
def __init__(self, start: int, end: int, label: str):
self.start = start
self.end = end
self.label = label
AnnotationList = List[Annotation]
PartialIntList = List[Optional[int]]
class CharStateKey(NamedTuple):
token_ix: Optional[int]
anno_ix: Optional[int]
class CharState:
char_ix: Optional[int]
def __init__(self, char_ix):
self.char_ix = char_ix
self.anno_ix: Optional[int] = None
self.tokens: List[int] = []
@property
def token_ix(self):
return self.tokens[0] if len(self.tokens) > 0 else None
@property
def is_multitoken(self):
"""
BPE tokenizers can output more than one token for a char
"""
return len(self.tokens) > 1
def partition_key(self) -> CharStateKey:
return CharStateKey(
token_ix=self.token_ix,
anno_ix=self.anno_ix,
)
class Aligned:
pass
class EncodingVisualizer:
"""
Build an EncodingVisualizer
Args:
tokenizer (:class:`~tokenizers.Tokenizer`):
A tokenizer instance
default_to_notebook (:obj:`bool`):
Whether to render html output in a notebook by default
annotation_converter (:obj:`Callable`, `optional`):
An optional (lambda) function that takes an annotation in any format and returns
an Annotation object
"""
unk_token_regex = re.compile(r"(.{1}\b)?(unk|oov)(\b.{1})?", flags=re.IGNORECASE)  # raw string so \b is a word boundary
def __init__(
self,
tokenizer: Tokenizer,
default_to_notebook: bool = True,
annotation_converter: Optional[Callable[[Any], Annotation]] = None,
):
if default_to_notebook:
try:
from IPython.core.display import HTML, display
except ImportError as e:
raise Exception(
"""We couldn't import IPython utils for html display.
Are you running in a notebook?
You can also pass `default_to_notebook=False` to get back raw HTML
"""
)
self.tokenizer = tokenizer
self.default_to_notebook = default_to_notebook
self.annotation_converter = annotation_converter
pass
def __call__(
self,
text: str,
annotations: AnnotationList = [],
default_to_notebook: Optional[bool] = None,
) -> Optional[str]:
"""
Build a visualization of the given text
Args:
text (:obj:`str`):
The text to tokenize
annotations (:obj:`List[Annotation]`, `optional`):
An optional list of annotations of the text. They can either be an annotation class
or anything else if you instantiated the visualizer with a converter function
default_to_notebook (:obj:`bool`, `optional`, defaults to `False`):
If True, will render the html in a notebook. Otherwise returns an html string.
Returns:
The HTML string if default_to_notebook is False, otherwise (default) returns None and
renders the HTML in the notebook
"""
final_default_to_notebook = self.default_to_notebook
if default_to_notebook is not None:
final_default_to_notebook = default_to_notebook
if final_default_to_notebook:
try:
from IPython.core.display import HTML, display
except ImportError as e:
raise Exception(
"""We couldn't import IPython utils for html display.
Are you running in a notebook?"""
)
if self.annotation_converter is not None:
annotations = list(map(self.annotation_converter, annotations))
encoding = self.tokenizer.encode(text)
html = EncodingVisualizer.__make_html(text, encoding, annotations)
if final_default_to_notebook:
display(HTML(html))
else:
return html
@staticmethod
def calculate_label_colors(annotations: AnnotationList) -> Dict[str, str]:
"""
Generates a color palette for all the labels in a given set of annotations
Args:
annotations (:obj:`Annotation`):
A list of annotations
Returns:
:obj:`dict`: A dictionary mapping labels to colors in HSL format
"""
if len(annotations) == 0:
return {}
labels = set(map(lambda x: x.label, annotations))
num_labels = len(labels)
h_step = int(255 / num_labels)
if h_step < 20:
h_step = 20
s = 32
l = 64
h = 10
colors = {}
for label in sorted(labels): # sort so we always get the same colors for a given set of labels
colors[label] = f"hsl({h},{s}%,{l}%"
h += h_step
return colors
@staticmethod
def consecutive_chars_to_html(
consecutive_chars_list: List[CharState],
text: str,
encoding: Encoding,
):
"""
Converts a list of "consecutive chars" into a single HTML element.
Chars are consecutive if they fall under the same word, token and annotation.
The CharState class has a "partition_key" method that makes it easy to
compare if two chars are consecutive.
Args:
consecutive_chars_list (:obj:`List[CharState]`):
A list of CharStates that have been grouped together
text (:obj:`str`):
The original text being processed
encoding (:class:`~tokenizers.Encoding`):
The encoding returned from the tokenizer
Returns:
:obj:`str`: The HTML span for a set of consecutive chars
"""
first = consecutive_chars_list[0]
if first.char_ix is None:
# it's a special token
stoken = encoding.tokens[first.token_ix]
# special tokens are represented as empty spans. We use the data attribute and css
# magic to display it
return f'<span class="special-token" data-stoken={stoken}></span>'
# We're not in a special token so this group has a start and end.
last = consecutive_chars_list[-1]
start = first.char_ix
end = last.char_ix + 1
span_text = text[start:end]
css_classes = [] # What css classes will we apply on the resulting span
data_items = {} # What data attributes will we apply on the result span
if first.token_ix is not None:
# We can either be in a token or not (e.g. in white space)
css_classes.append("token")
if first.is_multitoken:
css_classes.append("multi-token")
if first.token_ix % 2:
# We use this to color alternating tokens.
# A token might be split by an annotation that ends in the middle of it, so this
# lets us visually indicate a consecutive token despite its possible splitting in
# the html markup
css_classes.append("odd-token")
else:
# Like above, but a different color so we can see the tokens alternate
css_classes.append("even-token")
if EncodingVisualizer.unk_token_regex.search(encoding.tokens[first.token_ix]) is not None:
# This is a special token that is in the text. probably UNK
css_classes.append("special-token")
# TODO is this the right name for the data attribute ?
data_items["stok"] = encoding.tokens[first.token_ix]
else:
# In this case we are looking at a group/single char that is not tokenized.
# e.g. white space
css_classes.append("non-token")
css = f'''class="{' '.join(css_classes)}"'''
data = ""
for key, val in data_items.items():
data += f' data-{key}="{val}"'
return f"<span {css} {data} >{span_text}</span>"
@staticmethod
def __make_html(text: str, encoding: Encoding, annotations: AnnotationList) -> str:
char_states = EncodingVisualizer.__make_char_states(text, encoding, annotations)
current_consecutive_chars = [char_states[0]]
prev_anno_ix = char_states[0].anno_ix
spans = []
label_colors_dict = EncodingVisualizer.calculate_label_colors(annotations)
cur_anno_ix = char_states[0].anno_ix
if cur_anno_ix is not None:
# If we started in an annotation make a span for it
anno = annotations[cur_anno_ix]
label = anno.label
color = label_colors_dict[label]
spans.append(f'<span class="annotation" style="color:{color}" data-label="{label}">')
for cs in char_states[1:]:
cur_anno_ix = cs.anno_ix
if cur_anno_ix != prev_anno_ix:
# If we've transitioned in or out of an annotation
spans.append(
# Create a span from the current consecutive characters
EncodingVisualizer.consecutive_chars_to_html(
current_consecutive_chars,
text=text,
encoding=encoding,
)
)
current_consecutive_chars = [cs]
if prev_anno_ix is not None:
# if we transitioned out of an annotation, close its span
spans.append("</span>")
if cur_anno_ix is not None:
# If we entered a new annotation make a span for it
anno = annotations[cur_anno_ix]
label = anno.label
color = label_colors_dict[label]
spans.append(f'<span class="annotation" style="color:{color}" data-label="{label}">')
prev_anno_ix = cur_anno_ix
if cs.partition_key() == current_consecutive_chars[0].partition_key():
# If the current character is in the same "group" as the previous one
current_consecutive_chars.append(cs)
else:
# Otherwise we make a span for the previous group
spans.append(
EncodingVisualizer.consecutive_chars_to_html(
current_consecutive_chars,
text=text,
encoding=encoding,
)
)
# And reset the consecutive_char_list to form a new group
current_consecutive_chars = [cs]
# All that's left is to fill out the final span
# TODO I think there is an edge case here where an annotation's span might not close
spans.append(
EncodingVisualizer.consecutive_chars_to_html(
current_consecutive_chars,
text=text,
encoding=encoding,
)
)
res = HTMLBody(spans) # Send the list of spans to the body of our html
return res
@staticmethod
def __make_anno_map(text: str, annotations: AnnotationList) -> PartialIntList:
"""
Args:
text (:obj:`str`):
The raw text we want to align to
annotations (:obj:`AnnotationList`):
A (possibly empty) list of annotations
Returns:
A list of length len(text) whose entry at index i is None if there is no annotation on
character i, or k, the index of the annotation that covers character i, where k is
an index into the list of annotations
"""
annotation_map = [None] * len(text)
for anno_ix, a in enumerate(annotations):
for i in range(a.start, a.end):
annotation_map[i] = anno_ix
return annotation_map
@staticmethod
def __make_char_states(text: str, encoding: Encoding, annotations: AnnotationList) -> List[CharState]:
"""
For each character in the original text, we emit a tuple representing its "state":
* which token_ix it corresponds to
* which word_ix it corresponds to
* which annotation_ix it corresponds to
Args:
text (:obj:`str`):
The raw text we want to align to
annotations (:obj:`List[Annotation]`):
A (possibly empty) list of annotations
encoding: (:class:`~tokenizers.Encoding`):
The encoding returned from the tokenizer
Returns:
:obj:`List[CharState]`: A list of CharStates, indicating for each char in the text what
it's state is
"""
annotation_map = EncodingVisualizer.__make_anno_map(text, annotations)
# Todo make this a dataclass or named tuple
char_states: List[CharState] = [CharState(char_ix) for char_ix in range(len(text))]
for token_ix, token in enumerate(encoding.tokens):
offsets = encoding.token_to_chars(token_ix)
if offsets is not None:
start, end = offsets
for i in range(start, end):
char_states[i].tokens.append(token_ix)
for char_ix, anno_ix in enumerate(annotation_map):
char_states[char_ix].anno_ix = anno_ix
return char_states
def HTMLBody(children: List[str], css_styles=css) -> str:
"""
Generates the full html with css from a list of html spans
Args:
children (:obj:`List[str]`):
A list of strings, assumed to be html elements
css_styles (:obj:`str`, `optional`):
Optional alternative implementation of the css
Returns:
:obj:`str`: An HTML string with style markup
"""
children_text = "".join(children)
return f"""
<html>
<head>
<style>
{css_styles}
</style>
</head>
<body>
<div class="tokenized-text" dir=auto>
{children_text}
</div>
</body>
</html>
"""
| 0 |
hf_public_repos/tokenizers/bindings/python/py_src/tokenizers | hf_public_repos/tokenizers/bindings/python/py_src/tokenizers/models/__init__.pyi | # Generated content DO NOT EDIT
class Model:
"""
Base class for all models
The model represents the actual tokenization algorithm. This is the part that
will contain and manage the learned vocabulary.
This class cannot be constructed directly. Please use one of the concrete models.
"""
def get_trainer(self):
"""
Get the associated :class:`~tokenizers.trainers.Trainer`
Retrieve the :class:`~tokenizers.trainers.Trainer` associated to this
:class:`~tokenizers.models.Model`.
Returns:
:class:`~tokenizers.trainers.Trainer`: The Trainer used to train this model
"""
pass
def id_to_token(self, id):
"""
Get the token associated to an ID
Args:
id (:obj:`int`):
An ID to convert to a token
Returns:
:obj:`str`: The token associated to the ID
"""
pass
def save(self, folder, prefix):
"""
Save the current model
Save the current model in the given folder, using the given prefix for the various
files that will get created.
Any file with the same name that already exists in this folder will be overwritten.
Args:
folder (:obj:`str`):
The path to the target folder in which to save the various files
prefix (:obj:`str`, `optional`):
An optional prefix, used to prefix each file name
Returns:
:obj:`List[str]`: The list of saved files
"""
pass
def token_to_id(self, tokens):
"""
Get the ID associated to a token
Args:
token (:obj:`str`):
A token to convert to an ID
Returns:
:obj:`int`: The ID associated to the token
"""
pass
def tokenize(self, sequence):
"""
Tokenize a sequence
Args:
sequence (:obj:`str`):
A sequence to tokenize
Returns:
A :obj:`List` of :class:`~tokenizers.Token`: The generated tokens
"""
pass
class BPE(Model):
"""
An implementation of the BPE (Byte-Pair Encoding) algorithm
Args:
vocab (:obj:`Dict[str, int]`, `optional`):
A dictionary of string keys and their ids :obj:`{"am": 0,...}`
merges (:obj:`List[Tuple[str, str]]`, `optional`):
A list of pairs of tokens (:obj:`Tuple[str, str]`) :obj:`[("a", "b"),...]`
cache_capacity (:obj:`int`, `optional`):
The number of words that the BPE cache can contain. The cache allows
to speed-up the process by keeping the result of the merge operations
for a number of words.
dropout (:obj:`float`, `optional`):
A float between 0 and 1 that represents the BPE dropout to use.
unk_token (:obj:`str`, `optional`):
The unknown token to be used by the model.
continuing_subword_prefix (:obj:`str`, `optional`):
The prefix to attach to subword units that don't represent a beginning of word.
end_of_word_suffix (:obj:`str`, `optional`):
The suffix to attach to subword units that represent an end of word.
fuse_unk (:obj:`bool`, `optional`):
Whether to fuse any subsequent unknown tokens into a single one
byte_fallback (:obj:`bool`, `optional`):
Whether to use spm byte-fallback trick (defaults to False)
"""
def __init__(
self,
vocab=None,
merges=None,
cache_capacity=None,
dropout=None,
unk_token=None,
continuing_subword_prefix=None,
end_of_word_suffix=None,
fuse_unk=None,
byte_fallback=False,
):
pass
@staticmethod
def from_file(cls, vocab, merges, **kwargs):
"""
Instantiate a BPE model from the given files.
This method is roughly equivalent to doing::
vocab, merges = BPE.read_file(vocab_filename, merges_filename)
bpe = BPE(vocab, merges)
If you don't need to keep the :obj:`vocab, merges` values lying around,
this method is more optimized than manually calling
:meth:`~tokenizers.models.BPE.read_file` to initialize a :class:`~tokenizers.models.BPE`
Args:
vocab (:obj:`str`):
The path to a :obj:`vocab.json` file
merges (:obj:`str`):
The path to a :obj:`merges.txt` file
Returns:
:class:`~tokenizers.models.BPE`: An instance of BPE loaded from these files
"""
pass
def get_trainer(self):
"""
Get the associated :class:`~tokenizers.trainers.Trainer`
Retrieve the :class:`~tokenizers.trainers.Trainer` associated to this
:class:`~tokenizers.models.Model`.
Returns:
:class:`~tokenizers.trainers.Trainer`: The Trainer used to train this model
"""
pass
def id_to_token(self, id):
"""
Get the token associated to an ID
Args:
id (:obj:`int`):
An ID to convert to a token
Returns:
:obj:`str`: The token associated to the ID
"""
pass
@staticmethod
def read_file(vocab, merges):
"""
Read a :obj:`vocab.json` and a :obj:`merges.txt` file
This method provides a way to read and parse the content of these files,
returning the relevant data structures. If you want to instantiate some BPE models
from memory, this method gives you the expected input from the standard files.
Args:
vocab (:obj:`str`):
The path to a :obj:`vocab.json` file
merges (:obj:`str`):
The path to a :obj:`merges.txt` file
Returns:
A :obj:`Tuple` with the vocab and the merges:
The vocabulary and merges loaded into memory
"""
pass
def save(self, folder, prefix):
"""
Save the current model
Save the current model in the given folder, using the given prefix for the various
files that will get created.
Any file with the same name that already exists in this folder will be overwritten.
Args:
folder (:obj:`str`):
The path to the target folder in which to save the various files
prefix (:obj:`str`, `optional`):
An optional prefix, used to prefix each file name
Returns:
:obj:`List[str]`: The list of saved files
"""
pass
def token_to_id(self, token):
"""
Get the ID associated to a token
Args:
token (:obj:`str`):
A token to convert to an ID
Returns:
:obj:`int`: The ID associated to the token
"""
pass
def tokenize(self, sequence):
"""
Tokenize a sequence
Args:
sequence (:obj:`str`):
A sequence to tokenize
Returns:
A :obj:`List` of :class:`~tokenizers.Token`: The generated tokens
"""
pass
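# Illustrative usage sketch (added for documentation; not part of the generated stub).
# The tiny vocab and merges below are made up. Run it in a regular script:
#
#   from tokenizers.models import BPE
#
#   vocab = {"[UNK]": 0, "a": 1, "b": 2, "ab": 3}
#   merges = [("a", "b")]
#   bpe = BPE(vocab, merges, unk_token="[UNK]")
#   bpe.tokenize("ab")  # expected: a single "ab" token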
class Unigram(Model):
"""
An implementation of the Unigram algorithm
Args:
vocab (:obj:`List[Tuple[str, float]]`, `optional`):
A list of vocabulary items and their relative score [("am", -0.2442),...]
unk_id (:obj:`int`, `optional`):
The index of the unknown token in the vocabulary.
byte_fallback (:obj:`bool`, `optional`):
Whether to use the byte-fallback trick for characters missing from the vocabulary (defaults to False)
"""
def __init__(self, vocab, unk_id, byte_fallback):
pass
def get_trainer(self):
"""
Get the associated :class:`~tokenizers.trainers.Trainer`
Retrieve the :class:`~tokenizers.trainers.Trainer` associated to this
:class:`~tokenizers.models.Model`.
Returns:
:class:`~tokenizers.trainers.Trainer`: The Trainer used to train this model
"""
pass
def id_to_token(self, id):
"""
Get the token associated to an ID
Args:
id (:obj:`int`):
An ID to convert to a token
Returns:
:obj:`str`: The token associated to the ID
"""
pass
def save(self, folder, prefix):
"""
Save the current model
Save the current model in the given folder, using the given prefix for the various
files that will get created.
Any file with the same name that already exists in this folder will be overwritten.
Args:
folder (:obj:`str`):
The path to the target folder in which to save the various files
prefix (:obj:`str`, `optional`):
An optional prefix, used to prefix each file name
Returns:
:obj:`List[str]`: The list of saved files
"""
pass
def token_to_id(self, token):
"""
Get the ID associated to a token
Args:
token (:obj:`str`):
A token to convert to an ID
Returns:
:obj:`int`: The ID associated to the token
"""
pass
def tokenize(self, sequence):
"""
Tokenize a sequence
Args:
sequence (:obj:`str`):
A sequence to tokenize
Returns:
A :obj:`List` of :class:`~tokenizers.Token`: The generated tokens
"""
pass
class WordLevel(Model):
"""
An implementation of the WordLevel algorithm
The simplest tokenizer model, based on mapping tokens to their corresponding id.
Args:
vocab (:obj:`Dict[str, int]`, `optional`):
A dictionary of string keys and their ids :obj:`{"am": 0,...}`
unk_token (:obj:`str`, `optional`):
The unknown token to be used by the model.
"""
def __init__(self, vocab, unk_token):
pass
@staticmethod
def from_file(vocab, unk_token):
"""
Instantiate a WordLevel model from the given file
This method is roughly equivalent to doing::
vocab = WordLevel.read_file(vocab_filename)
wordlevel = WordLevel(vocab)
If you don't need to keep the :obj:`vocab` values lying around, this method is
more optimized than manually calling :meth:`~tokenizers.models.WordLevel.read_file` to
initialize a :class:`~tokenizers.models.WordLevel`
Args:
vocab (:obj:`str`):
The path to a :obj:`vocab.json` file
Returns:
:class:`~tokenizers.models.WordLevel`: An instance of WordLevel loaded from file
"""
pass
def get_trainer(self):
"""
Get the associated :class:`~tokenizers.trainers.Trainer`
Retrieve the :class:`~tokenizers.trainers.Trainer` associated to this
:class:`~tokenizers.models.Model`.
Returns:
:class:`~tokenizers.trainers.Trainer`: The Trainer used to train this model
"""
pass
def id_to_token(self, id):
"""
Get the token associated to an ID
Args:
id (:obj:`int`):
An ID to convert to a token
Returns:
:obj:`str`: The token associated to the ID
"""
pass
@staticmethod
def read_file(vocab):
"""
Read a :obj:`vocab.json`
This method provides a way to read and parse the content of a vocabulary file,
returning the relevant data structures. If you want to instantiate some WordLevel models
from memory, this method gives you the expected input from the standard files.
Args:
vocab (:obj:`str`):
The path to a :obj:`vocab.json` file
Returns:
:obj:`Dict[str, int]`: The vocabulary as a :obj:`dict`
"""
pass
def save(self, folder, prefix):
"""
Save the current model
Save the current model in the given folder, using the given prefix for the various
files that will get created.
Any file with the same name that already exists in this folder will be overwritten.
Args:
folder (:obj:`str`):
The path to the target folder in which to save the various files
prefix (:obj:`str`, `optional`):
An optional prefix, used to prefix each file name
Returns:
:obj:`List[str]`: The list of saved files
"""
pass
def token_to_id(self, token):
"""
Get the ID associated to a token
Args:
token (:obj:`str`):
A token to convert to an ID
Returns:
:obj:`int`: The ID associated to the token
"""
pass
def tokenize(self, sequence):
"""
Tokenize a sequence
Args:
sequence (:obj:`str`):
A sequence to tokenize
Returns:
A :obj:`List` of :class:`~tokenizers.Token`: The generated tokens
"""
pass
class WordPiece(Model):
"""
An implementation of the WordPiece algorithm
Args:
vocab (:obj:`Dict[str, int]`, `optional`):
A dictionary of string keys and their ids :obj:`{"am": 0,...}`
unk_token (:obj:`str`, `optional`):
The unknown token to be used by the model.
max_input_chars_per_word (:obj:`int`, `optional`):
The maximum number of characters to allow in a single word.
"""
def __init__(self, vocab, unk_token, max_input_chars_per_word):
pass
@staticmethod
def from_file(vocab, **kwargs):
"""
Instantiate a WordPiece model from the given file
This method is roughly equivalent to doing::
vocab = WordPiece.read_file(vocab_filename)
wordpiece = WordPiece(vocab)
If you don't need to keep the :obj:`vocab` values lying around, this method is
more optimized than manually calling :meth:`~tokenizers.models.WordPiece.read_file` to
initialize a :class:`~tokenizers.models.WordPiece`
Args:
vocab (:obj:`str`):
The path to a :obj:`vocab.txt` file
Returns:
:class:`~tokenizers.models.WordPiece`: An instance of WordPiece loaded from file
"""
pass
def get_trainer(self):
"""
Get the associated :class:`~tokenizers.trainers.Trainer`
Retrieve the :class:`~tokenizers.trainers.Trainer` associated to this
:class:`~tokenizers.models.Model`.
Returns:
:class:`~tokenizers.trainers.Trainer`: The Trainer used to train this model
"""
pass
def id_to_token(self, id):
"""
Get the token associated to an ID
Args:
id (:obj:`int`):
An ID to convert to a token
Returns:
:obj:`str`: The token associated to the ID
"""
pass
@staticmethod
def read_file(vocab):
"""
Read a :obj:`vocab.txt` file
This method provides a way to read and parse the content of a standard `vocab.txt`
file as used by the WordPiece Model, returning the relevant data structures. If you
want to instantiate some WordPiece models from memory, this method gives you the
expected input from the standard files.
Args:
vocab (:obj:`str`):
The path to a :obj:`vocab.txt` file
Returns:
:obj:`Dict[str, int]`: The vocabulary as a :obj:`dict`
"""
pass
def save(self, folder, prefix):
"""
Save the current model
Save the current model in the given folder, using the given prefix for the various
files that will get created.
Any file with the same name that already exists in this folder will be overwritten.
Args:
folder (:obj:`str`):
The path to the target folder in which to save the various files
prefix (:obj:`str`, `optional`):
An optional prefix, used to prefix each file name
Returns:
:obj:`List[str]`: The list of saved files
"""
pass
def token_to_id(self, token):
"""
Get the ID associated to a token
Args:
token (:obj:`str`):
A token to convert to an ID
Returns:
:obj:`int`: The ID associated to the token
"""
pass
def tokenize(self, sequence):
"""
Tokenize a sequence
Args:
sequence (:obj:`str`):
A sequence to tokenize
Returns:
A :obj:`List` of :class:`~tokenizers.Token`: The generated tokens
"""
pass
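# Illustrative usage sketch (added for documentation; not part of the generated stub).
# The vocabulary below is made up. Run it in a regular script:
#
#   from tokenizers.models import WordPiece
#
#   vocab = {"[UNK]": 0, "un": 1, "##aff": 2, "##able": 3}
#   wordpiece = WordPiece(vocab, unk_token="[UNK]")
#   wordpiece.tokenize("unaffable")  # expected pieces: "un", "##aff", "##able"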
| 0 |
hf_public_repos/tokenizers/bindings/python/py_src/tokenizers | hf_public_repos/tokenizers/bindings/python/py_src/tokenizers/models/__init__.py | # Generated content DO NOT EDIT
from .. import models
Model = models.Model
BPE = models.BPE
Unigram = models.Unigram
WordLevel = models.WordLevel
WordPiece = models.WordPiece
| 0 |
hf_public_repos/tokenizers/bindings/python/py_src/tokenizers | hf_public_repos/tokenizers/bindings/python/py_src/tokenizers/trainers/__init__.pyi | # Generated content DO NOT EDIT
class Trainer:
"""
Base class for all trainers
This class is not supposed to be instantiated directly. Instead, any implementation of a
Trainer will return an instance of this class when instantiated.
"""
class BpeTrainer(Trainer):
"""
Trainer capable of training a BPE model
Args:
vocab_size (:obj:`int`, `optional`):
The size of the final vocabulary, including all tokens and alphabet.
min_frequency (:obj:`int`, `optional`):
The minimum frequency a pair should have in order to be merged.
show_progress (:obj:`bool`, `optional`):
Whether to show progress bars while training.
special_tokens (:obj:`List[Union[str, AddedToken]]`, `optional`):
A list of special tokens the model should know of.
limit_alphabet (:obj:`int`, `optional`):
The maximum different characters to keep in the alphabet.
initial_alphabet (:obj:`List[str]`, `optional`):
A list of characters to include in the initial alphabet, even
if not seen in the training dataset.
If the strings contain more than one character, only the first one
is kept.
continuing_subword_prefix (:obj:`str`, `optional`):
A prefix to be used for every subword that is not a beginning-of-word.
end_of_word_suffix (:obj:`str`, `optional`):
A suffix to be used for every subword that is an end-of-word.
max_token_length (:obj:`int`, `optional`):
Prevents creating tokens longer than the specified size.
This can help reduce the pollution of your vocabulary with
highly repetitive tokens like `======` for wikipedia
"""
class UnigramTrainer(Trainer):
"""
Trainer capable of training a Unigram model
Args:
vocab_size (:obj:`int`):
The size of the final vocabulary, including all tokens and alphabet.
show_progress (:obj:`bool`):
Whether to show progress bars while training.
special_tokens (:obj:`List[Union[str, AddedToken]]`):
A list of special tokens the model should know of.
initial_alphabet (:obj:`List[str]`):
A list of characters to include in the initial alphabet, even
if not seen in the training dataset.
If the strings contain more than one character, only the first one
is kept.
shrinking_factor (:obj:`float`):
The shrinking factor used at each step of the training to prune the
vocabulary.
unk_token (:obj:`str`):
The token used for out-of-vocabulary tokens.
max_piece_length (:obj:`int`):
The maximum length of a given token.
n_sub_iterations (:obj:`int`):
The number of iterations of the EM algorithm to perform before
pruning the vocabulary.
"""
def __init__(
self,
vocab_size=8000,
show_progress=True,
special_tokens=[],
shrinking_factor=0.75,
unk_token=None,
max_piece_length=16,
n_sub_iterations=2,
):
pass
class WordLevelTrainer(Trainer):
"""
Trainer capable of training a WordLevel model
Args:
vocab_size (:obj:`int`, `optional`):
The size of the final vocabulary, including all tokens and alphabet.
min_frequency (:obj:`int`, `optional`):
The minimum frequency a pair should have in order to be merged.
show_progress (:obj:`bool`, `optional`):
Whether to show progress bars while training.
special_tokens (:obj:`List[Union[str, AddedToken]]`):
A list of special tokens the model should know of.
"""
class WordPieceTrainer(Trainer):
"""
Trainer capable of training a WordPiece model
Args:
vocab_size (:obj:`int`, `optional`):
The size of the final vocabulary, including all tokens and alphabet.
min_frequency (:obj:`int`, `optional`):
The minimum frequency a pair should have in order to be merged.
show_progress (:obj:`bool`, `optional`):
Whether to show progress bars while training.
special_tokens (:obj:`List[Union[str, AddedToken]]`, `optional`):
A list of special tokens the model should know of.
limit_alphabet (:obj:`int`, `optional`):
The maximum different characters to keep in the alphabet.
initial_alphabet (:obj:`List[str]`, `optional`):
A list of characters to include in the initial alphabet, even
if not seen in the training dataset.
If the strings contain more than one character, only the first one
is kept.
continuing_subword_prefix (:obj:`str`, `optional`):
A prefix to be used for every subword that is not a beginning-of-word.
end_of_word_suffix (:obj:`str`, `optional`):
A suffix to be used for every subword that is an end-of-word.
"""
def __init__(
self,
vocab_size=30000,
min_frequency=0,
show_progress=True,
special_tokens=[],
limit_alphabet=None,
initial_alphabet=[],
continuing_subword_prefix="##",
end_of_word_suffix=None,
):
pass
| 0 |
hf_public_repos/tokenizers/bindings/python/py_src/tokenizers | hf_public_repos/tokenizers/bindings/python/py_src/tokenizers/trainers/__init__.py | # Generated content DO NOT EDIT
from .. import trainers
Trainer = trainers.Trainer
BpeTrainer = trainers.BpeTrainer
UnigramTrainer = trainers.UnigramTrainer
WordLevelTrainer = trainers.WordLevelTrainer
WordPieceTrainer = trainers.WordPieceTrainer
| 0 |
hf_public_repos/tokenizers/bindings/python/py_src/tokenizers | hf_public_repos/tokenizers/bindings/python/py_src/tokenizers/processors/__init__.pyi | # Generated content DO NOT EDIT
class PostProcessor:
"""
Base class for all post-processors
This class is not supposed to be instantiated directly. Instead, any implementation of
a PostProcessor will return an instance of this class when instantiated.
"""
def num_special_tokens_to_add(self, is_pair):
"""
Return the number of special tokens that would be added for single/pair sentences.
Args:
is_pair (:obj:`bool`):
Whether the input would be a pair of sequences
Returns:
:obj:`int`: The number of tokens to add
"""
pass
def process(self, encoding, pair=None, add_special_tokens=True):
"""
Post-process the given encodings, generating the final one
Args:
encoding (:class:`~tokenizers.Encoding`):
The encoding for the first sequence
pair (:class:`~tokenizers.Encoding`, `optional`):
The encoding for the pair sequence
add_special_tokens (:obj:`bool`):
Whether to add the special tokens
Return:
:class:`~tokenizers.Encoding`: The final encoding
"""
pass
class BertProcessing(PostProcessor):
"""
This post-processor takes care of adding the special tokens needed by
a Bert model:
- a SEP token
- a CLS token
Args:
sep (:obj:`Tuple[str, int]`):
A tuple with the string representation of the SEP token, and its id
cls (:obj:`Tuple[str, int]`):
A tuple with the string representation of the CLS token, and its id
"""
def __init__(self, sep, cls):
pass
def num_special_tokens_to_add(self, is_pair):
"""
Return the number of special tokens that would be added for single/pair sentences.
Args:
is_pair (:obj:`bool`):
Whether the input would be a pair of sequences
Returns:
:obj:`int`: The number of tokens to add
"""
pass
def process(self, encoding, pair=None, add_special_tokens=True):
"""
Post-process the given encodings, generating the final one
Args:
encoding (:class:`~tokenizers.Encoding`):
The encoding for the first sequence
pair (:class:`~tokenizers.Encoding`, `optional`):
The encoding for the pair sequence
add_special_tokens (:obj:`bool`):
Whether to add the special tokens
Return:
:class:`~tokenizers.Encoding`: The final encoding
"""
pass
class ByteLevel(PostProcessor):
"""
This post-processor takes care of trimming the offsets.
By default, the ByteLevel BPE might include whitespaces in the produced tokens. If you don't
want the offsets to include these whitespaces, then this PostProcessor must be used.
Args:
trim_offsets (:obj:`bool`):
Whether to trim the whitespaces from the produced offsets.
"""
def __init__(self, trim_offsets=True):
pass
def num_special_tokens_to_add(self, is_pair):
"""
Return the number of special tokens that would be added for single/pair sentences.
Args:
is_pair (:obj:`bool`):
Whether the input would be a pair of sequences
Returns:
:obj:`int`: The number of tokens to add
"""
pass
def process(self, encoding, pair=None, add_special_tokens=True):
"""
Post-process the given encodings, generating the final one
Args:
encoding (:class:`~tokenizers.Encoding`):
The encoding for the first sequence
pair (:class:`~tokenizers.Encoding`, `optional`):
The encoding for the pair sequence
add_special_tokens (:obj:`bool`):
Whether to add the special tokens
Return:
:class:`~tokenizers.Encoding`: The final encoding
"""
pass
class RobertaProcessing(PostProcessor):
"""
This post-processor takes care of adding the special tokens needed by
a Roberta model:
- a SEP token
- a CLS token
It also takes care of trimming the offsets.
By default, the ByteLevel BPE might include whitespaces in the produced tokens. If you don't
want the offsets to include these whitespaces, then this PostProcessor should be initialized
with :obj:`trim_offsets=True`
Args:
sep (:obj:`Tuple[str, int]`):
A tuple with the string representation of the SEP token, and its id
cls (:obj:`Tuple[str, int]`):
A tuple with the string representation of the CLS token, and its id
trim_offsets (:obj:`bool`, `optional`, defaults to :obj:`True`):
Whether to trim the whitespaces from the produced offsets.
add_prefix_space (:obj:`bool`, `optional`, defaults to :obj:`True`):
Whether the add_prefix_space option was enabled during pre-tokenization. This
is relevant because it defines the way the offsets are trimmed out.
"""
def __init__(self, sep, cls, trim_offsets=True, add_prefix_space=True):
pass
def num_special_tokens_to_add(self, is_pair):
"""
Return the number of special tokens that would be added for single/pair sentences.
Args:
is_pair (:obj:`bool`):
Whether the input would be a pair of sequences
Returns:
:obj:`int`: The number of tokens to add
"""
pass
def process(self, encoding, pair=None, add_special_tokens=True):
"""
Post-process the given encodings, generating the final one
Args:
encoding (:class:`~tokenizers.Encoding`):
The encoding for the first sequence
pair (:class:`~tokenizers.Encoding`, `optional`):
The encoding for the pair sequence
add_special_tokens (:obj:`bool`):
Whether to add the special tokens
Return:
:class:`~tokenizers.Encoding`: The final encoding
"""
pass
class Sequence(PostProcessor):
"""
Sequence Processor
Args:
processors (:obj:`List[PostProcessor]`):
The processors that need to be chained
"""
def __init__(self, processors):
pass
def num_special_tokens_to_add(self, is_pair):
"""
Return the number of special tokens that would be added for single/pair sentences.
Args:
is_pair (:obj:`bool`):
Whether the input would be a pair of sequences
Returns:
:obj:`int`: The number of tokens to add
"""
pass
def process(self, encoding, pair=None, add_special_tokens=True):
"""
Post-process the given encodings, generating the final one
Args:
encoding (:class:`~tokenizers.Encoding`):
The encoding for the first sequence
pair (:class:`~tokenizers.Encoding`, `optional`):
The encoding for the pair sequence
add_special_tokens (:obj:`bool`):
Whether to add the special tokens
Return:
:class:`~tokenizers.Encoding`: The final encoding
"""
pass
class TemplateProcessing(PostProcessor):
"""
Provides a way to specify templates in order to add the special tokens to each
input sequence as relevant.
Let's take :obj:`BERT` tokenizer as an example. It uses two special tokens, used to
delimit each sequence. :obj:`[CLS]` is always used at the beginning of the first
sequence, and :obj:`[SEP]` is added at the end of both the first, and the pair
sequences. The final result looks like this:
- Single sequence: :obj:`[CLS] Hello there [SEP]`
- Pair sequences: :obj:`[CLS] My name is Anthony [SEP] What is my name? [SEP]`
With the type ids as follows::
[CLS] ... [SEP] ... [SEP]
0 0 0 1 1
You can achieve such behavior using a TemplateProcessing::
TemplateProcessing(
single="[CLS] $0 [SEP]",
pair="[CLS] $A [SEP] $B:1 [SEP]:1",
special_tokens=[("[CLS]", 1), ("[SEP]", 0)],
)
In this example, each input sequence is identified using a ``$`` construct. This identifier
lets us specify each input sequence, and the type_id to use. When nothing is specified,
it uses the default values. Here are the different ways to specify it:
- Specifying the sequence, with default ``type_id == 0``: ``$A`` or ``$B``
- Specifying the `type_id` with default ``sequence == A``: ``$0``, ``$1``, ``$2``, ...
- Specifying both: ``$A:0``, ``$B:1``, ...
The same construct is used for special tokens: ``<identifier>(:<type_id>)?``.
**Warning**: You must ensure that you are giving the correct tokens/ids as these
will be added to the Encoding without any further check. If the given ids correspond
to something totally different in a `Tokenizer` using this `PostProcessor`, it
might lead to unexpected results.
Args:
single (:obj:`Template`):
The template used for single sequences
pair (:obj:`Template`):
The template used when both sequences are specified
special_tokens (:obj:`Tokens`):
The list of special tokens used in each sequence
Types:
Template (:obj:`str` or :obj:`List`):
- If a :obj:`str` is provided, the whitespace is used as delimiter between tokens
- If a :obj:`List[str]` is provided, a list of tokens
Tokens (:obj:`List[Union[Tuple[int, str], Tuple[str, int], dict]]`):
- A :obj:`Tuple` with both a token and its associated ID, in any order
- A :obj:`dict` with the following keys:
- "id": :obj:`str` => The special token id, as specified in the Template
- "ids": :obj:`List[int]` => The associated IDs
- "tokens": :obj:`List[str]` => The associated tokens
The given dict expects the provided :obj:`ids` and :obj:`tokens` lists to have
the same length.
"""
def __init__(self, single, pair, special_tokens):
pass
def num_special_tokens_to_add(self, is_pair):
"""
Return the number of special tokens that would be added for single/pair sentences.
Args:
is_pair (:obj:`bool`):
Whether the input would be a pair of sequences
Returns:
:obj:`int`: The number of tokens to add
"""
pass
def process(self, encoding, pair=None, add_special_tokens=True):
"""
Post-process the given encodings, generating the final one
Args:
encoding (:class:`~tokenizers.Encoding`):
The encoding for the first sequence
pair (:class:`~tokenizers.Encoding`, `optional`):
The encoding for the pair sequence
add_special_tokens (:obj:`bool`):
Whether to add the special tokens
Return:
:class:`~tokenizers.Encoding`: The final encoding
"""
pass
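# Illustrative usage sketch (added for documentation; not part of the generated stub).
# The special-token ids below are hypothetical and must match the vocabulary of the
# tokenizer this processor is attached to. Run it in a regular script:
#
#   from tokenizers.processors import TemplateProcessing
#
#   post_processor = TemplateProcessing(
#       single="[CLS] $A [SEP]",
#       pair="[CLS] $A [SEP] $B:1 [SEP]:1",
#       special_tokens=[("[CLS]", 1), ("[SEP]", 2)],
#   )
#   # tokenizer.post_processor = post_processor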
| 0 |
hf_public_repos/tokenizers/bindings/python/py_src/tokenizers | hf_public_repos/tokenizers/bindings/python/py_src/tokenizers/processors/__init__.py | # Generated content DO NOT EDIT
from .. import processors
PostProcessor = processors.PostProcessor
BertProcessing = processors.BertProcessing
ByteLevel = processors.ByteLevel
RobertaProcessing = processors.RobertaProcessing
Sequence = processors.Sequence
TemplateProcessing = processors.TemplateProcessing
| 0 |
hf_public_repos/tokenizers/bindings/python/py_src/tokenizers | hf_public_repos/tokenizers/bindings/python/py_src/tokenizers/decoders/__init__.pyi | # Generated content DO NOT EDIT
class Decoder:
"""
Base class for all decoders
This class is not supposed to be instantiated directly. Instead, any implementation of
a Decoder will return an instance of this class when instantiated.
"""
def decode(self, tokens):
"""
Decode the given list of tokens to a final string
Args:
tokens (:obj:`List[str]`):
The list of tokens to decode
Returns:
:obj:`str`: The decoded string
"""
pass
class BPEDecoder(Decoder):
"""
BPEDecoder Decoder
Args:
suffix (:obj:`str`, `optional`, defaults to :obj:`</w>`):
The suffix that was used to characterize an end-of-word. This suffix will
be replaced by whitespaces during the decoding
"""
def __init__(self, suffix="</w>"):
pass
def decode(self, tokens):
"""
Decode the given list of tokens to a final string
Args:
tokens (:obj:`List[str]`):
The list of tokens to decode
Returns:
:obj:`str`: The decoded string
"""
pass
class ByteFallback(Decoder):
"""
ByteFallback Decoder
ByteFallback is a simple trick which converts tokens looking like `<0x61>`
to pure bytes, and attempts to make them into a string. If the tokens
cannot be decoded, you will get � instead for each inconvertible byte token
"""
def __init__(self):
pass
def decode(self, tokens):
"""
Decode the given list of tokens to a final string
Args:
tokens (:obj:`List[str]`):
The list of tokens to decode
Returns:
:obj:`str`: The decoded string
"""
pass
class ByteLevel(Decoder):
"""
ByteLevel Decoder
This decoder is to be used in tandem with the :class:`~tokenizers.pre_tokenizers.ByteLevel`
:class:`~tokenizers.pre_tokenizers.PreTokenizer`.
"""
def __init__(self):
pass
def decode(self, tokens):
"""
Decode the given list of tokens to a final string
Args:
tokens (:obj:`List[str]`):
The list of tokens to decode
Returns:
:obj:`str`: The decoded string
"""
pass
class CTC(Decoder):
"""
CTC Decoder
Args:
pad_token (:obj:`str`, `optional`, defaults to :obj:`<pad>`):
The pad token used by CTC to delimit a new token.
word_delimiter_token (:obj:`str`, `optional`, defaults to :obj:`|`):
The word delimiter token. It will be replaced by a <space>
cleanup (:obj:`bool`, `optional`, defaults to :obj:`True`):
Whether to clean up some tokenization artifacts.
Mainly spaces before punctuation, and some abbreviated English forms.
"""
def __init__(self, pad_token="<pad>", word_delimiter_token="|", cleanup=True):
pass
def decode(self, tokens):
"""
Decode the given list of tokens to a final string
Args:
tokens (:obj:`List[str]`):
The list of tokens to decode
Returns:
:obj:`str`: The decoded string
"""
pass
class Fuse(Decoder):
"""
Fuse Decoder
Fuse simply fuses every token into a single string.
This is the last step of decoding; this decoder exists only if
there is a need to add other decoders *after* the fusion
"""
def __init__(self):
pass
def decode(self, tokens):
"""
Decode the given list of tokens to a final string
Args:
tokens (:obj:`List[str]`):
The list of tokens to decode
Returns:
:obj:`str`: The decoded string
"""
pass
class Metaspace(Decoder):
"""
Metaspace Decoder
Args:
replacement (:obj:`str`, `optional`, defaults to :obj:`▁`):
The replacement character. Must be exactly one character. By default we
use the `▁` (U+2581) meta symbol (Same as in SentencePiece).
add_prefix_space (:obj:`bool`, `optional`, defaults to :obj:`True`):
Whether to add a space to the first word if there isn't already one. This
lets us treat `hello` exactly like `say hello`.
"""
def __init__(self, replacement="▁", add_prefix_space=True):
pass
def decode(self, tokens):
"""
Decode the given list of tokens to a final string
Args:
tokens (:obj:`List[str]`):
The list of tokens to decode
Returns:
:obj:`str`: The decoded string
"""
pass
class Replace(Decoder):
"""
Replace Decoder
This decoder is to be used in tandem with the :class:`~tokenizers.pre_tokenizers.Replace`
:class:`~tokenizers.pre_tokenizers.PreTokenizer`.
"""
def __init__(self, pattern, content):
pass
def decode(self, tokens):
"""
Decode the given list of tokens to a final string
Args:
tokens (:obj:`List[str]`):
The list of tokens to decode
Returns:
:obj:`str`: The decoded string
"""
pass
class Sequence(Decoder):
"""
Sequence Decoder
Args:
decoders (:obj:`List[Decoder]`):
The decoders that need to be chained
"""
def __init__(self, decoders):
pass
def decode(self, tokens):
"""
Decode the given list of tokens to a final string
Args:
tokens (:obj:`List[str]`):
The list of tokens to decode
Returns:
:obj:`str`: The decoded string
"""
pass
class Strip(Decoder):
"""
Strip Decoder
Strips n left characters of each token, or n right characters of each token
"""
def __init__(self, content, left=0, right=0):
pass
def decode(self, tokens):
"""
Decode the given list of tokens to a final string
Args:
tokens (:obj:`List[str]`):
The list of tokens to decode
Returns:
:obj:`str`: The decoded string
"""
pass
class WordPiece(Decoder):
"""
WordPiece Decoder
Args:
prefix (:obj:`str`, `optional`, defaults to :obj:`##`):
The prefix to use for subwords that are not a beginning-of-word
cleanup (:obj:`bool`, `optional`, defaults to :obj:`True`):
Whether to clean up some tokenization artifacts. Mainly spaces before punctuation,
and some abbreviated English forms.
"""
def __init__(self, prefix="##", cleanup=True):
pass
def decode(self, tokens):
"""
Decode the given list of tokens to a final string
Args:
tokens (:obj:`List[str]`):
The list of tokens to decode
Returns:
:obj:`str`: The decoded string
"""
pass
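# Illustrative usage sketch (added for documentation; not part of the generated stub).
# It decodes the pieces produced by a WordPiece model back into a string:
#
#   from tokenizers.decoders import WordPiece
#
#   decoder = WordPiece(prefix="##")
#   decoder.decode(["un", "##aff", "##able"])  # expected: "unaffable"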
| 0 |
hf_public_repos/tokenizers/bindings/python/py_src/tokenizers | hf_public_repos/tokenizers/bindings/python/py_src/tokenizers/decoders/__init__.py | from .. import decoders
Decoder = decoders.Decoder
ByteLevel = decoders.ByteLevel
Replace = decoders.Replace
WordPiece = decoders.WordPiece
ByteFallback = decoders.ByteFallback
Fuse = decoders.Fuse
Strip = decoders.Strip
Metaspace = decoders.Metaspace
BPEDecoder = decoders.BPEDecoder
CTC = decoders.CTC
Sequence = decoders.Sequence
| 0 |
hf_public_repos/tokenizers/bindings/python/py_src/tokenizers | hf_public_repos/tokenizers/bindings/python/py_src/tokenizers/implementations/byte_level_bpe.py | from typing import Dict, Iterator, List, Optional, Tuple, Union
from tokenizers import AddedToken, Tokenizer, decoders, pre_tokenizers, processors, trainers
from tokenizers.models import BPE
from tokenizers.normalizers import Lowercase, Sequence, unicode_normalizer_from_str
from .base_tokenizer import BaseTokenizer
class ByteLevelBPETokenizer(BaseTokenizer):
"""ByteLevelBPETokenizer
Represents a Byte-level BPE as introduced by OpenAI with their GPT-2 model
"""
def __init__(
self,
vocab: Optional[Union[str, Dict[str, int]]] = None,
merges: Optional[Union[str, Dict[Tuple[int, int], Tuple[int, int]]]] = None,
add_prefix_space: bool = False,
lowercase: bool = False,
dropout: Optional[float] = None,
unicode_normalizer: Optional[str] = None,
continuing_subword_prefix: Optional[str] = None,
end_of_word_suffix: Optional[str] = None,
trim_offsets: bool = False,
):
if vocab is not None and merges is not None:
tokenizer = Tokenizer(
BPE(
vocab,
merges,
dropout=dropout,
continuing_subword_prefix=continuing_subword_prefix or "",
end_of_word_suffix=end_of_word_suffix or "",
)
)
else:
tokenizer = Tokenizer(BPE())
# Check for Unicode normalization first (before everything else)
normalizers = []
if unicode_normalizer:
normalizers += [unicode_normalizer_from_str(unicode_normalizer)]
if lowercase:
normalizers += [Lowercase()]
# Create the normalizer structure
if len(normalizers) > 0:
if len(normalizers) > 1:
tokenizer.normalizer = Sequence(normalizers)
else:
tokenizer.normalizer = normalizers[0]
tokenizer.pre_tokenizer = pre_tokenizers.ByteLevel(add_prefix_space=add_prefix_space)
tokenizer.decoder = decoders.ByteLevel()
tokenizer.post_processor = processors.ByteLevel(trim_offsets=trim_offsets)
parameters = {
"model": "ByteLevelBPE",
"add_prefix_space": add_prefix_space,
"lowercase": lowercase,
"dropout": dropout,
"unicode_normalizer": unicode_normalizer,
"continuing_subword_prefix": continuing_subword_prefix,
"end_of_word_suffix": end_of_word_suffix,
"trim_offsets": trim_offsets,
}
super().__init__(tokenizer, parameters)
@staticmethod
def from_file(vocab_filename: str, merges_filename: str, **kwargs):
vocab, merges = BPE.read_file(vocab_filename, merges_filename)
return ByteLevelBPETokenizer(vocab, merges, **kwargs)
def train(
self,
files: Union[str, List[str]],
vocab_size: int = 30000,
min_frequency: int = 2,
show_progress: bool = True,
special_tokens: List[Union[str, AddedToken]] = [],
):
"""Train the model using the given files"""
trainer = trainers.BpeTrainer(
vocab_size=vocab_size,
min_frequency=min_frequency,
show_progress=show_progress,
special_tokens=special_tokens,
initial_alphabet=pre_tokenizers.ByteLevel.alphabet(),
)
if isinstance(files, str):
files = [files]
self._tokenizer.train(files, trainer=trainer)
def train_from_iterator(
self,
iterator: Union[Iterator[str], Iterator[Iterator[str]]],
vocab_size: int = 30000,
min_frequency: int = 2,
show_progress: bool = True,
special_tokens: List[Union[str, AddedToken]] = [],
length: Optional[int] = None,
):
"""Train the model using the given iterator"""
trainer = trainers.BpeTrainer(
vocab_size=vocab_size,
min_frequency=min_frequency,
show_progress=show_progress,
special_tokens=special_tokens,
initial_alphabet=pre_tokenizers.ByteLevel.alphabet(),
)
self._tokenizer.train_from_iterator(
iterator,
trainer=trainer,
length=length,
)
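# A minimal usage sketch added for illustration; it is not part of the library's public
# API and the in-memory corpus below is made up.
def _example_usage():
    # Train a tiny byte-level BPE tokenizer from an in-memory corpus and encode a string.
    tokenizer = ByteLevelBPETokenizer()
    tokenizer.train_from_iterator(["hello world", "hello there"], vocab_size=500, min_frequency=1)
    encoding = tokenizer.encode("hello world")
    return encoding.tokens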
| 0 |
hf_public_repos/tokenizers/bindings/python/py_src/tokenizers | hf_public_repos/tokenizers/bindings/python/py_src/tokenizers/implementations/sentencepiece_unigram.py | import json
import os
from typing import Iterator, List, Optional, Union, Tuple
from tokenizers import AddedToken, Regex, Tokenizer, decoders, normalizers, pre_tokenizers, trainers
from tokenizers.models import Unigram
from .base_tokenizer import BaseTokenizer
class SentencePieceUnigramTokenizer(BaseTokenizer):
"""SentencePiece Unigram Tokenizer
Represents the Unigram algorithm, with the pretokenization used by SentencePiece
"""
def __init__(
self,
vocab: Optional[List[Tuple[str, float]]] = None,
replacement: str = "▁",
add_prefix_space: bool = True,
):
if vocab is not None:
# Let Unigram(..) fail if only one of them is None
tokenizer = Tokenizer(Unigram(vocab))
else:
tokenizer = Tokenizer(Unigram())
tokenizer.normalizer = normalizers.Sequence(
[normalizers.Nmt(), normalizers.NFKC(), normalizers.Replace(Regex(" {2,}"), " ")]
)
tokenizer.pre_tokenizer = pre_tokenizers.Metaspace(replacement=replacement, add_prefix_space=add_prefix_space)
tokenizer.decoder = decoders.Metaspace(replacement=replacement, add_prefix_space=add_prefix_space)
parameters = {
"model": "SentencePieceUnigram",
"replacement": replacement,
"add_prefix_space": add_prefix_space,
}
super().__init__(tokenizer, parameters)
def train(
self,
files: Union[str, List[str]],
vocab_size: int = 8000,
show_progress: bool = True,
special_tokens: Optional[List[Union[str, AddedToken]]] = None,
initial_alphabet: Optional[List[str]] = None,
unk_token: Optional[str] = None,
):
"""
Train the model using the given files
Args:
files (:obj:`List[str]`):
A list of path to the files that we should use for training
vocab_size (:obj:`int`):
The size of the final vocabulary, including all tokens and alphabet.
show_progress (:obj:`bool`):
Whether to show progress bars while training.
special_tokens (:obj:`List[Union[str, AddedToken]]`, `optional`):
A list of special tokens the model should know of.
initial_alphabet (:obj:`List[str]`, `optional`):
A list of characters to include in the initial alphabet, even
if not seen in the training dataset.
If the strings contain more than one character, only the first one
is kept.
unk_token (:obj:`str`, `optional`):
The unknown token to be used by the model.
"""
if special_tokens is None:
special_tokens = []
if initial_alphabet is None:
initial_alphabet = []
trainer = trainers.UnigramTrainer(
vocab_size=vocab_size,
special_tokens=special_tokens,
show_progress=show_progress,
initial_alphabet=initial_alphabet,
unk_token=unk_token,
)
if isinstance(files, str):
files = [files]
self._tokenizer.train(files, trainer=trainer)
def train_from_iterator(
self,
iterator: Union[Iterator[str], Iterator[Iterator[str]]],
vocab_size: int = 8000,
show_progress: bool = True,
special_tokens: Optional[List[Union[str, AddedToken]]] = None,
initial_alphabet: Optional[List[str]] = None,
unk_token: Optional[str] = None,
length: Optional[int] = None,
):
"""
Train the model using the given iterator
Args:
iterator (:obj:`Union[Iterator[str], Iterator[Iterator[str]]]`):
Any iterator over strings or list of strings
vocab_size (:obj:`int`):
The size of the final vocabulary, including all tokens and alphabet.
show_progress (:obj:`bool`):
Whether to show progress bars while training.
special_tokens (:obj:`List[Union[str, AddedToken]]`, `optional`):
A list of special tokens the model should know of.
initial_alphabet (:obj:`List[str]`, `optional`):
A list of characters to include in the initial alphabet, even
if not seen in the training dataset.
If the strings contain more than one character, only the first one
is kept.
unk_token (:obj:`str`, `optional`):
The unknown token to be used by the model.
length (:obj:`int`, `optional`):
The total number of sequences in the iterator. This is used to
provide meaningful progress tracking
"""
if special_tokens is None:
special_tokens = []
if initial_alphabet is None:
initial_alphabet = []
trainer = trainers.UnigramTrainer(
vocab_size=vocab_size,
special_tokens=special_tokens,
show_progress=show_progress,
initial_alphabet=initial_alphabet,
unk_token=unk_token,
)
self._tokenizer.train_from_iterator(
iterator,
trainer=trainer,
length=length,
)
@staticmethod
def from_spm(filename: str):
try:
import sys
sys.path.append(".")
import sentencepiece_model_pb2 as model
except Exception:
raise Exception(
"You don't seem to have the required protobuf file, in order to use this function you need to run `pip install protobuf` and `wget https://raw.githubusercontent.com/google/sentencepiece/master/python/src/sentencepiece/sentencepiece_model_pb2.py` for us to be able to read the intrinsics of your spm_file. `pip install sentencepiece` is not required."
)
m = model.ModelProto()
m.ParseFromString(open(filename, "rb").read())
precompiled_charsmap = m.normalizer_spec.precompiled_charsmap
vocab = [(piece.piece, piece.score) for piece in m.pieces]
unk_id = m.trainer_spec.unk_id
model_type = m.trainer_spec.model_type
byte_fallback = m.trainer_spec.byte_fallback
if model_type != 1:
raise Exception(
"You're trying to run a `Unigram` model but you're file was trained with a different algorithm"
)
replacement = "▁"
add_prefix_space = True
tokenizer = Tokenizer(Unigram(vocab, unk_id, byte_fallback))
if precompiled_charsmap:
tokenizer.normalizer = normalizers.Sequence(
[
normalizers.Precompiled(precompiled_charsmap),
normalizers.Replace(Regex(" {2,}"), " "),
]
)
else:
tokenizer.normalizer = normalizers.Sequence([normalizers.Replace(Regex(" {2,}"), " ")])
tokenizer.pre_tokenizer = pre_tokenizers.Metaspace(replacement=replacement, add_prefix_space=add_prefix_space)
tokenizer.decoder = decoders.Metaspace(replacement=replacement, add_prefix_space=add_prefix_space)
parameters = {
"model": "SentencePieceUnigram",
}
obj = BaseTokenizer.__new__(SentencePieceUnigramTokenizer, tokenizer, parameters)
BaseTokenizer.__init__(obj, tokenizer, parameters)
return obj
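# A minimal usage sketch added for illustration; it is not part of the library's public
# API. The vocabulary scores and the model path below are made up.
def _example_usage():
    # Either load an existing SentencePiece model file (hypothetical path):
    #   tokenizer = SentencePieceUnigramTokenizer.from_spm("path/to/model.spm")
    # or build one from an in-memory (piece, log-probability) vocabulary:
    vocab = [("<unk>", 0.0), ("▁hello", -1.5), ("▁world", -2.0)]
    tokenizer = SentencePieceUnigramTokenizer(vocab)
    return tokenizer.encode("hello world").tokens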
| 0 |
hf_public_repos/tokenizers/bindings/python/py_src/tokenizers | hf_public_repos/tokenizers/bindings/python/py_src/tokenizers/implementations/sentencepiece_bpe.py | from typing import Dict, Iterator, List, Optional, Tuple, Union
from tokenizers import AddedToken, Tokenizer, decoders, pre_tokenizers, trainers
from tokenizers.models import BPE
from tokenizers.normalizers import NFKC
from .base_tokenizer import BaseTokenizer
class SentencePieceBPETokenizer(BaseTokenizer):
"""SentencePiece BPE Tokenizer
Represents the BPE algorithm, with the pretokenization used by SentencePiece
"""
def __init__(
self,
vocab: Optional[Union[str, Dict[str, int]]] = None,
merges: Optional[Union[str, Dict[Tuple[int, int], Tuple[int, int]]]] = None,
unk_token: Union[str, AddedToken] = "<unk>",
replacement: str = "▁",
add_prefix_space: bool = True,
dropout: Optional[float] = None,
fuse_unk: Optional[bool] = False,
):
if vocab is not None and merges is not None:
tokenizer = Tokenizer(BPE(vocab, merges, dropout=dropout, unk_token=unk_token, fuse_unk=fuse_unk))
else:
tokenizer = Tokenizer(BPE(dropout=dropout, unk_token=unk_token, fuse_unk=fuse_unk))
if tokenizer.token_to_id(str(unk_token)) is not None:
tokenizer.add_special_tokens([str(unk_token)])
tokenizer.normalizer = NFKC()
tokenizer.pre_tokenizer = pre_tokenizers.Metaspace(replacement=replacement, add_prefix_space=add_prefix_space)
tokenizer.decoder = decoders.Metaspace(replacement=replacement, add_prefix_space=add_prefix_space)
parameters = {
"model": "SentencePieceBPE",
"unk_token": unk_token,
"replacement": replacement,
"add_prefix_space": add_prefix_space,
"dropout": dropout,
}
super().__init__(tokenizer, parameters)
@staticmethod
def from_file(vocab_filename: str, merges_filename: str, **kwargs):
vocab, merges = BPE.read_file(vocab_filename, merges_filename)
return SentencePieceBPETokenizer(vocab, merges, **kwargs)
def train(
self,
files: Union[str, List[str]],
vocab_size: int = 30000,
min_frequency: int = 2,
special_tokens: List[Union[str, AddedToken]] = ["<unk>"],
limit_alphabet: int = 1000,
initial_alphabet: List[str] = [],
show_progress: bool = True,
):
"""Train the model using the given files"""
trainer = trainers.BpeTrainer(
vocab_size=vocab_size,
min_frequency=min_frequency,
special_tokens=special_tokens,
limit_alphabet=limit_alphabet,
initial_alphabet=initial_alphabet,
show_progress=show_progress,
)
if isinstance(files, str):
files = [files]
self._tokenizer.train(files, trainer=trainer)
def train_from_iterator(
self,
iterator: Union[Iterator[str], Iterator[Iterator[str]]],
vocab_size: int = 30000,
min_frequency: int = 2,
special_tokens: List[Union[str, AddedToken]] = ["<unk>"],
limit_alphabet: int = 1000,
initial_alphabet: List[str] = [],
show_progress: bool = True,
length: Optional[int] = None,
):
"""Train the model using the given iterator"""
trainer = trainers.BpeTrainer(
vocab_size=vocab_size,
min_frequency=min_frequency,
special_tokens=special_tokens,
limit_alphabet=limit_alphabet,
initial_alphabet=initial_alphabet,
show_progress=show_progress,
)
self._tokenizer.train_from_iterator(
iterator,
trainer=trainer,
length=length,
)
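# A minimal usage sketch added for illustration; it is not part of the library's public
# API and the in-memory corpus below is made up.
def _example_usage():
    # Train a small SentencePiece-style BPE tokenizer and encode a string.
    tokenizer = SentencePieceBPETokenizer()
    tokenizer.train_from_iterator(["hello world", "hello there"], vocab_size=100, min_frequency=1)
    return tokenizer.encode("hello world").tokens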
| 0 |
hf_public_repos/tokenizers/bindings/python/py_src/tokenizers | hf_public_repos/tokenizers/bindings/python/py_src/tokenizers/implementations/base_tokenizer.py | from typing import Dict, List, Optional, Tuple, Union
from tokenizers import AddedToken, EncodeInput, Encoding, InputSequence, Tokenizer
from tokenizers.decoders import Decoder
from tokenizers.models import Model
from tokenizers.normalizers import Normalizer
from tokenizers.pre_tokenizers import PreTokenizer
from tokenizers.processors import PostProcessor
Offsets = Tuple[int, int]
class BaseTokenizer:
def __init__(self, tokenizer: Tokenizer, parameters=None):
self._tokenizer = tokenizer
self._parameters = parameters if parameters is not None else {}
def __repr__(self):
return "Tokenizer(vocabulary_size={}, {})".format(
self._tokenizer.get_vocab_size(),
", ".join(k + "=" + str(v) for k, v in self._parameters.items()),
)
def num_special_tokens_to_add(self, is_pair: bool) -> int:
"""
Return the number of special tokens that would be added for single/pair sentences.
:param is_pair: Boolean indicating if the input would be a single sentence or a pair
:return: The number of special tokens that would be added
"""
return self._tokenizer.num_special_tokens_to_add(is_pair)
def get_vocab(self, with_added_tokens: bool = True) -> Dict[str, int]:
"""Returns the vocabulary
Args:
with_added_tokens: boolean:
Whether to include the added tokens in the vocabulary
Returns:
The vocabulary
"""
return self._tokenizer.get_vocab(with_added_tokens=with_added_tokens)
def get_added_tokens_decoder(self) -> Dict[int, AddedToken]:
"""Returns the added reverse vocabulary
Returns:
The added vocabulary mapping ints to AddedTokens
"""
return self._tokenizer.get_added_tokens_decoder()
def get_vocab_size(self, with_added_tokens: bool = True) -> int:
"""Return the size of vocabulary, with or without added tokens.
Args:
with_added_tokens: (`optional`) bool:
Whether to count in added special tokens or not
Returns:
Size of vocabulary
"""
return self._tokenizer.get_vocab_size(with_added_tokens=with_added_tokens)
def enable_padding(
self,
direction: Optional[str] = "right",
pad_to_multiple_of: Optional[int] = None,
pad_id: Optional[int] = 0,
pad_type_id: Optional[int] = 0,
pad_token: Optional[str] = "[PAD]",
length: Optional[int] = None,
):
"""Change the padding strategy
Args:
direction: (`optional`) str:
Can be one of: `right` or `left`
pad_to_multiple_of: (`optional`) unsigned int:
If specified, the padding length should always snap to the next multiple of
the given value. For example if we were going to pad with a length of 250 but
`pad_to_multiple_of=8` then we will pad to 256.
pad_id: (`optional`) unsigned int:
The id to be used when padding
pad_type_id: (`optional`) unsigned int:
The type id to be used when padding
pad_token: (`optional`) str:
The pad token to be used when padding
length: (`optional`) unsigned int:
If specified, the length at which to pad. If not specified
we pad using the size of the longest sequence in a batch
"""
return self._tokenizer.enable_padding(
direction=direction,
pad_to_multiple_of=pad_to_multiple_of,
pad_id=pad_id,
pad_type_id=pad_type_id,
pad_token=pad_token,
length=length,
)
def no_padding(self):
"""Disable padding"""
return self._tokenizer.no_padding()
@property
def padding(self) -> Optional[dict]:
"""Get the current padding parameters
Returns:
None if padding is disabled, a dict with the currently set parameters
if the padding is enabled.
"""
return self._tokenizer.padding
def enable_truncation(self, max_length: int, stride: Optional[int] = 0, strategy: Optional[str] = "longest_first"):
"""Change the truncation options
Args:
max_length: unsigned int:
The maximum length at which to truncate
stride: (`optional`) unsigned int:
The length of the previous first sequence to be included
in the overflowing sequence
strategy: (`optional`) str:
Can be one of `longest_first`, `only_first` or `only_second`
"""
return self._tokenizer.enable_truncation(max_length, stride=stride, strategy=strategy)
def no_truncation(self):
"""Disable truncation"""
return self._tokenizer.no_truncation()
@property
def truncation(self) -> Optional[dict]:
"""Get the current truncation parameters
Returns:
None if truncation is disabled, a dict with the current truncation parameters if
truncation is enabled
"""
return self._tokenizer.truncation
def add_tokens(self, tokens: List[Union[str, AddedToken]]) -> int:
"""Add the given tokens to the vocabulary
Args:
tokens: List[Union[str, AddedToken]]:
A list of tokens to add to the vocabulary. Each token can either be
a string, or an instance of AddedToken
Returns:
The number of tokens that were added to the vocabulary
"""
return self._tokenizer.add_tokens(tokens)
def add_special_tokens(self, special_tokens: List[Union[str, AddedToken]]) -> int:
"""Add the given special tokens to the vocabulary, and treat them as special tokens.
The special tokens will never be processed by the model, and will be
removed while decoding.
Args:
special_tokens: List[Union[str, AddedToken]]:
A list of special tokens to add to the vocabulary. Each token can either be
a string, or an instance of AddedToken
Returns:
The number of tokens that were added to the vocabulary
"""
return self._tokenizer.add_special_tokens(special_tokens)
def normalize(self, sequence: str) -> str:
"""Normalize the given sequence
Args:
sequence: str:
The sequence to normalize
Returns:
The normalized string
"""
return self._tokenizer.normalize(sequence)
def encode(
self,
sequence: InputSequence,
pair: Optional[InputSequence] = None,
is_pretokenized: bool = False,
add_special_tokens: bool = True,
) -> Encoding:
"""Encode the given sequence and pair. This method can process raw text sequences as well
as already pre-tokenized sequences.
Args:
sequence: InputSequence:
The sequence we want to encode. This sequence can be either raw text or
pre-tokenized, according to the `is_pretokenized` argument:
- If `is_pretokenized=False`: `InputSequence` is expected to be `str`
- If `is_pretokenized=True`: `InputSequence` is expected to be
`Union[List[str], Tuple[str]]`
is_pretokenized: bool:
Whether the input is already pre-tokenized.
add_special_tokens: bool:
Whether to add the special tokens while encoding.
Returns:
An Encoding
"""
if sequence is None:
raise ValueError("encode: `sequence` can't be `None`")
return self._tokenizer.encode(sequence, pair, is_pretokenized, add_special_tokens)
def encode_batch(
self,
inputs: List[EncodeInput],
is_pretokenized: bool = False,
add_special_tokens: bool = True,
) -> List[Encoding]:
"""Encode the given inputs. This method accept both raw text sequences as well as already
pre-tokenized sequences.
Args:
inputs: List[EncodeInput]:
A list of single sequences or pair sequences to encode. Each `EncodeInput` is
expected to be of the following form:
`Union[InputSequence, Tuple[InputSequence, InputSequence]]`
Each `InputSequence` can either be raw text or pre-tokenized,
according to the `is_pretokenized` argument:
- If `is_pretokenized=False`: `InputSequence` is expected to be `str`
- If `is_pretokenized=True`: `InputSequence` is expected to be
`Union[List[str], Tuple[str]]`
is_pretokenized: bool:
Whether the input is already pre-tokenized.
add_special_tokens: bool:
Whether to add the special tokens while encoding.
Returns:
A list of Encoding
"""
if inputs is None:
raise ValueError("encode_batch: `inputs` can't be `None`")
return self._tokenizer.encode_batch(inputs, is_pretokenized, add_special_tokens)
def decode(self, ids: List[int], skip_special_tokens: Optional[bool] = True) -> str:
"""Decode the given list of ids to a string sequence
Args:
ids: List[unsigned int]:
A list of ids to be decoded
skip_special_tokens: (`optional`) boolean:
Whether to remove all the special tokens from the output string
Returns:
The decoded string
"""
if ids is None:
raise ValueError("None input is not valid. Should be a list of integers.")
return self._tokenizer.decode(ids, skip_special_tokens=skip_special_tokens)
def decode_batch(self, sequences: List[List[int]], skip_special_tokens: Optional[bool] = True) -> List[str]:
"""Decode the list of sequences to a list of string sequences
Args:
sequences: List[List[unsigned int]]:
A list of sequence of ids to be decoded
skip_special_tokens: (`optional`) boolean:
Whether to remove all the special tokens from the output strings
Returns:
A list of decoded strings
"""
if sequences is None:
raise ValueError("None input is not valid. Should be list of list of integers.")
return self._tokenizer.decode_batch(sequences, skip_special_tokens=skip_special_tokens)
def token_to_id(self, token: str) -> Optional[int]:
"""Convert the given token to its corresponding id
Args:
token: str:
The token to convert
Returns:
The corresponding id if it exists, None otherwise
"""
return self._tokenizer.token_to_id(token)
def id_to_token(self, id: int) -> Optional[str]:
"""Convert the given token id to its corresponding string
Args:
id: int:
The token id to convert
Returns:
The corresponding string if it exists, None otherwise
"""
return self._tokenizer.id_to_token(id)
def save_model(self, directory: str, prefix: Optional[str] = None):
"""Save the current model to the given directory
Args:
directory: str:
A path to the destination directory
prefix: (Optional) str:
An optional prefix, used to prefix each file name
"""
return self._tokenizer.model.save(directory, prefix=prefix)
def save(self, path: str, pretty: bool = True):
"""Save the current Tokenizer at the given path
Args:
path: str:
A path to the destination Tokenizer file
"""
return self._tokenizer.save(path, pretty)
def to_str(self, pretty: bool = False):
"""Get a serialized JSON version of the Tokenizer as a str
Args:
pretty: bool:
Whether the JSON string should be prettified
Returns:
str
"""
return self._tokenizer.to_str(pretty)
def post_process(
self, encoding: Encoding, pair: Optional[Encoding] = None, add_special_tokens: bool = True
) -> Encoding:
"""Apply all the post-processing steps to the given encodings.
The various steps are:
1. Truncate according to global params (provided to `enable_truncation`)
2. Apply the PostProcessor
3. Pad according to global params (provided to `enable_padding`)
Args:
encoding: Encoding:
The main Encoding to post process
pair: Optional[Encoding]:
An optional pair Encoding
add_special_tokens: bool:
Whether to add special tokens
Returns:
The resulting Encoding
"""
return self._tokenizer.post_process(encoding, pair, add_special_tokens)
@property
def model(self) -> Model:
return self._tokenizer.model
@model.setter
def model(self, model: Model):
self._tokenizer.model = model
@property
def normalizer(self) -> Normalizer:
return self._tokenizer.normalizer
@normalizer.setter
def normalizer(self, normalizer: Normalizer):
self._tokenizer.normalizer = normalizer
@property
def pre_tokenizer(self) -> PreTokenizer:
return self._tokenizer.pre_tokenizer
@pre_tokenizer.setter
def pre_tokenizer(self, pre_tokenizer: PreTokenizer):
self._tokenizer.pre_tokenizer = pre_tokenizer
@property
def post_processor(self) -> PostProcessor:
return self._tokenizer.post_processor
@post_processor.setter
def post_processor(self, post_processor: PostProcessor):
self._tokenizer.post_processor = post_processor
@property
def decoder(self) -> Decoder:
return self._tokenizer.decoder
@decoder.setter
def decoder(self, decoder: Decoder):
self._tokenizer.decoder = decoder
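# A minimal usage sketch added for illustration; it is not part of the library's public
# API and the tiny vocabulary below is made up.
def _example_usage():
    # Wrap a bare Tokenizer in a BaseTokenizer and demonstrate padding, truncation and
    # batch encoding.
    from tokenizers.models import WordLevel
    from tokenizers.pre_tokenizers import Whitespace
    inner = Tokenizer(WordLevel({"[UNK]": 0, "[PAD]": 1, "hello": 2, "world": 3}, unk_token="[UNK]"))
    inner.pre_tokenizer = Whitespace()
    tokenizer = BaseTokenizer(inner, {"model": "WordLevel"})
    tokenizer.enable_padding(pad_id=1, pad_token="[PAD]")
    tokenizer.enable_truncation(max_length=8)
    return tokenizer.encode_batch(["hello world", "hello"])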
| 0 |
hf_public_repos/tokenizers/bindings/python/py_src/tokenizers | hf_public_repos/tokenizers/bindings/python/py_src/tokenizers/implementations/__init__.py | from .base_tokenizer import BaseTokenizer
from .bert_wordpiece import BertWordPieceTokenizer
from .byte_level_bpe import ByteLevelBPETokenizer
from .char_level_bpe import CharBPETokenizer
from .sentencepiece_bpe import SentencePieceBPETokenizer
from .sentencepiece_unigram import SentencePieceUnigramTokenizer
| 0 |
hf_public_repos/tokenizers/bindings/python/py_src/tokenizers | hf_public_repos/tokenizers/bindings/python/py_src/tokenizers/implementations/char_level_bpe.py | from typing import Dict, Iterator, List, Optional, Tuple, Union
from .. import AddedToken, Tokenizer, decoders, pre_tokenizers, trainers
from ..models import BPE
from ..normalizers import BertNormalizer, Lowercase, Sequence, unicode_normalizer_from_str
from .base_tokenizer import BaseTokenizer
class CharBPETokenizer(BaseTokenizer):
"""Original BPE Tokenizer
Represents the BPE algorithm, as introduced by Rico Sennrich
(https://arxiv.org/abs/1508.07909)
The default settings correspond to the OpenAI GPT BPE tokenizer and differ from the original
Sennrich subword-nmt implementation by the following options that you can deactivate:
- adding a normalizer to clean up the text (deactivate with `bert_normalizer=False`) by:
* removing any control characters and replacing all whitespaces by the classic one.
* handling Chinese characters by putting spaces around them.
* stripping all accents.
- splitting on punctuation in addition to whitespaces (deactivate it with
`split_on_whitespace_only=True`)
"""
def __init__(
self,
vocab: Optional[Union[str, Dict[str, int]]] = None,
merges: Optional[Union[str, Dict[Tuple[int, int], Tuple[int, int]]]] = None,
unk_token: Union[str, AddedToken] = "<unk>",
suffix: str = "</w>",
dropout: Optional[float] = None,
lowercase: bool = False,
unicode_normalizer: Optional[str] = None,
bert_normalizer: bool = True,
split_on_whitespace_only: bool = False,
):
if vocab is not None and merges is not None:
tokenizer = Tokenizer(
BPE(
vocab,
merges,
dropout=dropout,
unk_token=str(unk_token),
end_of_word_suffix=suffix,
)
)
else:
tokenizer = Tokenizer(BPE(unk_token=str(unk_token), dropout=dropout, end_of_word_suffix=suffix))
if tokenizer.token_to_id(str(unk_token)) is not None:
tokenizer.add_special_tokens([str(unk_token)])
# Check for Unicode normalization first (before everything else)
normalizers = []
if unicode_normalizer:
normalizers += [unicode_normalizer_from_str(unicode_normalizer)]
if bert_normalizer:
normalizers += [BertNormalizer(lowercase=False)]
if lowercase:
normalizers += [Lowercase()]
# Create the normalizer structure
if len(normalizers) > 0:
if len(normalizers) > 1:
tokenizer.normalizer = Sequence(normalizers)
else:
tokenizer.normalizer = normalizers[0]
if split_on_whitespace_only:
tokenizer.pre_tokenizer = pre_tokenizers.WhitespaceSplit()
else:
tokenizer.pre_tokenizer = pre_tokenizers.BertPreTokenizer()
tokenizer.decoder = decoders.BPEDecoder(suffix=suffix)
parameters = {
"model": "BPE",
"unk_token": unk_token,
"suffix": suffix,
"dropout": dropout,
"lowercase": lowercase,
"unicode_normalizer": unicode_normalizer,
"bert_normalizer": bert_normalizer,
"split_on_whitespace_only": split_on_whitespace_only,
}
super().__init__(tokenizer, parameters)
@staticmethod
def from_file(vocab_filename: str, merges_filename: str, **kwargs):
vocab, merges = BPE.read_file(vocab_filename, merges_filename)
return CharBPETokenizer(vocab, merges, **kwargs)
def train(
self,
files: Union[str, List[str]],
vocab_size: int = 30000,
min_frequency: int = 2,
special_tokens: List[Union[str, AddedToken]] = ["<unk>"],
limit_alphabet: int = 1000,
initial_alphabet: List[str] = [],
suffix: Optional[str] = "</w>",
show_progress: bool = True,
):
"""Train the model using the given files"""
trainer = trainers.BpeTrainer(
vocab_size=vocab_size,
min_frequency=min_frequency,
special_tokens=special_tokens,
limit_alphabet=limit_alphabet,
initial_alphabet=initial_alphabet,
end_of_word_suffix=suffix,
show_progress=show_progress,
)
if isinstance(files, str):
files = [files]
self._tokenizer.train(files, trainer=trainer)
def train_from_iterator(
self,
iterator: Union[Iterator[str], Iterator[Iterator[str]]],
vocab_size: int = 30000,
min_frequency: int = 2,
special_tokens: List[Union[str, AddedToken]] = ["<unk>"],
limit_alphabet: int = 1000,
initial_alphabet: List[str] = [],
suffix: Optional[str] = "</w>",
show_progress: bool = True,
length: Optional[int] = None,
):
"""Train the model using the given iterator"""
trainer = trainers.BpeTrainer(
vocab_size=vocab_size,
min_frequency=min_frequency,
special_tokens=special_tokens,
limit_alphabet=limit_alphabet,
initial_alphabet=initial_alphabet,
end_of_word_suffix=suffix,
show_progress=show_progress,
)
self._tokenizer.train_from_iterator(
iterator,
trainer=trainer,
length=length,
)
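# Example usage (illustrative sketch): the corpus path below is a placeholder,
# any plain-text file can be used. It shows the train -> encode -> decode round
# trip with the defaults described in the class docstring.
if __name__ == "__main__":
    char_bpe = CharBPETokenizer(lowercase=True)
    char_bpe.train(files="path/to/corpus.txt", vocab_size=5000, show_progress=False)
    encoding = char_bpe.encode("Hello, how are you?")
    print(encoding.tokens)  # subword tokens, each word ending with the "</w>" suffix
    print(char_bpe.decode(encoding.ids))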
| 0 |
hf_public_repos/tokenizers/bindings/python/py_src/tokenizers | hf_public_repos/tokenizers/bindings/python/py_src/tokenizers/implementations/bert_wordpiece.py | from typing import Dict, Iterator, List, Optional, Union
from tokenizers import AddedToken, Tokenizer, decoders, trainers
from tokenizers.models import WordPiece
from tokenizers.normalizers import BertNormalizer
from tokenizers.pre_tokenizers import BertPreTokenizer
from tokenizers.processors import BertProcessing
from .base_tokenizer import BaseTokenizer
class BertWordPieceTokenizer(BaseTokenizer):
"""Bert WordPiece Tokenizer"""
def __init__(
self,
vocab: Optional[Union[str, Dict[str, int]]] = None,
unk_token: Union[str, AddedToken] = "[UNK]",
sep_token: Union[str, AddedToken] = "[SEP]",
cls_token: Union[str, AddedToken] = "[CLS]",
pad_token: Union[str, AddedToken] = "[PAD]",
mask_token: Union[str, AddedToken] = "[MASK]",
clean_text: bool = True,
handle_chinese_chars: bool = True,
strip_accents: Optional[bool] = None,
lowercase: bool = True,
wordpieces_prefix: str = "##",
):
if vocab is not None:
tokenizer = Tokenizer(WordPiece(vocab, unk_token=str(unk_token)))
else:
tokenizer = Tokenizer(WordPiece(unk_token=str(unk_token)))
# Let the tokenizer know about special tokens if they are part of the vocab
if tokenizer.token_to_id(str(unk_token)) is not None:
tokenizer.add_special_tokens([str(unk_token)])
if tokenizer.token_to_id(str(sep_token)) is not None:
tokenizer.add_special_tokens([str(sep_token)])
if tokenizer.token_to_id(str(cls_token)) is not None:
tokenizer.add_special_tokens([str(cls_token)])
if tokenizer.token_to_id(str(pad_token)) is not None:
tokenizer.add_special_tokens([str(pad_token)])
if tokenizer.token_to_id(str(mask_token)) is not None:
tokenizer.add_special_tokens([str(mask_token)])
tokenizer.normalizer = BertNormalizer(
clean_text=clean_text,
handle_chinese_chars=handle_chinese_chars,
strip_accents=strip_accents,
lowercase=lowercase,
)
tokenizer.pre_tokenizer = BertPreTokenizer()
if vocab is not None:
sep_token_id = tokenizer.token_to_id(str(sep_token))
if sep_token_id is None:
raise TypeError("sep_token not found in the vocabulary")
cls_token_id = tokenizer.token_to_id(str(cls_token))
if cls_token_id is None:
raise TypeError("cls_token not found in the vocabulary")
tokenizer.post_processor = BertProcessing((str(sep_token), sep_token_id), (str(cls_token), cls_token_id))
tokenizer.decoder = decoders.WordPiece(prefix=wordpieces_prefix)
parameters = {
"model": "BertWordPiece",
"unk_token": unk_token,
"sep_token": sep_token,
"cls_token": cls_token,
"pad_token": pad_token,
"mask_token": mask_token,
"clean_text": clean_text,
"handle_chinese_chars": handle_chinese_chars,
"strip_accents": strip_accents,
"lowercase": lowercase,
"wordpieces_prefix": wordpieces_prefix,
}
super().__init__(tokenizer, parameters)
@staticmethod
def from_file(vocab: str, **kwargs):
vocab = WordPiece.read_file(vocab)
return BertWordPieceTokenizer(vocab, **kwargs)
def train(
self,
files: Union[str, List[str]],
vocab_size: int = 30000,
min_frequency: int = 2,
limit_alphabet: int = 1000,
initial_alphabet: List[str] = [],
special_tokens: List[Union[str, AddedToken]] = [
"[PAD]",
"[UNK]",
"[CLS]",
"[SEP]",
"[MASK]",
],
show_progress: bool = True,
wordpieces_prefix: str = "##",
):
"""Train the model using the given files"""
trainer = trainers.WordPieceTrainer(
vocab_size=vocab_size,
min_frequency=min_frequency,
limit_alphabet=limit_alphabet,
initial_alphabet=initial_alphabet,
special_tokens=special_tokens,
show_progress=show_progress,
continuing_subword_prefix=wordpieces_prefix,
)
if isinstance(files, str):
files = [files]
self._tokenizer.train(files, trainer=trainer)
def train_from_iterator(
self,
iterator: Union[Iterator[str], Iterator[Iterator[str]]],
vocab_size: int = 30000,
min_frequency: int = 2,
limit_alphabet: int = 1000,
initial_alphabet: List[str] = [],
special_tokens: List[Union[str, AddedToken]] = [
"[PAD]",
"[UNK]",
"[CLS]",
"[SEP]",
"[MASK]",
],
show_progress: bool = True,
wordpieces_prefix: str = "##",
length: Optional[int] = None,
):
"""Train the model using the given iterator"""
trainer = trainers.WordPieceTrainer(
vocab_size=vocab_size,
min_frequency=min_frequency,
limit_alphabet=limit_alphabet,
initial_alphabet=initial_alphabet,
special_tokens=special_tokens,
show_progress=show_progress,
continuing_subword_prefix=wordpieces_prefix,
)
self._tokenizer.train_from_iterator(
iterator,
trainer=trainer,
length=length,
)
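# Example usage (illustrative sketch): the vocabulary path is a placeholder, any
# BERT-style vocab.txt containing the [CLS]/[SEP]/[UNK] tokens will do. With the
# default post-processor, a pair is wrapped as [CLS] A [SEP] B [SEP].
if __name__ == "__main__":
    bert_tokenizer = BertWordPieceTokenizer.from_file("path/to/bert-vocab.txt")
    encoding = bert_tokenizer.encode("My name is John", "pair")
    print(encoding.tokens)    # e.g. ["[CLS]", "my", "name", ..., "[SEP]", "pair", "[SEP]"]
    print(encoding.type_ids)  # 0s for the first sequence, 1s for the second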
| 0 |
hf_public_repos/tokenizers/bindings/python/py_src/tokenizers | hf_public_repos/tokenizers/bindings/python/py_src/tokenizers/pre_tokenizers/__init__.pyi | # Generated content DO NOT EDIT
class PreTokenizer:
"""
Base class for all pre-tokenizers
This class is not supposed to be instantiated directly. Instead, any implementation of a
PreTokenizer will return an instance of this class when instantiated.
"""
def pre_tokenize(self, pretok):
"""
        Pre-tokenize a :class:`~tokenizers.PreTokenizedString` in-place
        This method allows you to modify a :class:`~tokenizers.PreTokenizedString` to
keep track of the pre-tokenization, and leverage the capabilities of the
:class:`~tokenizers.PreTokenizedString`. If you just want to see the result of
the pre-tokenization of a raw string, you can use
:meth:`~tokenizers.pre_tokenizers.PreTokenizer.pre_tokenize_str`
Args:
            pretok (:class:`~tokenizers.PreTokenizedString`):
The pre-tokenized string on which to apply this
:class:`~tokenizers.pre_tokenizers.PreTokenizer`
"""
pass
def pre_tokenize_str(self, sequence):
"""
Pre tokenize the given string
This method provides a way to visualize the effect of a
:class:`~tokenizers.pre_tokenizers.PreTokenizer` but it does not keep track of the
alignment, nor does it provide all the capabilities of the
:class:`~tokenizers.PreTokenizedString`. If you need some of these, you can use
:meth:`~tokenizers.pre_tokenizers.PreTokenizer.pre_tokenize`
Args:
sequence (:obj:`str`):
                A string to pre-tokenize
Returns:
:obj:`List[Tuple[str, Offsets]]`:
                A list of tuples with the pre-tokenized parts and their offsets
"""
pass
class BertPreTokenizer(PreTokenizer):
"""
BertPreTokenizer
This pre-tokenizer splits tokens on spaces, and also on punctuation.
    Each occurrence of a punctuation character will be treated separately.
"""
def __init__(self):
pass
def pre_tokenize(self, pretok):
"""
        Pre-tokenize a :class:`~tokenizers.PreTokenizedString` in-place
        This method allows you to modify a :class:`~tokenizers.PreTokenizedString` to
keep track of the pre-tokenization, and leverage the capabilities of the
:class:`~tokenizers.PreTokenizedString`. If you just want to see the result of
the pre-tokenization of a raw string, you can use
:meth:`~tokenizers.pre_tokenizers.PreTokenizer.pre_tokenize_str`
Args:
            pretok (:class:`~tokenizers.PreTokenizedString`):
The pre-tokenized string on which to apply this
:class:`~tokenizers.pre_tokenizers.PreTokenizer`
"""
pass
def pre_tokenize_str(self, sequence):
"""
Pre tokenize the given string
This method provides a way to visualize the effect of a
:class:`~tokenizers.pre_tokenizers.PreTokenizer` but it does not keep track of the
alignment, nor does it provide all the capabilities of the
:class:`~tokenizers.PreTokenizedString`. If you need some of these, you can use
:meth:`~tokenizers.pre_tokenizers.PreTokenizer.pre_tokenize`
Args:
sequence (:obj:`str`):
                A string to pre-tokenize
Returns:
:obj:`List[Tuple[str, Offsets]]`:
                A list of tuples with the pre-tokenized parts and their offsets
"""
pass
class ByteLevel(PreTokenizer):
"""
ByteLevel PreTokenizer
This pre-tokenizer takes care of replacing all bytes of the given string
with a corresponding representation, as well as splitting into words.
Args:
add_prefix_space (:obj:`bool`, `optional`, defaults to :obj:`True`):
Whether to add a space to the first word if there isn't already one. This
lets us treat `hello` exactly like `say hello`.
use_regex (:obj:`bool`, `optional`, defaults to :obj:`True`):
Set this to :obj:`False` to prevent this `pre_tokenizer` from using
            the GPT2 specific regexp for splitting on whitespace.
"""
def __init__(self, add_prefix_space=True, use_regex=True):
pass
@staticmethod
def alphabet():
"""
Returns the alphabet used by this PreTokenizer.
Since the ByteLevel works as its name suggests, at the byte level, it
encodes each byte value to a unique visible character. This means that there is a
total of 256 different characters composing this alphabet.
Returns:
:obj:`List[str]`: A list of characters that compose the alphabet
"""
pass
def pre_tokenize(self, pretok):
"""
        Pre-tokenize a :class:`~tokenizers.PreTokenizedString` in-place
        This method allows you to modify a :class:`~tokenizers.PreTokenizedString` to
keep track of the pre-tokenization, and leverage the capabilities of the
:class:`~tokenizers.PreTokenizedString`. If you just want to see the result of
the pre-tokenization of a raw string, you can use
:meth:`~tokenizers.pre_tokenizers.PreTokenizer.pre_tokenize_str`
Args:
            pretok (:class:`~tokenizers.PreTokenizedString`):
The pre-tokenized string on which to apply this
:class:`~tokenizers.pre_tokenizers.PreTokenizer`
"""
pass
def pre_tokenize_str(self, sequence):
"""
Pre tokenize the given string
This method provides a way to visualize the effect of a
:class:`~tokenizers.pre_tokenizers.PreTokenizer` but it does not keep track of the
alignment, nor does it provide all the capabilities of the
:class:`~tokenizers.PreTokenizedString`. If you need some of these, you can use
:meth:`~tokenizers.pre_tokenizers.PreTokenizer.pre_tokenize`
Args:
sequence (:obj:`str`):
                A string to pre-tokenize
Returns:
:obj:`List[Tuple[str, Offsets]]`:
                A list of tuples with the pre-tokenized parts and their offsets
"""
pass
class CharDelimiterSplit(PreTokenizer):
"""
This pre-tokenizer simply splits on the provided char. Works like `.split(delimiter)`
Args:
delimiter: str:
The delimiter char that will be used to split input
"""
def pre_tokenize(self, pretok):
"""
        Pre-tokenize a :class:`~tokenizers.PreTokenizedString` in-place
        This method allows you to modify a :class:`~tokenizers.PreTokenizedString` to
keep track of the pre-tokenization, and leverage the capabilities of the
:class:`~tokenizers.PreTokenizedString`. If you just want to see the result of
the pre-tokenization of a raw string, you can use
:meth:`~tokenizers.pre_tokenizers.PreTokenizer.pre_tokenize_str`
Args:
            pretok (:class:`~tokenizers.PreTokenizedString`):
The pre-tokenized string on which to apply this
:class:`~tokenizers.pre_tokenizers.PreTokenizer`
"""
pass
def pre_tokenize_str(self, sequence):
"""
Pre tokenize the given string
This method provides a way to visualize the effect of a
:class:`~tokenizers.pre_tokenizers.PreTokenizer` but it does not keep track of the
alignment, nor does it provide all the capabilities of the
:class:`~tokenizers.PreTokenizedString`. If you need some of these, you can use
:meth:`~tokenizers.pre_tokenizers.PreTokenizer.pre_tokenize`
Args:
sequence (:obj:`str`):
                A string to pre-tokenize
Returns:
:obj:`List[Tuple[str, Offsets]]`:
                A list of tuples with the pre-tokenized parts and their offsets
"""
pass
class Digits(PreTokenizer):
"""
    This pre-tokenizer simply splits using the digits into separate tokens
Args:
individual_digits (:obj:`bool`, `optional`, defaults to :obj:`False`):
If set to True, digits will each be separated as follows::
"Call 123 please" -> "Call ", "1", "2", "3", " please"
            If set to False, digits will be grouped as follows::
"Call 123 please" -> "Call ", "123", " please"
"""
def __init__(self, individual_digits=False):
pass
def pre_tokenize(self, pretok):
"""
        Pre-tokenize a :class:`~tokenizers.PreTokenizedString` in-place
        This method allows you to modify a :class:`~tokenizers.PreTokenizedString` to
keep track of the pre-tokenization, and leverage the capabilities of the
:class:`~tokenizers.PreTokenizedString`. If you just want to see the result of
the pre-tokenization of a raw string, you can use
:meth:`~tokenizers.pre_tokenizers.PreTokenizer.pre_tokenize_str`
Args:
            pretok (:class:`~tokenizers.PreTokenizedString`):
The pre-tokenized string on which to apply this
:class:`~tokenizers.pre_tokenizers.PreTokenizer`
"""
pass
def pre_tokenize_str(self, sequence):
"""
Pre tokenize the given string
This method provides a way to visualize the effect of a
:class:`~tokenizers.pre_tokenizers.PreTokenizer` but it does not keep track of the
alignment, nor does it provide all the capabilities of the
:class:`~tokenizers.PreTokenizedString`. If you need some of these, you can use
:meth:`~tokenizers.pre_tokenizers.PreTokenizer.pre_tokenize`
Args:
sequence (:obj:`str`):
                A string to pre-tokenize
Returns:
:obj:`List[Tuple[str, Offsets]]`:
                A list of tuples with the pre-tokenized parts and their offsets
"""
pass
class Metaspace(PreTokenizer):
"""
Metaspace pre-tokenizer
This pre-tokenizer replaces any whitespace by the provided replacement character.
It then tries to split on these spaces.
Args:
replacement (:obj:`str`, `optional`, defaults to :obj:`▁`):
The replacement character. Must be exactly one character. By default we
use the `▁` (U+2581) meta symbol (Same as in SentencePiece).
add_prefix_space (:obj:`bool`, `optional`, defaults to :obj:`True`):
Whether to add a space to the first word if there isn't already one. This
lets us treat `hello` exactly like `say hello`.
"""
    def __init__(self, replacement="▁", add_prefix_space=True):
pass
def pre_tokenize(self, pretok):
"""
        Pre-tokenize a :class:`~tokenizers.PreTokenizedString` in-place
        This method allows you to modify a :class:`~tokenizers.PreTokenizedString` to
keep track of the pre-tokenization, and leverage the capabilities of the
:class:`~tokenizers.PreTokenizedString`. If you just want to see the result of
the pre-tokenization of a raw string, you can use
:meth:`~tokenizers.pre_tokenizers.PreTokenizer.pre_tokenize_str`
Args:
            pretok (:class:`~tokenizers.PreTokenizedString`):
The pre-tokenized string on which to apply this
:class:`~tokenizers.pre_tokenizers.PreTokenizer`
"""
pass
def pre_tokenize_str(self, sequence):
"""
Pre tokenize the given string
This method provides a way to visualize the effect of a
:class:`~tokenizers.pre_tokenizers.PreTokenizer` but it does not keep track of the
alignment, nor does it provide all the capabilities of the
:class:`~tokenizers.PreTokenizedString`. If you need some of these, you can use
:meth:`~tokenizers.pre_tokenizers.PreTokenizer.pre_tokenize`
Args:
sequence (:obj:`str`):
                A string to pre-tokenize
Returns:
:obj:`List[Tuple[str, Offsets]]`:
                A list of tuples with the pre-tokenized parts and their offsets
"""
pass
class Punctuation(PreTokenizer):
"""
This pre-tokenizer simply splits on punctuation as individual characters.
Args:
behavior (:class:`~tokenizers.SplitDelimiterBehavior`):
The behavior to use when splitting.
Choices: "removed", "isolated" (default), "merged_with_previous", "merged_with_next",
"contiguous"
"""
def __init__(self, behavior="isolated"):
pass
def pre_tokenize(self, pretok):
"""
        Pre-tokenize a :class:`~tokenizers.PreTokenizedString` in-place
        This method allows you to modify a :class:`~tokenizers.PreTokenizedString` to
keep track of the pre-tokenization, and leverage the capabilities of the
:class:`~tokenizers.PreTokenizedString`. If you just want to see the result of
the pre-tokenization of a raw string, you can use
:meth:`~tokenizers.pre_tokenizers.PreTokenizer.pre_tokenize_str`
Args:
            pretok (:class:`~tokenizers.PreTokenizedString`):
The pre-tokenized string on which to apply this
:class:`~tokenizers.pre_tokenizers.PreTokenizer`
"""
pass
def pre_tokenize_str(self, sequence):
"""
Pre tokenize the given string
This method provides a way to visualize the effect of a
:class:`~tokenizers.pre_tokenizers.PreTokenizer` but it does not keep track of the
alignment, nor does it provide all the capabilities of the
:class:`~tokenizers.PreTokenizedString`. If you need some of these, you can use
:meth:`~tokenizers.pre_tokenizers.PreTokenizer.pre_tokenize`
Args:
sequence (:obj:`str`):
                A string to pre-tokenize
Returns:
:obj:`List[Tuple[str, Offsets]]`:
                A list of tuples with the pre-tokenized parts and their offsets
"""
pass
class Sequence(PreTokenizer):
"""
This pre-tokenizer composes other pre_tokenizers and applies them in sequence
"""
def __init__(self, pretokenizers):
pass
def pre_tokenize(self, pretok):
"""
        Pre-tokenize a :class:`~tokenizers.PreTokenizedString` in-place
        This method allows you to modify a :class:`~tokenizers.PreTokenizedString` to
keep track of the pre-tokenization, and leverage the capabilities of the
:class:`~tokenizers.PreTokenizedString`. If you just want to see the result of
the pre-tokenization of a raw string, you can use
:meth:`~tokenizers.pre_tokenizers.PreTokenizer.pre_tokenize_str`
Args:
            pretok (:class:`~tokenizers.PreTokenizedString`):
The pre-tokenized string on which to apply this
:class:`~tokenizers.pre_tokenizers.PreTokenizer`
"""
pass
def pre_tokenize_str(self, sequence):
"""
Pre tokenize the given string
This method provides a way to visualize the effect of a
:class:`~tokenizers.pre_tokenizers.PreTokenizer` but it does not keep track of the
alignment, nor does it provide all the capabilities of the
:class:`~tokenizers.PreTokenizedString`. If you need some of these, you can use
:meth:`~tokenizers.pre_tokenizers.PreTokenizer.pre_tokenize`
Args:
sequence (:obj:`str`):
                A string to pre-tokenize
Returns:
:obj:`List[Tuple[str, Offsets]]`:
                A list of tuples with the pre-tokenized parts and their offsets
"""
pass
class Split(PreTokenizer):
"""
Split PreTokenizer
This versatile pre-tokenizer splits using the provided pattern and
according to the provided behavior. The pattern can be inverted by
making use of the invert flag.
Args:
pattern (:obj:`str` or :class:`~tokenizers.Regex`):
            A pattern used to split the string. Usually a string or a regex built with `tokenizers.Regex`
behavior (:class:`~tokenizers.SplitDelimiterBehavior`):
The behavior to use when splitting.
Choices: "removed", "isolated", "merged_with_previous", "merged_with_next",
"contiguous"
invert (:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether to invert the pattern.
"""
def __init__(self, pattern, behavior, invert=False):
pass
def pre_tokenize(self, pretok):
"""
        Pre-tokenize a :class:`~tokenizers.PreTokenizedString` in-place
        This method allows you to modify a :class:`~tokenizers.PreTokenizedString` to
keep track of the pre-tokenization, and leverage the capabilities of the
:class:`~tokenizers.PreTokenizedString`. If you just want to see the result of
the pre-tokenization of a raw string, you can use
:meth:`~tokenizers.pre_tokenizers.PreTokenizer.pre_tokenize_str`
Args:
            pretok (:class:`~tokenizers.PreTokenizedString`):
The pre-tokenized string on which to apply this
:class:`~tokenizers.pre_tokenizers.PreTokenizer`
"""
pass
def pre_tokenize_str(self, sequence):
"""
Pre tokenize the given string
This method provides a way to visualize the effect of a
:class:`~tokenizers.pre_tokenizers.PreTokenizer` but it does not keep track of the
alignment, nor does it provide all the capabilities of the
:class:`~tokenizers.PreTokenizedString`. If you need some of these, you can use
:meth:`~tokenizers.pre_tokenizers.PreTokenizer.pre_tokenize`
Args:
sequence (:obj:`str`):
                A string to pre-tokenize
Returns:
:obj:`List[Tuple[str, Offsets]]`:
                A list of tuples with the pre-tokenized parts and their offsets
"""
pass
class UnicodeScripts(PreTokenizer):
"""
    This pre-tokenizer splits on characters that belong to different language families
It roughly follows https://github.com/google/sentencepiece/blob/master/data/Scripts.txt
Actually Hiragana and Katakana are fused with Han, and 0x30FC is Han too.
    This mimics the SentencePiece Unigram implementation.
"""
def __init__(self):
pass
def pre_tokenize(self, pretok):
"""
        Pre-tokenize a :class:`~tokenizers.PreTokenizedString` in-place
        This method allows you to modify a :class:`~tokenizers.PreTokenizedString` to
keep track of the pre-tokenization, and leverage the capabilities of the
:class:`~tokenizers.PreTokenizedString`. If you just want to see the result of
the pre-tokenization of a raw string, you can use
:meth:`~tokenizers.pre_tokenizers.PreTokenizer.pre_tokenize_str`
Args:
            pretok (:class:`~tokenizers.PreTokenizedString`):
The pre-tokenized string on which to apply this
:class:`~tokenizers.pre_tokenizers.PreTokenizer`
"""
pass
def pre_tokenize_str(self, sequence):
"""
Pre tokenize the given string
This method provides a way to visualize the effect of a
:class:`~tokenizers.pre_tokenizers.PreTokenizer` but it does not keep track of the
alignment, nor does it provide all the capabilities of the
:class:`~tokenizers.PreTokenizedString`. If you need some of these, you can use
:meth:`~tokenizers.pre_tokenizers.PreTokenizer.pre_tokenize`
Args:
sequence (:obj:`str`):
                A string to pre-tokenize
Returns:
:obj:`List[Tuple[str, Offsets]]`:
                A list of tuples with the pre-tokenized parts and their offsets
"""
pass
class Whitespace(PreTokenizer):
"""
This pre-tokenizer simply splits using the following regex: `\w+|[^\w\s]+`
"""
def __init__(self):
pass
def pre_tokenize(self, pretok):
"""
        Pre-tokenize a :class:`~tokenizers.PreTokenizedString` in-place
        This method allows you to modify a :class:`~tokenizers.PreTokenizedString` to
keep track of the pre-tokenization, and leverage the capabilities of the
:class:`~tokenizers.PreTokenizedString`. If you just want to see the result of
the pre-tokenization of a raw string, you can use
:meth:`~tokenizers.pre_tokenizers.PreTokenizer.pre_tokenize_str`
Args:
            pretok (:class:`~tokenizers.PreTokenizedString`):
The pre-tokenized string on which to apply this
:class:`~tokenizers.pre_tokenizers.PreTokenizer`
"""
pass
def pre_tokenize_str(self, sequence):
"""
Pre tokenize the given string
This method provides a way to visualize the effect of a
:class:`~tokenizers.pre_tokenizers.PreTokenizer` but it does not keep track of the
alignment, nor does it provide all the capabilities of the
:class:`~tokenizers.PreTokenizedString`. If you need some of these, you can use
:meth:`~tokenizers.pre_tokenizers.PreTokenizer.pre_tokenize`
Args:
sequence (:obj:`str`):
                A string to pre-tokenize
Returns:
:obj:`List[Tuple[str, Offsets]]`:
                A list of tuples with the pre-tokenized parts and their offsets
"""
pass
class WhitespaceSplit(PreTokenizer):
"""
This pre-tokenizer simply splits on the whitespace. Works like `.split()`
"""
def __init__(self):
pass
def pre_tokenize(self, pretok):
"""
        Pre-tokenize a :class:`~tokenizers.PreTokenizedString` in-place
        This method allows you to modify a :class:`~tokenizers.PreTokenizedString` to
keep track of the pre-tokenization, and leverage the capabilities of the
:class:`~tokenizers.PreTokenizedString`. If you just want to see the result of
the pre-tokenization of a raw string, you can use
:meth:`~tokenizers.pre_tokenizers.PreTokenizer.pre_tokenize_str`
Args:
            pretok (:class:`~tokenizers.PreTokenizedString`):
The pre-tokenized string on which to apply this
:class:`~tokenizers.pre_tokenizers.PreTokenizer`
"""
pass
def pre_tokenize_str(self, sequence):
"""
Pre tokenize the given string
This method provides a way to visualize the effect of a
:class:`~tokenizers.pre_tokenizers.PreTokenizer` but it does not keep track of the
alignment, nor does it provide all the capabilities of the
:class:`~tokenizers.PreTokenizedString`. If you need some of these, you can use
:meth:`~tokenizers.pre_tokenizers.PreTokenizer.pre_tokenize`
Args:
sequence (:obj:`str`):
                A string to pre-tokenize
Returns:
:obj:`List[Tuple[str, Offsets]]`:
                A list of tuples with the pre-tokenized parts and their offsets
"""
pass
| 0 |
hf_public_repos/tokenizers/bindings/python/py_src/tokenizers | hf_public_repos/tokenizers/bindings/python/py_src/tokenizers/pre_tokenizers/__init__.py | # Generated content DO NOT EDIT
from .. import pre_tokenizers
PreTokenizer = pre_tokenizers.PreTokenizer
BertPreTokenizer = pre_tokenizers.BertPreTokenizer
ByteLevel = pre_tokenizers.ByteLevel
CharDelimiterSplit = pre_tokenizers.CharDelimiterSplit
Digits = pre_tokenizers.Digits
Metaspace = pre_tokenizers.Metaspace
Punctuation = pre_tokenizers.Punctuation
Sequence = pre_tokenizers.Sequence
Split = pre_tokenizers.Split
UnicodeScripts = pre_tokenizers.UnicodeScripts
Whitespace = pre_tokenizers.Whitespace
WhitespaceSplit = pre_tokenizers.WhitespaceSplit
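# Example (illustrative): composing two of the pre-tokenizers exposed above.
# `pre_tokenize_str` is only meant for visualizing the splits; it does not keep
# the alignment information that `pre_tokenize` maintains.
if __name__ == "__main__":
    pre_tok = Sequence([Whitespace(), Digits(individual_digits=True)])
    print(pre_tok.pre_tokenize_str("Call 911!"))
    # [('Call', (0, 4)), ('9', (5, 6)), ('1', (6, 7)), ('1', (7, 8)), ('!', (8, 9))]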
| 0 |
hf_public_repos/tokenizers/bindings/python | hf_public_repos/tokenizers/bindings/python/tests/utils.py | import multiprocessing as mp
import os
import pytest
import requests
DATA_PATH = os.path.join("tests", "data")
def download(url, with_filename=None):
filename = with_filename if with_filename is not None else url.rsplit("/")[-1]
filepath = os.path.join(DATA_PATH, filename)
if not os.path.exists(filepath):
with open(filepath, "wb") as f:
response = requests.get(url, stream=True)
response.raise_for_status()
for chunk in response.iter_content(1024):
f.write(chunk)
return filepath
@pytest.fixture(scope="session")
def data_dir():
assert os.getcwd().endswith("python")
exist = os.path.exists(DATA_PATH) and os.path.isdir(DATA_PATH)
if not exist:
os.mkdir(DATA_PATH)
@pytest.fixture(scope="session")
def roberta_files(data_dir):
return {
"vocab": download("https://s3.amazonaws.com/models.huggingface.co/bert/roberta-base-vocab.json"),
"merges": download("https://s3.amazonaws.com/models.huggingface.co/bert/roberta-base-merges.txt"),
}
@pytest.fixture(scope="session")
def bert_files(data_dir):
return {
"vocab": download("https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-uncased-vocab.txt"),
}
@pytest.fixture(scope="session")
def openai_files(data_dir):
return {
"vocab": download("https://s3.amazonaws.com/models.huggingface.co/bert/openai-gpt-vocab.json"),
"merges": download("https://s3.amazonaws.com/models.huggingface.co/bert/openai-gpt-merges.txt"),
}
@pytest.fixture(scope="session")
def train_files(data_dir):
big = download("https://norvig.com/big.txt")
small = os.path.join(DATA_PATH, "small.txt")
with open(small, "w") as f:
with open(big, "r") as g:
for i, line in enumerate(g):
f.write(line)
if i > 100:
break
return {
"small": small,
"big": big,
}
@pytest.fixture(scope="session")
def albert_base(data_dir):
return download("https://s3.amazonaws.com/models.huggingface.co/bert/albert-base-v1-tokenizer.json")
@pytest.fixture(scope="session")
def doc_wiki_tokenizer(data_dir):
return download(
"https://s3.amazonaws.com/models.huggingface.co/bert/anthony/doc-quicktour/tokenizer.json",
"tokenizer-wiki.json",
)
@pytest.fixture(scope="session")
def doc_pipeline_bert_tokenizer(data_dir):
return download(
"https://s3.amazonaws.com/models.huggingface.co/bert/anthony/doc-pipeline/tokenizer.json",
"bert-wiki.json",
)
# On macOS, since Python 3.8, the default start method is `spawn`; we need `fork` in these tests.
mp.set_start_method("fork")
def multiprocessing_with_parallelism(tokenizer, enabled: bool):
"""
    This helper can be used to test that disabling parallelism avoids deadlocks when the
same tokenizer is used after forking.
"""
# It's essential to this test that we call 'encode' or 'encode_batch'
# before the fork. This causes the main process to "lock" some resources
# provided by the Rust "rayon" crate that are needed for parallel processing.
tokenizer.encode("Hi")
tokenizer.encode_batch(["hi", "there"])
def encode(tokenizer):
tokenizer.encode("Hi")
tokenizer.encode_batch(["hi", "there"])
# Make sure this environment variable is set before the fork happens
os.environ["TOKENIZERS_PARALLELISM"] = str(enabled)
p = mp.Process(target=encode, args=(tokenizer,))
p.start()
p.join(timeout=1)
# At this point the process should have successfully exited, depending on whether parallelism
# was activated or not. So we check the status and kill it if needed
alive = p.is_alive()
if alive:
p.terminate()
assert (alive and mp.get_start_method() == "fork") == enabled
| 0 |
hf_public_repos/tokenizers/bindings/python | hf_public_repos/tokenizers/bindings/python/tests/test_serialization.py | import json
import os
import unittest
import tqdm
from huggingface_hub import HfApi, cached_download, hf_hub_url
from tokenizers import Tokenizer
from .utils import albert_base, data_dir
class TestSerialization:
def test_full_serialization_albert(self, albert_base):
# Check we can read this file.
        # This used to fail because the file exceeds the BufReader's buffer capacity
tokenizer = Tokenizer.from_file(albert_base)
def check(tokenizer_file) -> bool:
with open(tokenizer_file, "r") as f:
data = json.load(f)
if "pre_tokenizer" not in data:
return True
if "type" not in data["pre_tokenizer"]:
return False
if data["pre_tokenizer"]["type"] == "Sequence":
for pre_tok in data["pre_tokenizer"]["pretokenizers"]:
if "type" not in pre_tok:
return False
return True
def slow(test_case):
"""
Decorator marking a test as slow.
Slow tests are skipped by default. Set the RUN_SLOW environment variable to a truthy value to run them.
"""
if os.getenv("RUN_SLOW") != "1":
return unittest.skip("use `RUN_SLOW=1` to run")(test_case)
else:
return test_case
@slow
class TestFullDeserialization(unittest.TestCase):
def test_full_deserialization_hub(self):
# Check we can read this file.
        # This used to fail because the file exceeds the BufReader's buffer capacity
api = HfApi()
not_loadable = []
invalid_pre_tokenizer = []
# models = api.list_models(filter="transformers")
# for model in tqdm.tqdm(models):
# model_id = model.modelId
# for model_file in model.siblings:
# filename = model_file.rfilename
# if filename == "tokenizer.json":
# all_models.append((model_id, filename))
all_models = [("HueyNemud/das22-10-camembert_pretrained", "tokenizer.json")]
for model_id, filename in tqdm.tqdm(all_models):
tokenizer_file = cached_download(hf_hub_url(model_id, filename=filename))
is_ok = check(tokenizer_file)
if not is_ok:
print(f"{model_id} is affected by no type")
invalid_pre_tokenizer.append(model_id)
try:
Tokenizer.from_file(tokenizer_file)
except Exception as e:
print(f"{model_id} is not loadable: {e}")
not_loadable.append(model_id)
except:
print(f"{model_id} is not loadable: Rust error")
not_loadable.append(model_id)
self.assertEqual(invalid_pre_tokenizer, [])
self.assertEqual(not_loadable, [])
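# Illustrative, hand-written `pre_tokenizer` payloads for the `check` helper
# above (minimal assumed shapes, not excerpts from real tokenizer.json files):
# a "Sequence" is only considered valid when every sub pre-tokenizer carries
# its own "type" field.
_VALID_PRE_TOKENIZER = {"type": "Sequence", "pretokenizers": [{"type": "Whitespace"}]}
_MISSING_TYPE_PRE_TOKENIZER = {"type": "Sequence", "pretokenizers": [{"add_prefix_space": True}]}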
| 0 |
hf_public_repos/tokenizers/bindings/python/tests | hf_public_repos/tokenizers/bindings/python/tests/documentation/test_tutorial_train_from_iterators.py | import gzip
import os
import datasets
import pytest
from ..utils import data_dir, train_files
class TestTrainFromIterators:
@staticmethod
def get_tokenizer_trainer():
# START init_tokenizer_trainer
from tokenizers import Tokenizer, decoders, models, normalizers, pre_tokenizers, trainers
tokenizer = Tokenizer(models.Unigram())
tokenizer.normalizer = normalizers.NFKC()
tokenizer.pre_tokenizer = pre_tokenizers.ByteLevel()
tokenizer.decoder = decoders.ByteLevel()
trainer = trainers.UnigramTrainer(
vocab_size=20000,
initial_alphabet=pre_tokenizers.ByteLevel.alphabet(),
special_tokens=["<PAD>", "<BOS>", "<EOS>"],
)
# END init_tokenizer_trainer
trainer.show_progress = False
return tokenizer, trainer
@staticmethod
def load_dummy_dataset():
# START load_dataset
import datasets
dataset = datasets.load_dataset("wikitext", "wikitext-103-raw-v1", split="train+test+validation")
# END load_dataset
@pytest.fixture(scope="class")
def setup_gzip_files(self, train_files):
        with open(train_files["small"], "rt") as small:
            data = small.read()
        for n in range(3):
            path = f"data/my-file.{n}.gz"
            with gzip.open(path, "wt") as f:
                f.write(data)
def test_train_basic(self):
tokenizer, trainer = self.get_tokenizer_trainer()
# START train_basic
# First few lines of the "Zen of Python" https://www.python.org/dev/peps/pep-0020/
data = [
"Beautiful is better than ugly."
"Explicit is better than implicit."
"Simple is better than complex."
"Complex is better than complicated."
"Flat is better than nested."
"Sparse is better than dense."
"Readability counts."
]
tokenizer.train_from_iterator(data, trainer=trainer)
# END train_basic
def test_datasets(self):
tokenizer, trainer = self.get_tokenizer_trainer()
# In order to keep tests fast, we only use the first 100 examples
os.environ["TOKENIZERS_PARALLELISM"] = "true"
dataset = datasets.load_dataset("wikitext", "wikitext-103-raw-v1", split="train[0:100]")
# START def_batch_iterator
def batch_iterator(batch_size=1000):
for i in range(0, len(dataset), batch_size):
yield dataset[i : i + batch_size]["text"]
# END def_batch_iterator
# START train_datasets
tokenizer.train_from_iterator(batch_iterator(), trainer=trainer, length=len(dataset))
# END train_datasets
def test_gzip(self, setup_gzip_files):
tokenizer, trainer = self.get_tokenizer_trainer()
# START single_gzip
import gzip
with gzip.open("data/my-file.0.gz", "rt") as f:
tokenizer.train_from_iterator(f, trainer=trainer)
# END single_gzip
# START multi_gzip
files = ["data/my-file.0.gz", "data/my-file.1.gz", "data/my-file.2.gz"]
def gzip_iterator():
for path in files:
with gzip.open(path, "rt") as f:
for line in f:
yield line
tokenizer.train_from_iterator(gzip_iterator(), trainer=trainer)
# END multi_gzip
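# A further illustrative variant (assumed file layout): any plain Python
# generator can feed `train_from_iterator`, for instance one that streams lines
# from ordinary text files, e.g.
#   tokenizer.train_from_iterator(plain_text_iterator(["data/small.txt"]), trainer=trainer)
def plain_text_iterator(paths):
    for path in paths:
        with open(path, "rt") as f:
            for line in f:
                yield line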
| 0 |
hf_public_repos/tokenizers/bindings/python/tests | hf_public_repos/tokenizers/bindings/python/tests/documentation/test_quicktour.py | from tokenizers import Tokenizer
from tokenizers.models import BPE
from tokenizers.pre_tokenizers import Whitespace
from tokenizers.trainers import BpeTrainer
from ..utils import data_dir, doc_wiki_tokenizer
disable_printing = True
original_print = print
def print(*args, **kwargs):
if not disable_printing:
original_print(*args, **kwargs)
class TestQuicktour:
# This method contains everything we don't want to run
@staticmethod
def slow_train():
tokenizer, trainer = TestQuicktour.get_tokenizer_trainer()
# START train
files = [f"data/wikitext-103-raw/wiki.{split}.raw" for split in ["test", "train", "valid"]]
tokenizer.train(files, trainer)
# END train
# START save
tokenizer.save("data/tokenizer-wiki.json")
# END save
@staticmethod
def get_tokenizer_trainer():
# START init_tokenizer
from tokenizers import Tokenizer
from tokenizers.models import BPE
tokenizer = Tokenizer(BPE(unk_token="[UNK]"))
# END init_tokenizer
# START init_trainer
from tokenizers.trainers import BpeTrainer
trainer = BpeTrainer(special_tokens=["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]"])
# END init_trainer
# START init_pretok
from tokenizers.pre_tokenizers import Whitespace
tokenizer.pre_tokenizer = Whitespace()
# END init_pretok
return tokenizer, trainer
def test_quicktour(self, doc_wiki_tokenizer):
def print(*args, **kwargs):
pass
try:
# START reload_tokenizer
tokenizer = Tokenizer.from_file("data/tokenizer-wiki.json")
# END reload_tokenizer
except Exception:
tokenizer = Tokenizer.from_file(doc_wiki_tokenizer)
# START encode
output = tokenizer.encode("Hello, y'all! How are you 😁 ?")
# END encode
# START print_tokens
print(output.tokens)
# ["Hello", ",", "y", "'", "all", "!", "How", "are", "you", "[UNK]", "?"]
# END print_tokens
assert output.tokens == [
"Hello",
",",
"y",
"'",
"all",
"!",
"How",
"are",
"you",
"[UNK]",
"?",
]
# START print_ids
print(output.ids)
# [27253, 16, 93, 11, 5097, 5, 7961, 5112, 6218, 0, 35]
# END print_ids
assert output.ids == [27253, 16, 93, 11, 5097, 5, 7961, 5112, 6218, 0, 35]
# START print_offsets
print(output.offsets[9])
# (26, 27)
# END print_offsets
assert output.offsets[9] == (26, 27)
# START use_offsets
sentence = "Hello, y'all! How are you 😁 ?"
sentence[26:27]
# "😁"
# END use_offsets
assert sentence[26:27] == "😁"
# START check_sep
tokenizer.token_to_id("[SEP]")
# 2
# END check_sep
assert tokenizer.token_to_id("[SEP]") == 2
# START init_template_processing
from tokenizers.processors import TemplateProcessing
tokenizer.post_processor = TemplateProcessing(
single="[CLS] $A [SEP]",
pair="[CLS] $A [SEP] $B:1 [SEP]:1",
special_tokens=[
("[CLS]", tokenizer.token_to_id("[CLS]")),
("[SEP]", tokenizer.token_to_id("[SEP]")),
],
)
# END init_template_processing
# START print_special_tokens
output = tokenizer.encode("Hello, y'all! How are you 😁 ?")
print(output.tokens)
# ["[CLS]", "Hello", ",", "y", "'", "all", "!", "How", "are", "you", "[UNK]", "?", "[SEP]"]
# END print_special_tokens
assert output.tokens == [
"[CLS]",
"Hello",
",",
"y",
"'",
"all",
"!",
"How",
"are",
"you",
"[UNK]",
"?",
"[SEP]",
]
# START print_special_tokens_pair
output = tokenizer.encode("Hello, y'all!", "How are you 😁 ?")
print(output.tokens)
# ["[CLS]", "Hello", ",", "y", "'", "all", "!", "[SEP]", "How", "are", "you", "[UNK]", "?", "[SEP]"]
# END print_special_tokens_pair
assert output.tokens == [
"[CLS]",
"Hello",
",",
"y",
"'",
"all",
"!",
"[SEP]",
"How",
"are",
"you",
"[UNK]",
"?",
"[SEP]",
]
# START print_type_ids
print(output.type_ids)
# [0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1]
# END print_type_ids
assert output.type_ids == [0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1]
# START encode_batch
output = tokenizer.encode_batch(["Hello, y'all!", "How are you 😁 ?"])
# END encode_batch
# START encode_batch_pair
output = tokenizer.encode_batch(
[["Hello, y'all!", "How are you 😁 ?"], ["Hello to you too!", "I'm fine, thank you!"]]
)
# END encode_batch_pair
# START enable_padding
tokenizer.enable_padding(pad_id=3, pad_token="[PAD]")
# END enable_padding
# START print_batch_tokens
output = tokenizer.encode_batch(["Hello, y'all!", "How are you 😁 ?"])
print(output[1].tokens)
# ["[CLS]", "How", "are", "you", "[UNK]", "?", "[SEP]", "[PAD]"]
# END print_batch_tokens
assert output[1].tokens == ["[CLS]", "How", "are", "you", "[UNK]", "?", "[SEP]", "[PAD]"]
# START print_attention_mask
print(output[1].attention_mask)
# [1, 1, 1, 1, 1, 1, 1, 0]
# END print_attention_mask
assert output[1].attention_mask == [1, 1, 1, 1, 1, 1, 1, 0]
if __name__ == "__main__":
import os
from urllib import request
from zipfile import ZipFile
disable_printing = False
if not os.path.isdir("data/wikitext-103-raw"):
print("Downloading wikitext-103...")
wiki_text, _ = request.urlretrieve(
"https://s3.amazonaws.com/research.metamind.io/wikitext/wikitext-103-raw-v1.zip"
)
with ZipFile(wiki_text, "r") as z:
print("Unzipping in data...")
z.extractall("data")
print("Now training...")
TestQuicktour.slow_train()
| 0 |
hf_public_repos/tokenizers/bindings/python/tests | hf_public_repos/tokenizers/bindings/python/tests/documentation/test_pipeline.py | from tokenizers import Tokenizer
from ..utils import data_dir, doc_pipeline_bert_tokenizer, doc_wiki_tokenizer
disable_printing = True
original_print = print
def print(*args, **kwargs):
if not disable_printing:
original_print(*args, **kwargs)
class TestPipeline:
def test_pipeline(self, doc_wiki_tokenizer):
try:
# START reload_tokenizer
from tokenizers import Tokenizer
tokenizer = Tokenizer.from_file("data/tokenizer-wiki.json")
# END reload_tokenizer
except Exception:
tokenizer = Tokenizer.from_file(doc_wiki_tokenizer)
# START setup_normalizer
from tokenizers import normalizers
from tokenizers.normalizers import NFD, StripAccents
normalizer = normalizers.Sequence([NFD(), StripAccents()])
# END setup_normalizer
# START test_normalizer
normalizer.normalize_str("Héllò hôw are ü?")
# "Hello how are u?"
# END test_normalizer
assert normalizer.normalize_str("Héllò hôw are ü?") == "Hello how are u?"
# START replace_normalizer
tokenizer.normalizer = normalizer
# END replace_normalizer
# START setup_pre_tokenizer
from tokenizers.pre_tokenizers import Whitespace
pre_tokenizer = Whitespace()
pre_tokenizer.pre_tokenize_str("Hello! How are you? I'm fine, thank you.")
# [("Hello", (0, 5)), ("!", (5, 6)), ("How", (7, 10)), ("are", (11, 14)), ("you", (15, 18)),
# ("?", (18, 19)), ("I", (20, 21)), ("'", (21, 22)), ('m', (22, 23)), ("fine", (24, 28)),
# (",", (28, 29)), ("thank", (30, 35)), ("you", (36, 39)), (".", (39, 40))]
# END setup_pre_tokenizer
assert pre_tokenizer.pre_tokenize_str("Hello! How are you? I'm fine, thank you.") == [
("Hello", (0, 5)),
("!", (5, 6)),
("How", (7, 10)),
("are", (11, 14)),
("you", (15, 18)),
("?", (18, 19)),
("I", (20, 21)),
("'", (21, 22)),
("m", (22, 23)),
("fine", (24, 28)),
(",", (28, 29)),
("thank", (30, 35)),
("you", (36, 39)),
(".", (39, 40)),
]
# START combine_pre_tokenizer
from tokenizers import pre_tokenizers
from tokenizers.pre_tokenizers import Digits
pre_tokenizer = pre_tokenizers.Sequence([Whitespace(), Digits(individual_digits=True)])
pre_tokenizer.pre_tokenize_str("Call 911!")
# [("Call", (0, 4)), ("9", (5, 6)), ("1", (6, 7)), ("1", (7, 8)), ("!", (8, 9))]
# END combine_pre_tokenizer
assert pre_tokenizer.pre_tokenize_str("Call 911!") == [
("Call", (0, 4)),
("9", (5, 6)),
("1", (6, 7)),
("1", (7, 8)),
("!", (8, 9)),
]
# START replace_pre_tokenizer
tokenizer.pre_tokenizer = pre_tokenizer
# END replace_pre_tokenizer
# START setup_processor
from tokenizers.processors import TemplateProcessing
tokenizer.post_processor = TemplateProcessing(
single="[CLS] $A [SEP]",
pair="[CLS] $A [SEP] $B:1 [SEP]:1",
special_tokens=[("[CLS]", 1), ("[SEP]", 2)],
)
# END setup_processor
# START test_decoding
output = tokenizer.encode("Hello, y'all! How are you 😁 ?")
print(output.ids)
# [1, 27253, 16, 93, 11, 5097, 5, 7961, 5112, 6218, 0, 35, 2]
tokenizer.decode([1, 27253, 16, 93, 11, 5097, 5, 7961, 5112, 6218, 0, 35, 2])
# "Hello , y ' all ! How are you ?"
# END test_decoding
assert output.ids == [1, 27253, 16, 93, 11, 5097, 5, 7961, 5112, 6218, 0, 35, 2]
assert (
tokenizer.decode([1, 27253, 16, 93, 11, 5097, 5, 7961, 5112, 6218, 0, 35, 2])
== "Hello , y ' all ! How are you ?"
)
@staticmethod
def slow_train():
# START bert_setup_tokenizer
from tokenizers import Tokenizer
from tokenizers.models import WordPiece
bert_tokenizer = Tokenizer(WordPiece(unk_token="[UNK]"))
# END bert_setup_tokenizer
# START bert_setup_normalizer
from tokenizers import normalizers
from tokenizers.normalizers import NFD, Lowercase, StripAccents
bert_tokenizer.normalizer = normalizers.Sequence([NFD(), Lowercase(), StripAccents()])
# END bert_setup_normalizer
# START bert_setup_pre_tokenizer
from tokenizers.pre_tokenizers import Whitespace
bert_tokenizer.pre_tokenizer = Whitespace()
# END bert_setup_pre_tokenizer
# START bert_setup_processor
from tokenizers.processors import TemplateProcessing
bert_tokenizer.post_processor = TemplateProcessing(
single="[CLS] $A [SEP]",
pair="[CLS] $A [SEP] $B:1 [SEP]:1",
special_tokens=[
("[CLS]", 1),
("[SEP]", 2),
],
)
# END bert_setup_processor
# START bert_train_tokenizer
from tokenizers.trainers import WordPieceTrainer
trainer = WordPieceTrainer(vocab_size=30522, special_tokens=["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]"])
files = [f"data/wikitext-103-raw/wiki.{split}.raw" for split in ["test", "train", "valid"]]
bert_tokenizer.train(files, trainer)
bert_tokenizer.save("data/bert-wiki.json")
# END bert_train_tokenizer
def test_bert_example(self, doc_pipeline_bert_tokenizer):
try:
bert_tokenizer = Tokenizer.from_file("data/bert-wiki.json")
except Exception:
bert_tokenizer = Tokenizer.from_file(doc_pipeline_bert_tokenizer)
# START bert_test_decoding
output = bert_tokenizer.encode("Welcome to the 🤗 Tokenizers library.")
print(output.tokens)
# ["[CLS]", "welcome", "to", "the", "[UNK]", "tok", "##eni", "##zer", "##s", "library", ".", "[SEP]"]
bert_tokenizer.decode(output.ids)
# "welcome to the tok ##eni ##zer ##s library ."
# END bert_test_decoding
assert bert_tokenizer.decode(output.ids) == "welcome to the tok ##eni ##zer ##s library ."
# START bert_proper_decoding
from tokenizers import decoders
bert_tokenizer.decoder = decoders.WordPiece()
bert_tokenizer.decode(output.ids)
# "welcome to the tokenizers library."
# END bert_proper_decoding
assert bert_tokenizer.decode(output.ids) == "welcome to the tokenizers library."
if __name__ == "__main__":
import os
from urllib import request
from zipfile import ZipFile
disable_printing = False
if not os.path.isdir("data/wikitext-103-raw"):
print("Downloading wikitext-103...")
wiki_text, _ = request.urlretrieve(
"https://s3.amazonaws.com/research.metamind.io/wikitext/wikitext-103-raw-v1.zip"
)
with ZipFile(wiki_text, "r") as z:
print("Unzipping in data...")
z.extractall("data")
print("Now training...")
TestPipeline.slow_train()
| 0 |
hf_public_repos/tokenizers/bindings/python/tests | hf_public_repos/tokenizers/bindings/python/tests/implementations/test_bert_wordpiece.py | import pytest
from tokenizers import BertWordPieceTokenizer
from ..utils import bert_files, data_dir, multiprocessing_with_parallelism
class TestBertWordPieceTokenizer:
def test_basic_encode(self, bert_files):
tokenizer = BertWordPieceTokenizer.from_file(bert_files["vocab"])
# Encode with special tokens by default
output = tokenizer.encode("My name is John", "pair")
assert output.ids == [101, 2026, 2171, 2003, 2198, 102, 3940, 102]
assert output.tokens == [
"[CLS]",
"my",
"name",
"is",
"john",
"[SEP]",
"pair",
"[SEP]",
]
assert output.offsets == [
(0, 0),
(0, 2),
(3, 7),
(8, 10),
(11, 15),
(0, 0),
(0, 4),
(0, 0),
]
assert output.type_ids == [0, 0, 0, 0, 0, 0, 1, 1]
# Can encode without the special tokens
output = tokenizer.encode("My name is John", "pair", add_special_tokens=False)
assert output.ids == [2026, 2171, 2003, 2198, 3940]
assert output.tokens == ["my", "name", "is", "john", "pair"]
assert output.offsets == [(0, 2), (3, 7), (8, 10), (11, 15), (0, 4)]
assert output.type_ids == [0, 0, 0, 0, 1]
def test_multiprocessing_with_parallelism(self, bert_files):
tokenizer = BertWordPieceTokenizer.from_file(bert_files["vocab"])
multiprocessing_with_parallelism(tokenizer, False)
multiprocessing_with_parallelism(tokenizer, True)
def test_train_from_iterator(self):
text = ["A first sentence", "Another sentence", "And a last one"]
tokenizer = BertWordPieceTokenizer()
tokenizer.train_from_iterator(text, show_progress=False)
output = tokenizer.encode("A sentence")
assert output.tokens == ["a", "sentence"]
| 0 |
hf_public_repos/tokenizers/bindings/python/tests | hf_public_repos/tokenizers/bindings/python/tests/implementations/test_base_tokenizer.py | import pytest
from tokenizers import Tokenizer, decoders, models, normalizers, pre_tokenizers, processors
from tokenizers.implementations import BaseTokenizer
class TestBaseTokenizer:
def test_get_set_components(self):
toki = Tokenizer(models.BPE())
toki.normalizer = normalizers.NFC()
toki.pre_tokenizer = pre_tokenizers.ByteLevel()
toki.post_processor = processors.BertProcessing(("A", 0), ("B", 1))
toki.decoder = decoders.ByteLevel()
tokenizer = BaseTokenizer(toki)
assert isinstance(tokenizer.model, models.BPE)
assert isinstance(tokenizer.normalizer, normalizers.NFC)
assert isinstance(tokenizer.pre_tokenizer, pre_tokenizers.ByteLevel)
assert isinstance(tokenizer.post_processor, processors.BertProcessing)
assert isinstance(tokenizer.decoder, decoders.ByteLevel)
tokenizer.model = models.Unigram()
assert isinstance(tokenizer.model, models.Unigram)
tokenizer.normalizer = normalizers.NFD()
assert isinstance(tokenizer.normalizer, normalizers.NFD)
tokenizer.pre_tokenizer = pre_tokenizers.Whitespace()
assert isinstance(tokenizer.pre_tokenizer, pre_tokenizers.Whitespace)
tokenizer.post_processor = processors.ByteLevel()
assert isinstance(tokenizer.post_processor, processors.ByteLevel)
tokenizer.decoder = decoders.WordPiece()
assert isinstance(tokenizer.decoder, decoders.WordPiece)
| 0 |
hf_public_repos/tokenizers/bindings/python/tests | hf_public_repos/tokenizers/bindings/python/tests/implementations/test_char_bpe.py | import pytest
from tokenizers import CharBPETokenizer
from ..utils import data_dir, multiprocessing_with_parallelism, openai_files
class TestCharBPETokenizer:
def test_basic_encode(self, openai_files):
tokenizer = CharBPETokenizer.from_file(openai_files["vocab"], openai_files["merges"])
output = tokenizer.encode("My name is John", "pair")
assert output.ids == [0, 253, 1362, 544, 0, 7, 12662, 2688]
assert output.tokens == [
"<unk>",
"y</w>",
"name</w>",
"is</w>",
"<unk>",
"o",
"hn</w>",
"pair</w>",
]
assert output.offsets == [
(0, 1),
(1, 2),
(3, 7),
(8, 10),
(11, 12),
(12, 13),
(13, 15),
(0, 4),
]
assert output.type_ids == [0, 0, 0, 0, 0, 0, 0, 1]
def test_lowercase(self, openai_files):
tokenizer = CharBPETokenizer.from_file(openai_files["vocab"], openai_files["merges"], lowercase=True)
output = tokenizer.encode("My name is John", "pair", add_special_tokens=False)
assert output.ids == [547, 1362, 544, 2476, 2688]
assert output.tokens == ["my</w>", "name</w>", "is</w>", "john</w>", "pair</w>"]
assert output.offsets == [(0, 2), (3, 7), (8, 10), (11, 15), (0, 4)]
assert output.type_ids == [0, 0, 0, 0, 1]
def test_decoding(self, openai_files):
tokenizer = CharBPETokenizer.from_file(openai_files["vocab"], openai_files["merges"], lowercase=True)
decoded = tokenizer.decode(tokenizer.encode("my name is john").ids)
assert decoded == "my name is john"
def test_multiprocessing_with_parallelism(self, openai_files):
tokenizer = CharBPETokenizer.from_file(openai_files["vocab"], openai_files["merges"])
multiprocessing_with_parallelism(tokenizer, False)
multiprocessing_with_parallelism(tokenizer, True)
def test_train_from_iterator(self):
text = ["A first sentence", "Another sentence", "And a last one"]
tokenizer = CharBPETokenizer()
tokenizer.train_from_iterator(text, show_progress=False)
output = tokenizer.encode("A sentence")
assert output.tokens == ["A</w>", "sentence</w>"]
| 0 |
hf_public_repos/tokenizers/bindings/python/tests | hf_public_repos/tokenizers/bindings/python/tests/implementations/test_sentencepiece.py | import os
import pytest
from tokenizers import SentencePieceBPETokenizer, SentencePieceUnigramTokenizer
class TestSentencePieceBPE:
def test_train_from_iterator(self):
text = ["A first sentence", "Another sentence", "And a last one"]
tokenizer = SentencePieceBPETokenizer()
tokenizer.train_from_iterator(text, show_progress=False)
output = tokenizer.encode("A sentence")
assert output.tokens == ["▁A", "▁sentence"]
class TestSentencePieceUnigram:
def test_train(self, tmpdir):
p = tmpdir.mkdir("tmpdir").join("file.txt")
p.write("A first sentence\nAnother sentence\nAnd a last one")
tokenizer = SentencePieceUnigramTokenizer()
tokenizer.train(files=str(p), show_progress=False)
output = tokenizer.encode("A sentence")
assert output.tokens == ["▁A", "▁", "s", "en", "t", "en", "c", "e"]
with pytest.raises(Exception) as excinfo:
_ = tokenizer.encode("A sentence 🤗")
assert str(excinfo.value) == "Encountered an unknown token but `unk_id` is missing"
def test_train_with_unk_token(self, tmpdir):
p = tmpdir.mkdir("tmpdir").join("file.txt")
p.write("A first sentence\nAnother sentence\nAnd a last one")
tokenizer = SentencePieceUnigramTokenizer()
tokenizer.train(files=str(p), show_progress=False, special_tokens=["<unk>"], unk_token="<unk>")
output = tokenizer.encode("A sentence 🤗")
assert output.ids[-1] == 0
assert output.tokens == ["▁A", "▁", "s", "en", "t", "en", "c", "e", "▁", "🤗"]
def test_train_from_iterator(self):
text = ["A first sentence", "Another sentence", "And a last one"]
tokenizer = SentencePieceUnigramTokenizer()
tokenizer.train_from_iterator(text, show_progress=False)
output = tokenizer.encode("A sentence")
assert output.tokens == ["▁A", "▁", "s", "en", "t", "en", "c", "e"]
with pytest.raises(Exception) as excinfo:
_ = tokenizer.encode("A sentence 🤗")
assert str(excinfo.value) == "Encountered an unknown token but `unk_id` is missing"
def test_train_from_iterator_with_unk_token(self):
text = ["A first sentence", "Another sentence", "And a last one"]
tokenizer = SentencePieceUnigramTokenizer()
tokenizer.train_from_iterator(
text, vocab_size=100, show_progress=False, special_tokens=["<unk>"], unk_token="<unk>"
)
output = tokenizer.encode("A sentence 🤗")
assert output.ids[-1] == 0
assert output.tokens == ["▁A", "▁", "s", "en", "t", "en", "c", "e", "▁", "🤗"]
| 0 |
hf_public_repos/tokenizers/bindings/python/tests | hf_public_repos/tokenizers/bindings/python/tests/implementations/test_byte_level_bpe.py | import pytest
from tokenizers import ByteLevelBPETokenizer
from ..utils import data_dir, multiprocessing_with_parallelism, roberta_files
class TestByteLevelBPE:
def test_basic_encode(self, roberta_files):
tokenizer = ByteLevelBPETokenizer.from_file(roberta_files["vocab"], roberta_files["merges"])
output = tokenizer.encode("The quick brown fox jumps over the lazy dog")
assert output.ids == [133, 2119, 6219, 23602, 13855, 81, 5, 22414, 2335]
assert output.tokens == [
"The",
"Ġquick",
"Ġbrown",
"Ġfox",
"Ġjumps",
"Ġover",
"Ġthe",
"Ġlazy",
"Ġdog",
]
assert output.offsets == [
(0, 3),
(3, 9),
(9, 15),
(15, 19),
(19, 25),
(25, 30),
(30, 34),
(34, 39),
(39, 43),
]
def test_add_prefix_space(self, roberta_files):
tokenizer = ByteLevelBPETokenizer.from_file(
roberta_files["vocab"], roberta_files["merges"], add_prefix_space=True
)
output = tokenizer.encode("The quick brown fox jumps over the lazy dog")
assert output.ids == [20, 2119, 6219, 23602, 13855, 81, 5, 22414, 2335]
assert output.tokens == [
"ĠThe",
"Ġquick",
"Ġbrown",
"Ġfox",
"Ġjumps",
"Ġover",
"Ġthe",
"Ġlazy",
"Ġdog",
]
assert output.offsets == [
(0, 3),
(3, 9),
(9, 15),
(15, 19),
(19, 25),
(25, 30),
(30, 34),
(34, 39),
(39, 43),
]
def test_lowerspace(self, roberta_files):
tokenizer = ByteLevelBPETokenizer.from_file(
roberta_files["vocab"],
roberta_files["merges"],
add_prefix_space=True,
lowercase=True,
)
output = tokenizer.encode("The Quick Brown Fox Jumps Over The Lazy Dog")
assert output.ids == [5, 2119, 6219, 23602, 13855, 81, 5, 22414, 2335]
assert output.tokens == [
"Ġthe",
"Ġquick",
"Ġbrown",
"Ġfox",
"Ġjumps",
"Ġover",
"Ġthe",
"Ġlazy",
"Ġdog",
]
def test_multiprocessing_with_parallelism(self, roberta_files):
tokenizer = ByteLevelBPETokenizer.from_file(roberta_files["vocab"], roberta_files["merges"])
multiprocessing_with_parallelism(tokenizer, False)
multiprocessing_with_parallelism(tokenizer, True)
def test_train_from_iterator(self):
text = ["A first sentence", "Another sentence", "And a last one"]
tokenizer = ByteLevelBPETokenizer()
tokenizer.train_from_iterator(text, show_progress=False)
output = tokenizer.encode("A sentence")
assert output.tokens == ["A", "Ġsentence"]
| 0 |
hf_public_repos/tokenizers/bindings/python/tests | hf_public_repos/tokenizers/bindings/python/tests/bindings/test_processors.py | import json
import pickle
import pytest
from tokenizers import Tokenizer
from tokenizers.models import BPE
from tokenizers.pre_tokenizers import ByteLevel as ByteLevelPreTokenizer
from tokenizers.processors import (
BertProcessing,
ByteLevel,
PostProcessor,
RobertaProcessing,
Sequence,
TemplateProcessing,
)
from ..utils import data_dir, roberta_files
class TestBertProcessing:
def test_instantiate(self):
processor = BertProcessing(("[SEP]", 0), ("[CLS]", 1))
assert processor is not None
assert isinstance(processor, PostProcessor)
assert isinstance(processor, BertProcessing)
assert isinstance(
pickle.loads(pickle.dumps(BertProcessing(("[SEP]", 0), ("[CLS]", 1)))),
BertProcessing,
)
def test_processing(self):
tokenizer = Tokenizer(BPE())
tokenizer.add_special_tokens(["[SEP]", "[CLS]"])
tokenizer.add_tokens(["my", "name", "is", "john", "pair"])
tokenizer.post_processor = BertProcessing(("[SEP]", 0), ("[CLS]", 1))
output = tokenizer.encode("my name", "pair")
assert output.tokens == ["[CLS]", "my", "name", "[SEP]", "pair", "[SEP]"]
assert output.ids == [1, 2, 3, 0, 6, 0]
class TestRobertaProcessing:
def test_instantiate(self):
processor = RobertaProcessing(("</s>", 1), ("<s>", 0))
assert processor is not None
assert isinstance(processor, PostProcessor)
assert isinstance(processor, RobertaProcessing)
assert isinstance(
pickle.loads(pickle.dumps(RobertaProcessing(("</s>", 1), ("<s>", 0)))),
RobertaProcessing,
)
def test_processing(self):
tokenizer = Tokenizer(BPE())
tokenizer.add_special_tokens(["<s>", "</s>"])
tokenizer.add_tokens(["my", "name", "is", "john", "pair"])
tokenizer.post_processor = RobertaProcessing(("</s>", 1), ("<s>", 0))
output = tokenizer.encode("my name", "pair")
assert output.tokens == ["<s>", "my", "name", "</s>", "</s>", "pair", "</s>"]
assert output.ids == [0, 2, 3, 1, 1, 6, 1]
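        # RobertaProcessing wraps each sequence in <s> ... </s> and separates the
        # pair with a double "</s> </s>", hence the repeated id 1 in the output.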
class TestByteLevelProcessing:
def test_instantiate(self):
assert ByteLevel() is not None
assert ByteLevel(trim_offsets=True) is not None
assert isinstance(ByteLevel(), PostProcessor)
assert isinstance(ByteLevel(), ByteLevel)
assert isinstance(pickle.loads(pickle.dumps(ByteLevel())), ByteLevel)
def test_processing(self, roberta_files):
# Deprecated in 0.9
with pytest.deprecated_call():
tokenizer = Tokenizer(BPE(roberta_files["vocab"], roberta_files["merges"]))
tokenizer.pre_tokenizer = ByteLevelPreTokenizer(add_prefix_space=True)
# Keeps original offsets
output = tokenizer.encode("My name is John")
assert output.tokens == ["ĠMy", "Ġname", "Ġis", "ĠJohn"]
assert output.offsets == [(0, 2), (2, 7), (7, 10), (10, 15)]
# Trims offsets when activated
tokenizer.post_processor = ByteLevel(trim_offsets=True)
output = tokenizer.encode("My name is John")
assert output.tokens == ["ĠMy", "Ġname", "Ġis", "ĠJohn"]
assert output.offsets == [(0, 2), (3, 7), (8, 10), (11, 15)]
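        # With trim_offsets=True the offsets no longer cover the whitespace
        # encoded by the leading "Ġ": (2, 7) becomes (3, 7), (7, 10) becomes
        # (8, 10), etc. The first token keeps (0, 2) because its leading space
        # was added by add_prefix_space and is not part of the input.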
def test_manual_reload(self):
byte_level = ByteLevel()
state = json.loads(byte_level.__getstate__())
reloaded = ByteLevel(**state)
assert isinstance(reloaded, ByteLevel)
class TestTemplateProcessing:
def get_bert(self):
return TemplateProcessing(
single=["[CLS]", "$0", "[SEP]"],
pair=["[CLS]", "$A", "[SEP]", "$B:1", "[SEP]:1"],
special_tokens=[("[CLS]", 1), ("[SEP]", 0)],
)
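    # In these templates "$0"/"$A" refers to the first sequence and "$B" to the
    # second; an optional ":<n>" suffix (e.g. "$B:1", "[SEP]:1") sets the type id
    # assigned to that piece, which defaults to 0 when omitted.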
def get_roberta(self):
return TemplateProcessing(
single="<s> $0 </s>",
pair="<s> $A </s> </s> $B </s>",
special_tokens=[("<s>", 0), ("</s>", 1)],
)
def get_t5_squad(self):
# >>> from transformers import AutoTokenizer
# >>> tok = AutoTokenizer.from_pretrained("t5-small")
# >>> tok.tokenize("question: ")
# ['▁question', ':']
# >>> tok.tokenize("context: ")
# ['▁context', ':']
# >>> tok.encode("context: ")
# [2625, 10]
# >>> tok.encode("question: ")
# [822, 10]
return TemplateProcessing(
single=["$0"],
pair=["Q", "$A", "C", "$B"],
special_tokens=[
{
"id": "Q",
"ids": [2625, 10],
"tokens": ["_question", ":"],
},
{
"id": "C",
"ids": [822, 10],
"tokens": ["_context", ":"],
},
],
)
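    # The "Q" and "C" pieces above are multi-token special entries: each one
    # expands to two ids/tokens (the "question: " and "context: " prefixes of the
    # T5 SQuAD format) rather than a single special token.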
def test_instantiate(self):
bert = self.get_bert()
assert bert is not None
assert isinstance(bert, PostProcessor)
assert isinstance(bert, TemplateProcessing)
assert isinstance(pickle.loads(pickle.dumps(bert)), TemplateProcessing)
# It is absolutely legal to have tokens with spaces in the name:
processor = TemplateProcessing(
single=["[ C L S ]", "Token with space"],
special_tokens=[("[ C L S ]", 0), ("Token with space", 1)],
)
# Sequence identifiers must be well formed:
with pytest.raises(Exception, match="Cannot build Piece"):
processor = TemplateProcessing(single="[CLS] $$ [SEP]")
with pytest.raises(Exception, match="Cannot build Piece"):
processor = TemplateProcessing(single="[CLS] $A: [SEP]")
# Special tokens must be provided when used in template:
with pytest.raises(Exception, match="Missing SpecialToken\\(s\\) with id\\(s\\)"):
processor = TemplateProcessing(single=["[CLS]"])
def test_bert_parity(self):
tokenizer = Tokenizer(BPE())
tokenizer.add_special_tokens(["[SEP]", "[CLS]"])
tokenizer.add_tokens(["my", "name", "is", "john", "pair"])
tokenizer.post_processor = BertProcessing(("[SEP]", 0), ("[CLS]", 1))
original = tokenizer.encode("my name", "pair")
tokenizer.post_processor = self.get_bert()
template = tokenizer.encode("my name", "pair")
assert original.ids == template.ids
def test_roberta_parity(self):
tokenizer = Tokenizer(BPE())
tokenizer.add_special_tokens(["<s>", "</s>"])
tokenizer.add_tokens(["my", "name", "is", "john", "pair"])
tokenizer.post_processor = RobertaProcessing(("</s>", 1), ("<s>", 0))
original = tokenizer.encode("my name is john", "pair")
tokenizer.post_processor = self.get_roberta()
template = tokenizer.encode("my name is john", "pair")
assert original.ids == template.ids
class TestSequenceProcessing:
def test_sequence_processing(self):
assert Sequence([]) is not None
assert Sequence([ByteLevel()]) is not None
assert isinstance(Sequence([]), PostProcessor)
assert isinstance(Sequence([]), Sequence)
serialized = pickle.dumps(Sequence([]))
assert isinstance(pickle.loads(serialized), Sequence)
def test_post_process(self):
byte_level = ByteLevel(trim_offsets=True)
template = TemplateProcessing(
single=["[CLS]", "$0", "[SEP]"],
pair=["[CLS]:0", "$A", "[SEP]:0", "$B:1", "[SEP]:1"],
special_tokens=[("[CLS]", 1), ("[SEP]", 0)],
)
tokenizer = Tokenizer(BPE())
tokenizer.add_special_tokens(["[SEP]", "[CLS]"])
tokenizer.add_tokens(["my", "name", "is", "Ġjohn", "pair"])
tokenizer.post_processor = template
# Before the sequence
original = tokenizer.encode("my name is Ġjohn")
assert original.ids == [1, 2, 3, 4, 5, 0]
assert original.type_ids == [0, 0, 0, 0, 0, 0]
assert original.offsets == [(0, 0), (0, 2), (3, 7), (8, 10), (11, 16), (0, 0)]
pair = tokenizer.encode("my name is Ġjohn", "pair")
# assert pair.ids == [1, 2, 3, 4, 5, 0, 6, 0]
assert pair.type_ids == [0, 0, 0, 0, 0, 0, 1, 1]
assert pair.offsets == [(0, 0), (0, 2), (3, 7), (8, 10), (11, 16), (0, 0), (0, 4), (0, 0)]
processor = Sequence([byte_level, template])
tokenizer.post_processor = processor
original = tokenizer.encode("my name is Ġjohn")
assert original.ids == [1, 2, 3, 4, 5, 0]
assert original.type_ids == [0, 0, 0, 0, 0, 0]
# Offsets ARE trimmed
assert original.offsets == [(0, 0), (0, 2), (3, 7), (8, 10), (12, 16), (0, 0)]
pair = tokenizer.encode("my name is Ġjohn", "pair")
# assert pair.ids == [1, 2, 3, 4, 5, 0, 6, 0]
assert pair.type_ids == [0, 0, 0, 0, 0, 0, 1, 1]
assert pair.offsets == [(0, 0), (0, 2), (3, 7), (8, 10), (12, 16), (0, 0), (0, 4), (0, 0)]
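        # The processors in a Sequence run in order: ByteLevel first trims the
        # offsets (e.g. "Ġjohn" goes from (11, 16) to (12, 16)), then the
        # template adds the special tokens around the trimmed encoding.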
| 0 |