Datasets:
Tasks:
Token Classification
Modalities:
Text
Formats:
parquet
Languages:
Thai
Size:
100K - 1M
Tags:
word-tokenization
License:
add dataset_info in dataset metadata
Browse files
README.md
CHANGED
@@ -20,6 +20,44 @@ paperswithcode_id: null
 pretty_name: best2009
 tags:
 - word-tokenization
+dataset_info:
+  features:
+  - name: fname
+    dtype: string
+  - name: char
+    sequence: string
+  - name: char_type
+    sequence:
+      class_label:
+        names:
+          0: b_e
+          1: c
+          2: d
+          3: n
+          4: o
+          5: p
+          6: q
+          7: s
+          8: s_e
+          9: t
+          10: v
+          11: w
+  - name: is_beginning
+    sequence:
+      class_label:
+        names:
+          0: neg
+          1: pos
+  config_name: best2009
+  splits:
+  - name: test
+    num_bytes: 10498726
+    num_examples: 2252
+  - name: train
+    num_bytes: 483129998
+    num_examples: 148995
+  download_size: 13891260
+  dataset_size: 493628724
 ---
 
 # Dataset Card for `best2009`