---
dataset_info:
  features:
  - name: x
    dtype: float64
  - name: y
    dtype: float64
  - name: language
    dtype: string
  - name: corpus
    dtype: string
  splits:
  - name: train
    num_bytes: 247037602
    num_examples: 5785741
  download_size: 112131877
  dataset_size: 247037602
---

# Dataset Card for "roots-viz-data"

2D coordinates for visualizing the ROOTS corpus: each row holds a document's t-SNE position (`x`, `y`) together with the `language` and `corpus` (source sub-dataset) it comes from. The coordinates were produced with the notebook code below: tokenize each document, build a token count matrix with Dask, reweight with TF-IDF, reduce with truncated SVD, embed with openTSNE, and render the point cloud with datashader. A sketch of how to load and plot the released data follows the pipeline code.

```python
import os
import numpy as np
import pandas as pd
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.decomposition import TruncatedSVD
from tqdm.notebook import tqdm
from openTSNE import TSNE
import datashader as ds
import colorcet as cc

import vectorizers
from vectorizers.transformers import CountFeatureCompressionTransformer, InformationWeightTransformer

from dask.distributed import Client, LocalCluster
import dask.dataframe as dd
import dask_ml.feature_extraction.text
import dask.bag as db

from transformers import AutoTokenizer, AutoModel
from huggingface_hub import notebook_login, HfApi, hf_hub_download, Repository
from datasets import load_dataset
from datasets.utils.py_utils import convert_file_size_to_int

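# Tokenize every document and store the tokens as one whitespace-joined string, so the
# count vectorizer below can re-split them with str.split. `tokenizer` (a Hugging Face
# fast tokenizer) and `dset` (the loaded corpus) come from earlier cells of the original
# notebook and are not defined in this snippet.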
def batch_tokenize(batch):
    return {'tokenized': [' '.join(e.tokens) for e in tokenizer(batch['text']).encodings]}

dset = dset.map(batch_tokenize, batched=True, batch_size=64, num_proc=28)

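# Shard the tokenized dataset into ~300 MB parquet files so Dask can read it lazily.
# `dset_name` (the output directory prefix) is likewise assumed to be set earlier.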
max_shard_size = convert_file_size_to_int('300MB')
dataset_nbytes = dset.data.nbytes
num_shards = int(dataset_nbytes / max_shard_size) + 1
num_shards = max(num_shards, 1)
print(f"Sharding into {num_shards} files.")
os.makedirs(f"{dset_name}/tokenized", exist_ok=True)
for shard_index in tqdm(range(num_shards)):
    shard = dset.shard(num_shards=num_shards, index=shard_index, contiguous=True)
    shard.to_parquet(f"{dset_name}/tokenized/tokenized-{shard_index:03d}.parquet")

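# Start a local Dask cluster for the out-of-core counting step.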
client = Client()
client

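# Lazily read the tokenized shards and count tokens against a fixed vocabulary;
# `vocab` is assumed to have been built earlier in the original notebook.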
df = dd.read_parquet(f'{dset_name}/tokenized/')
vect = dask_ml.feature_extraction.text.CountVectorizer(tokenizer=str.split,
                                                       token_pattern=None,
                                                       vocabulary=vocab)
tokenized_bag = df['tokenized'].to_bag()
X = vect.transform(tokenized_bag)

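# Materialize the sparse count matrix in local memory, then release the cluster.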
counts = X.compute()
client.shutdown()

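# Reweight the raw counts with sublinear, L2-normalized TF-IDF.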
tfidf_transformer = TfidfTransformer(sublinear_tf=True, norm="l2")
tfidf = tfidf_transformer.fit_transform(counts)

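# Reduce the sparse TF-IDF matrix to 160 dense dimensions with truncated SVD.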
svd = TruncatedSVD(n_components=160)
X_svd = svd.fit_transform(tfidf)

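# Embed the SVD-reduced vectors in 2D with openTSNE (28 threads, fixed seed).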
tsne = TSNE(
    perplexity=30,
    n_jobs=28,
    random_state=42,
    verbose=True,
)

tsne_embedding = tsne.fit(X_svd)

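# Rasterize the ~5.8M points with datashader and shade with histogram equalization.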
df = pd.DataFrame(data=tsne_embedding, columns=['x','y'])
agg = ds.Canvas(plot_height=600, plot_width=600).points(df, 'x', 'y')
img = ds.tf.shade(agg, cmap=cc.fire, how='eq_hist')
ds.tf.set_background(img, "black")
```
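
To work with the released coordinates directly rather than re-running the pipeline, something like the following should suffice. This is a minimal sketch: the repository id `bigscience-data/roots-viz-data` is an assumption, so adjust it to wherever this dataset actually lives; only `datasets`, `pandas`, `datashader`, and `colorcet` are needed.

```python
import datashader as ds
import colorcet as cc
from datasets import load_dataset

# Repo id is assumed; point this at the actual location of the dataset.
dset = load_dataset("bigscience-data/roots-viz-data", split="train")

# Pull the table into pandas for plotting (~250 MB in memory).
points = dset.to_pandas()

# Rasterize and shade exactly as in the pipeline above.
agg = ds.Canvas(plot_height=600, plot_width=600).points(points, "x", "y")
img = ds.tf.shade(agg, cmap=cc.fire, how="eq_hist")
ds.tf.set_background(img, "black")
```

The `language` and `corpus` columns can be used to filter the points before aggregation, for example to render one map per language or per source corpus with the same canvas and color map.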