cakiki committed
Commit de6faf1
1 Parent(s): 8046cf8

Update README.md

Files changed (1)
  1. README.md +73 -1
README.md CHANGED
@@ -16,6 +16,78 @@ dataset_info:
  download_size: 112131877
  dataset_size: 247037602
  ---
+
  # Dataset Card for "roots-viz-data"

- [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
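+ The snippet below sketches the pipeline used to build this visualization data: tokenize the corpus, count tokens with Dask, weight the counts with TF-IDF, reduce them with truncated SVD, embed with openTSNE, and render with Datashader. It assumes `tokenizer`, `dset`, `dset_name`, and `vocab` are already defined.
+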
+ ```python
+ import os
+ import numpy as np
+ import pandas as pd
+ from sklearn.feature_extraction.text import TfidfTransformer
+ from sklearn.decomposition import TruncatedSVD
+ from tqdm.notebook import tqdm
+ from openTSNE import TSNE
+ import datashader as ds
+ import colorcet as cc
+
+ import vectorizers
+ from vectorizers.transformers import CountFeatureCompressionTransformer, InformationWeightTransformer
+
+ from dask.distributed import Client, LocalCluster
+ import dask.dataframe as dd
+ import dask_ml.feature_extraction.text
+ import dask.bag as db
+
+ from transformers import AutoTokenizer, AutoModel
+ from huggingface_hub import notebook_login, HfApi, hf_hub_download, Repository
+ from datasets import load_dataset
+ from datasets.utils.py_utils import convert_file_size_to_int
+
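+ # Tokenize the 'text' column and store the space-joined tokens; `tokenizer` is
+ # assumed to have been loaded earlier (e.g. with AutoTokenizer.from_pretrained)
+ # and `dset` to be a datasets.Dataset.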
+ def batch_tokenize(batch):
+     return {'tokenized': [' '.join(e.tokens) for e in tokenizer(batch['text']).encodings]}
+
+ dset = dset.map(batch_tokenize, batched=True, batch_size=64, num_proc=28)
+
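+ # Write the tokenized dataset to parquet shards of roughly 300MB each so it
+ # can be read back lazily with Dask.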
+ max_shard_size = convert_file_size_to_int('300MB')
+ dataset_nbytes = dset.data.nbytes
+ num_shards = int(dataset_nbytes / max_shard_size) + 1
+ num_shards = max(num_shards, 1)
+ print(f"Sharding into {num_shards} files.")
+ os.makedirs(f"{dset_name}/tokenized", exist_ok=True)
+ for shard_index in tqdm(range(num_shards)):
+     shard = dset.shard(num_shards=num_shards, index=shard_index, contiguous=True)
+     shard.to_parquet(f"{dset_name}/tokenized/tokenized-{shard_index:03d}.parquet")
+
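+ # Start a local Dask cluster; evaluating `client` in a notebook displays the dashboard link.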
+ client = Client()
+ client
+
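+ # Count token occurrences over the parquet shards with dask-ml's CountVectorizer.
+ # `vocab` (the token vocabulary to count) is assumed to be defined earlier.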
+ df = dd.read_parquet(f'{dset_name}/tokenized/')
+ vect = dask_ml.feature_extraction.text.CountVectorizer(tokenizer=str.split,
+                                                         token_pattern=None,
+                                                         vocabulary=vocab)
+ tokenized_bag = df['tokenized'].to_bag()
+ X = vect.transform(tokenized_bag)
+
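+ # Materialize the sparse token-count matrix and shut down the Dask cluster.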
+ counts = X.compute()
+ client.shutdown()
+
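+ # Re-weight the raw counts with sublinear TF-IDF.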
+ tfidf_transformer = TfidfTransformer(sublinear_tf=True, norm="l2")
+ tfidf = tfidf_transformer.fit_transform(counts)
+
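+ # Reduce the TF-IDF matrix to 160 dense dimensions with truncated SVD.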
+ svd = TruncatedSVD(n_components=160)
+ X_svd = svd.fit_transform(tfidf)
+
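+ # Embed the SVD-reduced features in 2D with openTSNE.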
+ tsne = TSNE(
+     perplexity=30,
+     n_jobs=28,
+     random_state=42,
+     verbose=True,
+ )
+
+ tsne_embedding = tsne.fit(X_svd)
+
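+ # Rasterize the 2D embedding with Datashader, using histogram-equalized shading
+ # on the fire colormap over a black background.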
+ df = pd.DataFrame(data=tsne_embedding, columns=['x','y'])
+ agg = ds.Canvas(plot_height=600, plot_width=600).points(df, 'x', 'y')
+ img = ds.tf.shade(agg, cmap=cc.fire, how='eq_hist')
+ ds.tf.set_background(img, "black")
+ ```