Reshinth Adithyan committed on
Commit
d79b272
1 Parent(s): f8a2041
Files changed (1) hide show
  1. app.py +57 -3
app.py CHANGED
@@ -1,3 +1,57 @@
1
- version https://git-lfs.github.com/spec/v1
2
- oid sha256:de22bb7aaf923e4b64e060fe5a80fc664f2094abdf204dc0ca37258ceb0e9806
3
- size 2496
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import streamlit as st
2
+ import datasets
3
+ import os
4
+ import json
5
+ from transformers import AutoTokenizer
6
+ import ast
7
+
8
+ CACHE_DIR = "cache_ds/" #Use this to build the dataset
9
+ contribution_json = "contributors.json"
10
+
11
+ contribution_dict = json.load(open(contribution_json,"r"))
12
+
13
+
14
+ splits = ['EuroParliamentProceedings', 'TED2020', 'PileOfLaw', 'StackExchange_ver2', 'GithubIssues', 'Opensubtitles', 'USPTO', 'S2ORC', 'DevDocs', 'CodePileReddit2022', 'DMMath', 'Gutenberg', 'USENET', 'GithubDiff', 'Enwiki', 'GNOME', 'ASFPublicMail', 'PileV2Reddit2020', 'CodePilePosts', 'Discourse', 'Tanzil', 'arXiv', 'UbuntuIRC', 'PubMed', 'CodePileReddit2020', 'CodePileReddit2021', 'GlobalVoices', 'FreeLaw_Options', 'PileV2Posts','Bible']
15
+
16
+ cached_ds = os.listdir(CACHE_DIR)
17
+ tokenizer = AutoTokenizer.from_pretrained('EleutherAI/gpt-neox-20b')
18
+
19
+
20
def load_page(split):
    """Render the explorer page for one dataset split.

    Loads the split from the HF Hub on a cache miss, otherwise from the
    local ``CACHE_DIR`` copy, then lets the user pick a row and inspect
    its text, metadata, and GPT-NeoX token count.

    Parameters
    ----------
    split : str
        Split name; must be a key of ``contribution_dict`` and, on a
        cache miss, a sub-directory under ``data/`` on the Hub repo.
    """
    with st.spinner('Downloading and building dataset...'):
        if split not in cached_ds:
            # NOTE: the original call passed "train" positionally, which
            # `load_dataset` interprets as the *config name*, not the
            # split; the split is selected explicitly below instead.
            ds = datasets.load_dataset(
                'CarperAI/pile-v2-small-filtered',
                data_files="data/" + split + "/data.json",
            )
        else:
            ds = datasets.load_from_disk(CACHE_DIR + split)
    # Normalize DatasetDict -> Dataset so indexing below is consistent.
    # The original mixed ds['train'] (for the slider bound) with
    # ds[index] (for row access); one of the two had to fail depending
    # on which loader produced `ds`.
    if not isinstance(ds, datasets.Dataset):
        ds = ds['train']
    print("Successfully loaded " + split)
    st.title("Dataset Explorer")
    st.write(f"# {split}")
    st.caption(f"Contributors: {','.join(contribution_dict[split])}")
    with st.form("dataset_form"):
        index = st.slider('Select a row', 0, len(ds) - 1, 0)
        if st.form_submit_button("Load"):
            st.write(f"Row {index}")
            data = ds[index]
            content = data["text"]
            meta = data["meta"]
            with st.expander("Render Content"):
                st.write(content)
            st.write("### Content:")
            st.text(content)
            st.write("### Meta:")
            # meta is a stringified dict; literal_eval parses it back
            # without executing arbitrary code (unlike eval).
            st.write(ast.literal_eval(meta))
            tokenized = tokenizer(content, return_length=True)['length'][0]
            # delta shows headroom relative to the 2048-token context.
            st.metric("Token Count", value=tokenized, delta=2048 - tokenized)
# Entry point: sidebar navigation renders one explorer page per split.
demo_name = st.sidebar.selectbox("Choose a demo", splits)
load_page(demo_name)