Noah Shinn committed
Commit 71dfae6 • 1 Parent(s): cd44703
rm script
test.py DELETED
@@ -1,32 +0,0 @@
-from transformers import AutoModelForCausalLM, AutoTokenizer
-import pandas as pd
-import matplotlib.pyplot as plt
-
-df1 = pd.read_parquet('data/train-pairs.parquet')
-df2 = pd.read_parquet('data/test-pairs.parquet')
-
-tokenizer = AutoTokenizer.from_pretrained('bigcode/santacoder')
-
-df = pd.concat([df1, df2], axis=0)
-df['tokens'] = df['declarations'].apply(lambda x: tokenizer.tokenize(x))
-
-mean_tokens = df['tokens'].apply(lambda x: len(x)).mean()
-max_tokens = df['tokens'].apply(lambda x: len(x)).max()
-min_tokens = df['tokens'].apply(lambda x: len(x)).min()
-num_long_items_2048 = df[df['tokens'].apply(lambda x: len(x) > 2048)].shape[0]
-proportion_2048 = num_long_items_2048 / df.shape[0]
-num_long_items_256 = df[df['tokens'].apply(lambda x: len(x) > 256)].shape[0]
-proportion_256 = num_long_items_256 / df.shape[0]
-
-plt.hist(df['tokens'].apply(lambda x: len(x)), bins=25, range=(0, 2048))
-plt.xlim(0, 2048)
-plt.xlabel('Number of Tokens')
-plt.ylabel('Count')
-plt.title('Distribution of Type Declaration Num Tokens')
-
-# Add a label to the plot with the mean, max, min, and proportion
-label1 = f"Mean: {mean_tokens:.2f}\nMax: {max_tokens}\nMin: {min_tokens}\nProportion > 256: {proportion_256:.2f}\nProportion > 2048: {proportion_2048:.2f}"
-plt.gca().text(0.95, 0.95, label1, transform=plt.gca().transAxes, fontsize=14, verticalalignment='top', horizontalalignment='right')
-
-plt.savefig('declaration_token_distr.png')
-plt.show()
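
For reference, a minimal sketch of the analysis the removed script performed, assuming the same parquet files and 'declarations' column still exist on disk; it keeps only per-row token counts rather than the full token lists, which is lighter on memory:

from transformers import AutoTokenizer
import pandas as pd
import matplotlib.pyplot as plt

tokenizer = AutoTokenizer.from_pretrained('bigcode/santacoder')

# Same inputs as the removed script (assumed paths).
df = pd.concat([pd.read_parquet('data/train-pairs.parquet'),
                pd.read_parquet('data/test-pairs.parquet')], axis=0)

# Token count per declaration, without storing the token strings themselves.
lengths = df['declarations'].apply(lambda x: len(tokenizer.tokenize(x)))

print(f"Mean: {lengths.mean():.2f}, Max: {lengths.max()}, Min: {lengths.min()}")
print(f"Proportion > 256: {(lengths > 256).mean():.2f}")
print(f"Proportion > 2048: {(lengths > 2048).mean():.2f}")

plt.hist(lengths, bins=25, range=(0, 2048))
plt.xlabel('Number of Tokens')
plt.ylabel('Count')
plt.title('Distribution of Type Declaration Num Tokens')
plt.savefig('declaration_token_distr.png')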