Alex Tyshka committed on
Commit
cc5ee73
1 Parent(s): 37ea192

Initial commit

Files changed (3)
  1. app.py +72 -0
  2. model.pkl +3 -0
  3. requirements.txt +6 -0
app.py ADDED
@@ -0,0 +1,72 @@
+ import pickle
+
+ import nltk
+ import torch
+ import numpy as np
+ import gradio as gr
+ from nltk import word_tokenize, sent_tokenize
+ from scipy.stats import shapiro
+ from transformers import GPT2LMHeadModel, GPT2TokenizerFast
+
+ nltk.download('punkt')  # tokenizer data used by sent_tokenize/word_tokenize
+
+ device = 'cuda' if torch.cuda.is_available() else 'cpu'
+ model = GPT2LMHeadModel.from_pretrained('gpt2-large').to(device)
+ tokenizer: GPT2TokenizerFast = GPT2TokenizerFast.from_pretrained('gpt2-large')
+
+ # Pickled classifier mapping text statistics to human-vs-AI probabilities.
+ with open('model.pkl', 'rb') as f:
+     lr_model = pickle.load(f)
+
+
+ def get_perplexity(text: str):
+     """Return the mean LM loss for `text` and (word, score) pairs derived
+     from the normalized per-token cross-entropy."""
+     tokens = tokenizer(text, return_tensors='pt', truncation=True, return_offsets_mapping=True)
+     inputs = tokens.input_ids.to(device)
+     targets = inputs.clone()
+     with torch.no_grad():
+         outputs = model(inputs, labels=targets)
+     labels = targets.to(outputs.logits.device)
+     # Shift so that tokens < n predict n
+     shift_logits = outputs.logits[..., :-1, :].contiguous()
+     shift_labels = labels[..., 1:].contiguous()
+     # Per-token cross-entropy; reduction='none' replaces the deprecated reduce=False
+     perplexities = torch.nn.functional.cross_entropy(
+         shift_logits.view(-1, shift_logits.size(-1)), shift_labels.view(-1), reduction='none')
+     # Normalize to [0, 1] so the scores can drive the highlight colors
+     perplexities = perplexities.to('cpu').numpy()
+     perplexities = (perplexities / np.max(perplexities)).tolist()
+     # Map token-level scores back to word spans in the original text;
+     # the first token has no prediction loss, so its word scores 0.
+     output = [(text[:tokens.word_to_chars(0)[1]], 0)]
+     for word_id, p in zip(tokens.word_ids()[1:], perplexities):
+         if word_id == len(output):  # first token of a new word
+             span = tokens.word_to_chars(word_id)
+             output.append((text[span[0]:span[1]], p))
+     return outputs.loss, output
+
+
+ def score_text(text):
+     perplexity, word_perplexities = get_perplexity(text)
+     # Sentence-length statistics; the classifier also uses their normality.
+     lengths = [len(word_tokenize(sentence)) for sentence in sent_tokenize(text)]
+     # Shapiro-Wilk needs more than two sentences; fall back to a neutral 0.5.
+     features = [perplexity.item(), np.mean(lengths), np.std(lengths),
+                 shapiro(lengths).pvalue if len(lengths) > 2 else 0.5]
+     scores = lr_model.predict_proba([features])[0]
+     return {'Human': scores[0], 'AI': scores[1]}, word_perplexities
+
+
+ sample_text = """
+ The Saturn V is a type of rocket that was developed by NASA in the 1960s to support the Apollo program, which aimed to land humans on the Moon.
+ It remains the most powerful rocket ever built, and its five F-1 engines generated more than 7.5 million pounds of thrust at liftoff.
+ The Saturn V was used for all of the Apollo missions to the Moon, as well as the launch of the Skylab space station.
+ Despite its impressive capabilities, the Saturn V was only used for a brief period of time before being retired in 1973.
+ Nevertheless, it remains a landmark achievement in the history of space exploration and a symbol of human ingenuity and determination."""
+
+ demo = gr.Interface(fn=score_text,
+                     inputs=[gr.Textbox(label="Text to score", lines=5, value=sample_text)],
+                     outputs=[gr.Label(), gr.HighlightedText()])
+
+ demo.launch()
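
The commit ships model.pkl but not the code that produced it. As a point of reference, here is a minimal sketch of how such a classifier could be trained, assuming model.pkl is a pickled scikit-learn LogisticRegression fit on the same four features that score_text computes; extract_features, train_detector, and the labeled corpus are hypothetical names, not part of this commit.

import pickle
import numpy as np
from nltk import word_tokenize, sent_tokenize
from scipy.stats import shapiro
from sklearn.linear_model import LogisticRegression


def extract_features(text: str, mean_loss: float) -> list:
    # The same four features score_text feeds to lr_model in app.py.
    lengths = [len(word_tokenize(s)) for s in sent_tokenize(text)]
    return [mean_loss, np.mean(lengths), np.std(lengths),
            shapiro(lengths).pvalue if len(lengths) > 2 else 0.5]


def train_detector(texts, mean_losses, labels, out_path='model.pkl'):
    # labels: 0 = human-written, 1 = AI-generated. scikit-learn sorts classes,
    # so predict_proba returns [P(human), P(AI)], matching score_text's output.
    X = [extract_features(t, l) for t, l in zip(texts, mean_losses)]
    clf = LogisticRegression().fit(X, labels)
    with open(out_path, 'wb') as f:
        pickle.dump(clf, f)
    return clf

The mean_loss values would come from get_perplexity's first return value, computed over the training corpus with the same gpt2-large model.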
model.pkl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:2fd5c62c983e46a73eadb4ce7f1d772799f3a2fc9f62c41d7d50beba93fc9140
+ size 706
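
model.pkl is stored as a Git LFS pointer, so a plain clone without LFS resolves to this three-line stub rather than the 706-byte pickle. A quick sanity check after fetching, assuming the file unpickles into a scikit-learn classifier (the feature values below are dummies for illustration):

import pickle

with open('model.pkl', 'rb') as f:
    clf = pickle.load(f)

print(type(clf))     # expect a scikit-learn estimator, e.g. LogisticRegression
print(clf.classes_)  # expect [0 1]: index 0 -> Human, index 1 -> AI
print(clf.predict_proba([[3.2, 21.0, 4.5, 0.5]]))  # dummy 4-feature vector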
requirements.txt ADDED
@@ -0,0 +1,6 @@
+ datasets
+ transformers
+ torch
+ nltk
+ scipy
+ scikit-learn  # assumed: required to unpickle the classifier in model.pkl