kaitehtzeng commited on
Commit
cd8f06d
1 Parent(s): 3e9f9c0

Create app.py

Browse files
Files changed (1) hide show
  1. app.py +313 -0
app.py ADDED
@@ -0,0 +1,313 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # -*- coding: utf-8 -*-
2
+ """After model-fitting
3
+ Automatically generated by Colaboratory.
4
+ Original file is located at
5
+ https://colab.research.google.com/#fileId=https%3A//storage.googleapis.com/kaggle-colab-exported-notebooks/after-model-fitting-b220d687-d8e5-4eb5-aafd-6a7e94d72073.ipynb%3FX-Goog-Algorithm%3DGOOG4-RSA-SHA256%26X-Goog-Credential%3Dgcp-kaggle-com%2540kaggle-161607.iam.gserviceaccount.com/20240128/auto/storage/goog4_request%26X-Goog-Date%3D20240128T102031Z%26X-Goog-Expires%3D259200%26X-Goog-SignedHeaders%3Dhost%26X-Goog-Signature%3D31877cdd720f27bacaa0efcdbe500b0697792af355976ce5280054514cedfe1be4c17db45656212f46a080c0a7f0369fbd3d051fd9be4a1275e0ea4bd55be70f65a681f6868cda1616ea83b3c65a363b81d4f59b864aa1aa82188ce4bbfca0d326422ccfaf462a4a322a86e8d752e875e2c7940fde584e9a1f0e25847bb77ad8e0131724aaec47d49e4ab42a1d2be2199c9053a26a40f3bf2a31489822ec9bb6dd378bec74e97866da9613ee7c54c6ed2ce69eee5fe34ea90293cb546e4cb1f84b3fcc6563aea8318d70e68b71e43b6d85e04a20e01980dd0c94bb837aa81446d9ecfdad1d56cbc1c940670eba9cf9dc647a8972ac13c6af15a28da735db694f
6
+ """
7
+
8
# IMPORTANT: RUN THIS CELL IN ORDER TO IMPORT YOUR KAGGLE DATA SOURCES
# TO THE CORRECT LOCATION (/kaggle/input) IN YOUR NOTEBOOK,
# THEN FEEL FREE TO DELETE THIS CELL.
# NOTE: THIS NOTEBOOK ENVIRONMENT DIFFERS FROM KAGGLE'S PYTHON
# ENVIRONMENT SO THERE MAY BE MISSING LIBRARIES USED BY YOUR
# NOTEBOOK.

import os
import sys
from tempfile import NamedTemporaryFile
from urllib.request import urlopen
from urllib.parse import unquote, urlparse
from urllib.error import HTTPError
from zipfile import ZipFile
import tarfile
import shutil

# Bytes read per network chunk while streaming each archive to disk.
CHUNK_SIZE = 40960
DATA_SOURCE_MAPPING = 'llm-detect-ai-generated-text:https%3A%2F%2Fstorage.googleapis.com%2Fkaggle-competitions-data%2Fkaggle-v2%2F61542%2F7516023%2Fbundle%2Farchive.zip%3FX-Goog-Algorithm%3DGOOG4-RSA-SHA256%26X-Goog-Credential%3Dgcp-kaggle-com%2540kaggle-161607.iam.gserviceaccount.com%252F20240128%252Fauto%252Fstorage%252Fgoog4_request%26X-Goog-Date%3D20240128T102030Z%26X-Goog-Expires%3D259200%26X-Goog-SignedHeaders%3Dhost%26X-Goog-Signature%3D038d55997cf8a860737caadb5837a5ebfaaf8477d4523afa1008387fe39c3a0c58c1ddc811284f559dbb78fd8e0f8230fca333e828951b69e5d935955b9163461cbd2f4f8b3f321dd0e73d767e2ef1a8ceb52512ef8f8d99fd19c92abf23c5a856ebd3d9ed4ee28b4c31b83427a7dc10052602e6d604e2c55f51d8e26da1e2dacb2e720476c3b874b22d5a03e8dde81374f227c87a024dea36e5973a7cabcccdcec804ba2fd73b5397d7d334be750de7ea9d4a2c2dcb12b93f4d75c18f063ebf02ff802e8912122dbd5b25695e7658bffc61997b9893958b304068a6e593653b14959b5355f4b8bb09d5d01768dda2839e271941fabfddf3cc5d8cbc5cd06746,argugpt:https%3A%2F%2Fstorage.googleapis.com%2Fkaggle-data-sets%2F3946973%2F6867914%2Fbundle%2Farchive.zip%3FX-Goog-Algorithm%3DGOOG4-RSA-SHA256%26X-Goog-Credential%3Dgcp-kaggle-com%2540kaggle-161607.iam.gserviceaccount.com%252F20240128%252Fauto%252Fstorage%252Fgoog4_request%26X-Goog-Date%3D20240128T102030Z%26X-Goog-Expires%3D259200%26X-Goog-SignedHeaders%3Dhost%26X-Goog-Signature%3D490ee9c880e3988ac2d0ceedc2936a72525b02e00898ca8feae1456ecdd6a542f952cedb096ce8474098bc29e06744cea2433b38c55accab1c9656f43d1baccccd2b36486e1075525b59c4f61326c5a819dc3f1bed35c76c73ef646f21d71bf8f3e8d7eb94e6c21068392293b9ba1e7fc8ac286eb68a727ac479118880aeff2c08f2e3e013aa0e888c099fb5a54a83920cebbf3ca011d818e66787427bfddf16de31a61552638a21cf583099a16a3cc660817297abdd494a926a3d58196778021bc6ea4b20d0923d7fb588d4857e95dce2979e3b246e6e282ef0b0fcabaecd2dd632c413f7f723e1178d080fc89fb31cd9a4564c84b11062fb9229d61d2dbf4e,daigt-proper-train-dataset:https%3A%2F%2Fstorage.googleapis.com%2Fkaggle-data-sets%2F3942644%2F6890527%2Fbundle%2Farchive.zip%3FX-Goog-Algorithm%3DGOOG4-RSA-SHA256%26X-Goog-Credential%3Dgcp-kaggle-com%2540kaggle-161607.iam.gserviceaccount.com%252F20240128%252Fauto%252Fstorage%252Fgoog4_request%26X-Goog-Date%3D20240128T102031Z%26X-Goog-Expires%3D259200%26X-Goog-SignedHeaders%3Dhost%26X-Goog-Signature%3D352a1df1e329069e50e0d64cb012986e5c75605e915c0b16383182a8618769c5ee4e3dd3f59448b11d64187657833f7f3f3e30c7c21fc343af2c51111074ea60e70e904833ef6a3aa4ad4b4864d89b924a3f063e71c41dbee1bdf1d453dc2cbe62e8819854b6e71040ca0014522e9651b9e8e6640c6caee259e981486a3ee0793ee7f56068c3d7efe66941530d2669bb8d3f989fe7b4056a81f76b0870fa2cf21cce8641b4f3e8c0b90fab4ef495464f2700bd99f20d4d94e86c11bc06301b1fc49a63bee1db180b733a12dc20b3b0f109c15b172c1cf0f91234176030f5c2241e7f646d99238ff63fc36ca1b0419463f38fe3bd477790b060c88c2bc9441ac0'

KAGGLE_INPUT_PATH = '/kaggle/input'
KAGGLE_WORKING_PATH = '/kaggle/working'
KAGGLE_SYMLINK = 'kaggle'

# BUG FIX: the original used the IPython magic `!umount ...`, which is a
# syntax error in a plain .py file; shell out explicitly instead.
os.system('umount /kaggle/input/ 2> /dev/null')
shutil.rmtree('/kaggle/input', ignore_errors=True)
os.makedirs(KAGGLE_INPUT_PATH, 0o777, exist_ok=True)
os.makedirs(KAGGLE_WORKING_PATH, 0o777, exist_ok=True)

# Convenience symlinks so notebook-style relative paths (../input) keep working.
try:
    os.symlink(KAGGLE_INPUT_PATH, os.path.join("..", 'input'), target_is_directory=True)
except FileExistsError:
    pass
try:
    os.symlink(KAGGLE_WORKING_PATH, os.path.join("..", 'working'), target_is_directory=True)
except FileExistsError:
    pass

# Download each "name:url" entry, streaming to a temp file with a progress
# bar, then unpack it (zip or tar) under /kaggle/input/<name>.
for data_source_mapping in DATA_SOURCE_MAPPING.split(','):
    # maxsplit=1: only the first ':' separates the name from the URL; the
    # URL itself may contain ':' once more of it is decoded.
    directory, download_url_encoded = data_source_mapping.split(':', 1)
    download_url = unquote(download_url_encoded)
    filename = urlparse(download_url).path
    destination_path = os.path.join(KAGGLE_INPUT_PATH, directory)
    try:
        with urlopen(download_url) as fileres, NamedTemporaryFile() as tfile:
            total_length = fileres.headers['content-length']
            print(f'Downloading {directory}, {total_length} bytes compressed')
            dl = 0
            data = fileres.read(CHUNK_SIZE)
            while len(data) > 0:
                dl += len(data)
                tfile.write(data)
                done = int(50 * dl / int(total_length))
                sys.stdout.write(f"\r[{'=' * done}{' ' * (50-done)}] {dl} bytes downloaded")
                sys.stdout.flush()
                data = fileres.read(CHUNK_SIZE)
            # Make sure buffered bytes reach disk before the archive is re-read
            # (tarfile.open reopens the file by name).
            tfile.flush()
            if filename.endswith('.zip'):
                with ZipFile(tfile) as zfile:
                    zfile.extractall(destination_path)
            else:
                # BUG FIX: the original bound this handle to the name `tarfile`,
                # shadowing the module and breaking any later tar extraction.
                with tarfile.open(tfile.name) as tar:
                    tar.extractall(destination_path)
            print(f'\nDownloaded and uncompressed: {directory}')
    except HTTPError:
        print(f'Failed to load (likely expired) {download_url} to path {destination_path}')
        continue
    except OSError:
        print(f'Failed to load {download_url} to path {destination_path}')
        continue

print('Data source import complete.')
79
+
80
# This Python 3 environment comes with many helpful analytics libraries installed
# It is defined by the kaggle/python Docker image: https://github.com/kaggle/docker-python
# For example, here's several helpful packages to load

import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory

import os
for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))

# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session

# BUG FIX: the original used the IPython magic `!git clone ...`, which is a
# syntax error in a plain .py file; shell out explicitly instead.
os.system('git clone https://huggingface.co/spaces/kaitehtzeng/primary_app')
99
+
100
+ """## Import Necessary Library"""
101
+
102
+ import torch.nn.functional as F
103
+ from transformers import AutoModel
104
+ from transformers import AutoTokenizer
105
+ from tokenizers import Tokenizer, trainers, pre_tokenizers, models
106
+ from transformers import DebertaTokenizer
107
+ from sklearn.model_selection import train_test_split
108
+ import torch
109
+ import torch.nn as nn
110
+ import numpy as np
111
+ import pandas as pd
112
+ from tqdm.notebook import tqdm
113
+ import matplotlib.pyplot as plt
114
+ import nltk
115
+ from nltk.corpus import stopwords
116
+ from nltk.tokenize import word_tokenize
117
+ from nltk.tokenize.treebank import TreebankWordDetokenizer
118
+ from collections import Counter
119
+ #import spacy
120
+ import re
121
+ import gc
122
+ # ----------
123
+ import os
124
+
125
# Central runtime configuration shared by the tokenizer, model and loaders.
config = dict(
    model='/kaggle/input/transformers-model-downloader-pytorch-tf2-0/microsoft/deberta-v3-base',
    dropout=0.2,        # dropout probability used twice in the classifier head
    max_length=512,     # tokens per essay after truncation/padding
    batch_size=3,
    epochs=1,
    lr=1e-5,
    device='cuda' if torch.cuda.is_available() else 'cpu',
    scheduler='CosineAnnealingWarmRestarts',
)
135
+
136
+ """### Preparation
137
+ Comparing two essays. <br>
138
+ One predicted written by students, one predicted written by LLM
139
+ """
140
+
141
+ train_essays = pd.read_csv("/kaggle/input/llm-detect-ai-generated-text/train_essays.csv")
142
+ external = pd.read_csv("/kaggle/input/daigt-proper-train-dataset/train_drcat_04.csv")
143
+
144
+ df = pd.concat([
145
+ external[external.source=="persuade_corpus"].sample(10000,random_state=101),
146
+ external[external.source!='persuade_corpus']
147
+ ])
148
+ df = df.reset_index()
149
+
150
+ df['stratify'] = df.label.astype(str)+df.source.astype(str)
151
+ train_df,val_df = train_test_split(df,test_size=0.2,random_state = 101,stratify=df['stratify'])
152
+ train_df, val_df = train_df.reset_index(), val_df.reset_index()
153
+
154
import transformers

print('transformers version:', transformers.__version__)

# Tokenizer matching the locally stored DeBERTa-v3 checkpoint.
tokenizer = AutoTokenizer.from_pretrained(config['model'])
# NOTE(review): train_new_from_iterator RETURNS the retrained tokenizer; the
# result is discarded here, so `tokenizer` remains the pretrained one.
# Confirm this is intentional before relying on a retrained vocabulary.
tokenizer.train_new_from_iterator(train_essays['text'], 52000)
163
+
164
+ """### Building Training Dataset and Loader"""
165
+
166
class EssayDataset:
    """Map-style dataset that yields tokenised essays (plus labels for training).

    Args:
        df: DataFrame with a 'text' column and, unless is_test, a 'label' column.
        config: dict providing 'max_length' for truncation/padding.
        tokenizer: HuggingFace-style tokenizer exposing encode_plus().
        is_test: when True, __getitem__ returns inputs only (no targets).
    """

    def __init__(self, df, config, tokenizer, is_test=False):
        self.df = df
        self.tokenizer = tokenizer
        self.is_test = is_test
        self.config = config

    def token_start(self, idx):
        """Tokenise the essay at row *idx* into fixed-length LongTensors."""
        sample_text = self.df.loc[idx, 'text']

        # BUG FIX: the original called the module-level `tokenizer` global
        # instead of the tokenizer passed to the constructor.
        tokenized = self.tokenizer.encode_plus(
            sample_text,
            None,
            add_special_tokens=True,
            max_length=self.config['max_length'],
            truncation=True,
            padding="max_length",
        )

        return {
            "input_ids": torch.tensor(tokenized['input_ids'], dtype=torch.long),
            "token_type_ids": torch.tensor(tokenized['token_type_ids'], dtype=torch.long),
            "attention_mask": torch.tensor(tokenized['attention_mask'], dtype=torch.long),
        }

    def __getitem__(self, idx):
        """Return tokenised inputs, plus a float32 'labels' target when training."""
        input_text = self.token_start(idx)

        if self.is_test:
            return input_text

        labels = self.df.loc[idx, 'label']
        targets = {'labels': torch.tensor(labels, dtype=torch.float32)}
        return input_text, targets

    def __len__(self):
        return len(self.df)
208
+
209
# Validation dataset and loader: inputs only (is_test=True), no shuffling.
eval_ds = EssayDataset(val_df, config, tokenizer=tokenizer, is_test=True)
eval_loader = torch.utils.data.DataLoader(
    eval_ds,
    batch_size=config['batch_size'],
)
213
+
214
+ """Build the Model"""
215
+
216
class mymodel(nn.Module):
    """DeBERTa encoder + mean pooling + two-layer head emitting P(AI-written).

    forward() takes a dict of input_ids / token_type_ids / attention_mask and
    returns a sigmoid probability of shape (batch, 1).
    """

    def __init__(self, config):
        super(mymodel, self).__init__()

        self.model_name = config['model']
        self.deberta = AutoModel.from_pretrained(self.model_name)
        # The embedding table size must match the fine-tuned checkpoint that
        # is loaded into this model.
        # NOTE(review): the original hard-coded 128001 while its comment said
        # "12801 = len(tokenizer)" — the two disagree.  The value is now
        # overridable via config['vocab_size'], keeping the old default.
        self.deberta.resize_token_embeddings(config.get('vocab_size', 128001))
        self.dropout = nn.Dropout(config['dropout'])
        self.fn0 = nn.Linear(self.deberta.config.hidden_size, 256)
        self.fn2 = nn.Linear(256, 1)
        self.pooling = MeanPooling()

    def forward(self, input):
        """Run encoder -> mean-pool -> dropout/linear head -> sigmoid."""
        output = self.deberta(**input, return_dict=True)
        output = self.pooling(output['last_hidden_state'], input['attention_mask'])
        output = self.dropout(output)
        output = self.fn0(output)
        output = self.dropout(output)
        output = self.fn2(output)
        return torch.sigmoid(output)
239
+
240
+ import torch.nn as nn
241
class MeanPooling(nn.Module):
    """Average token embeddings over the sequence, honouring the attention mask.

    Only positions with attention_mask == 1 contribute; the denominator is
    clamped so an all-zero mask cannot divide by zero.
    """

    def __init__(self):
        super(MeanPooling, self).__init__()

    def forward(self, last_hidden_state, attention_mask):
        # Broadcast the (batch, seq) mask across the hidden dimension.
        mask = attention_mask.unsqueeze(-1).expand(last_hidden_state.size()).float()
        summed = (mask * last_hidden_state).sum(dim=1)
        counts = mask.sum(dim=1).clamp(min=1e-9)  # guard against empty masks
        return summed / counts
254
+
255
# Build the classifier and restore the fine-tuned weights.
model = mymodel(config).to(device=config['device'])
# BUG FIX: map_location keeps this working on CPU-only hosts even when the
# checkpoint was saved from a CUDA run (the original torch.load would fail).
model.load_state_dict(
    torch.load('/kaggle/input/fine-tune-model/my_model.pth',
               map_location=config['device'])
)
model.eval()  # inference only: disables dropout
258
+
259
+ #preds = []
260
+ #for (inputs) in eval_loader:
261
+ # inputs = {k:inputs[k].to(device=config['device']) for k in inputs.keys()}
262
+ #
263
+ # outputs = model(inputs)
264
+ # preds.append(outputs.detach().cpu())
265
+
266
+ #preds = torch.concat(preds)
267
+
268
+ #val_df['preds'] = preds.numpy()
269
+ #val_df['AI'] = val_df['preds']>0.5
270
+
271
+ #sample_predict_AI = val_df.loc[val_df['AI'] == True].iloc[0]['text']
272
+ #sample_predict_student = val_df.loc[val_df['AI'] == False].iloc[0]['text']
273
+
274
+ #sample_predict_AI
275
+
276
+ #sample_predict_student
277
+
278
def trial(text):
    """Classify *text* as written by "AI" or a "Student".

    Tokenises with the module-level tokenizer, runs the fine-tuned model on
    config['device'], and thresholds the sigmoid output at 0.5.
    """
    tokenized = tokenizer.encode_plus(
        text,
        None,
        add_special_tokens=True,
        max_length=config['max_length'],
        truncation=True,
        padding="max_length",
    )
    inputs = {
        "input_ids": torch.tensor(tokenized['input_ids'], dtype=torch.long),
        "token_type_ids": torch.tensor(tokenized['token_type_ids'], dtype=torch.long),
        "attention_mask": torch.tensor(tokenized['attention_mask'], dtype=torch.long),
    }
    # Add the batch dimension and move everything to the model's device.
    inputs = {k: v.unsqueeze(0).to(device=config['device']) for k, v in inputs.items()}

    # BUG FIX: run under no_grad — the original built a gradient graph on
    # every inference call, wasting memory for no benefit.
    with torch.no_grad():
        score = model(inputs).item()
    return "AI" if score >= 0.5 else "Student"
298
+
299
# BUG FIX: the original used the IPython magic `!pip install`, which is a
# syntax error in a plain .py file; shell out explicitly instead.
os.system('pip install -q gradio==3.45.0')

import gradio as gr

# Smoke-test the classifier once before exposing the UI.
trial('hello fuck you')

# Minimal UI: free-text essay in, "AI"/"Student" verdict out.
demo = gr.Interface(
    fn=trial,
    inputs=gr.Textbox(placeholder="..."),
    outputs="textbox",
)

demo.launch(share=True)