kaitehtzeng committed
Commit
1446f10
1 Parent(s): cd8f06d

Delete after_model_fitting.py

Files changed (1)
  1. after_model_fitting.py +0 -324
after_model_fitting.py DELETED
@@ -1,324 +0,0 @@
- # -*- coding: utf-8 -*-
- """After model-fitting
-
- Automatically generated by Colaboratory.
-
- Original file is located at
-     https://colab.research.google.com/#fileId=https%3A//storage.googleapis.com/kaggle-colab-exported-notebooks/after-model-fitting-b220d687-d8e5-4eb5-aafd-6a7e94d72073.ipynb%3FX-Goog-Algorithm%3DGOOG4-RSA-SHA256%26X-Goog-Credential%3Dgcp-kaggle-com%2540kaggle-161607.iam.gserviceaccount.com/20240128/auto/storage/goog4_request%26X-Goog-Date%3D20240128T102031Z%26X-Goog-Expires%3D259200%26X-Goog-SignedHeaders%3Dhost%26X-Goog-Signature%3D31877cdd720f27bacaa0efcdbe500b0697792af355976ce5280054514cedfe1be4c17db45656212f46a080c0a7f0369fbd3d051fd9be4a1275e0ea4bd55be70f65a681f6868cda1616ea83b3c65a363b81d4f59b864aa1aa82188ce4bbfca0d326422ccfaf462a4a322a86e8d752e875e2c7940fde584e9a1f0e25847bb77ad8e0131724aaec47d49e4ab42a1d2be2199c9053a26a40f3bf2a31489822ec9bb6dd378bec74e97866da9613ee7c54c6ed2ce69eee5fe34ea90293cb546e4cb1f84b3fcc6563aea8318d70e68b71e43b6d85e04a20e01980dd0c94bb837aa81446d9ecfdad1d56cbc1c940670eba9cf9dc647a8972ac13c6af15a28da735db694f
- """
-
- # IMPORTANT: RUN THIS CELL IN ORDER TO IMPORT YOUR KAGGLE DATA SOURCES
- # TO THE CORRECT LOCATION (/kaggle/input) IN YOUR NOTEBOOK,
- # THEN FEEL FREE TO DELETE THIS CELL.
- # NOTE: THIS NOTEBOOK ENVIRONMENT DIFFERS FROM KAGGLE'S PYTHON
- # ENVIRONMENT SO THERE MAY BE MISSING LIBRARIES USED BY YOUR
- # NOTEBOOK.
-
- import os
- import sys
- from tempfile import NamedTemporaryFile
- from urllib.request import urlopen
- from urllib.parse import unquote, urlparse
- from urllib.error import HTTPError
- from zipfile import ZipFile
- import tarfile
- import shutil
-
- CHUNK_SIZE = 40960
- DATA_SOURCE_MAPPING = 'llm-detect-ai-generated-text:https%3A%2F%2Fstorage.googleapis.com%2Fkaggle-competitions-data%2Fkaggle-v2%2F61542%2F7516023%2Fbundle%2Farchive.zip%3FX-Goog-Algorithm%3DGOOG4-RSA-SHA256%26X-Goog-Credential%3Dgcp-kaggle-com%2540kaggle-161607.iam.gserviceaccount.com%252F20240128%252Fauto%252Fstorage%252Fgoog4_request%26X-Goog-Date%3D20240128T102030Z%26X-Goog-Expires%3D259200%26X-Goog-SignedHeaders%3Dhost%26X-Goog-Signature%3D038d55997cf8a860737caadb5837a5ebfaaf8477d4523afa1008387fe39c3a0c58c1ddc811284f559dbb78fd8e0f8230fca333e828951b69e5d935955b9163461cbd2f4f8b3f321dd0e73d767e2ef1a8ceb52512ef8f8d99fd19c92abf23c5a856ebd3d9ed4ee28b4c31b83427a7dc10052602e6d604e2c55f51d8e26da1e2dacb2e720476c3b874b22d5a03e8dde81374f227c87a024dea36e5973a7cabcccdcec804ba2fd73b5397d7d334be750de7ea9d4a2c2dcb12b93f4d75c18f063ebf02ff802e8912122dbd5b25695e7658bffc61997b9893958b304068a6e593653b14959b5355f4b8bb09d5d01768dda2839e271941fabfddf3cc5d8cbc5cd06746,argugpt:https%3A%2F%2Fstorage.googleapis.com%2Fkaggle-data-sets%2F3946973%2F6867914%2Fbundle%2Farchive.zip%3FX-Goog-Algorithm%3DGOOG4-RSA-SHA256%26X-Goog-Credential%3Dgcp-kaggle-com%2540kaggle-161607.iam.gserviceaccount.com%252F20240128%252Fauto%252Fstorage%252Fgoog4_request%26X-Goog-Date%3D20240128T102030Z%26X-Goog-Expires%3D259200%26X-Goog-SignedHeaders%3Dhost%26X-Goog-Signature%3D490ee9c880e3988ac2d0ceedc2936a72525b02e00898ca8feae1456ecdd6a542f952cedb096ce8474098bc29e06744cea2433b38c55accab1c9656f43d1baccccd2b36486e1075525b59c4f61326c5a819dc3f1bed35c76c73ef646f21d71bf8f3e8d7eb94e6c21068392293b9ba1e7fc8ac286eb68a727ac479118880aeff2c08f2e3e013aa0e888c099fb5a54a83920cebbf3ca011d818e66787427bfddf16de31a61552638a21cf583099a16a3cc660817297abdd494a926a3d58196778021bc6ea4b20d0923d7fb588d4857e95dce2979e3b246e6e282ef0b0fcabaecd2dd632c413f7f723e1178d080fc89fb31cd9a4564c84b11062fb9229d61d2dbf4e,daigt-proper-train-dataset:https%3A%2F%2Fstorage.googleapis.com%2Fkaggle-data-sets%2F3942644%2F6890527%2Fbundle%2Farchive.zip%3FX-Goog-Algorithm%3DGOOG4-RSA-SHA256%26X-Goog-Credential%3Dgcp-kaggle-com%2540kaggle-161607.iam.gserviceaccount.com%252F20240128%252Fauto%252Fstorage%252Fgoog4_request%26X-Goog-Date%3D20240128T102031Z%26X-Goog-Expires%3D259200%26X-Goog-SignedHeaders%3Dhost%26X-Goog-Signature%3D352a1df1e329069e50e0d64cb012986e5c75605e915c0b16383182a8618769c5ee4e3dd3f59448b11d64187657833f7f3f3e30c7c21fc343af2c51111074ea60e70e904833ef6a3aa4ad4b4864d89b924a3f063e71c41dbee1bdf1d453dc2cbe62e8819854b6e71040ca0014522e9651b9e8e6640c6caee259e981486a3ee0793ee7f56068c3d7efe66941530d2669bb8d3f989fe7b4056a81f76b0870fa2cf21cce8641b4f3e8c0b90fab4ef495464f2700bd99f20d4d94e86c11bc06301b1fc49a63bee1db180b733a12dc20b3b0f109c15b172c1cf0f91234176030f5c2241e7f646d99238ff63fc36ca1b0419463f38fe3bd477790b060c88c2bc9441ac0'
-
- KAGGLE_INPUT_PATH = '/kaggle/input'
- KAGGLE_WORKING_PATH = '/kaggle/working'
- KAGGLE_SYMLINK = 'kaggle'
-
- !umount /kaggle/input/ 2> /dev/null
- shutil.rmtree('/kaggle/input', ignore_errors=True)
- os.makedirs(KAGGLE_INPUT_PATH, 0o777, exist_ok=True)
- os.makedirs(KAGGLE_WORKING_PATH, 0o777, exist_ok=True)
-
- try:
-     os.symlink(KAGGLE_INPUT_PATH, os.path.join("..", 'input'), target_is_directory=True)
- except FileExistsError:
-     pass
- try:
-     os.symlink(KAGGLE_WORKING_PATH, os.path.join("..", 'working'), target_is_directory=True)
- except FileExistsError:
-     pass
-
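- # Each entry in DATA_SOURCE_MAPPING is "<directory>:<url-encoded signed URL>".
- # The loop below streams each archive in CHUNK_SIZE chunks with a progress bar,
- # then extracts it (zip or tar) into /kaggle/input/<directory>.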
- for data_source_mapping in DATA_SOURCE_MAPPING.split(','):
-     directory, download_url_encoded = data_source_mapping.split(':')
-     download_url = unquote(download_url_encoded)
-     filename = urlparse(download_url).path
-     destination_path = os.path.join(KAGGLE_INPUT_PATH, directory)
-     try:
-         with urlopen(download_url) as fileres, NamedTemporaryFile() as tfile:
-             total_length = fileres.headers['content-length']
-             print(f'Downloading {directory}, {total_length} bytes compressed')
-             dl = 0
-             data = fileres.read(CHUNK_SIZE)
-             while len(data) > 0:
-                 dl += len(data)
-                 tfile.write(data)
-                 done = int(50 * dl / int(total_length))
-                 sys.stdout.write(f"\r[{'=' * done}{' ' * (50-done)}] {dl} bytes downloaded")
-                 sys.stdout.flush()
-                 data = fileres.read(CHUNK_SIZE)
-             if filename.endswith('.zip'):
-                 with ZipFile(tfile) as zfile:
-                     zfile.extractall(destination_path)
-             else:
-                 # Renamed the context variable to avoid shadowing the tarfile module.
-                 with tarfile.open(tfile.name) as tar_file:
-                     tar_file.extractall(destination_path)
-             print(f'\nDownloaded and uncompressed: {directory}')
-     except HTTPError as e:
-         print(f'Failed to load (likely expired) {download_url} to path {destination_path}')
-         continue
-     except OSError as e:
-         print(f'Failed to load {download_url} to path {destination_path}')
-         continue
-
- print('Data source import complete.')
-
- # This Python 3 environment comes with many helpful analytics libraries installed
- # It is defined by the kaggle/python Docker image: https://github.com/kaggle/docker-python
- # For example, here are several helpful packages to load
-
- import numpy as np # linear algebra
- import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
-
- # Input data files are available in the read-only "../input/" directory
- # For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
-
- for dirname, _, filenames in os.walk('/kaggle/input'):
-     for filename in filenames:
-         print(os.path.join(dirname, filename))
-
- # You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
- # You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
-
- !git clone https://huggingface.co/spaces/kaitehtzeng/primary_app
-
- """## Import Necessary Libraries"""
-
- import torch
- import torch.nn as nn
- import torch.nn.functional as F
- from transformers import AutoModel, AutoTokenizer, DebertaTokenizer
- from tokenizers import Tokenizer, trainers, pre_tokenizers, models
- from sklearn.model_selection import train_test_split
- import numpy as np
- import pandas as pd
- from tqdm.notebook import tqdm
- import matplotlib.pyplot as plt
- import nltk
- from nltk.corpus import stopwords
- from nltk.tokenize import word_tokenize
- from nltk.tokenize.treebank import TreebankWordDetokenizer
- from collections import Counter
- #import spacy
- import re
- import gc
-
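- # Inference configuration; 'model' points at a Kaggle dataset copy of microsoft/deberta-v3-base.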
- config = {
-     'model': '/kaggle/input/transformers-model-downloader-pytorch-tf2-0/microsoft/deberta-v3-base',
-     'dropout': 0.2,
-     'max_length': 512,
-     'batch_size': 3,
-     'epochs': 1,
-     'lr': 1e-5,
-     'device': 'cuda' if torch.cuda.is_available() else 'cpu',
-     'scheduler': 'CosineAnnealingWarmRestarts'
- }
-
- """### Preparation
- Compare two essays: one predicted to be written by a student,
- one predicted to be written by an LLM.
- """
-
- train_essays = pd.read_csv("/kaggle/input/llm-detect-ai-generated-text/train_essays.csv")
- external = pd.read_csv("/kaggle/input/daigt-proper-train-dataset/train_drcat_04.csv")
-
- df = pd.concat([
-     external[external.source=="persuade_corpus"].sample(10000, random_state=101),
-     external[external.source!='persuade_corpus']
- ])
- df = df.reset_index()
-
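- # Stratify on label+source so the split preserves both the class balance and the source mix.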
- df['stratify'] = df.label.astype(str) + df.source.astype(str)
- train_df, val_df = train_test_split(df, test_size=0.2, random_state=101, stratify=df['stratify'])
- train_df, val_df = train_df.reset_index(), val_df.reset_index()
-
- import transformers
- print('transformers version:', transformers.__version__)
-
- #train_df,val_df = train_test_split(train_essays,test_size=0.2,random_state = 101)
- #train_df, val_df = train_df.reset_index(), val_df.reset_index()
- #print('dataframe shapes:',train_df.shape, val_df.shape)
-
- tokenizer = AutoTokenizer.from_pretrained(config['model'])
- tokenizer.train_new_from_iterator(train_essays['text'], 52000)
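- # NOTE: train_new_from_iterator returns a new tokenizer rather than mutating this
- # one; the return value is discarded here, so the pretrained deberta-v3 tokenizer
- # is what is actually used below (consistent with resize_token_embeddings(128001)).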
-
- """### Building Training Dataset and Loader"""
-
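- # Wraps a dataframe of essays: token_start() tokenizes one essay to the fixed
- # max_length and returns input tensors; __getitem__ also returns the label
- # unless is_test is True.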
- class EssayDataset:
-     def __init__(self, df, config, tokenizer, is_test=False):
-         self.df = df
-         self.tokenizer = tokenizer
-         self.is_test = is_test
-         self.config = config
-
-     def token_start(self, idx):
-         sample_text = self.df.loc[idx, 'text']
-
-         # Use the dataset's own tokenizer (the original code referenced the global one).
-         tokenized = self.tokenizer.encode_plus(sample_text,
-                                                None,
-                                                add_special_tokens=True,
-                                                max_length=self.config['max_length'],
-                                                truncation=True,
-                                                padding="max_length")
-
-         inputs = {
-             "input_ids": torch.tensor(tokenized['input_ids'], dtype=torch.long),
-             "token_type_ids": torch.tensor(tokenized['token_type_ids'], dtype=torch.long),
-             "attention_mask": torch.tensor(tokenized['attention_mask'], dtype=torch.long)
-         }
-
-         return inputs
-
-     def __getitem__(self, idx):
-         input_text = self.token_start(idx)
-
-         if self.is_test:
-             return input_text
-         else:
-             labels = self.df.loc[idx, 'label']
-             targets = {'labels': torch.tensor(labels, dtype=torch.float32)}
-             return input_text, targets
-
-     def __len__(self):
-         return len(self.df)
-
- eval_ds = EssayDataset(val_df, config, tokenizer=tokenizer, is_test=True)
-
- eval_loader = torch.utils.data.DataLoader(eval_ds, batch_size=config['batch_size'])
-
- """Build the Model"""
-
- class mymodel(nn.Module):
-
-     def __init__(self, config):
-         super(mymodel, self).__init__()
-
-         self.model_name = config['model']
-         self.deberta = AutoModel.from_pretrained(self.model_name)
-         # 128001 = len(tokenizer)
-         self.deberta.resize_token_embeddings(128001)
-         self.dropout = nn.Dropout(config['dropout'])
-         self.fn0 = nn.Linear(self.deberta.config.hidden_size, 256)
-         self.fn2 = nn.Linear(256, 1)
-         self.pooling = MeanPooling()
-
-     def forward(self, input):
-         output = self.deberta(**input, return_dict=True)
-         output = self.pooling(output['last_hidden_state'], input['attention_mask'])
-         output = self.dropout(output)
-         output = self.fn0(output)
-         output = self.dropout(output)
-         output = self.fn2(output)
-         output = torch.sigmoid(output)
-         return output
-
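- # Mean-pool token embeddings, using the attention mask to ignore padding
- # positions (the denominator is clamped to avoid division by zero).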
- class MeanPooling(nn.Module):
-     def __init__(self):
-         super(MeanPooling, self).__init__()
-
-     def forward(self, last_hidden_state, attention_mask):
-         new_weight = attention_mask.unsqueeze(-1).expand(last_hidden_state.size()).float()
-         final = torch.sum(new_weight * last_hidden_state, 1)
-         total_weight = new_weight.sum(1)
-         total_weight = torch.clamp(total_weight, min=1e-9)
-         mean_embedding = final / total_weight
-
-         return mean_embedding
-
- model = mymodel(config).to(device=config['device'])
- # map_location keeps the load working on CPU-only machines as well.
- model.load_state_dict(torch.load('/kaggle/input/fine-tune-model/my_model.pth', map_location=config['device']))
- model.eval()
-
- #preds = []
- #for (inputs) in eval_loader:
- #    inputs = {k:inputs[k].to(device=config['device']) for k in inputs.keys()}
- #
- #    outputs = model(inputs)
- #    preds.append(outputs.detach().cpu())
-
- #preds = torch.concat(preds)
-
- #val_df['preds'] = preds.numpy()
- #val_df['AI'] = val_df['preds']>0.5
-
- #sample_predict_AI = val_df.loc[val_df['AI'] == True].iloc[0]['text']
- #sample_predict_student = val_df.loc[val_df['AI'] == False].iloc[0]['text']
-
- #sample_predict_AI
-
- #sample_predict_student
-
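- # Classify a single text: tokenize it the same way as the training data, run
- # the model, and threshold the sigmoid score at 0.5.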
- def trial(text):
-     tokenized = tokenizer.encode_plus(text,
-                                       None,
-                                       add_special_tokens=True,
-                                       max_length=config['max_length'],
-                                       truncation=True,
-                                       padding="max_length")
-     inputs = {
-         "input_ids": torch.tensor(tokenized['input_ids'], dtype=torch.long),
-         "token_type_ids": torch.tensor(tokenized['token_type_ids'], dtype=torch.long),
-         "attention_mask": torch.tensor(tokenized['attention_mask'], dtype=torch.long)
-     }
-     inputs = {k: inputs[k].unsqueeze(0).to(device=config['device']) for k in inputs.keys()}
-
-     # no_grad avoids building a computation graph during inference.
-     with torch.no_grad():
-         if model(inputs).item() >= 0.5:
-             return "AI"
-         else:
-             return "Student"
-
- !pip install -q gradio==3.45.0
-
- import gradio as gr
-
- # Quick sanity check on a throwaway input.
- trial('hello world')
-
- demo = gr.Interface(
-     fn=trial,
-     inputs=gr.Textbox(placeholder="..."),
-     outputs="textbox"
- )
-
- demo.launch(share=True)
-
- """### Model
- Fine-tuning the deberta-v3-base model with newly added layers.
-
- The model was later used to participate in the Kaggle competition LLM - Detect AI Generated Text.
- The AUC of the model is 0.75.
- """
-
- !git push