kaitehtzeng committed
Commit 92f189a
1 Parent(s): 1446f10

Update app.py

Files changed (1)
  1. app.py +13 -118
app.py CHANGED
@@ -1,6 +1,8 @@
  # -*- coding: utf-8 -*-
  """After model-fitting
+
  Automatically generated by Colaboratory.
+
  Original file is located at
  https://colab.research.google.com/#fileId=https%3A//storage.googleapis.com/kaggle-colab-exported-notebooks/after-model-fitting-b220d687-d8e5-4eb5-aafd-6a7e94d72073.ipynb%3FX-Goog-Algorithm%3DGOOG4-RSA-SHA256%26X-Goog-Credential%3Dgcp-kaggle-com%2540kaggle-161607.iam.gserviceaccount.com/20240128/auto/storage/goog4_request%26X-Goog-Date%3D20240128T102031Z%26X-Goog-Expires%3D259200%26X-Goog-SignedHeaders%3Dhost%26X-Goog-Signature%3D31877cdd720f27bacaa0efcdbe500b0697792af355976ce5280054514cedfe1be4c17db45656212f46a080c0a7f0369fbd3d051fd9be4a1275e0ea4bd55be70f65a681f6868cda1616ea83b3c65a363b81d4f59b864aa1aa82188ce4bbfca0d326422ccfaf462a4a322a86e8d752e875e2c7940fde584e9a1f0e25847bb77ad8e0131724aaec47d49e4ab42a1d2be2199c9053a26a40f3bf2a31489822ec9bb6dd378bec74e97866da9613ee7c54c6ed2ce69eee5fe34ea90293cb546e4cb1f84b3fcc6563aea8318d70e68b71e43b6d85e04a20e01980dd0c94bb837aa81446d9ecfdad1d56cbc1c940670eba9cf9dc647a8972ac13c6af15a28da735db694f
  """
@@ -22,61 +24,6 @@ from zipfile import ZipFile
  import tarfile
  import shutil

- CHUNK_SIZE = 40960
- DATA_SOURCE_MAPPING = 'llm-detect-ai-generated-text:https%3A%2F%2Fstorage.googleapis.com%2Fkaggle-competitions-data%2Fkaggle-v2%2F61542%2F7516023%2Fbundle%2Farchive.zip%3FX-Goog-Algorithm%3DGOOG4-RSA-SHA256%26X-Goog-Credential%3Dgcp-kaggle-com%2540kaggle-161607.iam.gserviceaccount.com%252F20240128%252Fauto%252Fstorage%252Fgoog4_request%26X-Goog-Date%3D20240128T102030Z%26X-Goog-Expires%3D259200%26X-Goog-SignedHeaders%3Dhost%26X-Goog-Signature%3D038d55997cf8a860737caadb5837a5ebfaaf8477d4523afa1008387fe39c3a0c58c1ddc811284f559dbb78fd8e0f8230fca333e828951b69e5d935955b9163461cbd2f4f8b3f321dd0e73d767e2ef1a8ceb52512ef8f8d99fd19c92abf23c5a856ebd3d9ed4ee28b4c31b83427a7dc10052602e6d604e2c55f51d8e26da1e2dacb2e720476c3b874b22d5a03e8dde81374f227c87a024dea36e5973a7cabcccdcec804ba2fd73b5397d7d334be750de7ea9d4a2c2dcb12b93f4d75c18f063ebf02ff802e8912122dbd5b25695e7658bffc61997b9893958b304068a6e593653b14959b5355f4b8bb09d5d01768dda2839e271941fabfddf3cc5d8cbc5cd06746,argugpt:https%3A%2F%2Fstorage.googleapis.com%2Fkaggle-data-sets%2F3946973%2F6867914%2Fbundle%2Farchive.zip%3FX-Goog-Algorithm%3DGOOG4-RSA-SHA256%26X-Goog-Credential%3Dgcp-kaggle-com%2540kaggle-161607.iam.gserviceaccount.com%252F20240128%252Fauto%252Fstorage%252Fgoog4_request%26X-Goog-Date%3D20240128T102030Z%26X-Goog-Expires%3D259200%26X-Goog-SignedHeaders%3Dhost%26X-Goog-Signature%3D490ee9c880e3988ac2d0ceedc2936a72525b02e00898ca8feae1456ecdd6a542f952cedb096ce8474098bc29e06744cea2433b38c55accab1c9656f43d1baccccd2b36486e1075525b59c4f61326c5a819dc3f1bed35c76c73ef646f21d71bf8f3e8d7eb94e6c21068392293b9ba1e7fc8ac286eb68a727ac479118880aeff2c08f2e3e013aa0e888c099fb5a54a83920cebbf3ca011d818e66787427bfddf16de31a61552638a21cf583099a16a3cc660817297abdd494a926a3d58196778021bc6ea4b20d0923d7fb588d4857e95dce2979e3b246e6e282ef0b0fcabaecd2dd632c413f7f723e1178d080fc89fb31cd9a4564c84b11062fb9229d61d2dbf4e,daigt-proper-train-dataset:https%3A%2F%2Fstorage.googleapis.com%2Fkaggle-data-sets%2F3942644%2F6890527%2Fbundle%2Farchive.zip%3FX-Goog-Algorithm%3DGOOG4-RSA-SHA256%26X-Goog-Credential%3Dgcp-kaggle-com%2540kaggle-161607.iam.gserviceaccount.com%252F20240128%252Fauto%252Fstorage%252Fgoog4_request%26X-Goog-Date%3D20240128T102031Z%26X-Goog-Expires%3D259200%26X-Goog-SignedHeaders%3Dhost%26X-Goog-Signature%3D352a1df1e329069e50e0d64cb012986e5c75605e915c0b16383182a8618769c5ee4e3dd3f59448b11d64187657833f7f3f3e30c7c21fc343af2c51111074ea60e70e904833ef6a3aa4ad4b4864d89b924a3f063e71c41dbee1bdf1d453dc2cbe62e8819854b6e71040ca0014522e9651b9e8e6640c6caee259e981486a3ee0793ee7f56068c3d7efe66941530d2669bb8d3f989fe7b4056a81f76b0870fa2cf21cce8641b4f3e8c0b90fab4ef495464f2700bd99f20d4d94e86c11bc06301b1fc49a63bee1db180b733a12dc20b3b0f109c15b172c1cf0f91234176030f5c2241e7f646d99238ff63fc36ca1b0419463f38fe3bd477790b060c88c2bc9441ac0'
-
- KAGGLE_INPUT_PATH='/kaggle/input'
- KAGGLE_WORKING_PATH='/kaggle/working'
- KAGGLE_SYMLINK='kaggle'
-
- !umount /kaggle/input/ 2> /dev/null
- shutil.rmtree('/kaggle/input', ignore_errors=True)
- os.makedirs(KAGGLE_INPUT_PATH, 0o777, exist_ok=True)
- os.makedirs(KAGGLE_WORKING_PATH, 0o777, exist_ok=True)
-
- try:
-     os.symlink(KAGGLE_INPUT_PATH, os.path.join("..", 'input'), target_is_directory=True)
- except FileExistsError:
-     pass
- try:
-     os.symlink(KAGGLE_WORKING_PATH, os.path.join("..", 'working'), target_is_directory=True)
- except FileExistsError:
-     pass
-
- for data_source_mapping in DATA_SOURCE_MAPPING.split(','):
-     directory, download_url_encoded = data_source_mapping.split(':')
-     download_url = unquote(download_url_encoded)
-     filename = urlparse(download_url).path
-     destination_path = os.path.join(KAGGLE_INPUT_PATH, directory)
-     try:
-         with urlopen(download_url) as fileres, NamedTemporaryFile() as tfile:
-             total_length = fileres.headers['content-length']
-             print(f'Downloading {directory}, {total_length} bytes compressed')
-             dl = 0
-             data = fileres.read(CHUNK_SIZE)
-             while len(data) > 0:
-                 dl += len(data)
-                 tfile.write(data)
-                 done = int(50 * dl / int(total_length))
-                 sys.stdout.write(f"\r[{'=' * done}{' ' * (50-done)}] {dl} bytes downloaded")
-                 sys.stdout.flush()
-                 data = fileres.read(CHUNK_SIZE)
-             if filename.endswith('.zip'):
-                 with ZipFile(tfile) as zfile:
-                     zfile.extractall(destination_path)
-             else:
-                 with tarfile.open(tfile.name) as tarfile:
-                     tarfile.extractall(destination_path)
-             print(f'\nDownloaded and uncompressed: {directory}')
-     except HTTPError as e:
-         print(f'Failed to load (likely expired) {download_url} to path {destination_path}')
-         continue
-     except OSError as e:
-         print(f'Failed to load {download_url} to path {destination_path}')
-         continue
-
- print('Data source import complete.')
-
  # This Python 3 environment comes with many helpful analytics libraries installed
  # It is defined by the kaggle/python Docker image: https://github.com/kaggle/docker-python
  # For example, here's several helpful packages to load
@@ -95,7 +42,6 @@ for dirname, _, filenames in os.walk('/kaggle/input'):
  # You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
  # You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session

- !git clone https://huggingface.co/spaces/kaitehtzeng/primary_app

  """## Import Necessary Library"""

@@ -138,18 +84,8 @@ Comparing two essays. <br>
  One predicted written by students, one predicted written by LLM
  """

- train_essays = pd.read_csv("/kaggle/input/llm-detect-ai-generated-text/train_essays.csv")
- external = pd.read_csv("/kaggle/input/daigt-proper-train-dataset/train_drcat_04.csv")
-
- df = pd.concat([
-     external[external.source=="persuade_corpus"].sample(10000,random_state=101),
-     external[external.source!='persuade_corpus']
- ])
- df = df.reset_index()
+ train_essays = pd.read_csv("D:\first app\train_essays.csv")

- df['stratify'] = df.label.astype(str)+df.source.astype(str)
- train_df,val_df = train_test_split(df,test_size=0.2,random_state = 101,stratify=df['stratify'])
- train_df, val_df = train_df.reset_index(), val_df.reset_index()

  import transformers
  print('transformers version:', transformers.__version__)
@@ -161,55 +97,7 @@ print('transformers version:', transformers.__version__)
  tokenizer = AutoTokenizer.from_pretrained(config['model'])
  tokenizer.train_new_from_iterator(train_essays['text'], 52000)

- """### Building Training Dataset and Loader"""
-
- class EssayDataset:
-     def __init__(self, df, config,tokenizer, is_test = False):
-         self.df = df
-         self.tokenizer = tokenizer
-         self.is_test = is_test
-         self.config = config
-
-     def token_start(self, idx):
-         sample_text = self.df.loc[idx,'text']
-
-         tokenized = tokenizer.encode_plus(sample_text,
-                                           None,
-                                           add_special_tokens=True,
-                                           max_length= self.config['max_length'],
-                                           truncation=True,
-                                           padding="max_length"
-                                           )
-
-         inputs = {
-             "input_ids": torch.tensor(tokenized['input_ids'],dtype=torch.long),
-             "token_type_ids": torch.tensor(tokenized['token_type_ids'],dtype=torch.long),
-             "attention_mask": torch.tensor(tokenized['attention_mask'],dtype = torch.long)
-         }
-
-         return inputs
-
-
-     def __getitem__(self,idx):
-
-         input_text = self.token_start(idx)
-
-         if self.is_test:
-             return input_text
-
-         else:
-             labels = self.df.loc[idx,'label']
-             targets = {'labels' : torch.tensor(labels,dtype = torch.float32)}

-             return input_text,targets
-
-     def __len__(self):
-         return len(self.df)
-
- eval_ds = EssayDataset(val_df,config,tokenizer = tokenizer,is_test=True)
-
- eval_loader = torch.utils.data.DataLoader(eval_ds,
-                                           batch_size= config['batch_size'])

  """Build the Model"""
 
@@ -220,7 +108,7 @@ class mymodel(nn.Module):

          self.model_name = config['model']
          self.deberta = AutoModel.from_pretrained(self.model_name)
-         #12801 = len(tokenizer)
+         #128001 = len(tokenizer)
          self.deberta.resize_token_embeddings(128001)
          self.dropout = nn.Dropout(config['dropout'])
          self.fn0 = nn.Linear(self.deberta.config.hidden_size,256)
@@ -300,7 +188,6 @@ def trial(text):

  import gradio as gr

- trial('hello fuck you')



@@ -310,4 +197,12 @@ demo = gr.Interface(
  outputs="textbox"
  )

- demo.launch(share=True)
+ demo.launch(share=True)
+
+ """### Model
+ Fine tuning the deberta-v3-base model with new-added layers
+
+ The model is later used to participate the Kaggle Competition:LLM - Detect AI Generated Text.
+ The Auc of the model is 0.75
+ """
+
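One note on the lines added above: pd.read_csv("D:\first app\train_essays.csv") passes a Windows path as a plain string, so Python reads \f and \t as escape characters, and resize_token_embeddings(128001) has to be kept in step with the tokenizer size by hand (the comment "#128001 = len(tokenizer)" records that). A minimal sketch of the same two spots with safer idioms, assuming the tokenizer and mymodel objects defined earlier in app.py and the file location taken from the diff:

    from pathlib import Path
    import pandas as pd

    # A raw string keeps "\f" and "\t" in the Windows path from being read as escapes.
    train_essays = pd.read_csv(Path(r"D:\first app\train_essays.csv"))

    # Inside mymodel.__init__, size the embedding table from the tokenizer itself
    # instead of hard-coding 128001, so the value and the comment cannot drift apart:
    #     self.deberta.resize_token_embeddings(len(tokenizer))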