Dejiao Z committed · Commit c273c0d · 1 Parent(s): 2228afc

update readme

.ipynb_checkpoints/modeling_codesage-checkpoint.py ADDED
@@ -0,0 +1,426 @@
+ #!/usr/bin/env python
+ # coding=utf-8
+ # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+
+ import math
+ import torch
+ import torch.utils.checkpoint
+ from torch import nn
+ from torch.nn import CrossEntropyLoss, MSELoss, BCEWithLogitsLoss
+ from transformers.activations import ACT2FN
+ from transformers.modeling_utils import Conv1D, PreTrainedModel
+ from transformers.utils import logging
+ from .config_codesage import CodeSageConfig
+ from transformers.modeling_outputs import (
+     BaseModelOutputWithPooling,
+     MaskedLMOutput,
+     SequenceClassifierOutput
+ )
+
+ logger = logging.get_logger(__name__)
+
+ CODESAGE_PRETRAINED_MODEL_ARCHIVE_LIST = [
+     "codesage/codesage-small-v2",
+     "codesage/codesage-base-v2",
+     "codesage/codesage-large-v2",
+     # See all CodeSage models at https://huggingface.co/models?filter=codesage
+ ]
+
+
+ class CodeSageAttention(nn.Module):
+     def __init__(self, config):
+         super().__init__()
+
+         self.hidden_size = config.hidden_size
+         self.num_heads = config.num_attention_heads
+         self.head_dim = config.hidden_size // self.num_heads
+         if self.head_dim * self.num_heads != config.hidden_size:
+             raise ValueError(
+                 f"`hidden_size` must be divisible by `num_heads` "
+                 f"(got `hidden_size`: {config.hidden_size} and `num_heads`: {self.num_heads})."
+             )
+
+         self.c_attn = Conv1D(3 * self.hidden_size, self.hidden_size)
+         self.c_proj = Conv1D(self.hidden_size, self.hidden_size)
+
+         self.attention_dropout = nn.Dropout(config.attention_dropout_prob)
+         self.residual_dropout = nn.Dropout(config.residual_dropout_prob)
+
+     def attn(self, query, key, value, attention_mask=None, head_mask=None):
+         # Scaled dot-product attention over (batch, head, seq, head_dim) tensors.
+         attn_weights = torch.matmul(query, key.transpose(-1, -2))
+         attn_weights = attn_weights / math.sqrt(self.head_dim)
+         if attention_mask is not None:
+             attn_weights = attn_weights + attention_mask
+
+         attn_weights = nn.functional.softmax(attn_weights, dim=-1)
+         attn_weights = self.attention_dropout(attn_weights)
+         if head_mask is not None:
+             attn_weights = attn_weights * head_mask
+
+         attn_output = torch.matmul(attn_weights, value)
+         return attn_output, attn_weights
+
+     def split_heads(self, tensor, num_heads, attn_head_size):
+         """
+         Splits hidden_size dim into attn_head_size and num_heads
+         """
+         new_shape = tensor.size()[:-1] + (num_heads, attn_head_size)
+         tensor = tensor.view(*new_shape)
+         return tensor.permute(0, 2, 1, 3)  # (batch, head, seq_length, head_features)
+
+     def merge_heads(self, tensor, num_heads, attn_head_size):
+         """
+         Merges attn_head_size dim and num_attn_heads dim into hidden_size
+         """
+         tensor = tensor.permute(0, 2, 1, 3).contiguous()
+         new_shape = tensor.size()[:-2] + (num_heads * attn_head_size,)
+         return tensor.view(new_shape)
+
+     def forward(
+         self,
+         hidden_states,
+         attention_mask=None,
+         head_mask=None,
+         output_attentions=False,
+     ):
+         query, key, value = self.c_attn(hidden_states).split(self.hidden_size, dim=2)
+         query = self.split_heads(query, self.num_heads, self.head_dim)
+         key = self.split_heads(key, self.num_heads, self.head_dim)
+         value = self.split_heads(value, self.num_heads, self.head_dim)
+
+         attn_output, attn_weights = self.attn(query, key, value, attention_mask, head_mask)
+
+         attn_output = self.merge_heads(attn_output, self.num_heads, self.head_dim)
+         attn_output = self.c_proj(attn_output)
+         attn_output = self.residual_dropout(attn_output)
+
+         outputs = (attn_output, attn_weights) if output_attentions else (attn_output,)
+         return outputs  # attn_output, (attentions)
+
+
+ class CodeSageMLP(nn.Module):
+     def __init__(self, intermediate_size, config):
+         super().__init__()
+
+         self.c_fc = Conv1D(intermediate_size, config.hidden_size)
+         self.act = ACT2FN[config.activation_function]
+         self.c_proj = Conv1D(config.hidden_size, intermediate_size)
+         self.dropout = nn.Dropout(config.residual_dropout_prob)
+
+     def forward(self, hidden_states):
+         hidden_states = self.c_fc(hidden_states)
+         hidden_states = self.act(hidden_states)
+         hidden_states = self.c_proj(hidden_states)
+         hidden_states = self.dropout(hidden_states)
+         return hidden_states
+
+
+ class CodeSageBlock(nn.Module):
+     def __init__(self, config):
+         super().__init__()
+         hidden_size = config.hidden_size
+         inner_dim = config.intermediate_size if config.intermediate_size is not None else 4 * hidden_size
+         self.ln_1 = nn.LayerNorm(hidden_size, eps=config.layer_norm_epsilon)
+         self.attn = CodeSageAttention(config)
+         self.ln_2 = nn.LayerNorm(hidden_size, eps=config.layer_norm_epsilon)
+         self.mlp = CodeSageMLP(inner_dim, config)
+
+     def forward(
+         self,
+         hidden_states,
+         attention_mask=None,
+         head_mask=None,
+         output_attentions=False,
+     ):
+         residual = hidden_states
+         hidden_states = self.ln_1(hidden_states)
+         attn_outputs = self.attn(
+             hidden_states,
+             attention_mask=attention_mask,
+             head_mask=head_mask,
+             output_attentions=output_attentions
+         )
+         attn_output = attn_outputs[0]  # attn_output, (attentions)
+         outputs = attn_outputs[1:]
+         hidden_states = attn_output + residual
+
+         residual = hidden_states
+         hidden_states = self.ln_2(hidden_states)
+         feed_forward_hidden_states = self.mlp(hidden_states)
+         hidden_states = residual + feed_forward_hidden_states
+
+         # `outputs` was already sliced above; re-slicing with outputs[1:] here
+         # would silently drop the attention weights when they were requested.
+         outputs = (hidden_states,) + outputs
+         return outputs  # hidden_states, (attentions)
+
+
+ class CodeSagePreTrainedModel(PreTrainedModel):
+     config_class = CodeSageConfig
+     base_model_prefix = "transformer"
+
+     def _init_weights(self, module):
+         """Initialize the weights."""
+         if isinstance(module, (nn.Linear, Conv1D)):
+             module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
+             if module.bias is not None:
+                 module.bias.data.zero_()
+         elif isinstance(module, nn.Embedding):
+             module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
+             if module.padding_idx is not None:
+                 module.weight.data[module.padding_idx].zero_()
+         elif isinstance(module, nn.LayerNorm):
+             module.bias.data.zero_()
+             module.weight.data.fill_(1.0)
+
+
+ class CodeSageModel(CodeSagePreTrainedModel):
+     def __init__(self, config):
+         super().__init__(config)
+
+         self.wte = nn.Embedding(config.vocab_size, config.hidden_size)
+         self.wpe = nn.Embedding(config.max_position_embeddings, config.hidden_size)
+
+         self.drop = nn.Dropout(config.embedding_dropout_prob)
+         self.h = nn.ModuleList([CodeSageBlock(config) for _ in range(config.num_hidden_layers)])
+         self.ln_f = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_epsilon)
+
+         self.init_weights()
+
+     def get_input_embeddings(self):
+         return self.wte
+
+     def set_input_embeddings(self, new_embeddings: nn.Embedding):
+         self.wte = new_embeddings
+
+     def forward(
+         self,
+         input_ids=None,
+         attention_mask=None,
+         position_ids=None,
+         head_mask=None,
+         inputs_embeds=None,
+         output_attentions=None,
+         output_hidden_states=None,
+         return_dict=None
+     ):
+         output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+         output_hidden_states = (
+             output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+         )
+         return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+         if input_ids is not None and inputs_embeds is not None:
+             raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
+         if input_ids is not None:
+             input_shape = input_ids.size()
+         elif inputs_embeds is not None:
+             input_shape = inputs_embeds.size()[:-1]
+         else:
+             raise ValueError("You have to specify either input_ids or inputs_embeds")
+
+         device = input_ids.device if input_ids is not None else inputs_embeds.device
+         if position_ids is None:
+             position_ids = torch.arange(input_shape[-1], dtype=torch.long, device=device)
+             position_ids = position_ids.unsqueeze(0).view(-1, input_shape[-1])
+         else:
+             position_ids = position_ids.view(-1, input_shape[-1])
+
+         extended_attention_mask = None
+         if attention_mask is not None:
+             assert attention_mask.dim() == 2
+             extended_attention_mask = attention_mask[:, None, None, :]
+             extended_attention_mask = extended_attention_mask.to(dtype=self.dtype)  # fp16 compatibility
+             extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0
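+             # Padding positions now hold -10000.0, so adding this mask to the
+             # attention scores drives their softmax weights to ~0.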
+
+         head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)
+         if inputs_embeds is None:
+             inputs_embeds = self.wte(input_ids)
+
+         position_embeds = self.wpe(position_ids)
+         hidden_states = inputs_embeds + position_embeds
+
+         hidden_states = self.drop(hidden_states)
+         output_shape = input_shape + (hidden_states.size(-1),)
+
+         all_self_attentions = () if output_attentions else None
+         all_hidden_states = () if output_hidden_states else None
+         for i, block in enumerate(self.h):
+             if output_hidden_states:
+                 all_hidden_states = all_hidden_states + (hidden_states,)
+
+             outputs = block(
+                 hidden_states,
+                 attention_mask=extended_attention_mask,
+                 head_mask=head_mask[i],
+                 output_attentions=output_attentions,
+             )
+
+             hidden_states = outputs[0]
+             if output_attentions:
+                 all_self_attentions = all_self_attentions + (outputs[1],)
+
+         hidden_states = self.ln_f(hidden_states)
+         hidden_states = hidden_states.view(*output_shape)
+         if output_hidden_states:
+             all_hidden_states = all_hidden_states + (hidden_states,)
+
+         pooled_output = None  # mean-pooled output (masked mean over the sequence)
+         if attention_mask is not None:
+             pooled_output = (hidden_states * attention_mask[:, :, None]).sum(1) / attention_mask.sum(1)[:, None]
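+             # Per sequence i this computes sum_t mask[i, t] * h[i, t] / sum_t mask[i, t],
+             # so padded positions contribute nothing to the pooled embedding.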
+
+         if not return_dict:
+             return tuple(
+                 v
+                 for v in [hidden_states, pooled_output, all_hidden_states, all_self_attentions]
+                 if v is not None
+             )
+
+         return BaseModelOutputWithPooling(
+             last_hidden_state=hidden_states,
+             pooler_output=pooled_output,
+             hidden_states=all_hidden_states,
+             attentions=all_self_attentions
+         )
+
+
+ class CodeSageForMaskedLM(CodeSagePreTrainedModel):
+     _tied_weights_keys = ["lm_head.weight"]
+
+     def __init__(self, config):
+         super().__init__(config)
+         self.transformer = CodeSageModel(config)
+         self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
+
+         self.init_weights()
+
+     def get_output_embeddings(self):
+         return self.lm_head
+
+     def set_output_embeddings(self, new_embeddings):
+         self.lm_head = new_embeddings
+
+     def forward(
+         self,
+         input_ids=None,
+         attention_mask=None,
+         position_ids=None,
+         head_mask=None,
+         inputs_embeds=None,
+         labels=None,
+         output_attentions=None,
+         output_hidden_states=None,
+         return_dict=None
+     ):
+         return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+         transformer_outputs = self.transformer(
+             input_ids,
+             attention_mask=attention_mask,
+             position_ids=position_ids,
+             head_mask=head_mask,
+             inputs_embeds=inputs_embeds,
+             output_attentions=output_attentions,
+             output_hidden_states=output_hidden_states,
+             return_dict=return_dict
+         )
+         hidden_states = transformer_outputs[0]
+         lm_logits = self.lm_head(hidden_states)
+
+         masked_lm_loss = None
+         if labels is not None:
+             loss_fct = CrossEntropyLoss()
+             masked_lm_loss = loss_fct(lm_logits.view(-1, lm_logits.size(-1)), labels.view(-1))
+
+         if not return_dict:
+             output = (lm_logits,) + transformer_outputs[1:]
+             return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output
+
+         return MaskedLMOutput(
+             loss=masked_lm_loss,
+             logits=lm_logits,
+             hidden_states=transformer_outputs.hidden_states,
+             attentions=transformer_outputs.attentions,
+         )
+
+
+ class CodeSageForSequenceClassification(CodeSagePreTrainedModel):
+
+     def __init__(self, config):
+         super().__init__(config)
+         self.num_labels = config.num_labels
+         self.config = config
+
+         self.transformer = CodeSageModel(config)
+         classifier_dropout = (
+             config.classifier_dropout
+             if hasattr(config, 'classifier_dropout') and config.classifier_dropout is not None
+             else config.residual_dropout_prob
+         )
+         self.dropout = nn.Dropout(classifier_dropout)
+         self.classifier = nn.Linear(config.hidden_size, config.num_labels)
+
+         # Initialize weights and apply final processing
+         self.post_init()
+
+     def forward(
+         self,
+         input_ids=None,
+         attention_mask=None,
+         position_ids=None,
+         head_mask=None,
+         inputs_embeds=None,
+         labels=None,
+         output_attentions=None,
+         output_hidden_states=None,
+         return_dict=None,
+     ):
+         return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+         assert attention_mask is not None, "attention_mask is needed to perform mean-pooling"
+
+         outputs = self.transformer(
+             input_ids,
+             attention_mask=attention_mask,
+             position_ids=position_ids,
+             head_mask=head_mask,
+             inputs_embeds=inputs_embeds,
+             output_attentions=output_attentions,
+             output_hidden_states=output_hidden_states,
+             return_dict=return_dict,
+         )
+
+         pooled_output = outputs[1]
+         pooled_output = self.dropout(pooled_output)
+         logits = self.classifier(pooled_output)
+
+         loss = None
+         if labels is not None:
+             if self.config.problem_type is None:
+                 if self.num_labels == 1:
+                     self.config.problem_type = "regression"
+                 elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
+                     self.config.problem_type = "single_label_classification"
+                 else:
+                     self.config.problem_type = "multi_label_classification"
+
+             if self.config.problem_type == "regression":
+                 loss_fct = MSELoss()
+                 if self.num_labels == 1:
+                     loss = loss_fct(logits.squeeze(), labels.squeeze())
+                 else:
+                     loss = loss_fct(logits, labels)
+             elif self.config.problem_type == "single_label_classification":
+                 loss_fct = CrossEntropyLoss()
+                 loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
+             elif self.config.problem_type == "multi_label_classification":
+                 loss_fct = BCEWithLogitsLoss()
+                 loss = loss_fct(logits, labels)
+
+         if not return_dict:
+             output = (logits,) + outputs[2:]
+             return ((loss,) + output) if loss is not None else output
+
+         return SequenceClassifierOutput(
+             loss=loss,
+             logits=logits,
+             hidden_states=outputs.hidden_states,
+             attentions=outputs.attentions,
+         )
.ipynb_checkpoints/tokenization_codesage-checkpoint.py ADDED
@@ -0,0 +1,277 @@
+ import json
+ import os
+ from functools import lru_cache
+ from typing import List, Optional, Tuple
+
+ import regex as re
+
+ from transformers import AddedToken, PreTrainedTokenizer
+ import logging
+
+
+ logger = logging.getLogger(__name__)
+
+ VOCAB_FILES_NAMES = {
+     "vocab_file": "vocab.json",
+     "merges_file": "merges.txt",
+ }
+
+ # Taken from
+ # https://github.com/huggingface/transformers/blob/8aca43bdb3cb9a5020f6d57589d85679dc873b1c/src/transformers/models/gpt2/tokenization_gpt2.py#L62-L84
+ @lru_cache()
+ def bytes_to_unicode():
+     """
+     Returns a mapping between utf-8 bytes and unicode strings. We specifically avoid mapping to whitespace/control
+     characters that the bpe code barfs on.
+
+     The reversible bpe codes work on unicode strings. This means you need a large # of unicode characters in your
+     vocab if you want to avoid UNKs. When you're at something like a 10B token dataset you end up needing around 5K
+     for decent coverage. This is a significant percentage of your normal, say, 32K bpe vocab. To avoid that, we want
+     lookup tables between utf-8 bytes and unicode strings.
+     """
+     bs = (
+         list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
+     )
+     cs = bs[:]
+     n = 0
+     for b in range(2**8):
+         if b not in bs:
+             bs.append(b)
+             cs.append(2**8 + n)
+             n += 1
+     cs = [chr(n) for n in cs]
+     return dict(zip(bs, cs))
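+
+ # For example, byte 32 (" ") is not printable, so the mapping above remaps it:
+ # bytes_to_unicode()[ord(" ")] == "Ġ", which is why a leading space shows up
+ # as "Ġ" in BPE vocabulary entries.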
+
+
+ def get_pairs(word):
+     """
+     Return set of symbol pairs in a word.
+
+     Word is represented as tuple of symbols (symbols being variable-length strings).
+     """
+     pairs = set()
+     prev_char = word[0]
+     for char in word[1:]:
+         pairs.add((prev_char, char))
+         prev_char = char
+     return pairs
+
+
+ class CodeSageTokenizer(PreTrainedTokenizer):
+     """A thin wrapper around the StarCoder tokenizer.
+     See HuggingFace for further documentation on general tokenizer methods.
+     """
+
+     vocab_files_names = VOCAB_FILES_NAMES
+     model_input_names = ["input_ids", "attention_mask"]
+
+     def __init__(
+         self,
+         vocab_file,
+         merges_file,
+         errors="replace",
+         unk_token="<|endoftext|>",
+         bos_token="<|endoftext|>",
+         eos_token="<|endoftext|>",
+         pad_token=None,
+         add_prefix_space=False,
+         add_bos_token=False,
+         add_eos_token=True,
+         **kwargs,
+     ):
+         bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
+         eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
+         unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
+         pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token
+
+         self.add_bos_token = add_bos_token
+         self.add_eos_token = add_eos_token
+
+         with open(vocab_file, encoding="utf-8") as vocab_handle:
+             self.encoder = json.load(vocab_handle)
+         self.decoder = {v: k for k, v in self.encoder.items()}
+         self.errors = errors  # how to handle errors in decoding
+         self.byte_encoder = bytes_to_unicode()
+         self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
+         with open(merges_file, encoding="utf-8") as merges_handle:
+             bpe_merges = merges_handle.read().split("\n")[1:-1]
+         bpe_merges = [tuple(merge.split()) for merge in bpe_merges]
+         self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
+         self.cache = {}
+         self.add_prefix_space = add_prefix_space
+
+         # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
+         self.pat = re.compile(r"""'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+""")
+
+         super().__init__(
+             errors=errors,
+             unk_token=unk_token,
+             bos_token=bos_token,
+             eos_token=eos_token,
+             pad_token=pad_token,
+             add_prefix_space=add_prefix_space,
+             add_bos_token=add_bos_token,
+             add_eos_token=add_eos_token,
+             **kwargs,
+         )
+
+     @property
+     def vocab_size(self):
+         return len(self.encoder)
+
+     def get_vocab(self):
+         return dict(self.encoder, **self.added_tokens_encoder)
+
+     def bpe(self, token):
+         if token in self.cache:
+             return self.cache[token]
+         word = tuple(token)
+         pairs = get_pairs(word)
+
+         if not pairs:
+             return token
+
+         while True:
+             bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
+             if bigram not in self.bpe_ranks:
+                 break
+             first, second = bigram
+             new_word = []
+             i = 0
+             while i < len(word):
+                 try:
+                     j = word.index(first, i)
+                 except ValueError:
+                     new_word.extend(word[i:])
+                     break
+                 else:
+                     new_word.extend(word[i:j])
+                     i = j
+
+                 if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
+                     new_word.append(first + second)
+                     i += 2
+                 else:
+                     new_word.append(word[i])
+                     i += 1
+             new_word = tuple(new_word)
+             word = new_word
+             if len(word) == 1:
+                 break
+             else:
+                 pairs = get_pairs(word)
+         word = " ".join(word)
+         self.cache[token] = word
+         return word
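+     # The loop above greedily fuses the lowest-ranked adjacent pair until no
+     # known merge applies, then caches the result per token.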
+
+     def build_inputs_with_special_tokens(
+         self,
+         token_ids_0: List[int],
+         token_ids_1: Optional[List[int]] = None) -> List[int]:
+         bos_token_id = [self.bos_token_id] if self.add_bos_token else []
+         eos_token_id = [self.eos_token_id] if self.add_eos_token else []
+
+         output = bos_token_id + token_ids_0 + eos_token_id
+
+         if token_ids_1 is not None:
+             output = output + bos_token_id + token_ids_1 + eos_token_id
+
+         return output
+
+     def get_special_tokens_mask(
+         self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
+     ) -> List[int]:
+         """
+         Retrieves sequence ids from a token list that has no special tokens added. This method is called when adding
+         special tokens using the tokenizer `prepare_for_model` or `encode_plus` methods.
+
+         Args:
+             token_ids_0 (`List[int]`):
+                 List of IDs.
+             token_ids_1 (`List[int]`, *optional*):
+                 Optional second list of IDs for sequence pairs.
+             already_has_special_tokens (`bool`, *optional*, defaults to `False`):
+                 Whether or not the token list is already formatted with special tokens for the model.
+
+         Returns:
+             `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
+         """
+         if already_has_special_tokens:
+             return super().get_special_tokens_mask(
+                 token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
+             )
+
+         if not self.add_bos_token:
+             return super().get_special_tokens_mask(
+                 token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=False
+             )
+
+         if token_ids_1 is None:
+             return [1] + ([0] * len(token_ids_0))
+         return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1))
+
+     def _tokenize(self, text):
+         """Tokenize a string."""
+         bpe_tokens = []
+         for token in re.findall(self.pat, text):
+             token = "".join(
+                 self.byte_encoder[b] for b in token.encode("utf-8")
+             )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
+             bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(" "))
+         return bpe_tokens
+
+     def _convert_token_to_id(self, token):
+         """Converts a token (str) to an id using the vocab."""
+         return self.encoder.get(token, self.encoder.get(self.unk_token))
+
+     def _convert_id_to_token(self, index):
+         """Converts an index (integer) to a token (str) using the vocab."""
+         return self.decoder.get(index)
+
+     def convert_tokens_to_string(self, tokens):
+         """Converts a sequence of tokens (string) into a single string."""
+         text = "".join(tokens)
+         text = bytearray([self.byte_decoder[c] for c in text]).decode("utf-8", errors=self.errors)
+         return text
+
+     def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
+         if not os.path.isdir(save_directory):
+             logger.error(f"Vocabulary path ({save_directory}) should be a directory")
+             return
+         vocab_file = os.path.join(
+             save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
+         )
+         merge_file = os.path.join(
+             save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
+         )
+
+         with open(vocab_file, "w", encoding="utf-8") as f:
+             f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")
+
+         index = 0
+         with open(merge_file, "w", encoding="utf-8") as writer:
+             writer.write("#version: 0.2\n")
+             for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
+                 if index != token_index:
+                     logger.warning(
+                         f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
+                         " Please check that the tokenizer is not corrupted!"
+                     )
+                     index = token_index
+                 writer.write(" ".join(bpe_tokens) + "\n")
+                 index += 1
+
+         return vocab_file, merge_file
+
+     def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
+         add_prefix_space = kwargs.pop("add_prefix_space", self.add_prefix_space)
+         if is_split_into_words or add_prefix_space:
+             text = " " + text
+         return (text, kwargs)
+
+     @property
+     def default_chat_template(self):
+         """
+         A simple chat template that ignores role information and just concatenates messages with EOS tokens.
+         """
+         return "{% for message in messages %}" "{{ message.content }}{{ eos_token }}" "{% endfor %}"
README.md CHANGED
@@ -1,3 +1,53 @@
- ---
- license: apache-2.0
- ---
+ ---
+ license: apache-2.0
+ datasets:
+ - bigcode/the-stack-dedup
+ - bigcode/the-stack-v2
+
+ library_name: transformers
+ language:
+ - code
+ ---
+
+ ## CodeSage-Small-v2
+
+ ### Model description
+ CodeSage is a family of open code embedding models with an encoder architecture that supports a wide range of source code understanding tasks. It was initially introduced in the paper:
+
+ [Code Representation Learning At Scale by Dejiao Zhang*, Wasi Uddin Ahmad*, et al.](https://arxiv.org/abs/2402.01935)
+
+ For this V2 model, we enhanced semantic search performance by improving the quality of the contrastive learning data through [consistency filtering](https://arxiv.org/abs/2209.11755). Starting from the pretrained checkpoint of our V1 model training (Zhang et al., 2024), which was trained with both Masked Language Modeling (MLM) and deobfuscation ([Section 3.1](https://arxiv.org/abs/2402.01935)), we applied contrastive learning with the filtered data. Unlike the V1 model, we extracted the initial set of (text, code) pairs, i.e., summaries and function/class bodies, from [The Stack V2](https://huggingface.co/datasets/bigcode/the-stack-v2) data instead of the [V1](https://huggingface.co/datasets/bigcode/the-stack-dedup) data. We employed simple rule-based filtering as detailed in our previous work, and then applied consistency filtering to further refine the data. While using The Stack V2 resulted in minor performance boosts on downstream tasks, the majority of the performance improvement came from the consistency filtering.
+
+ ### Training Data
+ The pretrained checkpoint is the same as the one used by our V1 model ([codesage/codesage-small](https://huggingface.co/codesage/codesage-small)), which was trained on [The Stack](https://huggingface.co/datasets/bigcode/the-stack-dedup) data. The contrastive learning data were extracted from [The Stack V2](https://huggingface.co/datasets/bigcode/the-stack-v2). As with our V1 model, we support the following nine languages: c, c-sharp, go, java, javascript, typescript, php, python, ruby.
+
+ ### How to use
+ This checkpoint consists of an encoder (130M parameters) that extracts 1024-dimensional code embeddings. It can be loaded via the `AutoModel` functionality and uses the [StarCoder tokenizer](https://arxiv.org/pdf/2305.06161.pdf).
+
+ ```python
+ from transformers import AutoModel, AutoTokenizer
+
+ checkpoint = "codesage/codesage-small-v2"
+ device = "cuda"  # for GPU usage or "cpu" for CPU usage
+
+ # Note: CodeSage requires adding an EOS token at the end of each tokenized sequence
+ tokenizer = AutoTokenizer.from_pretrained(checkpoint, trust_remote_code=True, add_eos_token=True)
+
+ model = AutoModel.from_pretrained(checkpoint, trust_remote_code=True).to(device)
+
+ inputs = tokenizer.encode("def print_hello_world():\tprint('Hello World!')", return_tensors="pt").to(device)
+ embedding = model(inputs)[0]
+ ```
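+
+ The call above returns token-level hidden states. To get a single vector per snippet, the model also exposes a mean-pooled `pooler_output` whenever an attention mask is passed (see the modeling code in this commit). Below is a minimal sketch; the batching, padding, and normalization choices here are illustrative assumptions, not part of the official recipe:
+
+ ```python
+ import torch
+ import torch.nn.functional as F
+ from transformers import AutoModel, AutoTokenizer
+
+ checkpoint = "codesage/codesage-small-v2"
+ device = "cuda"
+
+ tokenizer = AutoTokenizer.from_pretrained(checkpoint, trust_remote_code=True, add_eos_token=True)
+ model = AutoModel.from_pretrained(checkpoint, trust_remote_code=True).to(device)
+
+ snippets = [
+     "def print_hello_world():\tprint('Hello World!')",
+     "def add(a, b):\treturn a + b",
+ ]
+ # Assumption: reuse the EOS token for padding if no pad token is configured.
+ if tokenizer.pad_token is None:
+     tokenizer.pad_token = tokenizer.eos_token
+
+ batch = tokenizer(snippets, padding=True, return_tensors="pt").to(device)
+ with torch.no_grad():
+     out = model(**batch)  # the attention mask triggers the masked mean pooling
+ embeddings = F.normalize(out.pooler_output, p=2, dim=1)  # (2, 1024), unit norm
+ print(embeddings @ embeddings.T)  # cosine similarities between the snippets
+ ```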
+
+ ### BibTeX entry and citation info
+ ```bibtex
+ @inproceedings{
+     zhang2024code,
+     title={{CODE} {REPRESENTATION} {LEARNING} {AT} {SCALE}},
+     author={Dejiao Zhang and Wasi Uddin Ahmad and Ming Tan and Hantian Ding and Ramesh Nallapati and Dan Roth and Xiaofei Ma and Bing Xiang},
+     booktitle={The Twelfth International Conference on Learning Representations},
+     year={2024},
+     url={https://openreview.net/forum?id=vfzRRjumpX}
+ }
+ ```