codelion committed
Commit 5c38735
1 Parent(s): 09731af

Update app.py

Files changed (1)
  1. app.py +2 -2
app.py CHANGED
@@ -9,7 +9,7 @@ description = """# <p style="text-align: center; color: white;"> 🎅 <span styl
 a 1.1B parameter model for code generation in Python, Java & JavaScript. The model can also do infilling, just specify where you would like the model to complete code
 with the <span style='color: #ff75b3;'>&lt;FILL-HERE&gt;</span> token.</span>"""
 
-token = os.environ["HUB_TOKEN"]
+# token = os.environ["HUB_TOKEN"]
 device=torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
 
 
@@ -21,7 +21,7 @@ EOD = "<|endoftext|>"
 
 GENERATION_TITLE= "<p style='font-size: 16px; color: white;'>Generated code:</p>"
 
-tokenizer_fim = AutoTokenizer.from_pretrained("lambdasec/santafixer", use_auth_token=token, padding_side="left")
+tokenizer_fim = AutoTokenizer.from_pretrained("lambdasec/santafixer", padding_side="left")
 
 tokenizer_fim.add_special_tokens({
 "additional_special_tokens": [EOD, FIM_PREFIX, FIM_MIDDLE, FIM_SUFFIX, FIM_PAD],