Antoine Chaffin committed on
Commit
4491e36
1 Parent(s): a2f05a9

Moving the auth token to where the model is loaded

Browse files
Files changed (2) hide show
  1. app.py +2 -0
  2. watermark.py +0 -3
app.py CHANGED
@@ -8,6 +8,8 @@ import time
8
  import gradio as gr
9
  from transformers import AutoModelForCausalLM
10
 
 
 
11
  device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')
12
 
13
  parser = argparse.ArgumentParser(description='Generative Text Watermarking demo')
 
8
  import gradio as gr
9
  from transformers import AutoModelForCausalLM
10
 
11
+ hf_token = os.getenv('HF_TOKEN')
12
+
13
  device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')
14
 
15
  parser = argparse.ArgumentParser(description='Generative Text Watermarking demo')
watermark.py CHANGED
@@ -8,9 +8,6 @@ from scipy.special import gamma, gammainc, gammaincc, betainc
8
  from scipy.optimize import fminbound
9
  import numpy as np
10
 
11
- import os
12
-
13
- hf_token = os.getenv('HF_TOKEN')
14
 
15
 
16
  device = torch.device('cuda:0') if torch.cuda.is_available() else torch.device('cpu')
 
8
  from scipy.optimize import fminbound
9
  import numpy as np
10
 
 
 
 
11
 
12
 
13
  device = torch.device('cuda:0') if torch.cuda.is_available() else torch.device('cpu')