ronald committed on
Commit
e9b74d8
·
1 Parent(s): 0b96aa2
Files changed (2) hide show
  1. app.py +2 -2
  2. my_perplexity.py +3 -3
app.py CHANGED
@@ -1,6 +1,6 @@
1
  import evaluate
2
  from evaluate.utils import launch_gradio_widget
3
 
4
- CACHE_DIR="/gfs/team/nlp/users/rcardena/tools/huggingface/evaluate"
5
- module = evaluate.load("my_perplexity", module_type="measurement",cache_dir=CACHE_DIR)
6
  launch_gradio_widget(module)
 
"""Demo entry point: serve the local ``my_perplexity`` measurement in a Gradio widget.

Loads the measurement module defined in ``my_perplexity.py`` through the
``evaluate`` library and hands it to ``launch_gradio_widget`` for an
interactive demo.
"""
import evaluate
from evaluate.utils import launch_gradio_widget

# NOTE: a hard-coded cluster cache path (cache_dir=...) was removed here so the
# script runs on any machine; pass cache_dir= to evaluate.load() to pin the
# Hugging Face cache location if needed.
module = evaluate.load("my_perplexity", module_type="measurement")
launch_gradio_widget(module)
my_perplexity.py CHANGED
@@ -106,7 +106,7 @@ class MyPerplexity(evaluate.Metric):
106
 
107
  def _compute(self, predictions, model_id, batch_size: int = 16, add_start_token: bool = True, device=None):
108
 
109
- CACHE_DIR="/gfs/team/nlp/users/rcardena/tools/huggingface/evaluate"
110
  if device is not None:
111
  assert device in ["gpu", "cpu", "cuda"], "device should be either gpu or cpu."
112
  if device == "gpu":
@@ -114,12 +114,12 @@ class MyPerplexity(evaluate.Metric):
114
  else:
115
  device = "cuda" if torch.cuda.is_available() else "cpu"
116
 
117
- model = AutoModelForCausalLM.from_pretrained(model_id,cache_dir=CACHE_DIR)
118
  model = model.to(device)
119
 
120
  tokenizer = AutoTokenizer.from_pretrained(
121
  model_id,
122
- cache_dir=CACHE_DIR,
123
  use_fast="cnn_dailymail" not in model_id,
124
  )
125
 
 
106
 
107
  def _compute(self, predictions, model_id, batch_size: int = 16, add_start_token: bool = True, device=None):
108
 
109
+ # CACHE_DIR="/gfs/team/nlp/users/rcardena/tools/huggingface/evaluate"
110
  if device is not None:
111
  assert device in ["gpu", "cpu", "cuda"], "device should be either gpu or cpu."
112
  if device == "gpu":
 
114
  else:
115
  device = "cuda" if torch.cuda.is_available() else "cpu"
116
 
117
+ model = AutoModelForCausalLM.from_pretrained(model_id)#,cache_dir=CACHE_DIR)
118
  model = model.to(device)
119
 
120
  tokenizer = AutoTokenizer.from_pretrained(
121
  model_id,
122
+ # cache_dir=CACHE_DIR,
123
  use_fast="cnn_dailymail" not in model_id,
124
  )
125