mlwong committed
Commit 1e2fdae
Parent: 7172d33

Update versions
app.py CHANGED
@@ -1,6 +1,7 @@
 from logging import PlaceHolder
 import gradio as gr
 import os, sys
+import spaces
 from npc_bert_models.gradio_demo import *
 from npc_bert_models.mlm_module import NpcBertMLM
 from npc_bert_models.cls_module import NpcBertCLS
@@ -9,7 +10,7 @@ from npc_bert_models.app_logger import get_logger
 import json
 
 class main_window():
-    logger = get_logger('main')
+    logger = get_logger('main', log_level='debug')
     def __init__(self):
         self.interface = None
         self.examples = json.load(open("examples.json", 'r'))
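Besides the logger default, the only functional change here is the new `import spaces`, which pulls in Hugging Face's ZeroGPU helper package (already listed in requirements.txt). The import alone has no visible effect; on a ZeroGPU Space, GPU work is normally wrapped with the `@spaces.GPU` decorator. A minimal sketch of that usage, with an illustrative checkpoint rather than this repo's models:

    import spaces
    from transformers import pipeline

    # Illustrative checkpoint; the Space loads its own fine-tuned models.
    pipe = pipeline("text-classification",
                    model="distilbert-base-uncased-finetuned-sst-2-english")

    @spaces.GPU  # ZeroGPU allocates a GPU only for the duration of each call
    def predict(text: str):
        return pipe(text)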
npc_bert_models/cls_module.py CHANGED
@@ -50,6 +50,7 @@ class NpcBertCLS():
         self.tokenizer = AutoTokenizer.from_pretrained(self.pretrained_model)
         try:
             self.pipeline = hf_pipeline("text-classification", model=self.model, tokenizer=self.tokenizer, device='cuda')
+            self.pipeline.model.to('cuda')
         except Exception as e:
             self.pipeline = hf_pipeline("text-classification", model=self.model, tokenizer=self.tokenizer, device='cpu')
             self.logger.warning("No GPU!")
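This pattern builds the pipeline on CUDA inside a try block and falls back to CPU when construction fails; the added `self.pipeline.model.to('cuda')` then forces the weights onto the GPU, though `hf_pipeline(..., device='cuda')` already places the model there, so the extra call is redundant insurance. An equivalent, more explicit sketch that probes for a GPU instead of relying on the exception (checkpoint name is illustrative):

    import torch
    from transformers import pipeline as hf_pipeline

    # Probe for CUDA up front rather than catching a construction failure.
    device = "cuda" if torch.cuda.is_available() else "cpu"
    clf = hf_pipeline("text-classification",
                      model="distilbert-base-uncased-finetuned-sst-2-english",
                      device=device)
    print(clf("Biopsy of the nasopharynx showed no malignancy.")[0])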
npc_bert_models/mlm_module.py CHANGED
@@ -49,6 +49,7 @@ class NpcBertMLM():
         self.tokenizer = AutoTokenizer.from_pretrained(self.pretrained_model)
         try:
             self.pipeline = hf_pipeline("fill-mask", model=self.model, tokenizer=self.tokenizer, device='cuda')
+            self.pipeline.model.to('cuda')
         except Exception as e:
             self.pipeline = hf_pipeline("fill-mask", model=self.model, tokenizer=self.tokenizer, device='cpu')
             self.logger.warning("No GPU")
@@ -75,6 +76,7 @@ class NpcBertMLM():
         if self.pipeline is None:
             msg = "Model was not initialized, have you run load()?"
             raise BrokenPipeError(msg)
+
         pipe_out = self.pipeline(*args)
         # Just use the first output
         if not isinstance(pipe_out[0], dict):
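For context, the isinstance check in the second hunk exists because a fill-mask pipeline returns a flat list of candidate dicts when the input contains one mask token, but a list of lists when it contains several. A small sketch of that output shape (generic checkpoint, not this repo's model):

    from transformers import pipeline

    fill = pipeline("fill-mask", model="bert-base-uncased")
    out = fill("The biopsy showed no [MASK] cells.")
    # One mask -> list of dicts with 'token_str', 'score', 'sequence', ...
    print(out[0]["token_str"], round(out[0]["score"], 3))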
npc_bert_models/summary_module.py CHANGED
@@ -43,6 +43,7 @@ class NpcBertGPT2():
                                         early_stopping=True,
                                         no_repeat_ngram_size=5,
                                         max_new_tokens=60)
+            self.pipeline.model.to('cuda')
         except Exception as e:
             self.pipeline = hf_pipeline("text2text-generation",
                                         model=self.model,
@@ -80,7 +81,7 @@ class NpcBertGPT2():
             raise BrokenPipeError(msg)
 
         self.logger.info(f"Called with arguments {args = }")
-
+        self.logger.info(f"Model: {self.pipeline.model}")
         pipe_out, = self.pipeline(*args)
         pipe_out = pipe_out['generated_text']
         self.logger.info(f"Generated text: {pipe_out}")
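The call path below the new logging line unpacks with `pipe_out, = self.pipeline(*args)`, which assumes the text2text-generation pipeline returns exactly one sequence (the default num_return_sequences). A minimal sketch of that contract with a stock checkpoint:

    from transformers import pipeline

    gen = pipeline("text2text-generation", model="t5-small")
    # Single-element unpacking: valid only while one sequence is returned.
    out, = gen("summarize: The patient presented with a nasopharyngeal mass.")
    print(out["generated_text"])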
requirements.txt CHANGED
@@ -1,8 +1,7 @@
-torch >= 1.12.1
+torch == 2.2.2
 scikit-learn >= 1.4.0
 pandas >= 2.1.4
-transformers >= 4.37.2
-numpy >= 1.26
+transformers >= 4.37.2, < 4.50
 gradio >= 4.18, < 4.50
 scipy >= 1.12
 spaces
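With torch pinned to an exact version and transformers capped, a quick sanity check that a rebuilt environment resolved as intended (expectations follow this requirements.txt, nothing else):

    # Run after `pip install -r requirements.txt`.
    import torch, transformers

    print(torch.__version__)         # expect 2.2.2
    print(transformers.__version__)  # expect >= 4.37.2 and < 4.50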