mlwong committed on
Commit
7172d33
1 Parent(s): 4d6ca0c

Compatibility with ZeroGPU

Browse files
npc_bert_models/cls_module.py CHANGED
@@ -1,7 +1,9 @@
 
1
  from transformers import AutoTokenizer, AutoModelForSequenceClassification
2
  from transformers import pipeline as hf_pipeline
3
  from pathlib import Path
4
  from typing import Any, Dict
 
5
  from .app_logger import get_logger
6
 
7
  class NpcBertCLS():
@@ -46,8 +48,14 @@ class NpcBertCLS():
46
 
47
  self.model = AutoModelForSequenceClassification.from_pretrained(self.pretrained_model)
48
  self.tokenizer = AutoTokenizer.from_pretrained(self.pretrained_model)
49
- self.pipeline = hf_pipeline("text-classification", model=self.model, tokenizer=self.tokenizer, device='cpu')
 
 
 
 
 
50
 
 
51
  def __call__(self, *args: Any) -> Any:
52
  """Performs classification on the given reports.
53
 
 
1
+ import spaces.zero
2
  from transformers import AutoTokenizer, AutoModelForSequenceClassification
3
  from transformers import pipeline as hf_pipeline
4
  from pathlib import Path
5
  from typing import Any, Dict
6
+ import spaces
7
  from .app_logger import get_logger
8
 
9
  class NpcBertCLS():
 
48
 
49
  self.model = AutoModelForSequenceClassification.from_pretrained(self.pretrained_model)
50
  self.tokenizer = AutoTokenizer.from_pretrained(self.pretrained_model)
51
+ try:
52
+ self.pipeline = hf_pipeline("text-classification", model=self.model, tokenizer=self.tokenizer, device='cuda')
53
+ except Exception as e:
54
+ self.pipeline = hf_pipeline("text-classification", model=self.model, tokenizer=self.tokenizer, device='cpu')
55
+ self.logger.warning("No GPU!")
56
+ self.logger.exception(e)
57
 
58
+ @spaces.GPU
59
  def __call__(self, *args: Any) -> Any:
60
  """Performs classification on the given reports.
61
 
npc_bert_models/mlm_module.py CHANGED
@@ -2,6 +2,7 @@ from transformers import AutoTokenizer, AutoModelForMaskedLM
2
  from transformers import pipeline as hf_pipeline
3
  from pathlib import Path
4
  from .app_logger import get_logger
 
5
 
6
  class NpcBertMLM():
7
  r"""A class for performing masked language modeling with BERT.
@@ -46,8 +47,14 @@ class NpcBertMLM():
46
 
47
  self.model = AutoModelForMaskedLM.from_pretrained(self.pretrained_model)
48
  self.tokenizer = AutoTokenizer.from_pretrained(self.pretrained_model)
49
- self.pipeline = hf_pipeline("fill-mask", model=self.model, tokenizer=self.tokenizer, device='cpu')
 
 
 
 
 
50
 
 
51
  def __call__(self, *args):
52
  """Performs masked language modeling prediction.
53
 
 
2
  from transformers import pipeline as hf_pipeline
3
  from pathlib import Path
4
  from .app_logger import get_logger
5
+ import spaces
6
 
7
  class NpcBertMLM():
8
  r"""A class for performing masked language modeling with BERT.
 
47
 
48
  self.model = AutoModelForMaskedLM.from_pretrained(self.pretrained_model)
49
  self.tokenizer = AutoTokenizer.from_pretrained(self.pretrained_model)
50
+ try:
51
+ self.pipeline = hf_pipeline("fill-mask", model=self.model, tokenizer=self.tokenizer, device='cuda')
52
+ except Exception as e:
53
+ self.pipeline = hf_pipeline("fill-mask", model=self.model, tokenizer=self.tokenizer, device='cpu')
54
+ self.logger.warning("No GPU")
55
+ self.logger.exception(e)
56
 
57
+ @spaces.GPU
58
  def __call__(self, *args):
59
  """Performs masked language modeling prediction.
60
 
npc_bert_models/summary_module.py CHANGED
@@ -1,6 +1,7 @@
1
  from transformers import AutoTokenizer, EncoderDecoderModel
2
  from transformers import pipeline as hf_pipeline
3
  from pathlib import Path
 
4
  import re
5
  from .app_logger import get_logger
6
 
@@ -29,7 +30,21 @@ class NpcBertGPT2():
29
 
30
  self.model = EncoderDecoderModel.from_pretrained(self.pretrained_model)
31
  self.tokenizer = AutoTokenizer.from_pretrained(self.pretrained_model)
32
- self.pipeline = hf_pipeline("text2text-generation",
 
 
 
 
 
 
 
 
 
 
 
 
 
 
33
  model=self.model,
34
  tokenizer=self.tokenizer,
35
  device='cpu',
@@ -40,7 +55,10 @@ class NpcBertGPT2():
40
  early_stopping=True,
41
  no_repeat_ngram_size=5,
42
  max_new_tokens=60)
 
 
43
 
 
44
  def __call__(self, *args):
45
  """Performs masked language modeling prediction.
46
 
 
1
  from transformers import AutoTokenizer, EncoderDecoderModel
2
  from transformers import pipeline as hf_pipeline
3
  from pathlib import Path
4
+ import spaces
5
  import re
6
  from .app_logger import get_logger
7
 
 
30
 
31
  self.model = EncoderDecoderModel.from_pretrained(self.pretrained_model)
32
  self.tokenizer = AutoTokenizer.from_pretrained(self.pretrained_model)
33
+
34
+ try:
35
+ self.pipeline = hf_pipeline("text2text-generation",
36
+ model=self.model,
37
+ tokenizer=self.tokenizer,
38
+ device='cuda',
39
+ num_beams=4,
40
+ do_sample=True,
41
+ top_k = 5,
42
+ temperature=.95,
43
+ early_stopping=True,
44
+ no_repeat_ngram_size=5,
45
+ max_new_tokens=60)
46
+ except Exception as e:
47
+ self.pipeline = hf_pipeline("text2text-generation",
48
  model=self.model,
49
  tokenizer=self.tokenizer,
50
  device='cpu',
 
55
  early_stopping=True,
56
  no_repeat_ngram_size=5,
57
  max_new_tokens=60)
58
+ self.logger.warning("No GPU!")
59
+ self.logger.exception(e)
60
 
61
+ @spaces.GPU
62
  def __call__(self, *args):
63
  """Performs masked language modeling prediction.
64
 
requirements.txt CHANGED
@@ -4,4 +4,6 @@ pandas >= 2.1.4
4
  transformers >= 4.37.2
5
  numpy >= 1.26
6
  gradio >= 4.18, < 4.50
7
- scipy >= 1.12
 
 
 
4
  transformers >= 4.37.2
5
  numpy >= 1.26
6
  gradio >= 4.18, < 4.50
7
+ scipy >= 1.12
8
+ spaces
9
+ # NOTE(review): `python` is not a pip-installable package — a line like
+ # `python == 3.10.13` makes `pip install -r requirements.txt` fail to resolve.
+ # Pin the interpreter via the Space config (e.g. `python_version: 3.10.13`
+ # in README.md metadata) or a runtime.txt instead.