Sheng Lei committed
Commit 99b5f7f
1 Parent(s): b6a1cff

Use right bert

Files changed (2)
  1. app.py +1 -2
  2. restrictedItems/train.py +1 -2
app.py CHANGED
@@ -10,9 +10,8 @@ def greet_json():
 from transformers import BertTokenizer, BertForSequenceClassification
 import torch
 
-from bertopic import BERTopic
 
-model = BERTopic.load("sleiyer/restricted_item_detector")
+model = BertForSequenceClassification.load("sleiyer/restricted_item_detector")
 # Load the trained model and tokenizer
 tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
 
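Note: Transformers' BertForSequenceClassification has no load() classmethod; the standard loader is from_pretrained(). Below is a minimal sketch of the inference path app.py appears to aim for, assuming the Hub repo sleiyer/restricted_item_detector holds a fine-tuned sequence-classification checkpoint and that class index 1 means "restricted".

import torch
from transformers import BertTokenizer, BertForSequenceClassification

# Load the fine-tuned classifier from the Hub; from_pretrained is the
# standard Transformers loader (there is no .load() classmethod).
model = BertForSequenceClassification.from_pretrained("sleiyer/restricted_item_detector")
tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
model.eval()

def is_restricted(text: str) -> bool:
    # Tokenize the input and run a forward pass without tracking gradients.
    inputs = tokenizer(text, return_tensors="pt", truncation=True, padding=True)
    with torch.no_grad():
        logits = model(**inputs).logits
    # Assumption: class index 1 corresponds to "restricted item".
    return logits.argmax(dim=-1).item() == 1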
restrictedItems/train.py CHANGED
@@ -74,7 +74,6 @@ dataset = restricted_dataset + normal_dataset
 
 # Split dataset
 train_texts, val_texts, train_labels, val_labels = train_test_split([item[0] for item in dataset], [item[1] for item in dataset], test_size=0.2)
-import pdb; pdb.set_trace()
 
 # Load pre-trained BERT tokenizer
 tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
@@ -129,4 +128,4 @@ trainer.train()
 # Evaluate model
 trainer.evaluate()
 
-model.save_pretrained('trained_model')
+model.push_to_hub("sleiyer/restricted_item_detector")
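For context, push_to_hub() uploads the fine-tuned weights directly to the Hub instead of writing them to a local directory as save_pretrained('trained_model') did. A minimal sketch of the usual end-of-training pattern, continuing from the script's model and tokenizer and assuming the user is already authenticated (e.g. via huggingface-cli login); pushing the tokenizer is not part of this commit and is shown only for completeness.

# Upload the trained weights to the Hub repo that app.py loads from.
model.push_to_hub("sleiyer/restricted_item_detector")

# Also push the tokenizer so the repo can later be loaded end-to-end
# with from_pretrained (assumption; not included in this commit).
tokenizer.push_to_hub("sleiyer/restricted_item_detector")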