hm-auch committed
Commit 86814fc
1 Parent(s): b0d155b

update init app, intro first common file for refactoring

Files changed (2):
  1. app.py +5 -17
  2. hscommon.py +13 -0
app.py CHANGED
@@ -1,4 +1,5 @@
 import transformers
+import hscommon
 
 import gradio as gr
 import tensorflow as tf
@@ -30,27 +31,14 @@ def compile_model(model):
     model.compile(optimizer=optimizer, loss=LOSS, metrics=[METRICS])
     return model
 
-hs_detection_model = tf.keras.models.load_model(MODEL_DIRECTORY, compile=False) #tf.keras.models.load_model('save/kerasmodel/model.h5') #tf.saved_model.load('save/model') #tf.keras.models.load_model('save/model')
+hs_detection_model = tf.keras.models.load_model(MODEL_DIRECTORY, compile=False)
 compile_model(hs_detection_model)
 
-def encode(sentences):
-    return TOKENIZER.batch_encode_plus(
-        sentences,
-        max_length=MAX_SEQUENCE_LENGTH, # set the length of the sequences
-        add_special_tokens=True, # add [CLS] and [SEP] tokens
-        return_attention_mask=True,
-        return_token_type_ids=False, # not needed for this type of ML task
-        pad_to_max_length=True, # add 0 pad tokens to the sequences less than max_length
-        return_tensors='tf'
-    )
-
 def inference(sentence):
-    print(sentence)
-    encoded_sentence = encode([sentence])
-    print(encoded_sentence)
+    encoded_sentence = hscommon.encode([sentence], TOKENIZER, MAX_SEQUENCE_LENGTH)
     predicition = hs_detection_model.predict(encoded_sentence.values())
-    print(predicition)
     return predicition
 
-iface = gr.Interface(fn=inference, inputs="text", outputs="text") #, live=True)
+input_sentence_text = gr.inputs.Textbox(placeholder="Hier den Satz eingeben, der Hassrede enthalten kann.")
+iface = gr.Interface(fn=inference, inputs=input_sentence_text, outputs="text")
 iface.launch()
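
Note: a minimal sketch of the refactored inference path, with the Gradio layer stripped away. The values of MODEL_DIRECTORY, MAX_SEQUENCE_LENGTH and the tokenizer checkpoint below are placeholders and assumptions, not taken from this commit; app.py defines its own constants, and "save/model" only appears in a comment removed above.

import tensorflow as tf
from transformers import AutoTokenizer

import hscommon

MODEL_DIRECTORY = "save/model"   # placeholder path (mentioned only in a removed comment)
MAX_SEQUENCE_LENGTH = 256        # placeholder; the real constant lives in app.py
TOKENIZER = AutoTokenizer.from_pretrained("bert-base-german-cased")  # assumed checkpoint

# Same call chain as inference() in app.py: tokenize, then feed the tensors to the Keras model.
hs_detection_model = tf.keras.models.load_model(MODEL_DIRECTORY, compile=False)
encoded_sentence = hscommon.encode(["Ein Beispielsatz."], TOKENIZER, MAX_SEQUENCE_LENGTH)
prediction = hs_detection_model.predict(encoded_sentence.values())  # raw scores, returned as-is by inference()
print(prediction)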
hscommon.py ADDED
@@ -0,0 +1,13 @@
+
+
+
+def encode(sentences, tokenizer, sequence_length):
+    return tokenizer.batch_encode_plus(
+        sentences,
+        max_length=sequence_length, # set the length of the sequences
+        add_special_tokens=True, # add [CLS] and [SEP] tokens
+        return_attention_mask=True,
+        return_token_type_ids=False, # not needed for this type of ML task
+        pad_to_max_length=True, # add 0 pad tokens to the sequences less than max_length
+        return_tensors='tf'
+    )
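
Note: a hedged usage sketch for the new hscommon.encode helper; the checkpoint name and sequence length are assumptions, not taken from this commit. Newer transformers releases deprecate the pad_to_max_length argument in favor of padding='max_length', so this helper may emit a FutureWarning there.

from transformers import AutoTokenizer

import hscommon

tokenizer = AutoTokenizer.from_pretrained("bert-base-german-cased")  # assumed checkpoint
batch = hscommon.encode(["Ein Beispielsatz."], tokenizer, 256)        # assumed max length

# batch is a BatchEncoding holding TensorFlow tensors padded to the given length;
# token_type_ids are omitted because return_token_type_ids=False.
print(batch["input_ids"].shape)       # (1, 256)
print(batch["attention_mask"].shape)  # (1, 256)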