tweetpie committed
Commit 1a0fbdb · 1 Parent(s): 9235cae

- model timing added

Files changed (1): app.py (+25 −14)
app.py CHANGED
@@ -1,3 +1,5 @@
+import time
+
 import streamlit as st
 from transformers import pipeline
 from constants import tweet_generator_prompt, absa_prompt
@@ -6,10 +8,16 @@ from constants import tweet_generator_prompt, absa_prompt
 # Initialize the model and tokenizer once, to avoid reloading them on each user interaction
 @st.cache_resource
 def load_model():
+    start = time.time()
     classification_pipe = pipeline(
         "text-classification", model="tweetpie/toxic-content-detector", top_k=None)
+    print(f"Time to load the classification model: {time.time() - start:.2f}s")
+    start = time.time()
     absa_pipe = pipeline("text2text-generation", model="tweetpie/stance-aware-absa")
+    print(f"Time to load the absa model: {time.time() - start:.2f}s")
+    start = time.time()
     tweet_generation_pipe = pipeline("text2text-generation", model="tweetpie/stance-directed-tweet-generator")
+    print(f"Time to load the tweet generation model: {time.time() - start:.2f}s")
     return classification_pipe, absa_pipe, tweet_generation_pipe
 
 
@@ -48,17 +56,19 @@ classifier, absa, generator = load_model()
 if generate_button:
     with st.spinner('Generating the tweet...'):
         # Call the model with the aspects inputs
-        generated_tweet = generator(
-            tweet_generator_prompt.format(
-                ideology=model_selection.lower(),
-                pro_entities=pro_entities,
-                anti_entities=anti_entities,
-                neutral_entities=neutral_entities,
-                pro_aspects=pro_aspects,
-                anti_aspects=anti_aspects,
-                neutral_aspects=neutral_aspects
-            )
+        prompt = tweet_generator_prompt.format(
+            ideology=model_selection.lower(),
+            pro_entities=pro_entities,
+            anti_entities=anti_entities,
+            neutral_entities=neutral_entities,
+            pro_aspects=pro_aspects,
+            anti_aspects=anti_aspects,
+            neutral_aspects=neutral_aspects
         )
+        print("Prompt: ", prompt)
+        start = time.time()
+        generated_tweet = generator(prompt)
+        print(f"Time to generate the tweet: {time.time() - start:.2f}s")
 
         # Displaying the input and model's output
         st.write(f"Generated Tweet: {generated_tweet[0]['generated_text']}")
@@ -66,6 +76,7 @@ if generate_button:
     with st.spinner('Generating the Stance-Aware ABSA output...'):
        # Call the model with the aspects inputs
         absa_output = absa(absa_prompt.format(generated_tweet=generated_tweet[0]['generated_text']))
+        print("ABSA Output: ", absa_output)
         stances = [x.strip() for x in absa_output[0]['generated_text'].split(',')]
 
         st.write("Stance-Aware ABSA Output:")
@@ -79,10 +90,10 @@ if generate_button:
         st.write("Toxicity Classifier Output:")
         for i in range(3):
             if output[i]['label'] == 'LABEL_0':
-                # st.write(f"Non-Toxic Content: {output[i]['score']*100:.1f}%")
-                print(f"Non-Toxic Content: {output[i]['score']*100:.1f}%")
+                st.write(f"Non-Toxic Content: {output[i]['score']*100:.1f}%")
+                # print(f"Non-Toxic Content: {output[i]['score']*100:.1f}%")
             elif output[i]['label'] == 'LABEL_1':
-                # st.write(f"Toxic Content: {output[i]['score']*100:.1f}%")
-                print(f"Toxic Content: {output[i]['score']*100:.1f}%")
+                st.write(f"Toxic Content: {output[i]['score']*100:.1f}%")
+                # print(f"Toxic Content: {output[i]['score']*100:.1f}%")
             else:
                 continue
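
As an aside (not part of this commit): the commit repeats the same start/print pattern around each pipeline load and around generation. That pattern could be factored into a small helper; below is a minimal sketch, where the `timed` context manager and its labels are illustrative names, not code from this repository.

import time
from contextlib import contextmanager

@contextmanager
def timed(label):
    # Print elapsed wall-clock time for the enclosed block,
    # mirroring the start/print pairs added in this commit.
    start = time.time()
    try:
        yield
    finally:
        print(f"Time to {label}: {time.time() - start:.2f}s")

# Hypothetical usage inside load_model():
# with timed("load the absa model"):
#     absa_pipe = pipeline("text2text-generation", model="tweetpie/stance-aware-absa")

Note that these timings are printed to the Space's console/logs rather than rendered in the Streamlit UI, and since load_model is wrapped in @st.cache_resource, the model-loading timings should appear only on the first uncached run. For measuring elapsed intervals, time.perf_counter() is also generally preferred over time.time().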