tweetpie committed on
Commit
3ddf306
·
1 Parent(s): 0023fea

- tweet generator and absa models added

Browse files
Files changed (3) hide show
  1. app.py +47 -16
  2. constants.py +31 -0
  3. requirements.txt +2 -1
app.py CHANGED
@@ -1,11 +1,17 @@
1
  import streamlit as st
2
  from transformers import pipeline
 
 
3
 
4
  # Initialize the model and tokenizer once, to avoid reloading them on each user interaction
5
  @st.cache_resource
6
  def load_model():
7
- classifier = pipeline("text-classification", model="tweetpie/toxic-content-detector", return_all_scores=True)
8
- return classifier
 
 
 
 
9
 
10
  # Set up the title
11
  st.title("Toxic Content Classifier Dashboard")
@@ -13,7 +19,7 @@ st.title("Toxic Content Classifier Dashboard")
13
  # Top-level input for model selection
14
  model_selection = st.selectbox(
15
  "Select an ideology",
16
- options=['alm', 'blm'],
17
  index=0 # Default selection
18
  )
19
 
@@ -36,22 +42,47 @@ with col2:
36
  generate_button = st.button("Generate and classify toxicity")
37
 
38
  # Load the model
39
- classifier = load_model()
40
 
41
  # Process the input text and generate output
42
  if generate_button:
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
43
  with st.spinner('Classifying the toxicity...'):
44
  # Call the model with the input text
45
- input_text = "I love you"
46
- model_output = classifier(input_text)
47
  output = model_output[0]
48
-
49
- # Displaying the input and model's output
50
- st.write(f"Generated Tweet: {input_text}")
51
- for i in range(3):
52
- if output[i]['label'] == 'LABEL_0':
53
- st.write(f"Non-Toxic Content: {output[i]['score']*100:.1f}%")
54
- elif output[i]['label'] == 'LABEL_1':
55
- st.write(f"Toxic Content: {output[i]['score']*100:.1f}%")
56
- else:
57
- continue
 
 
 
 
 
 
 
 
1
  import streamlit as st
2
  from transformers import pipeline
3
+ from constants import tweet_generator_prompt, absa_prompt
4
+
5
 
6
  # Initialize the model and tokenizer once, to avoid reloading them on each user interaction
7
  @st.cache_resource
8
  def load_model():
9
+ classification_pipe = pipeline(
10
+ "text-classification", model="tweetpie/toxic-content-detector", top_k=None)
11
+ absa_pipe = pipeline("text2text-generation", model="tweetpie/stance-aware-absa")
12
+ tweet_generation_pipe = pipeline("text2text-generation", model="tweetpie/stance-directed-tweet-generator")
13
+ return classification_pipe, absa_pipe, tweet_generation_pipe
14
+
15
 
16
  # Set up the title
17
  st.title("Toxic Content Classifier Dashboard")
 
19
  # Top-level input for model selection
20
  model_selection = st.selectbox(
21
  "Select an ideology",
22
+ options=['Left', 'Right'],
23
  index=0 # Default selection
24
  )
25
 
 
42
  generate_button = st.button("Generate and classify toxicity")
43
 
44
  # Load the model
45
+ classifier, absa, generator = load_model()
46
 
47
  # Process the input text and generate output
48
  if generate_button:
49
+ with st.spinner('Generating the tweet...'):
50
+ # Call the model with the aspects inputs
51
+ generated_tweet = generator(
52
+ tweet_generator_prompt.format(
53
+ ideology=model_selection.lower(),
54
+ pro_entities=pro_entities,
55
+ anti_entities=anti_entities,
56
+ neutral_entities=neutral_entities,
57
+ pro_aspects=pro_aspects,
58
+ anti_aspects=anti_aspects,
59
+ neutral_aspects=neutral_aspects
60
+ )
61
+ )
62
+
63
+ with st.spinner('Generating the Stance-Aware ABSA output...'):
64
+ # Call the model with the aspects inputs
65
+ absa_output = absa(absa_prompt.format(generated_tweet=generated_tweet[0]['generated_text']))
66
+ stances = [x.strip() for x in absa_output[0]['generated_text'].split(',')]
67
+
68
  with st.spinner('Classifying the toxicity...'):
69
  # Call the model with the input text
70
+ model_output = classifier(generated_tweet[0]['generated_text'])
 
71
  output = model_output[0]
72
+
73
+ # Displaying the input and model's output
74
+ st.write(f"Generated Tweet: {generated_tweet[0]['generated_text']}")
75
+
76
+ st.write("Stance-Aware ABSA Output:")
77
+ st.write(f"{stances}")
78
+
79
+ st.write("Toxicity Classifier Output:")
80
+ for i in range(3):
81
+ if output[i]['label'] == 'LABEL_0':
82
+ # st.write(f"Non-Toxic Content: {output[i]['score']*100:.1f}%")
83
+ print(f"Non-Toxic Content: {output[i]['score']*100:.1f}%")
84
+ elif output[i]['label'] == 'LABEL_1':
85
+ # st.write(f"Toxic Content: {output[i]['score']*100:.1f}%")
86
+ print(f"Toxic Content: {output[i]['score']*100:.1f}%")
87
+ else:
88
+ continue
constants.py ADDED
@@ -0,0 +1,31 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ tweet_generator_prompt = """
2
+ ideology: '{ideology}', pro entities: {pro_entities}, anti entities: {anti_entities},
3
+ neutral entities: {neutral_entities} based on following aspects pro aspects: {pro_aspects},
4
+ anti aspects: {anti_aspects}, neutral aspects: {neutral_aspects}
5
+ """
6
+
7
+ absa_prompt = """
8
+ Definition: The output will be the aspects (both implicit and explicit) and the aspects sentiment polarity.
9
+ In cases where there are no aspects the output should be noaspectterm:none.
10
+
11
+ Positive example 1-
12
+ input: I charge it at night and skip taking the cord with me because of the good battery life.
13
+ output: battery life:positive,
14
+ Positive example 2-
15
+ input: I even got my teenage son one, because of the features that it offers, like, iChat, Photobooth, garage band and more!.
16
+ output: features:positive, iChat:positive, Photobooth:positive, garage band:positive
17
+ Negative example 1-
18
+ input: Speaking of the browser, it too has problems.
19
+ output: browser:negative
20
+ Negative example 2-
21
+ input: The keyboard is too slick.
22
+ output: keyboard:negative
23
+ Neutral example 1-
24
+ input: I took it back for an Asus and same thing- blue screen which required me to remove the battery to reset.
25
+ output: battery:neutral
26
+ Neutral example 2-
27
+ input: Nightly my computer defrags itself and runs a virus scan.
28
+ output: virus scan:neutral
29
+ Now complete the following example-
30
+ {generated_tweet}
31
+ """
requirements.txt CHANGED
@@ -1,3 +1,4 @@
1
  streamlit==1.31.0
2
  transformers==4.38.2
3
- torch==2.2.1
 
 
1
  streamlit==1.31.0
2
  transformers==4.38.2
3
+ torch==2.2.1
4
+ sentencepiece==0.2.0