|
import time |
|
|
|
import pandas as pd |
|
import streamlit as st |
|
from transformers import pipeline |
|
from constants import tweet_generator_prompt, absa_prompt |
|
|
|
|
|
# Use the full browser width so the multi-column layout has room to breathe.
st.set_page_config(layout="wide")

st.title("Towards a Programmable Humanizing AI through Scalable Stance-Directed Architecture Dashboard")

# Top row: ideology picker (width 2), entity inputs (width 2), spacer (width 4).
col1, col2, _ = st.columns([2, 2, 4])

with col1:
    # Which ideology's generator to use; defaults to the first option.
    ideology_choices = ['Left', 'Right']
    model_selection = st.selectbox(
        "Select an ideology",
        options=ideology_choices,
        index=0,
    )
|
|
|
|
|
def _comma_list_input(label, help_text):
    """Render a text input whose value is a comma-separated list."""
    return st.text_input(label, help=help_text)


with col2:
    st.header("Entities")
    # Entities grouped by the stance the generated tweet should take toward them.
    pro_entities = _comma_list_input("Pro Entities", "Enter pro entities separated by commas")
    anti_entities = _comma_list_input("Anti Entities", "Enter anti entities separated by commas")
    neutral_entities = _comma_list_input("Neutral Entities", "Enter neutral entities separated by commas")

# Second row: narrow aspect-input column next to a wide results column.
col3, col4 = st.columns([1, 3])

with col3:
    st.header("Aspects")
    # Aspects grouped by desired stance, mirroring the entity inputs above.
    pro_aspects = _comma_list_input("Pro Aspects", "Enter pro aspects separated by commas")
    anti_aspects = _comma_list_input("Anti Aspects", "Enter anti aspects separated by commas")
    neutral_aspects = _comma_list_input("Neutral Aspects", "Enter neutral aspects separated by commas")

# Kicks off the generation + classification flow rendered in col4 below.
generate_button = st.button("Generate tweet and classify toxicity")
|
|
|
|
|
|
|
|
|
|
|
if generate_button:
    with col4:
        # NOTE(review): all three model stages are mocked with sleeps and
        # canned outputs; replace with real `pipeline(...)` calls (and the
        # prompts imported from `constants`) when the models are wired up.
        with st.spinner('Generating the tweet...'):
            time.sleep(5)
            generated_tweet = [{
                "generated_text": "the agricultural sector is the single biggest recipient of migrants workers rights groups argue . nearly 90 % of those who come to the us are denied employment due to discriminatory employment laws and safety standards ."}]

        st.write(f"Generated Tweet: {generated_tweet[0]['generated_text']}")

        with st.spinner('Generating the Stance-Aware ABSA output...'):
            time.sleep(3)
            absa_output = [{'generated_text': 'migrants:positive, rights:positive, laws:positive, safety:positive'}]

        # Parse "aspect:sentiment, aspect:sentiment, ..." into one row per
        # aspect. Split on the FIRST ':' only, so a sentiment containing a
        # colon is kept intact (the previous split(':')[1] truncated it), and
        # skip entries without a colon instead of raising IndexError.
        stances = []
        for pair in absa_output[0]['generated_text'].split(','):
            aspect, sep, sentiment = pair.strip().partition(':')
            if sep:
                stances.append({'Aspect': aspect, 'Sentiment': sentiment})
        stances_df = pd.DataFrame(stances)
        stances_df.index = stances_df.index + 1  # 1-based row numbers for display
        st.write("Stance-Aware ABSA Output:")
        st.table(stances_df)

        with st.spinner('Classifying the toxicity...'):
            time.sleep(2)
            model_output = [[{'label': 'LABEL_0', 'score': 0.9999998807907104},
                             {'label': 'LABEL_1', 'score': 1.1919785395889282e-07},
                             {'label': 'LABEL_2', 'score': 1.1919785395889282e-07}]]
            output = model_output[0]

        st.write("Toxicity Classifier Output:")
        # Map classifier labels to display names; LABEL_1 (presumably the
        # neutral class — confirm against the model card) is deliberately
        # not shown, matching the original behavior.
        display_names = {'LABEL_0': 'Non-Toxic Content', 'LABEL_2': 'Toxic Content'}
        for entry in output:
            name = display_names.get(entry['label'])
            if name is not None:
                st.write(f"{name}: {entry['score'] * 100:.1f}%")
|
|