# pages/2_new_Demo_1.py
import streamlit as st
import pandas as pd
from datasets import load_dataset, Dataset
from random import sample
from utils.metric import Regard
from utils.model import gpt2
import matplotlib.pyplot as plt
import os
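# Demo pipeline: sample BOLD prompts per gender category, generate
# continuations with GPT-2, display the samples, score the generated vs.
# Wikipedia continuations with the Regard metric, and plot the results.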
# Set up the Streamlit interface
st.title('Gender Bias Analysis in Text Generation')
def check_password():
    def password_entered():
        # The demo password is supplied through the PASSWORD environment variable.
        if password_input == os.getenv('PASSWORD'):
            st.session_state['password_correct'] = True
        else:
            st.error("Incorrect password, please try again.")

    password_input = st.text_input("Enter Password:", type="password")
    submit_button = st.button("Submit", on_click=password_entered)
    if submit_button and not st.session_state.get('password_correct', False):
        st.error("Please enter a valid password to access the demo.")
if not st.session_state.get('password_correct', False):
    check_password()
else:
    st.sidebar.success("Password Verified. Proceed with the demo.")
    if 'data_size' not in st.session_state:
        st.session_state['data_size'] = 10

    if 'bold' not in st.session_state:
        # Expand BOLD so that each (prompt, wikipedia) pair becomes its own row;
        # accumulating dicts in a list avoids the deprecated DataFrame._append.
        bold_raw = pd.DataFrame(load_dataset("AlexaAI/bold", split="train"))
        bold_rows = []
        for _, row in bold_raw.iterrows():
            for bold_prompt, bold_wikipedia in zip(row['prompts'], row['wikipedia']):
                bold_rows.append({'domain': row['domain'], 'name': row['name'],
                                  'category': row['category'], 'prompts': bold_prompt,
                                  'wikipedia': bold_wikipedia})
        st.session_state['bold'] = Dataset.from_pandas(pd.DataFrame(bold_rows))
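    # Each expanded row has the shape (values illustrative, not from a real run):
    #     {'domain': 'gender', 'name': 'Jacob_Zachar', 'category': 'American_actors',
    #      'prompts': 'Jacob Zachar is an American actor whose ...',
    #      'wikipedia': 'Jacob Zachar is an American actor whose roles ...'}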
    if 'female_bold' not in st.session_state:
        st.session_state['female_bold'] = []
    if 'male_bold' not in st.session_state:
        st.session_state['male_bold'] = []
    st.subheader('Step 1: Set Data Size')
    data_size = st.slider('Select number of samples per category:', min_value=1, max_value=50,
                          value=st.session_state['data_size'])
    st.session_state['data_size'] = data_size

    if st.button('Show Data'):
        # Draw the same number of prompts from each gender category.
        st.session_state['female_bold'] = sample(
            [p for p in st.session_state['bold'] if p['category'] == 'American_actresses'], data_size)
        st.session_state['male_bold'] = sample(
            [p for p in st.session_state['bold'] if p['category'] == 'American_actors'], data_size)
        st.write(f'Sampled {data_size} American actresses and {data_size} American actors.')
        st.write('**Female Samples:**', pd.DataFrame(st.session_state['female_bold']))
        st.write('**Male Samples:**', pd.DataFrame(st.session_state['male_bold']))
    if st.session_state['female_bold'] and st.session_state['male_bold']:
        st.subheader('Step 2: Generate Text')
        if st.button('Generate Text'):
            GPT2 = gpt2()
            st.session_state['male_prompts'] = [p['prompts'] for p in st.session_state['male_bold']]
            st.session_state['female_prompts'] = [p['prompts'] for p in st.session_state['female_bold']]
            # Strip each prompt from its Wikipedia sentence so only the reference
            # continuation remains.
            st.session_state['male_wiki_continuation'] = [
                p['wikipedia'].replace(p['prompts'], '') for p in st.session_state['male_bold']]
            st.session_state['female_wiki_continuation'] = [
                p['wikipedia'].replace(p['prompts'], '') for p in st.session_state['female_bold']]

            progress_bar = st.progress(0)
            st.write('Generating text for male prompts...')
            male_generation = GPT2.text_generation(st.session_state['male_prompts'], pad_token_id=50256,
                                                   max_length=50, do_sample=False, truncation=True)
            st.session_state['male_continuations'] = [
                gen[0]['generated_text'].replace(prompt, '')
                for gen, prompt in zip(male_generation, st.session_state['male_prompts'])]
            progress_bar.progress(50)

            st.write('Generating text for female prompts...')
            female_generation = GPT2.text_generation(st.session_state['female_prompts'], pad_token_id=50256,
                                                     max_length=50, do_sample=False, truncation=True)
            st.session_state['female_continuations'] = [
                gen[0]['generated_text'].replace(prompt, '')
                for gen, prompt in zip(female_generation, st.session_state['female_prompts'])]
            progress_bar.progress(100)
            st.write('Text generation completed.')
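            # do_sample=False means deterministic greedy decoding, so repeated runs
            # yield identical continuations. If the wrapper forwards generation
            # kwargs (an assumption about utils.model.gpt2), a sampled variant
            # might look like:
            #     GPT2.text_generation(prompts, pad_token_id=50256, max_length=50,
            #                          do_sample=True, top_p=0.9, temperature=0.7)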
    if st.session_state.get('male_continuations') and st.session_state.get('female_continuations'):
        st.subheader('Step 3: Sample Generated Texts')
        st.write("Male Data Samples:")
        male_samples_df = pd.DataFrame({
            'Male Prompt': st.session_state['male_prompts'],
            'Male Continuation': st.session_state['male_continuations'],
            'Male Wiki Continuation': st.session_state['male_wiki_continuation'],
        })
        st.write(male_samples_df)

        st.write("Female Data Samples:")
        female_samples_df = pd.DataFrame({
            'Female Prompt': st.session_state['female_prompts'],
            'Female Continuation': st.session_state['female_continuations'],
            'Female Wiki Continuation': st.session_state['female_wiki_continuation'],
        })
        st.write(female_samples_df)
        if st.button('Evaluate'):
            st.subheader('Step 4: Regard Results')
            regard = Regard("inner_compare")
            st.write('Computing regard results to compare male and female continuations...')
            with st.spinner('Computing regard results...'):
                regard_male_results = regard.compute(data=st.session_state['male_continuations'],
                                                     references=st.session_state['male_wiki_continuation'])
                st.write('**Male Regard Results:**')
                st.json(regard_male_results)
                st.session_state['rmr'] = regard_male_results

                regard_female_results = regard.compute(data=st.session_state['female_continuations'],
                                                       references=st.session_state['female_wiki_continuation'])
                st.write('**Female Regard Results:**')
                st.json(regard_female_results)
                st.session_state['rfr'] = regard_female_results
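            # The plotting step below assumes each result dict exposes
            # 'no_ref_diff_mean' (mean regard of the GPT-2 continuations) and
            # 'ref_diff_mean' (mean GPT-2-minus-Wikipedia difference), each with
            # 'positive' and 'negative' entries, e.g. (numbers illustrative):
            #     {'no_ref_diff_mean': {'positive': 0.42, 'negative': 0.18},
            #      'ref_diff_mean': {'positive': 0.05, 'negative': -0.02}}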
        if st.button('Plot'):
            st.subheader('Step 5: Regard Results Plotting')
            if 'rmr' not in st.session_state or 'rfr' not in st.session_state:
                st.error('Please run Step 4 (Evaluate) before plotting.')
                st.stop()
            categories = ['GPT2', 'Wiki']
            # Male regard: GPT-2 scores, with Wikipedia scores recovered by
            # subtracting the GPT-2-minus-Wiki differences; 'other' is the
            # remaining (neutral) probability mass.
            mp_gpt = st.session_state['rmr']['no_ref_diff_mean']['positive']
            mn_gpt = st.session_state['rmr']['no_ref_diff_mean']['negative']
            mo_gpt = 1 - (mp_gpt + mn_gpt)
            mp_wiki = mp_gpt - st.session_state['rmr']['ref_diff_mean']['positive']
            mn_wiki = mn_gpt - st.session_state['rmr']['ref_diff_mean']['negative']
            mo_wiki = 1 - (mp_wiki + mn_wiki)
            # Female regard, same derivation.
            fp_gpt = st.session_state['rfr']['no_ref_diff_mean']['positive']
            fn_gpt = st.session_state['rfr']['no_ref_diff_mean']['negative']
            fo_gpt = 1 - (fp_gpt + fn_gpt)
            fp_wiki = fp_gpt - st.session_state['rfr']['ref_diff_mean']['positive']
            fn_wiki = fn_gpt - st.session_state['rfr']['ref_diff_mean']['negative']
            fo_wiki = 1 - (fp_wiki + fn_wiki)

            positive_m = [mp_gpt, mp_wiki]
            other_m = [mo_gpt, mo_wiki]
            negative_m = [mn_gpt, mn_wiki]
            positive_f = [fp_gpt, fp_wiki]
            other_f = [fo_gpt, fo_wiki]
            negative_f = [fn_gpt, fn_wiki]
            # Plotting: stacked bars of negative / other / positive regard shares.
            fig_a, ax_a = plt.subplots()
            ax_a.bar(categories, negative_m, label='Negative', color='blue')
            ax_a.bar(categories, other_m, bottom=negative_m, label='Other', color='orange')
            ax_a.bar(categories, positive_m,
                     bottom=[negative_m[i] + other_m[i] for i in range(len(negative_m))],
                     label='Positive', color='green')
            ax_a.set_xlabel('Categories')
            ax_a.set_ylabel('Proportion')
            ax_a.set_title('GPT vs Wiki on male regard')
            ax_a.legend()
            st.pyplot(fig_a)

            fig_b, ax_b = plt.subplots()
            ax_b.bar(categories, negative_f, label='Negative', color='blue')
            ax_b.bar(categories, other_f, bottom=negative_f, label='Other', color='orange')
            ax_b.bar(categories, positive_f,
                     bottom=[negative_f[i] + other_f[i] for i in range(len(negative_f))],
                     label='Positive', color='green')
            ax_b.set_xlabel('Categories')
            ax_b.set_ylabel('Proportion')
            ax_b.set_title('GPT vs Wiki on female regard')
            ax_b.legend()
            st.pyplot(fig_b)
            # Summary bars: positive-minus-negative regard, absolute and relative to Wiki.
            m_increase = mp_gpt - mn_gpt
            m_relative_increase = (mp_gpt - mp_wiki) - (mn_gpt - mn_wiki)
            f_increase = fp_gpt - fn_gpt
            f_relative_increase = (fp_gpt - fp_wiki) - (fn_gpt - fn_wiki)
            absolute_difference = [m_increase, f_increase]
            relative_difference = [m_relative_increase, f_relative_increase]
            new_categories = ['Male', 'Female']

            fig_c, ax_c = plt.subplots()
            ax_c.bar(new_categories, absolute_difference, label='Positive - Negative', color='#40E0D0')
            ax_c.set_xlabel('Categories')
            ax_c.set_ylabel('Proportion')
            ax_c.set_title('Difference of positive and negative: Male vs Female')
            ax_c.legend()
            st.pyplot(fig_c)

            fig_d, ax_d = plt.subplots()
            ax_d.bar(new_categories, relative_difference, label='Positive - Negative', color='#40E0D0')
            ax_d.set_xlabel('Categories')
            ax_d.set_ylabel('Proportion')
            ax_d.set_title('Difference of positive and negative (relative to Wiki): Male vs Female')
            ax_d.legend()
            st.pyplot(fig_d)
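            # Reading the last two charts: a larger positive bar for one gender
            # means GPT-2's continuations skew more positive than negative for
            # that group; the relative chart subtracts the same quantity computed
            # on the Wikipedia references, isolating the model's contribution.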