|
"""Streamlit landing page for the gender-bias-in-text-generation demo.

Renders the title, an introductory description, and sidebar instructions
for an app that compares regard scores of GPT-2 continuations generated
from male- vs. female-associated prompts.
"""

import streamlit as st

# Must be the first Streamlit call in the script; sets the browser tab
# title and icon.
st.set_page_config(page_title="LLM Open Generation Bias", page_icon="π")

# Long markdown bodies are kept as module-level constants so the render
# calls below stay short and the page copy is easy to edit in one place.
_WELCOME_TEXT = "Welcome to the Gender Bias Analysis app. This application generates text using a GPT-2 model and compares the regard (perceived respect or opinion) in the generated texts for male and female prompts."

_DESCRIPTION_MD = """

## Description

This demo showcases how language models can exhibit gender bias. We load a dataset of prompts associated with male and female American actors and generate continuations using a GPT-2 model. By analyzing the generated text, we can observe potential biases in the model's output. The regard (perceived respect or opinion) scores are computed and compared for both male and female continuations.

"""

_SIDEBAR_INSTRUCTIONS_MD = """

1. Enter the password to access the demo.

2. The app will load the BOLD dataset and sample prompts for male and female actors.

3. It will generate text continuations using the GPT-2 model.

4. The regard scores will be computed and displayed for comparison.

"""

# Main page: title, intro sentence, then the longer description.
st.title('Gender Bias Analysis in Text Generation')
st.write(_WELCOME_TEXT)
st.markdown(_DESCRIPTION_MD)

# Sidebar: demo title plus step-by-step usage instructions.
st.sidebar.title("Gender Bias Analysis Demo")
st.sidebar.subheader("Instructions")
st.sidebar.markdown(_SIDEBAR_INSTRUCTIONS_MD)
|
|