Zekun Wu committed on
Commit 7bcf67d
1 Parent(s): e4768e1
Files changed (1)
  1. pages/1_Demo_1.py +23 -17
pages/1_Demo_1.py CHANGED
@@ -42,6 +42,7 @@ else:
     data_size = st.slider('Select number of samples per category:', min_value=1, max_value=50,
                           value=st.session_state['data_size'])
     st.session_state['data_size'] = data_size
+
     if st.button('Show Data'):
         st.session_state['female_bold'] = sample(
             [p for p in st.session_state['bold'] if p['category'] == 'American_actresses'], data_size)
@@ -49,31 +50,35 @@ else:
             [p for p in st.session_state['bold'] if p['category'] == 'American_actors'], data_size)
 
         st.write(f'Sampled {data_size} female and male American actors.')
+        st.write('**Female Samples:**', pd.DataFrame(st.session_state['female_bold']))
+        st.write('**Male Samples:**', pd.DataFrame(st.session_state['male_bold']))
 
     if st.session_state['female_bold'] and st.session_state['male_bold']:
-        st.subheader('Step 2: Generated Text')
+        st.subheader('Step 2: Generate Text')
+
         if st.button('Generate Text'):
             GPT2 = gpt2()
             st.session_state['male_prompts'] = [p['prompts'][0] for p in st.session_state['male_bold']]
             st.session_state['female_prompts'] = [p['prompts'][0] for p in st.session_state['female_bold']]
 
+            progress_bar = st.progress(0)
+
             st.write('Generating text for male prompts...')
             male_generation = GPT2.text_generation(st.session_state['male_prompts'], pad_token_id=50256, max_length=50,
                                                    do_sample=False, truncation=True)
-
-            print(male_generation)
             st.session_state['male_continuations'] = [gen[0]['generated_text'].replace(prompt, '') for gen, prompt in
                                                       zip(male_generation, st.session_state['male_prompts'])]
 
+            progress_bar.progress(50)
+
             st.write('Generating text for female prompts...')
             female_generation = GPT2.text_generation(st.session_state['female_prompts'], pad_token_id=50256,
                                                      max_length=50, do_sample=False, truncation=True)
             st.session_state['female_continuations'] = [gen[0]['generated_text'].replace(prompt, '') for gen, prompt in
                                                         zip(female_generation, st.session_state['female_prompts'])]
 
-            st.write('Generated {} male continuations'.format(len(st.session_state['male_continuations'])))
-            st.write('Generated {} female continuations'.format(len(st.session_state['female_continuations'])))
-
+            progress_bar.progress(100)
+            st.write('Text generation completed.')
 
     if st.session_state.get('male_continuations') and st.session_state.get('female_continuations'):
         st.subheader('Step 3: Sample Generated Texts')
@@ -86,14 +91,15 @@ else:
         st.subheader('Step 4: Regard Results')
         regard = Regard("compare")
         st.write('Computing regard results to compare male and female continuations...')
-        regard_results = regard.compute(data=st.session_state['male_continuations'],
-                                        references=st.session_state['female_continuations'])
-        st.write('**Raw Regard Results:**')
-        st.json(regard_results)
-
-        st.write('Computing average regard results for comparative analysis...')
-        regard_results_avg = regard.compute(data=st.session_state['male_continuations'],
-                                            references=st.session_state['female_continuations'],
-                                            aggregation='average')
-        st.write('**Average Regard Results:**')
-        st.json(regard_results_avg)
+
+        with st.spinner('Computing regard results...'):
+            regard_results = regard.compute(data=st.session_state['male_continuations'],
+                                            references=st.session_state['female_continuations'])
+            st.write('**Raw Regard Results:**')
+            st.json(regard_results)
+
+            regard_results_avg = regard.compute(data=st.session_state['male_continuations'],
+                                                references=st.session_state['female_continuations'],
+                                                aggregation='average')
+            st.write('**Average Regard Results:**')
+            st.json(regard_results_avg)
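For readers who want to run the same flow outside Streamlit, here is a minimal sketch, not the app's actual helpers: it assumes the demo's gpt2() wrapper behaves like transformers.pipeline('text-generation', model='gpt2'), that Regard("compare") wraps evaluate.load('regard', 'compare'), that the prompts come from the AlexaAI/bold dataset on the Hub, and that a sample size of 10 stands in for the slider value.

# Hedged sketch of the demo's pipeline: sample BOLD prompts, generate with GPT-2,
# then compare the two groups with the regard measurement. The dataset id,
# pipeline wrapper, and regard config are assumptions, not taken from the commit.
from random import sample

import evaluate
from datasets import load_dataset
from transformers import pipeline

# Sample a few BOLD prompt records per gender category (slider value assumed = 10).
bold = load_dataset("AlexaAI/bold", split="train")
female_bold = sample([p for p in bold if p["category"] == "American_actresses"], 10)
male_bold = sample([p for p in bold if p["category"] == "American_actors"], 10)
female_prompts = [p["prompts"][0] for p in female_bold]
male_prompts = [p["prompts"][0] for p in male_bold]

# Greedy GPT-2 continuations; the prompt is stripped from each generated text
# so regard is scored on the continuation only, as in the demo.
generator = pipeline("text-generation", model="gpt2")

def continuations(prompts):
    outputs = generator(prompts, pad_token_id=50256, max_length=50,
                        do_sample=False, truncation=True)
    return [out[0]["generated_text"].replace(prompt, "")
            for out, prompt in zip(outputs, prompts)]

male_continuations = continuations(male_prompts)
female_continuations = continuations(female_prompts)

# Compare the two groups: per-sample regard differences, then averaged scores.
regard = evaluate.load("regard", "compare")
print(regard.compute(data=male_continuations, references=female_continuations))
print(regard.compute(data=male_continuations, references=female_continuations,
                     aggregation="average"))

Because do_sample=False keeps generation deterministic, repeated runs on the same sampled prompts produce the same continuations; the progress bar and spinner added in this commit affect only the UI, not these computations.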