geekyrakshit committed on
Commit
85db9f4
·
1 Parent(s): 0ee7aed
application_pages/chat_app.py CHANGED
@@ -1,7 +1,7 @@
1
  import importlib
2
 
3
  import streamlit as st
4
-
5
  from guardrails_genie.guardrails import GuardrailManager
6
  from guardrails_genie.llm import OpenAIModel
7
 
@@ -114,6 +114,7 @@ def initialize_guardrails():
114
 
115
 
116
  if st.session_state.is_authenticated:
 
117
  initialize_session_state()
118
  st.title(":material/robot: Guardrails Genie Playground")
119
 
 
1
  import importlib
2
 
3
  import streamlit as st
4
+ import weave
5
  from guardrails_genie.guardrails import GuardrailManager
6
  from guardrails_genie.llm import OpenAIModel
7
 
 
114
 
115
 
116
  if st.session_state.is_authenticated:
117
+ weave.init(project_name=f"{st.session_state.weave_entity_name}/{st.session_state.weave_project_name}")
118
  initialize_session_state()
119
  st.title(":material/robot: Guardrails Genie Playground")
120
 
application_pages/evaluation_app.py CHANGED
@@ -120,6 +120,9 @@ def initialize_guardrails():
120
 
121
 
122
  if st.session_state.is_authenticated:
 
 
 
123
  initialize_session_state()
124
  st.title(":material/monitoring: Evaluation")
125
 
@@ -187,11 +190,17 @@ if st.session_state.is_authenticated:
187
  st.session_state.start_evaluations_button = start_evaluations_button
188
  if st.session_state.start_evaluations_button:
189
  # st.write(len(st.session_state.guardrails))
190
- evaluation = weave.Evaluation(
191
- dataset=st.session_state.dataset_ref,
192
- scorers=[AccuracyMetric()],
193
- streamlit_mode=True,
194
- )
 
 
 
 
 
 
195
  with st.expander("Evaluation Results", expanded=True):
196
  evaluation_summary, call = asyncio.run(
197
  evaluation.evaluate.call(
 
120
 
121
 
122
  if st.session_state.is_authenticated:
123
+ weave.init(
124
+ project_name=f"{st.session_state.weave_entity_name}/{st.session_state.weave_project_name}"
125
+ )
126
  initialize_session_state()
127
  st.title(":material/monitoring: Evaluation")
128
 
 
190
  st.session_state.start_evaluations_button = start_evaluations_button
191
  if st.session_state.start_evaluations_button:
192
  # st.write(len(st.session_state.guardrails))
193
+ try:
194
+ evaluation = weave.Evaluation(
195
+ dataset=st.session_state.dataset_ref,
196
+ scorers=[AccuracyMetric()],
197
+ streamlit_mode=True,
198
+ )
199
+ except Exception as e:
200
+ evaluation = weave.Evaluation(
201
+ dataset=st.session_state.dataset_ref,
202
+ scorers=[AccuracyMetric()],
203
+ )
204
  with st.expander("Evaluation Results", expanded=True):
205
  evaluation_summary, call = asyncio.run(
206
  evaluation.evaluate.call(