# set path
import sys
sys.path.append('../utils')

# import needed libraries
import pandas as pd
import streamlit as st
from utils.netzero_classifier import load_netzeroClassifier, netzero_classification
import logging
logger = logging.getLogger(__name__)
from utils.config import get_classifier_params
from io import BytesIO
import xlsxwriter
import plotly.express as px


# Declare all the necessary variables
classifier_identifier = 'netzero'
params = get_classifier_params(classifier_identifier)

### Labels dictionary ###
_lab_dict = {
    'NEGATIVE': 'NO NETZERO TARGET',
    'NA': 'NOT APPLICABLE',
    'NETZERO': 'NETZERO TARGET',
}


@st.cache_data
def to_excel(df):
    """Serialize the results dataframe to an in-memory Excel file with a
    Yes/No/Discard validation dropdown in column E."""
    len_df = len(df)
    output = BytesIO()
    writer = pd.ExcelWriter(output, engine='xlsxwriter')
    df.to_excel(writer, index=False, sheet_name='Sheet1')
    worksheet = writer.sheets['Sheet1']
    # Data starts in row 2 (row 1 holds the header), so the validation range
    # needs len_df + 1 to cover every data row.
    worksheet.data_validation('E2:E{}'.format(len_df + 1),
                              {'validate': 'list',
                               'source': ['No', 'Yes', 'Discard']})
    # ExcelWriter.save() was removed in pandas 2.0; close() writes the buffer.
    writer.close()
    processed_data = output.getvalue()
    return processed_data

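# Illustrative sketch (assumption, not part of the original module): the bytes
# returned by to_excel() are typically offered to the user through Streamlit's
# st.download_button. The helper name and file name below are hypothetical.
def netzero_download_button(df):
    st.download_button(
        label='Download netzero results as Excel',
        data=to_excel(df),
        file_name='netzero_results.xlsx',
        mime='application/vnd.openxmlformats-officedocument.spreadsheetml.sheet',
    )
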
def app():
    ### Main app code ###
    with st.container():
        if 'key1' in st.session_state:
            df = st.session_state.key1

            # Load the classifier model and keep it in the session state
            classifier = load_netzeroClassifier(classifier_name=params['model_name'])
            st.session_state['{}_classifier'.format(classifier_identifier)] = classifier

            if sum(df['Target Label'] == 'TARGET') > 100:
                warning_msg = ": This might take some time, please sit back and relax."
            else:
                warning_msg = ""

            # Run the classifier inside a spinner so the warning (if any) is
            # visible to the user while classification runs
            with st.spinner("Running netzero target classification{}".format(warning_msg)):
                df = netzero_classification(haystack_doc=df,
                                            threshold=params['threshold'])
            st.session_state.key1 = df

def netzero_display():
    if 'key1' in st.session_state:
        # the classified dataframe is stored under key1 by app()
        df = st.session_state.key1
        hits = df[df['Netzero Label'] == 'NETZERO']
        range_val = min(5, len(hits))
        if range_val != 0:
            count_df = df['Netzero Label'].value_counts()
            count_df = count_df.rename('count')
            count_df = count_df.rename_axis('Netzero Label').reset_index()
            count_df['Label_def'] = count_df['Netzero Label'].apply(lambda x: _lab_dict[x])

            fig = px.bar(count_df, y="Label_def", x="count", orientation='h', height=200)
            c1, c2 = st.columns([1, 1])
            with c1:
                st.plotly_chart(fig, use_container_width=True)

            hits = hits.sort_values(by=['Netzero Score'], ascending=False)
            st.write("")
            st.markdown("###### Top NetZero target classified paragraph/text results ######")
            for i in range(range_val):
                # The page number refers to the page containing the main paragraph;
                # because of the split limit, the overlapping part may fall on a separate page.
                st.write('**Result {}** `page {}` (Relevancy Score: {:.2f})'.format(
                    i + 1, hits.iloc[i]['page'], hits.iloc[i]['Netzero Score']))
                st.write("\t Text: \t{}".format(hits.iloc[i]['text']))
        else:
            st.info("🤔 No Netzero target found")
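
# Illustrative usage sketch (assumption; the surrounding multipage app is not
# shown in this file). Once the paragraph dataframe sits in st.session_state.key1,
# the two entry points are typically called in order:
#
#   app()               # runs the classifier and writes the result back to key1
#   netzero_display()   # renders the label counts and the top-scoring paragraphs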