huriacane33 committed
Commit 54b562a · verified · 1 parent: eba25c2

Create app.py

Files changed (1)
  1. app.py +58 -0
app.py ADDED
@@ -0,0 +1,58 @@
+ import streamlit as st
+ from transformers import pipeline
+ import pandas as pd
+ import re
+
+ # Load the Question Answering model
+ qa_pipeline = pipeline("question-answering", model="deepset/roberta-base-squad2")
+
+ # Load SOP Dataset
+ @st.cache_data  # st.cache is deprecated; cache_data is the current API for caching data loads
+ def load_sop_dataset():
+     """Load SOP dataset from CSV."""
+     dataset = pd.read_csv("dataset.csv")  # Ensure this file is uploaded to your Hugging Face Space
+     return dataset
+
+ # Load the dataset
+ dataset = load_sop_dataset()
+
+ # Utility function to find relevant contexts
+ def find_relevant_contexts(question, dataset):
+     """Search for relevant contexts in the dataset."""
+     relevant_contexts = []
+     for index, row in dataset.iterrows():
+         if re.search(re.escape(question), row["text"], re.IGNORECASE):  # escape so the question is matched literally, not as a regex
+             relevant_contexts.append(row["text"])
+     return relevant_contexts
+
+ # Streamlit UI
+ st.title("SOP Question Answering AI")
+ st.markdown("Ask any question about Standard Operating Procedures:")
+
+ # User input
+ question = st.text_area("Enter your question:", "")
+ specific_context = st.checkbox("Use specific SOP context?")
+
+ context = None
+ if specific_context:
+     st.write("Choose a context:")
+     context = st.selectbox("SOP Contexts", dataset["text"])
+ else:
+     if question:
+         st.write("Searching for relevant contexts...")
+         relevant_contexts = find_relevant_contexts(question, dataset)
+         if relevant_contexts:
+             context = st.selectbox("Relevant SOP Contexts", relevant_contexts)
+         else:
+             st.warning("No relevant contexts found. Try refining your question.")
+
+ # Generate answer
+ if st.button("Get Answer"):
+     if context:
+         with st.spinner("Finding the answer..."):
+             result = qa_pipeline(question=question, context=context)
+             st.success("Answer:")
+             st.write(result["answer"])
+             st.write("Confidence Score:", result["score"])
+     else:
+         st.warning("Please select a context or refine your question.")
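
To run this Space, app.py expects a dataset.csv next to it with at least a "text" column, since both find_relevant_contexts and the context selectbox read row["text"]. The snippet below is a minimal sketch for generating such a file for local testing; the file name and column name come from app.py, but the helper script and the SOP sentences in it are made-up placeholders, not part of this commit.

# make_sample_dataset.py -- hypothetical helper, not included in the Space
# Writes a minimal dataset.csv with the "text" column that app.py reads.
import pandas as pd

sample_rows = [
    {"text": "SOP-001: Inspect all equipment before the start of each shift."},
    {"text": "SOP-002: Report safety incidents to the shift supervisor within 24 hours."},
    {"text": "SOP-003: Calibrate measurement instruments every 30 days and log the results."},
]

pd.DataFrame(sample_rows).to_csv("dataset.csv", index=False)
print(f"Wrote dataset.csv with {len(sample_rows)} rows")

With that file in place, "streamlit run app.py" starts the UI; the first request also downloads the deepset/roberta-base-squad2 model, so transformers and a backend such as torch need to be installed alongside streamlit and pandas.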