spedrox-sac committed on
Commit
d1058b3
·
verified ·
1 Parent(s): 0a668df

Create app.py

Browse files
Files changed (1) hide show
  1. app.py +89 -0
app.py ADDED
@@ -0,0 +1,89 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
import os
import json
import csv
from dotenv import load_dotenv
import streamlit as st
from langchain_huggingface import HuggingFaceEndpoint
from langchain_core.output_parsers import StrOutputParser

# Load environment variables from a local .env file (if present) so the
# Hugging Face API token can be supplied outside of source control.
load_dotenv()
# NOTE(review): this is None when the variable is unset — the endpoint
# construction below presumably fails or runs unauthenticated in that case;
# confirm the intended behavior.
hf_token = os.getenv("HUGGINGFACEHUB_API_TOKEN")
# Initialize session-state containers for the conversation history.
# "past" holds user inputs and "generated" holds model responses; the two
# lists are appended to in lockstep and read back as parallel lists.
if "past" not in st.session_state:
    st.session_state["past"] = []
if "generated" not in st.session_state:
    st.session_state["generated"] = []

# Output parser that extracts the plain string from the model result.
parser = StrOutputParser()
st.title("Llama-3.2-3B-Instruct")

# Sidebar controls for the generation parameters.
st.sidebar.header("LLM Parameters")
temperature = st.sidebar.slider("Temperature", 0.0, 1.0, 0.8, 0.1)
max_length = st.sidebar.slider("Max Length", 100, 500, 100, 50)

# Hugging Face Inference endpoint for the chat model.
# Fixes vs. the original: `token_id` is not a HuggingFaceEndpoint parameter
# (the API token goes in `huggingfacehub_api_token`, so the token was being
# silently dropped), and the generated-text length is controlled by
# `max_new_tokens`, not `max_length`.
llm = HuggingFaceEndpoint(
    repo_id="meta-llama/Llama-3.2-3B-Instruct",
    max_new_tokens=max_length,
    temperature=temperature,
    huggingfacehub_api_token=hf_token,
)
# Input section: free-text query on the left, optional file upload on the
# right. Either one (text takes precedence) is sent to the model on Submit.
col1, col2 = st.columns(2)

with col1:
    user_input = st.text_area("Enter your query:")

with col2:
    uploaded_file = st.file_uploader("Upload a file", type=["txt", "csv", "json"])

submit = st.button("Submit")
def parse_file(file):
    """Decode an uploaded file into Python data.

    JSON uploads are parsed into the corresponding Python object, CSV uploads
    into a list of rows (each row a list of strings), and any other type is
    returned as decoded UTF-8 text. On any failure an error is surfaced in
    the UI and None is returned.
    """
    try:
        raw = file.read().decode("utf-8")
        if file.type == "application/json":
            parsed = json.loads(raw)
        elif file.type == "text/csv":
            parsed = [row for row in csv.reader(raw.splitlines())]
        else:
            parsed = raw
        return parsed
    except Exception as e:
        st.error(f"Error reading file: {e}")
        return None
def _run_query(prompt_text, history_entry):
    """Invoke the LLM on prompt_text and record the exchange in session state.

    history_entry is stored as the "user" side of the exchange (the raw
    query string, or the parsed file content). Errors from the endpoint are
    surfaced in the UI rather than raised.
    """
    with st.spinner("Thinking..."):
        try:
            result = llm.invoke(prompt_text)
            output = parser.parse(result)
            st.session_state["past"].append(history_entry)
            st.session_state["generated"].append(output)
            st.success("Response generated.")
        except Exception as e:
            st.error(f"Error invoking LLM: {e}")


if submit:
    if user_input:
        _run_query(user_input, user_input)
    elif uploaded_file:
        file_content = parse_file(uploaded_file)
        # Compare against None explicitly: only a parse failure returns None.
        # The original truthiness check silently dropped successfully parsed
        # but falsy content (empty text, empty CSV, JSON 0/false).
        if file_content is not None:
            _run_query(str(file_content), file_content)
    else:
        st.warning("Please enter a query or upload a file.")
# Display the chat history, newest exchange first. "past" and "generated"
# are parallel lists appended together, so pairing them index-for-index is
# safe here.
if st.session_state["generated"]:
    st.markdown("### Chat History")
    exchanges = zip(st.session_state["past"], st.session_state["generated"])
    for question, answer in reversed(list(exchanges)):
        st.markdown(f"**You:** {question}")
        st.markdown(f"**Llama:** {answer}")