import os
import json
import csv
from dotenv import load_dotenv
import streamlit as st
from langchain_huggingface import HuggingFaceEndpoint
from langchain_core.output_parsers import StrOutputParser

# Load environment variables
load_dotenv()
hf_token = os.getenv("HUGGINGFACEHUB_API_TOKEN")
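
# Minimal guard (an addition, assuming the token lives in a local .env file):
# without it, every call to the Hugging Face endpoint fails with an auth error.
if not hf_token:
    st.error("HUGGINGFACEHUB_API_TOKEN is not set. Add it to your .env file.")
    st.stop()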

# Initialize session state variables
if "past" not in st.session_state:
    st.session_state["past"] = []
if "generated" not in st.session_state:
    st.session_state["generated"] = []

# Initialize output parser and page title
parser = StrOutputParser()
st.title("Llama-3.2-3B-Instruct")

# Sidebar for LLM parameters
st.sidebar.header("LLM Parameters")
temperature = st.sidebar.slider("Temperature", 0.1, 1.0, 0.8, 0.1)  # the Inference API rejects temperature=0.0
max_new_tokens = st.sidebar.slider("Max New Tokens", 100, 500, 100, 50)

llm = HuggingFaceEndpoint(
    repo_id="meta-llama/Llama-3.2-3B-Instruct",
    max_new_tokens=max_new_tokens,
    temperature=temperature,
    huggingfacehub_api_token=hf_token,
)
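# Note: meta-llama/Llama-3.2-3B-Instruct is a gated repo on Hugging Face, so
# the account behind the token must have accepted Meta's license before the
# endpoint will serve requests.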

# Input section
col1, col2 = st.columns(2)

with col1:
    user_input = st.text_area("Enter your query:")

with col2:
    uploaded_file = st.file_uploader("Upload a file", type=["txt", "csv", "json"])

submit = st.button("Submit")

def parse_file(file):
    try:
        if file.type == "application/json":
            return json.loads(file.read().decode("utf-8"))
        elif file.type == "text/csv":
            return list(csv.reader(file.read().decode("utf-8").splitlines()))
        else:
            return file.read().decode("utf-8")
    except Exception as e:
        st.error(f"Error reading file: {e}")
        return None
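
# Helper (an addition, not in the original app): serialize parsed JSON/CSV
# content with json.dumps so the model receives readable text rather than
# Python's default repr() of dicts and nested lists.
def content_to_prompt(content):
    if isinstance(content, (dict, list)):
        return json.dumps(content, indent=2, ensure_ascii=False)
    return str(content)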

if submit:
    if user_input:
        with st.spinner("Thinking..."):
            try:
                result = llm.invoke(user_input)
                output = parser.parse(result)
                st.session_state["past"].append(user_input)
                st.session_state["generated"].append(output)
                st.success("Response generated.")
            except Exception as e:
                st.error(f"Error invoking LLM: {e}")
    elif uploaded_file:
        file_content = parse_file(uploaded_file)
        if file_content:
            with st.spinner("Thinking..."):
                try:
                    prompt = content_to_prompt(file_content)
                    result = llm.invoke(prompt)
                    output = parser.parse(result)
                    st.session_state["past"].append(prompt)
                    st.session_state["generated"].append(output)
                    st.success("Response generated.")
                except Exception as e:
                    st.error(f"Error invoking LLM: {e}")
    else:
        st.warning("Please enter a query or upload a file.")

# Display chat history
if st.session_state["generated"]:
    st.markdown("### Chat History")
    for i in range(len(st.session_state["generated"])-1, -1, -1):
        st.markdown(f"**You:** {st.session_state['past'][i]}")
        st.markdown(f"**Llama:** {st.session_state['generated'][i]}")