DexterSptizu committed on
Commit
5d000a0
β€’
1 Parent(s): 018412d

Create app.py

Browse files
Files changed (1) hide show
  1. app.py +105 -0
app.py ADDED
@@ -0,0 +1,105 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import streamlit as st
2
+ import pandas as pd
3
+ from io import StringIO
4
+ from transformers import AutoModelForCausalLM, AutoTokenizer
5
+ import torch
6
+
7
# Predefined example CSV content: a small game-log table (pitcher of record,
# date, score, opponent, team record, attendance) used when the user picks the
# built-in demo instead of uploading a file.
_EXAMPLE_ROWS = (
    '"Loss","Date","Score","Opponent","Record","Attendance"',
    '"Hampton (14–12)","September 25","8–7","Padres","67–84","31,193"',
    '"Speier (5–3)","September 26","3–1","Padres","67–85","30,711"',
    '"Elarton (4–9)","September 22","3–1","@ Expos","65–83","9,707"',
    '"Lundquist (0–1)","September 24","15–11","Padres","67–83","30,774"',
    '"Hampton (13–11)","September 6","9–5","Dodgers","61–78","31,407"',
)
# Leading and trailing newlines reproduce the original triple-quoted literal
# exactly (pd.read_csv skips the blank first line by default).
EXAMPLE_CSV_CONTENT = "\n" + "\n".join(_EXAMPLE_ROWS) + "\n"
17
# Load the model and tokenizer once per process; st.cache_resource keeps the
# loaded weights alive across Streamlit script reruns instead of reloading
# the 7B checkpoint on every interaction.
@st.cache_resource
def load_model_and_tokenizer():
    """Download and return the (model, tokenizer) pair for TableGPT2-7B.

    Returns:
        tuple: ``(model, tokenizer)`` where the model's weights are placed
        automatically across available devices (``device_map="auto"``) with
        the dtype taken from the checkpoint config (``torch_dtype="auto"``).
    """
    checkpoint = "tablegpt/TableGPT2-7B"
    tokenizer = AutoTokenizer.from_pretrained(checkpoint)
    model = AutoModelForCausalLM.from_pretrained(
        checkpoint,
        torch_dtype="auto",
        device_map="auto",
    )
    return model, tokenizer
26
+
27
model, tokenizer = load_model_and_tokenizer()

# Application UI
st.title("Table Question Answering App")
st.write(
    """
    This app uses a language model to answer questions about tabular data.
    You can upload your own CSV file or use a predefined example to test it.
    """
)

# Sidebar for input options
st.sidebar.header("Input Options")
data_source = st.sidebar.radio("Choose a data source:", ("Example CSV", "Upload CSV"))

if data_source == "Example CSV":
    st.subheader("Using Example CSV Data")
    csv_file = StringIO(EXAMPLE_CSV_CONTENT)
    df = pd.read_csv(csv_file)
else:
    st.subheader("Upload Your CSV File")
    uploaded_file = st.file_uploader("Upload a CSV file", type=["csv"])
    if uploaded_file is not None:
        # Robustness fix: a malformed, empty, or non-UTF-8 upload previously
        # crashed the app with an uncaught pandas exception. Surface a clear
        # error in the UI and halt the script run instead.
        try:
            df = pd.read_csv(uploaded_file)
        except (pd.errors.ParserError, pd.errors.EmptyDataError, UnicodeDecodeError) as exc:
            st.error(f"Could not parse the uploaded file as CSV: {exc}")
            st.stop()
    else:
        st.warning("Please upload a CSV file to proceed.")
        st.stop()  # halt this rerun; nothing below can work without a dataframe

# Display the loaded dataframe
st.write("### Data Preview")
st.dataframe(df)

# Question Input (default is a Chinese example question about win/loss records)
st.write("### Ask a Question")
question = st.text_input("Enter your question:", "ε“ͺδΊ›ζ―”θ΅›ηš„ζˆ˜η»©θΎΎεˆ°δΊ†40θƒœ40负?")
62
+
63
# Generate and display a model response once the user has entered a question.
if question:
    # Prompt layout expected by TableGPT: a dataframe preview embedded in a
    # C-style comment block, followed by the user's question.
    prompt_template = """Given access to several pandas dataframes, write the Python code to answer the user's question.

/*
"{var_name}.head(5).to_string(index=False)" as follows:
{df_info}
*/

Question: {user_question}
"""
    preview = df.head(5).to_string(index=False)
    prompt = prompt_template.format(
        var_name="df",
        df_info=preview,
        user_question=question,
    )

    messages = [
        {"role": "system", "content": "You are a helpful assistant."},
        {"role": "user", "content": prompt},
    ]
    chat_text = tokenizer.apply_chat_template(
        messages, tokenize=False, add_generation_prompt=True
    )
    model_inputs = tokenizer([chat_text], return_tensors="pt").to(model.device)

    with st.spinner("Generating response..."):
        outputs = model.generate(**model_inputs, max_new_tokens=512)
        # Drop the echoed prompt tokens so only newly generated text is decoded.
        completions = []
        for prompt_ids, output_ids in zip(model_inputs.input_ids, outputs):
            completions.append(output_ids[len(prompt_ids):])
        response = tokenizer.batch_decode(completions, skip_special_tokens=True)[0]

    # Display response
    st.write("### Model Response")
    st.text_area("Response", response, height=200)
98
+
99
# Footer: credit line shown in the sidebar. Built by explicit concatenation;
# the bytes match the original triple-quoted literal exactly.
_footer_text = (
    "\n"
    "    This app demonstrates the use of a language model for tabular data understanding.\n"
    "    Powered by [Hugging Face Transformers](https://huggingface.co/).\n"
    "    "
)
st.sidebar.info(_footer_text)