awacke1 committed
Commit 912ec24 · verified · 1 Parent(s): 2add5e2

Create app.py

Files changed (1)
  app.py +187 -0
app.py ADDED
@@ -0,0 +1,187 @@
+ #!/usr/bin/env python3
+ import os
+ import re
+ import glob
+ import streamlit as st
+ import streamlit.components.v1 as components
+ from transformers import pipeline
+ from urllib.parse import quote
+ from datetime import datetime
+ import pytz
+ import base64
+ import pandas as pd
+ import torch
+ import torch.nn as nn
+ import torch.optim as optim
+ from torch.utils.data import DataLoader, TensorDataset
+
+ st.set_page_config(page_title="AI Knowledge Tree Builder 📈🌿", page_icon="🌳✨", layout="wide")
+
+ trees = {
+     "Biology": """
+ 0. Biology Core Rules and Future Exceptions
+ 1. Central Dogma DNA RNA Protein
+     - Current CRISPR RNA editing 🧪
+     - Research Gene therapy siRNA 🔬
+     - Future Programmable genetics 🚀
+ """,
+     "AI Topics": """
+ 1. Major AI Industry Players 🌐
+     1. Research Leaders 🎯
+         - OpenAI: GPT-4 DALL-E Foundation Models 🔵
+ """
+ }
+
+ def parse_outline_to_mermaid(outline_text):
+     lines = outline_text.strip().split('\n')
+     nodes, edges, clicks, stack = [], [], [], []
+     for line in lines:
+         indent = len(line) - len(line.lstrip())
+         level = indent // 4
+         label = re.sub(r'^[#*\->\d\.\s]+', '', line.strip())
+         if label:
+             node_id = f"N{len(nodes)}"
+             nodes.append(f'{node_id}["{label}"]')
+             clicks.append(f'click {node_id} "?q={quote(label)}" _blank')
+             if stack:
+                 parent_level = stack[-1][0]
+                 if level > parent_level:
+                     edges.append(f"{stack[-1][1]} --> {node_id}")
+                     stack.append((level, node_id))
+                 else:
+                     while stack and stack[-1][0] >= level:
+                         stack.pop()
+                     if stack:
+                         edges.append(f"{stack[-1][1]} --> {node_id}")
+                     stack.append((level, node_id))
+             else:
+                 stack.append((level, node_id))
+     return "%%{init: {'themeVariables': {'fontSize': '18px'}}}%%\nflowchart LR\n" + "\n".join(nodes + edges + clicks)
+
+ def generate_mermaid_html(mermaid_code):
+     return f"""
+     <html><head><script src="https://cdn.jsdelivr.net/npm/mermaid/dist/mermaid.min.js"></script>
+     <style>.centered-mermaid{{display:flex;justify-content:center;margin:20px auto;}}</style></head>
+     <body><div class="mermaid centered-mermaid">{mermaid_code}</div>
+     <script>mermaid.initialize({{startOnLoad:true}});</script></body></html>
+     """
+
+ def grow_tree(base_tree, new_node_name, parent_node):
+     lines = base_tree.strip().split('\n')
+     new_lines = []
+     added = False
+     for line in lines:
+         new_lines.append(line)
+         if parent_node in line and not added:
+             indent = len(line) - len(line.lstrip())
+             new_lines.append(f"{' ' * (indent + 4)}- {new_node_name} 🌱")
+             added = True
+     return "\n".join(new_lines)
+
+ def get_download_link(file_path, mime_type="text/plain"):
+     with open(file_path, 'rb') as f:
+         data = f.read()
+     b64 = base64.b64encode(data).decode()
+     return f'<a href="data:{mime_type};base64,{b64}" download="{file_path}">Download {file_path}</a>'
+
+ @st.cache_resource
+ def load_generator():
+     return pipeline("text-generation", model="distilgpt2")
+
+ # Main App
+ st.title("🌳 AI Knowledge Tree Builder 🌱")
+
+ if 'current_tree' not in st.session_state:
+     if os.path.exists("current_tree.md"):
+         with open("current_tree.md", "r") as f:
+             st.session_state['current_tree'] = f.read()
+     else:
+         st.session_state['current_tree'] = trees["Biology"]
+
+ selected_tree = st.selectbox("Select Knowledge Tree", list(trees.keys()))
+ if selected_tree != st.session_state.get('selected_tree_name', 'Biology'):
+     st.session_state['current_tree'] = trees[selected_tree]
+     st.session_state['selected_tree_name'] = selected_tree
+     with open("current_tree.md", "w") as f:
+         f.write(st.session_state['current_tree'])
+
+ new_node = st.text_input("Add New Node")
+ parent_node = st.text_input("Parent Node")
+ if st.button("Grow Tree 🌱") and new_node and parent_node:
+     st.session_state['current_tree'] = grow_tree(st.session_state['current_tree'], new_node, parent_node)
+     with open("current_tree.md", "w") as f:
+         f.write(st.session_state['current_tree'])
+     st.success(f"Added '{new_node}' under '{parent_node}'!")
+
+ st.markdown("### Knowledge Tree Visualization")
+ mermaid_code = parse_outline_to_mermaid(st.session_state['current_tree'])
+ components.html(generate_mermaid_html(mermaid_code), height=600)
+
+ if st.button("Export Tree as Markdown"):
+     export_md = f"# Knowledge Tree\n\n## Outline\n{st.session_state['current_tree']}\n\n## Mermaid Diagram\n```mermaid\n{mermaid_code}\n```"
+     with open("knowledge_tree.md", "w") as f:
+         f.write(export_md)
+     st.markdown(get_download_link("knowledge_tree.md", "text/markdown"), unsafe_allow_html=True)
+
+ st.subheader("Build ML Model from CSV")
+ uploaded_file = st.file_uploader("Upload CSV", type="csv")
+ if uploaded_file:
+     df = pd.read_csv(uploaded_file)
+     st.write("Columns:", df.columns.tolist())
+     feature_cols = st.multiselect("Select feature columns", df.columns)
+     target_col = st.selectbox("Select target column", df.columns)
+     if st.button("Train Model"):
+         X = df[feature_cols].values
+         y = df[target_col].values
+         X_tensor = torch.tensor(X, dtype=torch.float32)
+         y_tensor = torch.tensor(y, dtype=torch.float32).view(-1, 1)
+         dataset = TensorDataset(X_tensor, y_tensor)
+         loader = DataLoader(dataset, batch_size=32, shuffle=True)
+         model = nn.Linear(X.shape[1], 1)
+         criterion = nn.MSELoss()
+         optimizer = optim.Adam(model.parameters(), lr=0.01)
+         for epoch in range(10):
+             for batch_X, batch_y in loader:
+                 optimizer.zero_grad()
+                 outputs = model(batch_X)
+                 loss = criterion(outputs, batch_y)
+                 loss.backward()
+                 optimizer.step()
+         torch.save(model.state_dict(), "model.pth")
+         app_code = f"""
+ import streamlit as st
+ import torch
+ import torch.nn as nn
+
+ model = nn.Linear({len(feature_cols)}, 1)
+ model.load_state_dict(torch.load("model.pth"))
+ model.eval()
+
+ st.title("ML Model Demo")
+ inputs = []
+ for col in {feature_cols}:
+     inputs.append(st.number_input(col))
+ if st.button("Predict"):
+     input_tensor = torch.tensor([inputs], dtype=torch.float32)
+     prediction = model(input_tensor).item()
+     st.write(f"Predicted {target_col}: {{prediction}}")
+ """
+         with open("app.py", "w") as f:
+             f.write(app_code)
+         reqs = "streamlit\ntorch\npandas\n"
+         with open("requirements.txt", "w") as f:
+             f.write(reqs)
+         readme = """
+ # ML Model Demo
+
+ ## How to run
+ 1. Install requirements: `pip install -r requirements.txt`
+ 2. Run the app: `streamlit run app.py`
+ 3. Input feature values and click "Predict".
+ """
+         with open("README.md", "w") as f:
+             f.write(readme)
+         st.markdown(get_download_link("model.pth", "application/octet-stream"), unsafe_allow_html=True)
+         st.markdown(get_download_link("app.py", "text/plain"), unsafe_allow_html=True)
+         st.markdown(get_download_link("requirements.txt", "text/plain"), unsafe_allow_html=True)
+         st.markdown(get_download_link("README.md", "text/markdown"), unsafe_allow_html=True)