omkarenator committed on
Commit
45cd785
1 Parent(s): 0c8c1d8

deploy at 2024-09-09 11:39:41.311847

Browse files
.ruff_cache/.gitignore ADDED
@@ -0,0 +1,2 @@
 
 
 
1
+ # Automatically created by ruff.
2
+ *
.ruff_cache/0.5.7/874107400203978039 ADDED
Binary file (80 Bytes). View file
 
.ruff_cache/CACHEDIR.TAG ADDED
@@ -0,0 +1 @@
 
 
1
+ Signature: 8a477f597d28d172789f06886806bc55
config.ini ADDED
@@ -0,0 +1,5 @@
 
 
 
 
 
 
1
+ [DEFAULT]
2
+ dataset_id = space-backup
3
+ db_dir = data
4
+ private_backup = True
5
+
main.py CHANGED
@@ -1,6 +1,7 @@
1
  from fasthtml_hf import setup_hf_backup
2
  from fasthtml.common import *
3
 
 
4
  app, rt = fast_app()
5
 
6
 
@@ -10,8 +11,9 @@ def get():
10
  Head(
11
  Meta(charset="UTF-8"),
12
  Meta(name="viewport", content="width=device-width, initial-scale=1.0"),
13
- Title("Simple Blog Post"),
14
  Link(rel="stylesheet", href="style.css"),
 
15
  ),
16
  Body(
17
  Div(
@@ -108,7 +110,6 @@ def get():
108
  Section(
109
  H2("Main Content"),
110
  P(
111
-
112
  """The performance of a large language model (LLM)
113
  depends heavily on the quality and size of its
114
  pretraining dataset. However, the pretraining
@@ -154,7 +155,6 @@ def get():
154
  (listing and explaining all of our design choices),
155
  and the process followed to create its 📚
156
  FineWeb-Edu subset."""
157
-
158
  ),
159
  id="section3",
160
  ),
@@ -170,14 +170,13 @@ def get():
170
 
171
 
172
 
173
- """
174
- ),
175
  id="section4",
176
  ),
177
  cls="content",
178
  ),
179
  cls="container",
180
- )
181
  ),
182
  lang="en",
183
  )
 
1
  from fasthtml_hf import setup_hf_backup
2
  from fasthtml.common import *
3
 
4
+
5
  app, rt = fast_app()
6
 
7
 
 
11
  Head(
12
  Meta(charset="UTF-8"),
13
  Meta(name="viewport", content="width=device-width, initial-scale=1.0"),
14
+ Title("Simple Blog Post", cls="d-title"),
15
  Link(rel="stylesheet", href="style.css"),
16
+ Script(src="https://distill.pub/template.v1.js"),
17
  ),
18
  Body(
19
  Div(
 
110
  Section(
111
  H2("Main Content"),
112
  P(
 
113
  """The performance of a large language model (LLM)
114
  depends heavily on the quality and size of its
115
  pretraining dataset. However, the pretraining
 
155
  (listing and explaining all of our design choices),
156
  and the process followed to create its 📚
157
  FineWeb-Edu subset."""
 
158
  ),
159
  id="section3",
160
  ),
 
170
 
171
 
172
 
173
+ """),
 
174
  id="section4",
175
  ),
176
  cls="content",
177
  ),
178
  cls="container",
179
+ ),
180
  ),
181
  lang="en",
182
  )
test.py ADDED
@@ -0,0 +1,9 @@
 
 
 
 
 
 
 
 
 
 
1
+ from fasthtml.common import *
2
+
3
+ Meta(charset='utf-8')
4
+
5
+ Script(src='https://distill.pub/template.v1.js')
6
+
7
+ Dt_article(
8
+ H1('Hello World')
9
+ )