Add columns
app.py CHANGED
@@ -35,6 +35,8 @@ DEFAULT_COLUMNS = [
     "Title",
     "Type",
     "Paper page",
+    "👍",
+    "💬",
     "OpenReview",
     "GitHub",
     "Spaces",
papers.py CHANGED
@@ -24,6 +24,8 @@ class PaperList:
         ["Authors", "str"],
         ["Type", "str"],
         ["Paper page", "markdown"],
+        ["👍", "number"],
+        ["💬", "number"],
         ["OpenReview", "markdown"],
         ["GitHub", "markdown"],
         ["Spaces", "markdown"],
@@ -40,11 +42,12 @@ class PaperList:
     def get_df() -> pd.DataFrame:
         df = pd.merge(
             left=datasets.load_dataset("ICLR2024/ICLR2024-papers", split="train").to_pandas(),
-            right=datasets.load_dataset("ICLR2024/ICLR2024-
+            right=datasets.load_dataset("ICLR2024/ICLR2024-paper-stats", split="train").to_pandas(),
             on="id",
             how="left",
         )
-
+        keys = ["n_authors", "n_linked_authors", "upvotes", "num_comments"]
+        df[keys] = df[keys].fillna(-1).astype(int)
         df["paper_page"] = df["arxiv_id"].apply(
             lambda arxiv_id: f"https://huggingface.co/papers/{arxiv_id}" if arxiv_id else ""
         )
@@ -62,12 +65,16 @@ class PaperList:
             n_linked_authors = "" if row.n_linked_authors == -1 else row.n_linked_authors
             n_authors = "" if row.n_authors == -1 else row.n_authors
             claimed_paper = "" if n_linked_authors == "" else f"{n_linked_authors}/{n_authors} {author_linked}"
+            upvotes = "" if row.upvotes == -1 else row.upvotes
+            num_comments = "" if row.num_comments == -1 else row.num_comments

             new_row = {
                 "Title": row["title"],
                 "Authors": ", ".join(row["authors"]),
                 "Type": row["type"],
                 "Paper page": PaperList.create_link(row["arxiv_id"], row["paper_page"]),
+                "👍": upvotes,
+                "💬": num_comments,
                 "OpenReview": PaperList.create_link("OpenReview", row["OpenReview"]),
                 "GitHub": "\n".join([PaperList.create_link("GitHub", url) for url in row["GitHub"]]),
                 "Spaces": "\n".join(
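The papers.py change left-merges a per-paper stats dataset into the main paper table and coerces the new counters to integers. The snippet below is a minimal sketch of that merge-then-fillna pattern using toy frames in place of the ICLR2024 datasets named in the diff; it is an illustration of the pattern, not part of the commit.

import pandas as pd

# Toy stand-ins for the two datasets merged in get_df().
papers = pd.DataFrame({"id": ["a", "b"], "title": ["Paper A", "Paper B"]})
stats = pd.DataFrame({"id": ["a"], "upvotes": [12], "num_comments": [3]})

# how="left" keeps every paper; rows missing from the stats table get NaN,
# which upcasts the count columns to float.
df = pd.merge(left=papers, right=stats, on="id", how="left")

# Fill the gaps with a -1 sentinel and restore integer dtype, as the new
# lines in get_df() do; the row-building loop later renders -1 as "".
keys = ["upvotes", "num_comments"]
df[keys] = df[keys].fillna(-1).astype(int)

print(df)
#   id    title  upvotes  num_comments
# 0  a  Paper A       12             3
# 1  b  Paper B       -1            -1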