Hugging Face Spaces — commit: "cosmetic"
Browse files — 3 files changed:
- app.py (+5 −4)
- df/PaperCentral.py (+1 −1)
- paper_chat_tab.py (+2 −2)
app.py
CHANGED
@@ -186,8 +186,7 @@ with gr.Blocks(css_paths="style.css") as demo:
|
|
186 |
with gr.Tab("Contributors"):
|
187 |
author_resource_leaderboard_tab()
|
188 |
|
189 |
-
|
190 |
-
with gr.Tab("Chat With Paper", id="tab-chat-with-paper"):
|
191 |
gr.Markdown("## Chat with Paper")
|
192 |
arxiv_id = gr.State(value=None)
|
193 |
paper_chat_tab(arxiv_id)
|
@@ -481,6 +480,7 @@ with gr.Blocks(css_paths="style.css") as demo:
|
|
481 |
hf_options = gr.update(value=[])
|
482 |
selected_tab = gr.Tabs()
|
483 |
paper_id = gr.update(value=None)
|
|
|
484 |
|
485 |
if request:
|
486 |
# print("Request headers dictionary:", dict(request.headers))
|
@@ -519,8 +519,9 @@ with gr.Blocks(css_paths="style.css") as demo:
|
|
519 |
selected_tab = gr.Tabs(selected="tab-chat-with-paper")
|
520 |
if "paper_id" in request.query_params:
|
521 |
paper_id = request.query_params['paper_id']
|
|
|
522 |
|
523 |
-
return calendar, date_range, conferences, hf_options, selected_tab, paper_id
|
524 |
|
525 |
|
526 |
demo.load(
|
@@ -530,7 +531,7 @@ with gr.Blocks(css_paths="style.css") as demo:
|
|
530 |
api_name="update_data",
|
531 |
).then(
|
532 |
fn=echo,
|
533 |
-
outputs=[calendar, date_range_radio, conference_options, hf_options, tabs, arxiv_id],
|
534 |
api_name=False,
|
535 |
).then(
|
536 |
# New then to handle LoginButton and HTML components
|
|
|
186 |
with gr.Tab("Contributors"):
|
187 |
author_resource_leaderboard_tab()
|
188 |
|
189 |
+
with gr.Tab("Chat With Paper", id="tab-chat-with-paper", visible=False) as tab_chat_paper:
|
|
|
190 |
gr.Markdown("## Chat with Paper")
|
191 |
arxiv_id = gr.State(value=None)
|
192 |
paper_chat_tab(arxiv_id)
|
|
|
480 |
hf_options = gr.update(value=[])
|
481 |
selected_tab = gr.Tabs()
|
482 |
paper_id = gr.update(value=None)
|
483 |
+
tab_chat_paper = gr.update(visible=False)
|
484 |
|
485 |
if request:
|
486 |
# print("Request headers dictionary:", dict(request.headers))
|
|
|
519 |
selected_tab = gr.Tabs(selected="tab-chat-with-paper")
|
520 |
if "paper_id" in request.query_params:
|
521 |
paper_id = request.query_params['paper_id']
|
522 |
+
tab_chat_paper = gr.update(visible=True)
|
523 |
|
524 |
+
return calendar, date_range, conferences, hf_options, selected_tab, paper_id, tab_chat_paper
|
525 |
|
526 |
|
527 |
demo.load(
|
|
|
531 |
api_name="update_data",
|
532 |
).then(
|
533 |
fn=echo,
|
534 |
+
outputs=[calendar, date_range_radio, conference_options, hf_options, tabs, arxiv_id, tab_chat_paper],
|
535 |
api_name=False,
|
536 |
).then(
|
537 |
# New then to handle LoginButton and HTML components
|
df/PaperCentral.py
CHANGED
@@ -483,7 +483,7 @@ class PaperCentral:
|
|
483 |
neurips_id = re.search(r'id=([^&]+)', row["proceedings"])
|
484 |
if neurips_id:
|
485 |
neurips_id = neurips_id.group(1)
|
486 |
-
return f'<a href="/?tab=tab-chat-with-paper&paper_id={neurips_id}" id="custom_button" target="_blank"
|
487 |
else:
|
488 |
return ""
|
489 |
|
|
|
483 |
neurips_id = re.search(r'id=([^&]+)', row["proceedings"])
|
484 |
if neurips_id:
|
485 |
neurips_id = neurips_id.group(1)
|
486 |
+
return f'<a href="/?tab=tab-chat-with-paper&paper_id={neurips_id}" id="custom_button" target="_blank">✨ Chat with paper</a>'
|
487 |
else:
|
488 |
return ""
|
489 |
|
paper_chat_tab.py
CHANGED
@@ -167,7 +167,7 @@ def create_chat_interface(model_name, paper_content, hf_token_input):
|
|
167 |
# Load the tokenizer from Hugging Face
|
168 |
# tokenizer = AutoTokenizer.from_pretrained(model_name)
|
169 |
tokenizer = AutoTokenizer.from_pretrained("meta-llama/Llama-3.2-1B-Instruct",
|
170 |
-
token=os.
|
171 |
tokenizer_cache[model_name] = tokenizer
|
172 |
else:
|
173 |
tokenizer = tokenizer_cache[model_name]
|
@@ -241,7 +241,7 @@ def create_chat_interface(model_name, paper_content, hf_token_input):
|
|
241 |
final_messages.extend(messages)
|
242 |
|
243 |
# Use the Hugging Face token if provided
|
244 |
-
api_key = hf_token_value or os.
|
245 |
if not api_key:
|
246 |
raise ValueError("API token is not provided.")
|
247 |
|
|
|
167 |
# Load the tokenizer from Hugging Face
|
168 |
# tokenizer = AutoTokenizer.from_pretrained(model_name)
|
169 |
tokenizer = AutoTokenizer.from_pretrained("meta-llama/Llama-3.2-1B-Instruct",
|
170 |
+
token=os.environ.get("HF_TOKEN"))
|
171 |
tokenizer_cache[model_name] = tokenizer
|
172 |
else:
|
173 |
tokenizer = tokenizer_cache[model_name]
|
|
|
241 |
final_messages.extend(messages)
|
242 |
|
243 |
# Use the Hugging Face token if provided
|
244 |
+
api_key = hf_token_value or os.environ.get("SAMBANOVA_API_KEY")
|
245 |
if not api_key:
|
246 |
raise ValueError("API token is not provided.")
|
247 |
|