w601sxs committed on
Commit
d5a1295
·
1 Parent(s): d3cca0e

pushing to hub

Browse files
Files changed (2) hide show
  1. app.py +30 -0
  2. requirements.txt +2 -0
app.py ADDED
@@ -0,0 +1,30 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import re
import urllib
import urllib.parse

import gradio as gr
import galai as gal
5
# Load the Galactica "base" checkpoint once at import time.
# NOTE(review): this is heavy — it downloads/loads model weights before the
# app starts; confirm the "base" variant fits the deployment host's memory.
model = gal.load_model("base")
8
def cite(prompt):
    """Suggest citations for *prompt* using the Galactica model.

    Appends the ``[START_REF]`` control token so the model continues with
    reference text, extracts every ``[START_REF]...[END_REF]`` span, and
    returns the unique references as an HTML fragment: one Google Scholar
    search link per reference, joined with ``<br>``.

    Parameters:
        prompt: free-text claim or sentence to find supporting citations for.

    Returns:
        str: HTML anchor tags (``target="_blank"``), ``<br>``-separated.
        Empty string when the model emits no reference spans.
    """
    text = model.generate(prompt + '[START_REF]')
    # Non-greedy match so each [START_REF]...[END_REF] span is captured
    # separately instead of one greedy span swallowing them all.
    references = re.findall(r'\[START_REF\](.*?)\[END_REF\]', text)

    base_url = 'https://scholar.google.com/scholar?q='
    # Build one Scholar search link per extracted reference. Uses
    # urllib.parse.quote explicitly (the original relied on bare
    # `import urllib` incidentally exposing the `parse` submodule).
    links = [
        f'<a href="{base_url}{urllib.parse.quote(ref)}" target="_blank">{ref}</a>'
        for ref in references
    ]

    # De-duplicate while preserving the model's output order; the original
    # list(set(...)) shuffled results non-deterministically between runs.
    return '<br>'.join(dict.fromkeys(links))
23
+
24
+ iface = gr.Interface(fn=cite, inputs="text", outputs="html",examples=["The cosine scheduler has been used in several papers as a scheduler for training large language models.",
25
+ "We propose a new simple network architecture based on the original Transformer.",
26
+ "The loss scales as a power-law with model size, dataset size, and the amount of compute used for training, with some trends spanning more than seven orders of magnitude.",
27
+ "Molecular species that emerge and destroy during the birth of stars can be used to track the starforming processes within molecular clumps and cores",
28
+ "Large Language Models (LLMs) have issues with document question answering (QA) in situations where the document is unable to fit in the small context length of an LLM"
29
+ ])
30
+ iface.launch()
requirements.txt ADDED
@@ -0,0 +1,2 @@
 
 
 
1
+ galai
2
+ gradio