maxidl committed on
Commit
abced76
·
1 Parent(s): 79a0f4b
Files changed (1) hide show
  1. app.py +24 -5
app.py CHANGED
@@ -14,17 +14,36 @@ model = AutoModelForCausalLM.from_pretrained(
14
  tokenizer = AutoTokenizer.from_pretrained(model_name)
15
 
16
 
17
- title = "# Paper Review Generator"
18
- steps = """1. Converts uploaded pdf file to markdown. You can edit the intermediate markdown output.\n2. Generates a review for the paper"""
 
19
 
20
  def process_file(file):
21
-
22
  return "Processed file"
23
 
24
 
25
-
26
  def generate(paper_text):
27
- return "Success"
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
28
 
29
 
30
 
 
14
  tokenizer = AutoTokenizer.from_pretrained(model_name)
15
 
16
 
17
# UI copy: temporary placeholder strings for the Gradio page header.
# The original instructions text is kept commented out below so it can be
# restored once the pipeline is wired up again.
title = "# Placeholder Title"
steps = """Placeholder Description"""
# steps = """1. Converts uploaded pdf file to markdown. You can edit the intermediate markdown output.\n2. Generates a review for the paper"""
 
21
def process_file(file):
    """Stub handler for an uploaded file.

    Currently ignores its argument and always returns the fixed status
    string "Processed file"; real PDF-to-markdown conversion is TODO.
    """
    status = "Processed file"
    return status
23
 
24
 
25
@spaces.GPU(duration=60)
def generate(paper_text):
    """Generate a chat-model response for the given paper text.

    Args:
        paper_text: Raw text of the paper; sent as the user message.

    Returns:
        The decoded model response as a string, with the prompt tokens
        stripped from the generated sequence.
    """
    messages = [
        {"role": "system", "content": "You are a pirate."},
        {"role": "user", "content": paper_text}
    ]
    # Bug fix: without return_dict=True / return_tensors="pt",
    # apply_chat_template returns a plain list of token ids, which has no
    # `.to(...)` method, cannot be unpacked with `**model_inputs`, and has
    # no `.input_ids` attribute — all of which the code below relies on.
    model_inputs = tokenizer.apply_chat_template(
        messages,
        add_generation_prompt=True,
        return_dict=True,
        return_tensors="pt"
    ).to(model.device)

    generated_ids = model.generate(
        **model_inputs,
        max_new_tokens=256
    )
    # Drop the prompt prefix from each generated sequence so only the
    # newly generated tokens are decoded.
    generated_ids = [
        output_ids[len(input_ids):] for input_ids, output_ids in zip(model_inputs.input_ids, generated_ids)
    ]

    response = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0]
    return response
47
 
48
 
49