habdine committed on
Commit
92b59a8
·
verified ·
1 Parent(s): 09cc2a0

Upload 3 files

Browse files
Files changed (3) hide show
  1. README.md +5 -7
  2. app.py +43 -0
  3. requirements.txt +2 -0
README.md CHANGED
@@ -1,14 +1,12 @@
1
  ---
2
- title: GreekBART News24 Abstract
3
- emoji:
4
  colorFrom: pink
5
- colorTo: indigo
6
  sdk: gradio
7
- sdk_version: 5.4.0
8
  app_file: app.py
9
  pinned: false
10
- license: apache-2.0
11
- short_description: Greek News Summarizer
12
  ---
13
 
14
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
1
  ---
2
+ title: GreekBART News Summarizer
3
+ emoji: 🌐
4
  colorFrom: pink
5
+ colorTo: gray
6
  sdk: gradio
7
+ sdk_version: 5.1.0
8
  app_file: app.py
9
  pinned: false
 
 
10
  ---
11
 
12
+ Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
app.py ADDED
@@ -0,0 +1,43 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from transformers import pipeline
2
+ import gradio as gr
3
+ import spaces
4
+ import os
5
+ from threading import Thread
6
+ from typing import Iterator
7
+ import torch
8
+
9
+ from transformers import (
10
+ AutoTokenizer,
11
+ AutoModelForSeq2SeqLM,
12
+ TextIteratorStreamer
13
+ )
14
+ device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
15
+ tokenizer = AutoTokenizer.from_pretrained("dascim/greekbart-news24-abstract")
16
+ model = AutoModelForSeq2SeqLM.from_pretrained("dascim/greekbart-news24-abstract")
17
+ model.eval()
@spaces.GPU(duration=90)
def get_input(text) -> Iterator[str]:
    """Stream an abstractive summary of *text*, token by token.

    Args:
        text: A Greek news article (up to the model's 512-token input limit).

    Yields:
        The summary accumulated so far; each yield extends the previous one,
        which lets the Gradio UI display the text as it is generated.
    """
    streamer = TextIteratorStreamer(tokenizer, timeout=20.0, skip_special_tokens=True)
    input_ids = tokenizer.encode(text, add_special_tokens=True, return_tensors='pt')
    # BUG FIX: place the inputs on the model's device. The original instead
    # passed `device=device` (and a redundant `tokenizer=tokenizer`) inside
    # generate_kwargs; `device` is not a valid `generate()` argument and
    # raises "model_kwargs are not used by the model".
    input_ids = input_ids.to(model.device)

    generate_kwargs = dict(
        input_ids=input_ids,
        streamer=streamer,
        max_new_tokens=120,
        do_sample=False,  # deterministic greedy decoding
        num_beams=1,
    )

    # Run generation in a background thread so this generator can consume the
    # streamer concurrently and yield partial results to the UI.
    worker = Thread(target=model.generate, kwargs=generate_kwargs)
    worker.start()

    chunks = []
    # Renamed loop variable: the original `for text in streamer` shadowed the
    # `text` parameter.
    for chunk in streamer:
        chunks.append(chunk)
        yield "".join(chunks)
    worker.join()  # don't leave the generation thread dangling
# Wire the streaming summarizer into a minimal Gradio UI: one text box in,
# the (progressively updated) summary text out.
iface = gr.Interface(
    fn=get_input,
    inputs="text",
    outputs="text",
    title="Greek News Summarizer",
    description="Enter your text (maximum of 512 tokens news article) to get a summary",
)
iface.launch()
requirements.txt ADDED
@@ -0,0 +1,2 @@
 
 
 
1
+ torch
2
+ transformers