# --- Web-scrape residue from the Hugging Face Spaces file viewer ---
# (status badges, file size, git blame hashes, and line-number gutter;
# not part of the program — preserved below as comments)
# Spaces: Running / Running
# File size: 969 Bytes
# 4961115 96da553 90b10ad 96da553 fa2b247 90b10ad 4961115 fa2b247 90b10ad 89a4135 fa2b247 4961115 fa2b247 90b10ad fa2b247 89a4135 fa2b247 89a4135 cb2c11c 89a4135 cb2c11c 89a4135 cb2c11c 89a4135 cb2c11c 8a2f6f7 89a4135 fa2b247 |
# 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 |
import gradio as gr
import torch
from fastai.text.all import *
from blurr.text.data.all import *
from blurr.text.modeling.all import *
from transformers import BartForConditionalGeneration, BartTokenizer
# Load the pre-trained tokenizer that matches the fine-tuned summarization
# model. "facebook/bart-large-cnn" is BART fine-tuned on CNN/DailyMail;
# "facebook/bart-base" is a smaller alternative.
pretrained_model_name = "facebook/bart-large-cnn" # Or "facebook/bart-base"
# NOTE(review): BartTokenizer is not explicitly imported above (only
# BartForConditionalGeneration is) — presumably re-exported by one of the
# wildcard imports; confirm, or add `from transformers import BartTokenizer`.
# NOTE(review): hf_tokenizer is not referenced anywhere else in this script;
# the exported learner loaded in summarize() appears to carry its own tokenizer.
hf_tokenizer = BartTokenizer.from_pretrained(pretrained_model_name)
def summarize(article):
    """Summarize ``article`` with the exported blurr/BART learner.

    Parameters
    ----------
    article : str
        The article text to summarize.

    Returns
    -------
    str
        The generated summary — the first candidate returned by
        ``blurr_generate``.
    """
    # Load the exported learner once and cache it on the function object.
    # The original reloaded 'article_highlights.pkl' from disk on EVERY
    # request, which is slow and wasteful for a serving endpoint; the
    # cached learner is reused across calls with identical behavior.
    if not hasattr(summarize, "_learn"):
        summarize._learn = load_learner('article_highlights.pkl')
    # blurr_generate returns a list of generated texts; take the first.
    return summarize._learn.blurr_generate(article)[0]
# Assemble the Gradio UI configuration, then build and launch the app.
interface_config = dict(
    fn=summarize,
    inputs="text",
    outputs="text",
    title="Article Summarizer (Part 3)",
    description="Enter an article and get a summary.",
    examples=[["This is an example article..."]],
)
iface = gr.Interface(**interface_config)
# Start the local web server for the demo.
iface.launch()
# | (trailing file-viewer gutter residue)