import gradio as gr
import wikipedia
from transformers import pipeline
import os

## Restrict execution to the 0th GPU
os.environ["CUDA_VISIBLE_DEVICES"] = "0"


def summarize(text):
    ## Default summarization pipeline (a BART-CNN based checkpoint)
    summarizer = pipeline("summarization")
    ## To use the t5-base model for summarization instead:
    ## summarizer = pipeline("summarization", model="t5-base", tokenizer="t5-base", framework="tf")
    summary_text = summarizer(text, max_length=100, min_length=5, do_sample=False)[0]['summary_text']
    print(f'Length of initial text: {len(text)}')
    print(f'Length of summary: {len(summary_text)}')
    print(summary_text)
    return summary_text


def greet(name):
    return "Hello " + name + "!!"


def get_ocr():
    ## Placeholder for OCR on uploaded files (not implemented yet)
    return ''


def search_wiki(text):
    ## Return a list of Wikipedia page titles matching the query
    return wikipedia.search(text)


def get_wiki(search_term):
    ## Fetch the Wikipedia summary for the search term, then summarize it further
    text = wikipedia.summary(search_term)
    orig_text_len = len(text)
    text = summarize(text)
    sum_length = len(text)
    return [text, orig_text_len, sum_length]


# def inference(file):
#     get_ocr()
#     model = AutoModelForSeq2SeqLM.from_pretrained("sgugger/my-awesome-model")

## Output components: the summary plus the lengths of the original and summarized text
out_sum_text = gr.Textbox(label='Summarized Text')
out_orig_text_len = gr.Number(label='Original Text Length')
out_sum_text_len = gr.Number(label='Summarized Text Length')

iface = gr.Interface(
    fn=get_wiki,
    inputs=gr.Textbox(lines=2, placeholder="Wikipedia search term here...", label='Search Term'),
    outputs=[out_sum_text, out_orig_text_len, out_sum_text_len],
)

iface.launch()
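
## Optional notes (assumptions, not part of the original script):
## - get_wiki can be exercised directly, without the UI, e.g. from a REPL:
##     summary, orig_len, sum_len = get_wiki("Machine learning")
##     print(orig_len, sum_len)
## - launch(share=True) additionally serves the demo on a temporary public Gradio URL:
##     iface.launch(share=True)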