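# app.py for the "United We Care" demo Space: a Gradio app with four tabs
# (case summarization, BLOOM text generation, Med7 drug NER, and Stable Diffusion
# text-to-image), assembled into a TabbedInterface at the bottom of this file.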
from summarizer import Summarizer
bert_model = Summarizer()
# text = """One month after the United States began what has become a troubled rollout of a national COVID vaccination campaign, the effort is finally gathering real steam.
# Close to a million doses -- over 951,000, to be more exact -- made their way into the arms of Americans in the past 24 hours, the U.S. Centers for Disease Control and Prevention reported Wednesday. That's the largest number of shots given in one day since the rollout began and a big jump from the previous day, when just under 340,000 doses were given, CBS News reported.
# That number is likely to jump quickly after the federal government on Tuesday gave states the OK to vaccinate anyone over 65 and said it would release all the doses of vaccine it has available for distribution. Meanwhile, a number of states have now opened mass vaccination sites in an effort to get larger numbers of people inoculated, CBS News reported."""
def abstractive_text(text):
    # Despite the name, the BERT Summarizer is extractive: it selects the most
    # salient sentences. ratio=0.1 keeps roughly 10% of the input sentences.
    summary_text = bert_model(text, ratio=0.1)
    return summary_text
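# Hypothetical usage: abstractive_text(long_case_text) would return roughly the top
# 10% of sentences from long_case_text as a single summary string.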
import gradio as gr
sum_iface = gr.Interface(fn=abstractive_text, inputs=["text"], outputs=["text"], title="Case Summary Generation").queue()
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM
tokenizer = AutoTokenizer.from_pretrained("bigscience/bloom-560m")
model = AutoModelForCausalLM.from_pretrained("bigscience/bloom-560m")
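# bloom-560m is the smallest BLOOM checkpoint (~560M parameters), small enough for a
# CPU-only demo; larger BLOOM variants would need substantially more memory.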
def get_result_with_bloom(text):
    # Generate up to 200 tokens with sampling: top_k keeps the 50 most likely tokens
    # and top_p (nucleus sampling) keeps the smallest set covering 90% probability.
    result_length = 200
    inputs1 = tokenizer(text, return_tensors="pt")
    output1 = tokenizer.decode(model.generate(inputs1["input_ids"],
                                              max_length=result_length,
                                              do_sample=True,
                                              top_k=50,
                                              top_p=0.9)[0])
    return output1
txtgen_iface = gr.Interface(fn=get_result_with_bloom, inputs="text", outputs=["text"], title="Text Generation").queue()
import spacy
import en_core_med7_lg

# Load the Med7 clinical NER model (installed as the en_core_med7_lg package);
# no separate spacy.cli download is needed for it.
med7 = en_core_med7_lg.load()
# create distinct colours for labels
col_dict = {}
seven_colours = ['#e6194B', '#3cb44b', '#ffe119', '#ffd8b1', '#f58231', '#f032e6', '#42d4f4']
for label, colour in zip(med7.pipe_labels['ner'], seven_colours):
col_dict[label] = colour
options = {'ents': med7.pipe_labels['ner'], 'colors':col_dict}
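# Med7 recognises seven medication-related entity types (e.g. DRUG, STRENGTH, DOSAGE,
# FREQUENCY), hence the seven colours; `options` is the displacy colour mapping for them.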
#text = 'A patient was prescribed Magnesium hydroxide 400mg/5ml suspension PO of total 30ml bid for the next 5 days.'
def ner_drugs(text):
    doc = med7(text)
    # displacy.render with jupyter=True only works inside a notebook; in this Gradio
    # app we return the (entity text, label) pairs directly instead.
    # spacy.displacy.render(doc, style='ent', options=options)
    return [(ent.text, ent.label_) for ent in doc.ents]
med_iface = gr.Interface(fn=ner_drugs, inputs="text", outputs=["text"], title="Drugs Named Entity Recognition").queue()
from diffusers import StableDiffusionPipeline
# Requires the diffusers and accelerate packages and a recent torch build.
pipe = StableDiffusionPipeline.from_pretrained('CompVis/stable-diffusion-v1-4')
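# Optional: if a CUDA GPU is available, running the diffusion pipeline on it makes
# image generation far faster than on CPU.
import torch
if torch.cuda.is_available():
    pipe = pipe.to("cuda")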
def stable_image(text):
    # Pass the prompt through the pipeline and return the first generated image.
    prompt = text
    return pipe(prompt).images[0]
stable_iface = gr.Interface(fn=stable_image, inputs="text", outputs=["image"], title="Text to Image").queue()
demo = gr.TabbedInterface(
    [txtgen_iface, sum_iface, med_iface, stable_iface],
    ["Text Generation", "Summary Generation", "Drug Named-entity Recognition", "Text to Image"],
    title="United We Care",
)
demo.queue()
demo.launch(share=False)