# Extractive summarization with BERT (the `summarizer` package)
from summarizer import Summarizer

bert_model = Summarizer()

# text = """One month after the United States began what has become a troubled rollout of a national COVID vaccination campaign, the effort is finally gathering real steam.
# Close to a million doses -- over 951,000, to be more exact -- made their way into the arms of Americans in the past 24 hours, the U.S. Centers for Disease Control and Prevention reported Wednesday. That's the largest number of shots given in one day since the rollout began and a big jump from the previous day, when just under 340,000 doses were given, CBS News reported.
# That number is likely to jump quickly after the federal government on Tuesday gave states the OK to vaccinate anyone over 65 and said it would release all the doses of vaccine it has available for distribution. Meanwhile, a number of states have now opened mass vaccination sites in an effort to get larger numbers of people inoculated, CBS News reported."""

def abstractive_text(text):
    # Summarize the input, keeping roughly 10% of the original sentences.
    summary_text = bert_model(text, ratio=0.1)
    return summary_text
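
# Example usage (a minimal sketch): uncomment the sample news text above, then:
# print(abstractive_text(text))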


import gradio as gr

sum_iface = gr.Interface(fn=abstractive_text, inputs=["text"], outputs=["text"], title="Case Summary Generation").queue()



# Text generation with BLOOM (bigscience/bloom-560m)
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM

tokenizer = AutoTokenizer.from_pretrained("bigscience/bloom-560m")
model = AutoModelForCausalLM.from_pretrained("bigscience/bloom-560m")

def get_result_with_bloom(text):
    # Generate up to 200 tokens with top-k / nucleus sampling.
    result_length = 200
    inputs1 = tokenizer(text, return_tensors="pt")
    output1 = tokenizer.decode(
        model.generate(
            inputs1["input_ids"],
            max_length=result_length,
            do_sample=True,
            top_k=50,
            top_p=0.9,
        )[0]
    )
    return output1
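
# Example usage (a minimal sketch; the prompt below is illustrative, not from the app):
# print(get_result_with_bloom("Mental health support is important because"))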



txtgen_iface = gr.Interface(fn=get_result_with_bloom, inputs="text", outputs=["text"], title="Text Generation").queue()


# Drug named-entity recognition with the Med7 clinical model
import spacy
import spacy.cli
import en_core_med7_lg

# Note: this downloads the general English model; the NER below uses the Med7 model loaded next.
spacy.cli.download("en_core_web_lg")

med7 = en_core_med7_lg.load()

# create distinct colours for labels
col_dict = {}
seven_colours = ['#e6194B', '#3cb44b', '#ffe119', '#ffd8b1', '#f58231', '#f032e6', '#42d4f4']
for label, colour in zip(med7.pipe_labels['ner'], seven_colours):
    col_dict[label] = colour

options = {'ents': med7.pipe_labels['ner'], 'colors':col_dict}

#text = 'A patient was prescribed Magnesium hydroxide 400mg/5ml suspension PO of total 30ml bid for the next 5 days.'
def ner_drugs(text):
  doc = med7(text)
  # displacy's jupyter rendering only works inside a notebook, so it is skipped here;
  # the Gradio app returns the (entity text, label) pairs directly.
  # spacy.displacy.render(doc, style='ent', jupyter=True, options=options)
  return [(ent.text, ent.label_) for ent in doc.ents]
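
# Example usage (a minimal sketch, using the commented-out prescription above):
# print(ner_drugs('A patient was prescribed Magnesium hydroxide 400mg/5ml suspension PO of total 30ml bid for the next 5 days.'))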



med_iface = gr.Interface(fn=ner_drugs, inputs="text", outputs=["text"], title="Drugs Named Entity Recognition").queue()


# Text-to-image with Stable Diffusion
# Requires: pip install diffusers accelerate (plus a recent torch build)
from diffusers import StableDiffusionPipeline

pipe = StableDiffusionPipeline.from_pretrained('CompVis/stable-diffusion-v1-4')
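
# Optional speed-up (a minimal sketch, assuming a CUDA GPU is available):
# load half-precision weights and move the pipeline to the GPU.
# pipe = StableDiffusionPipeline.from_pretrained('CompVis/stable-diffusion-v1-4', torch_dtype=torch.float16)
# pipe = pipe.to("cuda")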



def stable_image(text):
    # Run the prompt through the diffusion pipeline and return the first generated image.
    prompt = text
    return pipe(prompt).images[0]
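
# Example usage (a minimal sketch; the prompt and filename are illustrative):
# stable_image("a calming watercolor of a sunrise").save("sample.png")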



stable_iface = gr.Interface(fn=stable_image, inputs="text", outputs=["image"], title="Text to Image").queue()



# Combine the four apps into a single tabbed Gradio demo.
demo = gr.TabbedInterface(
    [txtgen_iface, sum_iface, med_iface, stable_iface],
    ["Text Generation", "Summary Generation", "Drug Named-entity Recognition", "Text to Image"],
    title="United We Care",
)

demo.queue()
demo.launch(share=False)