import os

import cv2
import gradio as gr
import numpy as np
import requests
import google.generativeai as genai
from transformers import pipeline
# import g4f  # only needed if the commented-out g4f fallbacks below are re-enabled

theme = gr.themes.Base(
    primary_hue="cyan",
    secondary_hue="blue",
    neutral_hue="slate",
)

# The Hugging Face Inference API token and the Google Generative AI key are read from the environment.
API_KEY = os.getenv("API_KEY")
genai.configure(api_key=os.environ["GOOGLE_API_KEY"])
txt_model = genai.GenerativeModel("gemini-pro")

BRAIN_TUMOR_API_URL = "https://api-inference.huggingface.co/models/Devarshi/Brain_Tumor_Classification"
BREAST_CANCER_API_URL = "https://api-inference.huggingface.co/models/MUmairAB/Breast_Cancer_Detector"
# ALZHEIMER_API_URL = "https://api-inference.huggingface.co/models/dewifaj/alzheimer_mri_classification"
# Raw image bytes are posted to the Inference API, so only the auth header is needed.
headers = {"Authorization": f"Bearer {API_KEY}"}
alzheimer_classifier = pipeline("image-classification", model="dewifaj/alzheimer_mri_classification")
# breast_cancer_classifier = pipeline("image-classification", model="MUmairAB/Breast_Cancer_Detector")
# brain_tumor_classifier = pipeline("image-classification", model="Devarshi/Brain_Tumor_Classification")
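
# The two remote classifiers below follow the same Hugging Face Inference API pattern:
# encode the image as JPEG bytes, POST them to the model URL, and turn the returned
# list of {"label": ..., "score": ...} dicts into a {label: score} mapping. A minimal
# sketch of that shared call (the helper name query_hf_inference is hypothetical and
# not used by this app):
#
#     def query_hf_inference(api_url, image):
#         _, buffer = cv2.imencode(".jpg", np.array(image, dtype=np.uint8))
#         response = requests.post(api_url, headers=headers, data=buffer.tobytes())
#         return {item["label"]: item["score"] for item in response.json()}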


# Classify an Alzheimer's MRI image with the locally loaded transformers pipeline.
def classify_alzheimer(image):
    result = alzheimer_classifier(image)
    # The pipeline returns a list of {"label": ..., "score": ...} dicts.
    prediction = {}
    for ele in result:
        prediction[ele["label"]] = ele["score"]
    return prediction
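
# Example call (using one of the sample images bundled with the app under diseases/Alzheimer/):
#     from PIL import Image
#     classify_alzheimer(Image.open("diseases/Alzheimer/mild_12.jpg"))
#     # -> a dict of class scores, e.g. {"Mild_Demented": 0.9, ...} (exact labels depend on the model)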


# Classify a breast cancer image via the Hugging Face Inference API.
def classify_breast_cancer(image):
    # Encode the image as JPEG bytes for the request body.
    image_data = np.array(image, dtype=np.uint8)
    _, buffer = cv2.imencode('.jpg', image_data)
    binary_data = buffer.tobytes()

    response = requests.post(BREAST_CANCER_API_URL, headers=headers, data=binary_data)
    # The API returns a list of {"label": ..., "score": ...} dicts.
    prediction = {}
    for ele in response.json():
        prediction[ele["label"]] = ele["score"]

    return prediction


# Classify a brain tumor image via the Hugging Face Inference API.
def classify_brain_tumor(image):
    # Encode the image as JPEG bytes for the request body.
    image_data = np.array(image, dtype=np.uint8)
    _, buffer = cv2.imencode('.jpg', image_data)
    binary_data = buffer.tobytes()

    response = requests.post(BRAIN_TUMOR_API_URL, headers=headers, data=binary_data)
    # The API returns a list of {"label": ..., "score": ...} dicts.
    prediction = {}
    for ele in response.json():
        prediction[ele["label"]] = ele["score"]

    return prediction


# Create the Gradio interface
with gr.Blocks(theme=theme) as Alzheimer:
    with gr.Row():
        with gr.Column():
            gr.Markdown("# Alzheimer Detection and Classification")
            gr.Markdown("> Classify the Alzheimer's MRI into Mild Demented, Very Mild Demented, Moderate Demented, and Non Demented.")
            image = gr.Image(type="pil")
            output = gr.Label(label='Alzheimer Classification', container=True, scale=2)
            with gr.Row():
                gr.ClearButton([image, output])
                button = gr.Button(value="Submit", variant="primary")
            gr.Examples(inputs=image, fn=classify_alzheimer, examples=[os.path.join(os.path.dirname(__file__), "diseases/Alzheimer/mild_12.jpg"),
                                                                       os.path.join(os.path.dirname(__file__), "diseases/Alzheimer/moderate_21.jpg"),
                                                                       os.path.join(os.path.dirname(__file__), "diseases/Alzheimer/verymild_1013.jpg")])

        button.click(classify_alzheimer, [image], [output])

        def respond(message, history):
            # bot_message = g4f.ChatCompletion.create(
            #     model="gemini",
            #     provider=g4f.Provider.GeminiProChat,
            #     messages=[{"role": "user",
            #                "content": "Your role is Alzheimer Disease Expert. Now I will provide you with the user query. First check if the user query is related to Alzheimer or not. If it is not related to Alzheimer then simply avoid the query by saying this is not my expertise, whereas if related to Alzheimer reply it as usual. Here's the user Query:" + message}],
            # )
            bot_message = txt_model.generate_content("Your role is Alzheimer Disease Expert. Now I will provide you with the user query. First check if the user query is related to Alzheimer or not. If it is not related to Alzheimer then simply avoid the query by saying this is not my expertise, whereas if related to Alzheimer reply it as usual. Here's the user Query:" + message)
            return str(bot_message.text)


        with gr.Column():
            gr.Markdown("# Health Bot for Alzheimer")
            gr.Markdown("> **Note:** The information may not be accurate. Please consult a Doctor before considering any actions.")
            gr.ChatInterface(respond, autofocus=False, examples=["Explain Alzheimer's disease.", "What are the types of Alzheimer's disease?", "Alzheimer's prevention methods."]).queue()


with gr.Blocks(theme=theme) as BreastCancer:
    with gr.Row():
        with gr.Column():
            gr.Markdown("# Breast Cancer Detection and Classification")
            gr.Markdown("> Classify breast cancer from the uploaded image.")
            image = gr.Image()
            output = gr.Label(label='Breast Cancer Classification', container=True, scale=2)
            with gr.Row():
                button = gr.Button(value="Submit", variant="primary")
                gr.ClearButton([image, output])
            gr.Examples(inputs=image, fn=classify_breast_cancer,
                        examples=[os.path.join(os.path.dirname(__file__), "diseases/Breast_Cancer/class0.png"),
                                  os.path.join(os.path.dirname(__file__), "diseases/Breast_Cancer/class0_1.png"),
                                  os.path.join(os.path.dirname(__file__), "diseases/Breast_Cancer/class1.png"),
                                  os.path.join(os.path.dirname(__file__), "diseases/Breast_Cancer/class1_1.png")])

        button.click(classify_breast_cancer, [image], [output])

        def respond(message, history):
            # bot_message = g4f.ChatCompletion.create(
            #     model="gpt-4-32k-0613",
            #     provider=g4f.Provider.GeekGpt,
            #     messages=[{"role": "user",
            #                "content": "Your role is Breast_Cancer Disease Expert. Now I will provide you with the user query. First check if the user query is related to Breast_Cancer or not. If it is not related to Breast_Cancer then simply avoid the query by saying this is not my expertise, whereas if related to Breast_Cancer reply it as usual. Here's the user Query:" + message}],
            # )
            bot_message = txt_model.generate_content("Your role is Breast_Cancer Disease Expert. Now I will provide you with the user query. First check if the user query is related to Breast_Cancer or not. If it is not related to Breast_Cancer then simply avoid the query by saying this is not my expertise, whereas if related to Breast_Cancer reply it as usual. Here's the user Query:" + message)
            yield str(bot_message.text)

        with gr.Column():
            gr.Markdown("# Health Bot for Breast Cancer")
            gr.Markdown("> **Note:** The information may not be accurate. Please consult a Doctor before considering any actions.")
            gr.ChatInterface(respond, autofocus=False, examples=["Explain Breast Cancer.", "What are the types of Breast Cancer?", "Breast Cancer Prevention methods."]).queue()


with gr.Blocks(theme=theme) as BrainTumor:
    with gr.Row():
        with gr.Column():
            gr.Markdown("# Brain Tumor Detection and Classification")
            gr.Markdown("> Classify the brain tumor into glioma, meningioma, pituitary, or no tumor.")
            image = gr.Image()
            output = gr.Label(label='Brain Tumor Classification', container=True, scale=2)
            with gr.Row():
                button = gr.Button(value="Submit", variant="primary")
                gr.ClearButton([image, output])
            gr.Examples(inputs=image, fn=classify_brain_tumor,
                    examples=[os.path.join(os.path.dirname(__file__), "diseases/Brain_Tumor/glioma.jpg"),
                              os.path.join(os.path.dirname(__file__), "diseases/Brain_Tumor/meningioma.jpg"),
                              os.path.join(os.path.dirname(__file__), "diseases/Brain_Tumor/no_tumor.jpg"),
                              os.path.join(os.path.dirname(__file__), "diseases/Brain_Tumor/pituitary.jpg")])

        button.click(classify_brain_tumor, [image], [output])

        def respond(message, history):
            # bot_message = g4f.ChatCompletion.create(
            #     model="gpt-4-32k-0613",
            #     provider=g4f.Provider.GeekGpt,
            #     messages=[{"role": "user",
            #                "content": "Your role is Brain Tumor Disease Expert. Now I will provide you with the user query. First check if the user query is related to Brain Tumor or not. If it is not related to Brain Tumor then simply avoid the query by saying this is not my expertise, whereas if related to Brain Tumor reply it as usual. Here's the user Query:" + message}],
            # )
            bot_message = txt_model.generate_content("Your role is Brain Tumor Disease Expert. Now I will provide you with the user query. First check if the user query is related to Brain Tumor or not. If it is not related to Brain Tumor then simply avoid the query by saying this is not my expertise, whereas if related to Brain Tumor reply it as usual. Here's the user Query:" + message)
            return str(bot_message.text)

        with gr.Column():
            gr.Markdown("# Health Bot for Brain Tumor")
            gr.Markdown("> **Note:** The information may not be accurate. Please consult a Doctor before considering any actions.")
            gr.ChatInterface(respond, autofocus=False, examples=["Explain Brain Tumor.", "What are the types of Brain Tumor?", "Brain Tumor Prevention methods."]).queue()


Main = gr.TabbedInterface([Alzheimer, BreastCancer, BrainTumor], ["Alzheimer", "Breast Cancer", "Brain Tumor"],
                          theme=theme,
                          css=".gradio-container {  background: rgba(255, 255, 255, 0.2) !important; box-shadow: 0 8px 32px 0 rgba( 31, 38, 135, 0.37 ) !important; backdrop-filter: blur( 10px ) !important; -webkit-backdrop-filter: blur( 10px ) !important; border-radius: 10px !important; border: 1px solid rgba( 0, 0, 0, 0.5 ) !important;}")

Main.launch()
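
# Assumed local setup (not part of the original file): export API_KEY (a Hugging Face token)
# and GOOGLE_API_KEY, install gradio, transformers, opencv-python, numpy, requests and
# google-generativeai, then run this script with Python to start the Gradio app.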