"""Gradio web app for MRI lesion staging.

Pipeline: load an MRI volume and its ROI mask, preprocess (crop to the ROI
bounding box, clip outliers, resize, normalize), run a ViT-based fusion
network slice-by-slice, then combine the deep-learning probabilities with
normalized clinical features through classic ML models (LC_model -> DR_model
-> TF_model) to produce the final stage probabilities shown in the UI.
"""

import os
import pickle
import sys
from typing import Tuple, Type

import gradio as gr
import numpy as np
import SimpleITK as sitk
import torch
import torch.nn.functional as F  # was missing: _get_activation_fn uses F.relu etc.
from scipy.ndimage import zoom
from torch import Tensor, nn

from model.Vision_Transformer_with_mask import vit_base_patch16_224, Attention, CrossAttention, Attention_ori
from model.CoordAttention import *

try:
    # tqdm was called in inference() but never imported (NameError);
    # degrade to plain iteration if it is unavailable.
    from tqdm import tqdm
except ImportError:
    def tqdm(iterable, total=None):
        return iterable


def load_from_pkl(load_path):
    """Unpickle and return the object stored at *load_path*.

    NOTE(security): ``pickle.load`` executes arbitrary code — only ever load
    trusted, locally produced files.
    """
    with open(load_path, 'rb') as data_input:
        return pickle.load(data_input)


class MLP_att_out(nn.Module):
    """Output MLP head: linear -> activation -> dropout -> linear, followed by
    a residual-style doubling and LayerNorm.

    NOTE(review): ``x = x + self.dropout4(x)`` adds the MLP output to itself
    rather than to the block input (the input is overwritten on the previous
    line). This matches the trained checkpoint, so it is kept as-is.
    """

    def __init__(self, input_dim, inter_dim=None, output_dim=None,
                 activation="relu", drop=0.0):
        super().__init__()
        self.input_dim = input_dim
        self.inter_dim = inter_dim
        self.output_dim = output_dim
        # Default hidden/output widths to the input width when not given.
        if inter_dim is None:
            self.inter_dim = input_dim
        if output_dim is None:
            self.output_dim = input_dim
        self.linear1 = nn.Linear(self.input_dim, self.inter_dim)
        self.activation = self._get_activation_fn(activation)
        self.dropout3 = nn.Dropout(drop)
        self.linear2 = nn.Linear(self.inter_dim, self.output_dim)
        self.dropout4 = nn.Dropout(drop)
        self.norm3 = nn.LayerNorm(self.output_dim)

    def forward(self, x):
        x = self.linear2(self.dropout3(self.activation(self.linear1(x))))
        x = x + self.dropout4(x)  # self-residual; see class-level review note
        x = self.norm3(x)
        return x

    def _get_activation_fn(self, activation):
        """Return an activation function given a string."""
        if activation == "relu":
            return F.relu
        if activation == "gelu":
            return F.gelu
        if activation == "glu":
            return F.glu
        raise RuntimeError(F"activation should be relu/gelu, not {activation}.")


class MLPBlock(nn.Module):
    """Plain transformer MLP block: linear -> activation -> linear."""

    def __init__(
        self,
        embedding_dim: int,
        mlp_dim: int,
        act: Type[nn.Module] = nn.GELU,
    ) -> None:
        super().__init__()
        self.lin1 = nn.Linear(embedding_dim, mlp_dim)
        self.lin2 = nn.Linear(mlp_dim, embedding_dim)
        self.act = act()

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        return self.lin2(self.act(self.lin1(x)))


class FusionAttentionBlock(nn.Module):
    def __init__(
        self,
        embedding_dim: int,
        num_heads: int,
        mlp_dim: int = 2048,
        activation: Type[nn.Module] = nn.ReLU,
    ) -> None:
        """
        A transformer block with four layers: (1) self-attention of sparse
        inputs, (2) cross attention of sparse inputs to dense inputs, (3) mlp
        block on sparse inputs, and (4) cross attention of dense inputs to
        sparse inputs.

        Arguments:
          embedding_dim (int): the channel dimension of the embeddings
          num_heads (int): the number of heads in the attention layers
          mlp_dim (int): the hidden dimension of the mlp block
          activation (nn.Module): the activation of the mlp block
        """
        super().__init__()
        self.self_attn = Attention_ori(embedding_dim, num_heads)
        self.norm1 = nn.LayerNorm(embedding_dim)
        self.cross_attn_mask_to_image = CrossAttention(dim=embedding_dim, num_heads=num_heads)
        self.norm2 = nn.LayerNorm(embedding_dim)
        self.mlp = MLPBlock(embedding_dim, mlp_dim, activation)
        self.norm3 = nn.LayerNorm(embedding_dim)
        self.norm4 = nn.LayerNorm(embedding_dim)
        self.cross_attn_image_to_mask = CrossAttention(dim=embedding_dim, num_heads=num_heads)

    def forward(self, img_emb: Tensor, mask_emb: Tensor, atten_mask: Tensor) -> Tensor:
        """Fuse mask-branch embeddings into the image-branch embeddings.

        NOTE(review): ``atten_mask`` is currently unused here (the original
        left a TODO about whether to apply it during cross attention).
        """
        # Self attention on the mask-branch tokens (no residual here:
        # the original overwrites queries with the attention output).
        queries = mask_emb
        attn_out = self.self_attn(queries)
        queries = attn_out
        queries = self.norm1(queries)

        # Cross attention block: mask tokens attending to the image embedding.
        # The two sequences are concatenated along the token axis.
        q = queries
        k = img_emb
        input_x = torch.cat((q, k), dim=1)
        attn_out = self.cross_attn_mask_to_image(input_x)
        queries = queries + attn_out
        queries = self.norm2(queries)

        # MLP block with residual.
        mlp_out = self.mlp(queries)
        queries = queries + mlp_out
        queries = self.norm3(queries)

        # Cross attention block: image embedding attending to the fused tokens.
        q = img_emb
        k = queries
        input_x = torch.cat((q, k), dim=1)
        attn_out = self.cross_attn_image_to_mask(input_x)
        img_emb = img_emb + attn_out
        img_emb = self.norm4(img_emb)

        return img_emb


class my_model7(nn.Module):
    """Dual-pass ViT fusion classifier (variant without attention mask use).

    Runs the same ViT backbone on the full image and on the ROI-masked image,
    fuses the two token streams with self/cross attention plus coordinate
    attention, and classifies with a linear head.
    """

    def __init__(self, pretrained=False, num_classes=3, in_chans=1, img_size=224, **kwargs):
        super().__init__()
        self.backboon1 = vit_base_patch16_224(pretrained=False, in_chans=in_chans,
                                              as_backbone=True, img_size=img_size)
        if pretrained:
            # NOTE(review): requires `timm` and a `load_weights` helper to be
            # importable; neither is imported in this file. Only reachable
            # when pretrained=True (this app constructs with pretrained=False).
            pre_train_model = timm.create_model('vit_base_patch16_224', pretrained=True,
                                                in_chans=in_chans, num_classes=3)
            self.backboon1 = load_weights(self.backboon1, pre_train_model.state_dict())
        # TODO: shared vs. separate backbones / different patch sizes.
        self.self_atten_img = Attention_ori(dim=self.backboon1.embed_dim,
                                            num_heads=self.backboon1.num_heads)
        self.self_atten_mask = Attention_ori(dim=self.backboon1.embed_dim,
                                             num_heads=self.backboon1.num_heads)
        self.cross_atten = FusionAttentionBlock(embedding_dim=self.backboon1.embed_dim,
                                                num_heads=self.backboon1.num_heads)
        self.mlp = MLP_att_out(input_dim=self.backboon1.embed_dim * 3,
                               output_dim=self.backboon1.embed_dim)
        self.attention = CoordAtt(1, 1, 1)
        self.norm1 = nn.LayerNorm(self.backboon1.embed_dim)
        self.norm2 = nn.LayerNorm(self.backboon1.embed_dim)
        self.norm3 = nn.LayerNorm(self.backboon1.embed_dim)
        self.avgpool = nn.AdaptiveAvgPool1d(1)
        self.head = nn.Linear(self.backboon1.embed_dim * 3, num_classes) if num_classes > 0 else nn.Identity()

    def forward(self, img, mask):
        # Two passes through the shared backbone: full image and masked image.
        # A zero channel is concatenated to match the backbone's input layout.
        x1 = self.backboon1(torch.cat((img, torch.zeros_like(img)), dim=1))
        x2 = self.backboon1(torch.cat((img * mask, torch.zeros_like(img)), dim=1))
        # Attention mask produced by the backbone during the last (masked) pass.
        x2_atten_mask = self.backboon1.atten_mask
        # Self-attention + residual on each branch.
        x1_atten = self.self_atten_img(x1)
        x2_atten = self.self_atten_mask(x2)
        x1_out = self.norm1((x1 + x1_atten))
        x2_out = self.norm2((x2 + x2_atten))
        # Cross attention between branches.
        corss_out = self.norm3(self.cross_atten(x1, x2, x2_atten_mask))
        # Concatenate features channel-wise, apply coordinate attention,
        # pool over tokens and classify.
        out = torch.concat((x1_out, corss_out, x2_out), dim=2).permute(0, 2, 1)
        out = self.attention(out)
        out = self.avgpool(out)  # B C 1
        out = torch.flatten(out, 1)
        out = self.head(out)
        return out


# ---------------------------------------------------------------------------
# Module-level state and model loading
# ---------------------------------------------------------------------------
Image_3D = None        # currently loaded MRI volume (numpy, z/y/x)
Current_name = None    # case identifier derived from the uploaded file name

# Clinical-record lookup tables keyed by case name.
ALL_message = load_from_pkl(r'.\label0601.pkl')
ALL_message2 = load_from_pkl(r'.\all_data_label.pkl')
a = ALL_message2['train']
a.update(ALL_message2['val'])
a.update(ALL_message2['test'])
ALL_message2 = a

# Classic ML models: clinical-feature classifier, dimensionality reduction,
# and the final fusion classifier.
LC_model_Paht = r'.\train_ADA_1.pkl'
LC_model = load_from_pkl(LC_model_Paht)['model'][0]
TF_model_Paht = r'.\tf_model.pkl'
TF_model = load_from_pkl(TF_model_Paht)['model']
DR_model = load_from_pkl(TF_model_Paht)['dr']

# Deep-learning model checkpoint.
Model_Paht = r'./model_epoch120.pth.tar'
checkpoint = torch.load(Model_Paht, map_location='cpu')
classnet = my_model7(pretrained=False, num_classes=3, in_chans=1, img_size=224)
classnet.load_state_dict(checkpoint['model_dict'])


def resize3D(img, aimsize, order=3):
    """Resample a 3D array to *aimsize* with spline interpolation.

    :param img: 3D array
    :param aimsize: list, one or three elements, like [256] or [256, 56, 56];
        a ``None`` entry keeps that axis unscaled
    :param order: spline order passed to ``scipy.ndimage.zoom``
    :return: the resampled array
    """
    _shape = img.shape
    if len(aimsize) == 1:
        aimsize = [aimsize[0] for _ in range(3)]
    if aimsize[0] is None:
        return zoom(img, (1, aimsize[1] / _shape[1], aimsize[2] / _shape[2]), order=order)
    if aimsize[1] is None:
        return zoom(img, (aimsize[0] / _shape[0], 1, aimsize[2] / _shape[2]), order=order)
    if aimsize[2] is None:
        return zoom(img, (aimsize[0] / _shape[0], aimsize[1] / _shape[1], 1), order=order)
    return zoom(img, (aimsize[0] / _shape[0], aimsize[1] / _shape[1],
                      aimsize[2] / _shape[2]), order=order)


def get_lc():
    """Build the min-max-normalized clinical feature vector for the current case.

    Returns a 10-element vector: 9 record fields plus an encoded acquisition
    site derived from the case-name suffix.
    """
    global Current_name
    lc_min = np.array([17, 1, 0, 1, 1, 1, 1, 1, 1, 1])
    lc_max = np.array([96, 2, 3, 2, 2, 2, 2, 2, 2, 4])
    lc_key = ['age', 'sex', 'time', 'postpartum', 'traumatism', 'diabetes',
              'high_blood_pressure', 'cerebral_infarction', 'postoperation']
    lc_all = [ALL_message2[Current_name][ii] for ii in lc_key]
    # Encode the acquisition site (case-name suffix) as the 10th feature.
    site_ = Current_name.split('_', 1)[-1]
    site_code = {'A_L': 1, 'A_R': 2, 'B_L': 3, 'B_R': 4}.get(site_)
    if site_code is not None:
        lc_all.append(site_code)
    lc_all = (np.array(lc_all) - lc_min) / (lc_max - lc_min + 1e-12)
    return lc_all


def inference():
    """Run the full staging pipeline on the preprocessed volume.

    Averages slice-wise softmax probabilities from the DL model, combines them
    with clinical-model probabilities, and returns a label->probability dict
    for the Gradio Label output (or ' ' on failure).
    """
    global Image_small_3D
    global ROI_small_3D
    model = classnet
    data_3d = Image_small_3D
    lc_data = get_lc()
    lc_data = np.expand_dims(lc_data, axis=0)
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    # The original never moved the model, causing a device mismatch on CUDA.
    model = model.to(device)
    model.eval()
    try:
        # DL model: classify each coronal slice and average the probabilities.
        with torch.no_grad():
            all_probs = np.empty((0, 3))
            for ii in tqdm(range(0, data_3d.shape[1]), total=data_3d.shape[1]):
                data = torch.from_numpy(data_3d[:, ii, :])
                roi = torch.from_numpy(ROI_small_3D[:, ii, :].astype(np.int8))
                image = torch.unsqueeze(data, 0)
                roi = torch.unsqueeze(torch.unsqueeze(roi, 0), 0).to(device).float()
                patch_data = torch.unsqueeze(image, 0).to(device).float()
                pre_probs = model(patch_data, roi)
                pre_probs = torch.nn.functional.softmax(pre_probs, dim=1)
                all_probs = np.concatenate((all_probs, pre_probs.cpu().numpy()), axis=0)
            dl_prob = np.mean(all_probs, axis=0)
            dl_prob = np.expand_dims(dl_prob, axis=0)
        # Fuse with the clinical model through DR + final classifier.
        lc_prob = LC_model.predict_proba(lc_data)
        feature = DR_model.transform(np.concatenate([dl_prob, lc_prob], axis=1))
        final_p = TF_model.predict_proba(feature)
        final_p = np.round(final_p[0], decimals=2)
        return {'急性期': final_p[0], '亚急性期': final_p[1], '慢性期': final_p[2]}
    except Exception:
        # Keep the UI alive on failure, but surface the error in the console
        # (the original bare ``except:`` silently swallowed everything).
        import traceback
        traceback.print_exc()
        return ' '


def get_Image_reslice(input_file):
    """Load the MRI volume and return one randomly chosen slice per axis."""
    global Image_3D
    global Current_name
    global Input_File
    # Gradio may pass a file wrapper (with .name) or a plain path string.
    if not isinstance(input_file, str):
        input_file = input_file.name
    Input_File = input_file
    print(input_file)
    Image_3D = sitk.GetArrayFromImage(sitk.ReadImage(input_file))
    # Case name = file name without extension and without its trailing suffix.
    Current_name = input_file.split(os.sep)[-1].split('.')[0].rsplit('_', 1)[0]
    # Inverted min-max normalization of the whole volume.
    Image_3D = (np.max(Image_3D) - Image_3D) / (np.max(Image_3D) - np.min(Image_3D))
    random_z = np.random.randint(0, Image_3D.shape[0])
    image_slice_z = Image_3D[random_z, :, :]
    random_y = np.random.randint(0, Image_3D.shape[1])
    image_slice_y = Image_3D[:, random_y, :]
    random_x = np.random.randint(0, Image_3D.shape[2])
    image_slice_x = Image_3D[:, :, random_x]
    return (image_slice_z, image_slice_y, image_slice_x,
            random_z, random_y, random_x, '影像数据加载成功')


def get_ROI(input_file):
    """Load the ROI mask volume and report whether it looks binarized."""
    global ROI_3D
    if not isinstance(input_file, str):
        input_file = input_file.name
    roi_volume = sitk.GetArrayFromImage(sitk.ReadImage(input_file))
    ROI_3D = roi_volume
    unique_elements = np.unique(ROI_3D)
    # The original ``if np.where(unique_elements > 1)[0]:`` raises
    # "truth value of an array is ambiguous" for multi-element results.
    if (unique_elements > 1).any():
        return '这个数据没有经过二值化'
    else:
        return '感兴趣区域加载成功'


def change_image_slice_x(slice):
    """Return sagittal slice *slice* (1-based) as an inverted 0-255 image."""
    # .copy(): the slice is a view; in-place clipping would permanently
    # mutate the global Image_3D on every slider move.
    image_slice = Image_3D[:, :, slice - 1].copy()
    cut_thre = np.percentile(image_slice, 99.9)  # clip the brightest 0.1%
    image_slice[image_slice >= cut_thre] = cut_thre
    image_slice = (((np.max(image_slice) - image_slice)
                    / (np.max(image_slice) - np.min(image_slice))) * 255).astype(np.int16)
    return image_slice


def change_image_slice_y(slice):
    """Return coronal slice *slice* (1-based) as an inverted 0-255 image."""
    image_slice = Image_3D[:, slice - 1, :].copy()  # copy: don't clip the global volume
    cut_thre = np.percentile(image_slice, 99.9)  # clip the brightest 0.1%
    image_slice[image_slice >= cut_thre] = cut_thre
    image_slice = (((np.max(image_slice) - image_slice)
                    / (np.max(image_slice) - np.min(image_slice))) * 255).astype(np.int16)
    return image_slice


def change_image_slice_z(slice):
    """Return axial slice *slice* (1-based) as an inverted 0-255 image."""
    image_slice = Image_3D[slice - 1, :, :].copy()  # copy: don't clip the global volume
    cut_thre = np.percentile(image_slice, 99.9)  # clip the brightest 0.1%
    image_slice[image_slice >= cut_thre] = cut_thre
    image_slice = (((np.max(image_slice) - image_slice)
                    / (np.max(image_slice) - np.min(image_slice))) * 255).astype(np.int16)
    return image_slice


def get_medical_message():
    """Return (past history, present illness) for the current case."""
    global Current_name
    if Current_name is None:
        return '请先加载数据', ' '
    past = ALL_message[Current_name]['past']
    now = ALL_message[Current_name]['now']
    return past, now


def clear_all():
    """Reset global state and all UI widgets to their initial contents."""
    global Image_3D
    global Current_name
    Current_name = None
    Image_3D = None
    return (np.ones((10, 10)), np.ones((10, 10)), np.ones((10, 10)), '', '', ' ',
            "尚未进行预处理 请先预处理再按“分期结果”按钮", "尚未加载影像数据", "尚未加载感兴趣区域")


def get_box(mask):
    """Compute the tight bounding box of the positive region of *mask*.

    :param mask: 3D array, ground-truth/ROI mask
    :return: [dim0min, dim0max, dim1min, dim1max, dim2min, dim2max]
    """
    indexx = np.where(mask > 0.)  # one coordinate array per dimension
    dim0min, dim0max = np.min(indexx[0]), np.max(indexx[0])
    dim1min, dim1max = np.min(indexx[1]), np.max(indexx[1])
    dim2min, dim2max = np.min(indexx[2]), np.max(indexx[2])
    return [dim0min, dim0max, dim1min, dim1max, dim2min, dim2max]


def arry_crop_3D(img, mask, ex_pix):
    """Crop *img* and *mask* to the mask bounding box, expanded by *ex_pix*.

    :param img: 3D array
    :param mask: 3D array (same shape as img)
    :param ex_pix: list [a, b, c] — margin added on each side per dimension,
        in the same dimension order as the input; ex_pix[0] == 0 disables
        expansion (and cropping margin) along dim0 (assumed z axis)
    :return: (cropped_img, cropped_mask)
    """
    if len(ex_pix) == 1:
        ex_pix = [ex_pix[0] for _ in range(3)]
    elif len(ex_pix) == 2:
        print('如果z轴不外扩,第一维请输入0')
        sys.exit()
    [dim0min, dim0max, dim1min, dim1max, dim2min, dim2max] = get_box(mask)
    dim0, dim1, dim2 = img.shape
    # Expand in-plane, clamping to the volume border when the margin would
    # run past it.
    dim1_l_index = np.clip(dim1min - ex_pix[1], 0, dim1)
    dim1_r_index = np.clip(dim1max + ex_pix[1], 0, dim1)
    dim2_l_index = np.clip(dim2min - ex_pix[2], 0, dim2)
    dim2_r_index = np.clip(dim2max + ex_pix[2], 0, dim2)
    fina_img = img[:, dim1_l_index:dim1_r_index + 1, dim2_l_index:dim2_r_index + 1]
    fina_mask = mask[:, dim1_l_index:dim1_r_index + 1, dim2_l_index:dim2_r_index + 1]
    if ex_pix[0]:
        dim0_l_index = np.clip(dim0min - ex_pix[0], 0, dim0)
        dim0_r_index = np.clip(dim0max + ex_pix[0], 0, dim0)
    else:
        # No expansion along dim0: crop tightly to the mask extent.
        print('dim0 不外扩')
        dim0_l_index = dim0min
        dim0_r_index = dim0max
    fina_img = fina_img[dim0_l_index:dim0_r_index + 1, :, :]
    fina_mask = fina_mask[dim0_l_index:dim0_r_index + 1, :, :]
    return fina_img, fina_mask


def data_pretreatment():
    """Preprocess the loaded volume: crop to ROI (with margin), clip outliers,
    resize to 224 x y x 224, and min-max normalize. Stores the results in the
    globals read by inference()."""
    global Image_3D
    global ROI_3D
    global ROI_small_3D
    global Image_small_3D
    global Current_name
    global Input_File
    # Original used ``Image_3D.all() == None`` which is always False (and
    # crashes when Image_3D is None); test identity explicitly instead.
    if Image_3D is None:
        return '没有数据'
    roi = ROI_3D
    waikuo = [4, 4, 4]  # margin (voxels) added around the ROI bounding box
    # NOTE(review): this crop call was commented out in the original, leaving
    # ``fina_img`` undefined (NameError on first use). Restored per the
    # commented-out code — confirm against the training preprocessing.
    fina_img, fina_mask = arry_crop_3D(Image_3D, roi, waikuo)
    cut_thre = np.percentile(fina_img, 99.9)  # clip the brightest 0.1%
    fina_img[fina_img >= cut_thre] = cut_thre
    z, y, x = fina_img.shape
    fina_img = resize3D(fina_img, [224, y, 224], order=3)
    # Resize the *cropped* mask (the original resized the full-volume roi,
    # whose shape cannot match the cropped image).
    fina_roi = resize3D(fina_mask, [224, y, 224], order=3)
    fina_img = (np.max(fina_img) - fina_img) / (np.max(fina_img) - np.min(fina_img))
    Image_small_3D = fina_img
    ROI_small_3D = fina_roi
    return '预处理结束'


class App:
    """Builds and launches the Gradio Blocks UI."""

    def __init__(self):
        self.demo = None
        self.main()

    def main(self):
        with gr.Blocks() as demo:
            # --- file upload row -------------------------------------------
            with gr.Row():
                with gr.Column(scale=1):
                    inp = gr.inputs.File(label="Upload MRI file")
                    inp2 = gr.inputs.File(label="Upload ROI file")
                with gr.Column(scale=1):
                    out8 = gr.Textbox(placeholder="尚未加载影像数据")
                    out9 = gr.Textbox(placeholder="尚未加载感兴趣区域")
            with gr.Row():
                btn1 = gr.Button("Upload MRI")
                btn5 = gr.Button("Upload ROI")
                clear = gr.Button(" Clear All")
            # --- slice viewer tab ------------------------------------------
            with gr.Tab("Image"):
                with gr.Row():
                    with gr.Column(scale=1):
                        out1 = gr.Image(shape=(10, 10))
                        slider1 = gr.Slider(1, 128, label='z轴层数', step=1, interactive=True)
                    with gr.Column(scale=1):
                        out2 = gr.Image(shape=(10, 10))
                        slider2 = gr.Slider(1, 256, label='y轴层数', step=1, interactive=True)
                    with gr.Column(scale=1):
                        out3 = gr.Image(shape=(10, 10))
                        slider3 = gr.Slider(1, 128, label='x轴层数', step=1, interactive=True)
            # --- clinical info / prediction tab ----------------------------
            with gr.Tab("Medical Information"):
                with gr.Row():
                    with gr.Column(scale=1):
                        btn2 = gr.Button(value="临床信息")
                        out4 = gr.Textbox(label="患病史")
                        out6 = gr.Textbox(label="现病史")
                    with gr.Column(scale=1):
                        btn4 = gr.Button("预处理")
                        out7 = gr.Textbox(placeholder="尚未进行预处理 请先预处理再按“分期结果”按钮", )
                        btn3 = gr.Button("分期结果")
                        out5 = gr.Label(num_top_classes=2, label='分期结果')
            # --- event wiring ----------------------------------------------
            btn3.click(inference, inputs=None, outputs=out5)
            btn4.click(data_pretreatment, inputs=None, outputs=out7)
            btn2.click(get_medical_message, inputs=None, outputs=[out4, out6])
            btn1.click(get_Image_reslice, inp,
                       [out1, out2, out3, slider1, slider2, slider3, out8])
            btn5.click(get_ROI, inputs=inp2, outputs=out9)
            slider3.change(change_image_slice_x, inputs=slider3, outputs=out3)
            slider2.change(change_image_slice_y, inputs=slider2, outputs=out2)
            slider1.change(change_image_slice_z, inputs=slider1, outputs=out1)
            clear.click(clear_all, None,
                        [out1, out2, out3, out4, out6, out5, out7, out8, out9],
                        queue=True)
            gr.Markdown('''# Examples''')
            gr.Examples(
                examples=[["./2239561_B_R_MRI.nii.gz"], ["./2239561_B_R_MRI.nii.gz"]],
                inputs=inp,
                outputs=[out1, out2, out3, slider1, slider2, slider3, out8],
                fn=get_Image_reslice,
                cache_examples=True,
            )
            gr.Examples(
                examples=[["./2239561_B_R_ROI.nii.gz"], ["./2239561_B_R_ROI.nii.gz"]],
                inputs=inp2,
                outputs=out9,
                fn=get_ROI,
                cache_examples=True,
            )
        demo.queue(concurrency_count=6)
        demo.launch(share=False)


if __name__ == '__main__':
    # Guarded so importing this module (e.g. for the model classes) does not
    # launch the blocking Gradio server.
    app = App()