{ "cells": [ { "cell_type": "markdown", "metadata": {}, "source": [ "请点击[此处](https://ai.baidu.com/docs#/AIStudio_Project_Notebook/a38e5576)查看本环境基本用法.
\n", "Please click [here ](https://ai.baidu.com/docs#/AIStudio_Project_Notebook/a38e5576) for more detailed instructions. " ] }, { "cell_type": "code", "execution_count": 11, "metadata": { "execution": { "iopub.execute_input": "2022-10-11T15:15:49.511879Z", "iopub.status.busy": "2022-10-11T15:15:49.511286Z", "iopub.status.idle": "2022-10-11T15:15:51.568549Z", "shell.execute_reply": "2022-10-11T15:15:51.567597Z", "shell.execute_reply.started": "2022-10-11T15:15:49.511839Z" }, "jupyter": { "outputs_hidden": false }, "scrolled": true, "tags": [] }, "outputs": [], "source": [ "import os\n", "import io\n", "import numpy as np\n", "import matplotlib.pyplot as plt\n", "from PIL import Image\n", "import paddle\n", "from paddle.nn import functional as F\n", "import random\n", "from paddle.io import Dataset\n", "from visualdl import LogWriter\n", "from paddle.vision.transforms import transforms as T\n", "import warnings\n", "import cv2 as cv\n", "from PIL import Image\n", "import re\n", "warnings.filterwarnings(\"ignore\")\n", "os.environ[\"KMP_DUPLICATE_LIB_OK\"]=\"TRUE\"" ] }, { "cell_type": "code", "execution_count": 12, "metadata": { "execution": { "iopub.execute_input": "2022-10-11T15:16:14.415916Z", "iopub.status.busy": "2022-10-11T15:16:14.415245Z", "iopub.status.idle": "2022-10-11T15:16:14.428584Z", "shell.execute_reply": "2022-10-11T15:16:14.427470Z", "shell.execute_reply.started": "2022-10-11T15:16:14.415874Z" }, "jupyter": { "outputs_hidden": false }, "tags": [] }, "outputs": [], "source": [ "class SeparableConv2D(paddle.nn.Layer):\n", " def __init__(self,\n", " in_channels,\n", " out_channels,\n", " kernel_size,\n", " stride=1,\n", " padding=0,\n", " dilation=1,\n", " groups=None,\n", " weight_attr=None,\n", " bias_attr=None,\n", " data_format=\"NCHW\"):\n", " super(SeparableConv2D, self).__init__()\n", "\n", " self._padding = padding\n", " self._stride = stride\n", " self._dilation = dilation\n", " self._in_channels = in_channels\n", " self._data_format = data_format\n", "\n", " # 第一次卷积参数,没有偏置参数\n", " filter_shape = [in_channels, 1] + self.convert_to_list(kernel_size, 2, 'kernel_size')\n", " self.weight_conv = self.create_parameter(shape=filter_shape, attr=weight_attr)\n", "\n", " # 第二次卷积参数\n", " filter_shape = [out_channels, in_channels] + self.convert_to_list(1, 2, 'kernel_size')\n", " self.weight_pointwise = self.create_parameter(shape=filter_shape, attr=weight_attr)\n", " self.bias_pointwise = self.create_parameter(shape=[out_channels],\n", " attr=bias_attr,\n", " is_bias=True)\n", "\n", " def convert_to_list(self, value, n, name, dtype=np.int):\n", " if isinstance(value, dtype):\n", " return [value, ] * n\n", " else:\n", " try:\n", " value_list = list(value)\n", " except TypeError:\n", " raise ValueError(\"The \" + name +\n", " \"'s type must be list or tuple. Received: \" + str(\n", " value))\n", " if len(value_list) != n:\n", " raise ValueError(\"The \" + name + \"'s length must be \" + str(n) +\n", " \". Received: \" + str(value))\n", " for single_value in value_list:\n", " try:\n", " dtype(single_value)\n", " except (ValueError, TypeError):\n", " raise ValueError(\n", " \"The \" + name + \"'s type must be a list or tuple of \" + str(\n", " n) + \" \" + str(dtype) + \" . Received: \" + str(\n", " value) + \" \"\n", " \"including element \" + str(single_value) + \" of type\" + \" \"\n", " + str(type(single_value)))\n", " return value_list\n", "\n", " def forward(self, inputs):\n", " conv_out = F.conv2d(inputs,\n", " self.weight_conv,\n", " padding=self._padding,\n", " stride=self._stride,\n", " dilation=self._dilation,\n", " groups=self._in_channels,\n", " data_format=self._data_format)\n", "\n", " out = F.conv2d(conv_out,\n", " self.weight_pointwise,\n", " bias=self.bias_pointwise,\n", " padding=0,\n", " stride=1,\n", " dilation=1,\n", " groups=1,\n", " data_format=self._data_format)\n", "\n", " return out\n", "class Encoder(paddle.nn.Layer):\n", " def __init__(self, in_channels, out_channels):\n", " super(Encoder, self).__init__()\n", "\n", " self.relus = paddle.nn.LayerList(\n", " [paddle.nn.ReLU() for i in range(2)])\n", " self.separable_conv_01 = SeparableConv2D(in_channels,\n", " out_channels,\n", " kernel_size=3,\n", " padding='same')\n", " self.bns = paddle.nn.LayerList(\n", " [paddle.nn.BatchNorm2D(out_channels) for i in range(2)])\n", "\n", " self.separable_conv_02 = SeparableConv2D(out_channels,\n", " out_channels,\n", " kernel_size=3,\n", " padding='same')\n", " self.pool = paddle.nn.MaxPool2D(kernel_size=3, stride=2, padding=1)\n", " self.residual_conv = paddle.nn.Conv2D(in_channels,\n", " out_channels,\n", " kernel_size=1,\n", " stride=2,\n", " padding='same')\n", "\n", " def forward(self, inputs):\n", " previous_block_activation = inputs\n", "\n", " y = self.relus[0](inputs)\n", " y = self.separable_conv_01(y)\n", " y = self.bns[0](y)\n", " y = self.relus[1](y)\n", " y = self.separable_conv_02(y)\n", " y = self.bns[1](y)\n", " y = self.pool(y)\n", "\n", " residual = self.residual_conv(previous_block_activation)\n", " y = paddle.add(y, residual)\n", "\n", " return y\n", "class Decoder(paddle.nn.Layer):\n", " def __init__(self, in_channels, out_channels):\n", " super(Decoder, self).__init__()\n", "\n", " self.relus = paddle.nn.LayerList(\n", " [paddle.nn.ReLU() for i in range(2)])\n", " self.conv_transpose_01 = paddle.nn.Conv2DTranspose(in_channels,\n", " out_channels,\n", " kernel_size=3,\n", " padding=1)\n", " self.conv_transpose_02 = paddle.nn.Conv2DTranspose(out_channels,\n", " out_channels,\n", " kernel_size=3,\n", " padding=1)\n", " self.bns = paddle.nn.LayerList(\n", " [paddle.nn.BatchNorm2D(out_channels) for i in range(2)]\n", " )\n", " self.upsamples = paddle.nn.LayerList(\n", " [paddle.nn.Upsample(scale_factor=2.0) for i in range(2)]\n", " )\n", " self.residual_conv = paddle.nn.Conv2D(in_channels,\n", " out_channels,\n", " kernel_size=1,\n", " padding='same')\n", "\n", " def forward(self, inputs):\n", " previous_block_activation = inputs\n", "\n", " y = self.relus[0](inputs)\n", " y = self.conv_transpose_01(y)\n", " y = self.bns[0](y)\n", " y = self.relus[1](y)\n", " y = self.conv_transpose_02(y)\n", " y = self.bns[1](y)\n", " y = self.upsamples[0](y)\n", "\n", " residual = self.upsamples[1](previous_block_activation)\n", " residual = self.residual_conv(residual)\n", "\n", " y = paddle.add(y, residual)\n", "\n", " return y\n", "class PetNet(paddle.nn.Layer):\n", " def __init__(self, num_classes):\n", " super(PetNet, self).__init__()\n", "\n", " self.conv_1 = paddle.nn.Conv2D(3, 32,\n", " kernel_size=3,\n", " stride=2,\n", " padding='same')\n", " self.bn = paddle.nn.BatchNorm2D(32)\n", " self.relu = paddle.nn.ReLU()\n", "\n", " in_channels = 32\n", " self.encoders = []\n", " self.encoder_list = [64, 128, 256]\n", " self.decoder_list = [256, 128, 64, 32]\n", "\n", " for out_channels in self.encoder_list:\n", " block = self.add_sublayer('encoder_{}'.format(out_channels),\n", " Encoder(in_channels, out_channels))\n", " self.encoders.append(block)\n", " in_channels = out_channels\n", "\n", " self.decoders = []\n", "\n", " for out_channels in self.decoder_list:\n", " block = self.add_sublayer('decoder_{}'.format(out_channels),\n", " Decoder(in_channels, out_channels))\n", " self.decoders.append(block)\n", " in_channels = out_channels\n", "\n", " self.output_conv = paddle.nn.Conv2D(in_channels,\n", " num_classes,\n", " kernel_size=3,\n", " padding='same')\n", "\n", " def forward(self, inputs):\n", " y = self.conv_1(inputs)\n", " y = self.bn(y)\n", " y = self.relu(y)\n", "\n", " for encoder in self.encoders:\n", " y = encoder(y)\n", "\n", " for decoder in self.decoders:\n", " y = decoder(y)\n", "\n", " y = self.output_conv(y)\n", " return y\n", "IMAGE_SIZE = (512, 512)\n", "num_classes = 2\n", "network = PetNet(num_classes)\n", "model = paddle.Model(network)" ] }, { "cell_type": "code", "execution_count": 13, "metadata": { "execution": { "iopub.execute_input": "2022-10-11T15:16:14.415916Z", "iopub.status.busy": "2022-10-11T15:16:14.415245Z", "iopub.status.idle": "2022-10-11T15:16:14.428584Z", "shell.execute_reply": "2022-10-11T15:16:14.427470Z", "shell.execute_reply.started": "2022-10-11T15:16:14.415874Z" }, "jupyter": { "outputs_hidden": false }, "scrolled": true, "tags": [] }, "outputs": [], "source": [ "#加载训练好的权重\n", "optimizer = paddle.optimizer.RMSProp(learning_rate=0.001, parameters=network.parameters())\n", "layer_state_dict = paddle.load(\"mymodel.pdparams\")\n", "opt_state_dict = paddle.load(\"optimizer.pdopt\")\n", "\n", "network.set_state_dict(layer_state_dict)\n", "optimizer.set_state_dict(opt_state_dict)" ] }, { "cell_type": "code", "execution_count": 14, "metadata": { "execution": { "iopub.execute_input": "2022-10-11T16:07:50.639995Z", "iopub.status.busy": "2022-10-11T16:07:50.639338Z", "iopub.status.idle": "2022-10-11T16:07:50.941928Z", "shell.execute_reply": "2022-10-11T16:07:50.940805Z", "shell.execute_reply.started": "2022-10-11T16:07:50.639949Z" }, "jupyter": { "outputs_hidden": false }, "tags": [] }, "outputs": [], "source": [ "def FinalImage(mask,image):\n", " # 这个函数的作用是把mask高斯模糊之后的遮罩和原始的image叠加起来\n", " #输入 mask [0,255]的这招图\n", " #image 必须无条件转化为512*512 三通道彩图\n", " \n", " th = cv.threshold(mask,140,255,cv.THRESH_BINARY)[1]\n", " blur = cv.GaussianBlur(th,(33,33), 15)\n", " heatmap_img = cv.applyColorMap(blur, cv.COLORMAP_OCEAN)\n", " Blendermap = cv.addWeighted(heatmap_img, 0.5, image, 1, 0)\n", " return Blendermap" ] }, { "cell_type": "code", "execution_count": 15, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "IMPORTANT: You are using gradio version 3.12.0, however version 3.14.0 is available, please upgrade.\n", "--------\n", "Running on local URL: http://127.0.0.1:7864\n", "Running on public URL: https://317fc297694e39a2.gradio.app\n", "\n", "This share link expires in 72 hours. For free permanent hosting and GPU upgrades (NEW!), check out Spaces: https://huggingface.co/spaces\n" ] }, { "data": { "text/html": [ "
" ], "text/plain": [ "" ] }, "metadata": {}, "output_type": "display_data" }, { "data": { "text/plain": [] }, "execution_count": 15, "metadata": {}, "output_type": "execute_result" } ], "source": [ "import gradio as gr\n", "def Showsegmentation(image):\n", " mask = paddle.argmax(network(paddle.to_tensor([((image - 127.5) / 127.5).transpose(2, 0, 1)]))[0], axis=0).numpy()\n", " mask=mask.astype('uint8')*255\n", " immask=cv.resize(mask, (512, 512))\n", " image=cv.resize(image,(512,512))\n", " blendmask=FinalImage(immask,image)\n", " return blendmask\n", "\n", "gr.Interface(fn=Showsegmentation, inputs=\"image\", outputs=\"image\").launch(share=True)" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [] } ], "metadata": { "kernelspec": { "display_name": "Python 3 (ipykernel)", "language": "python", "name": "python3" }, "language_info": { "codemirror_mode": { "name": "ipython", "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.8.5" }, "toc-autonumbering": true, "toc-showcode": true, "toc-showmarkdowntxt": true }, "nbformat": 4, "nbformat_minor": 4 }