| prompt | completion | api |
|---|---|---|
| string (94 to 42.6k chars) | string (6 to 120 chars) | string (14 to 68 chars) |
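Each row pairs a truncated source prefix (`prompt`) with the MegEngine call that completes it (`completion`) and that call's fully qualified name (`api`). For example, one row's prompt ends at `x4 = self.conv4(`, its completion is `F.concat([x2, x3], axis=1)`, and its api label is `megengine.functional.concat`.

Every prompt in this dump truncates the same MegEngine GyroFlow source file at a different call site; the fullest copy is reproduced once below.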
# -*- coding: utf-8 -*-
# MIT License
#
# Copyright (c) 2020 <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
# This repo is licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import math
import collections
import megengine.module as nn
import megengine.functional as F
from model.nn_upsample import NeuralUpsampler, FlowMaskEstimator
from common.utils import flow_warp, upsample2d_flow_as
def conv(inp, out, k=3, s=1, d=1, isReLU=True):
    if isReLU:
        ret = nn.Sequential(nn.Conv2d(inp, out, k, s, padding=((k - 1) * d) // 2, dilation=d, bias=True), nn.LeakyReLU(0.1))
    else:
        ret = nn.Sequential(nn.Conv2d(inp, out, k, s, padding=((k - 1) * d) // 2, dilation=d, bias=True))
    return ret
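# Worked example: the padding formula ((k - 1) * d) // 2 keeps the spatial size
# unchanged for stride-1 convolutions, dilated or not. For k=3, d=2 it gives
# padding 2, so conv(16, 32, k=3, s=1, d=2) maps (N, 16, H, W) to (N, 32, H, W).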
class ContextNetwork(nn.Module):
    def __init__(self, ch_in):
        super(ContextNetwork, self).__init__()
        self.convs = nn.Sequential(conv(ch_in, 128, 3, 1, 1), conv(128, 128, 3, 1, 2), conv(128, 128, 3, 1, 4), conv(128, 96, 3, 1, 8),
                                   conv(96, 64, 3, 1, 16), conv(64, 32, 3, 1, 1), conv(32, 2, isReLU=False))

    def forward(self, x):
        return self.convs(x)
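# The dilation schedule (1, 2, 4, 8, 16) grows the receptive field without
# changing resolution; the final isReLU=False layer emits a 2-channel flow
# residual.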
class FlowEstimator(nn.Module):
    def __init__(self, ch_in):
        super(FlowEstimator, self).__init__()
        self.conv1 = conv(ch_in, 128)
        self.conv2 = conv(128, 128)
        self.conv3 = conv(128 + 128, 96)
        self.conv4 = conv(96 + 128, 64)
        self.conv5 = conv(96 + 64, 32)
        # channels of the second-to-last layer
        self.feat_dim = 32
        self.predict_flow = conv(64 + 32, 2, isReLU=False)

    def forward(self, x):
        x1 = self.conv1(x)
        x2 = self.conv2(x1)
        x3 = self.conv3(F.concat([x1, x2], axis=1))
        x4 = self.conv4(F.concat([x2, x3], axis=1))
        x5 = self.conv5(F.concat([x3, x4], axis=1))
        flow = self.predict_flow(F.concat([x4, x5], axis=1))
        return x5, flow
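# Dense connections: conv3 sees (x1, x2) = 128 + 128 channels, conv4 sees
# (x2, x3) = 128 + 96, conv5 sees (x3, x4) = 96 + 64, and the flow head sees
# (x4, x5) = 64 + 32, matching the channel sums declared in __init__.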
class CostVolume(nn.Module):
    def __init__(self, d=4, *args, **kwargs):
        super(CostVolume, self).__init__()
        self.d = d
        self.out_dim = 2 * self.d + 1
        self.pad_size = self.d

    def forward(self, x1, x2):
        _, _, H, W = x1.shape
        x2 = F.nn.pad(x2, ((0, 0), (0, 0), (self.pad_size, self.pad_size), (self.pad_size, self.pad_size)))
        cv = []
        for i in range(self.out_dim):
            for j in range(self.out_dim):
                cost = x1 * x2[:, :, i:(i + H), j:(j + W)]
                cost = F.mean(cost, 1, keepdims=True)
                cv.append(cost)
        return F.concat(cv, 1)
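# With the default d=4 the search window covers 9x9 displacements, so the cost
# volume has (2 * 4 + 1) ** 2 = 81 channels; this matches
# self.cv_dim = (self.d * 2 + 1) ** 2 in GyroFlow below.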
class FeaturePyramidExtractor(nn.Module):
    def __init__(self, pyr_chans):
        super(FeaturePyramidExtractor, self).__init__()
        self.pyr_chans = pyr_chans
        self.convs = []
        for ch_in, ch_out in zip(pyr_chans[:-1], pyr_chans[1:]):
            layer = nn.Sequential(conv(ch_in, ch_out, s=2), conv(ch_out, ch_out))
            self.convs.append(layer)

    def forward(self, x):
        feature_pyramid = []
        for layer in self.convs:
            x = layer(x)
            feature_pyramid.append(x)
        return feature_pyramid[::-1]
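# For pyr_chans = [3, 16, 32, 64, 96, 128, 192] this builds six stride-2
# stages, so a (N, 3, H, W) image yields features at 1/2 down to 1/64
# resolution, returned coarsest-first (192 channels down to 16).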
class GyroFlow(nn.Module):
    def __init__(self, params):
        super(GyroFlow, self).__init__()
        self.leakyRELU = nn.LeakyReLU(0.1)
        self.upsample = params.upsample
        self.with_bk = True
        self.pyr_chans = [3, 16, 32, 64, 96, 128, 192]
        self.feature_pyramid_extractor = FeaturePyramidExtractor(self.pyr_chans)
        # correlation range
        self.d = 4
        self.output_level = 4
        # cost volume
        self.cost_volume = CostVolume(d=self.d)
        self.cv_dim = (self.d * 2 + 1)**2
        self.upsampler = NeuralUpsampler()

        self.ch_inp = 32 + self.cv_dim + 2
        self.flow_estimator = FlowEstimator(self.ch_inp)
        self.context_net = ContextNetwork(self.flow_estimator.feat_dim + 2)

        self.conv_1x1 = [
            conv(192, 32, k=1, s=1, d=1),
            conv(128, 32, k=1, s=1, d=1),
            conv(96, 32, k=1, s=1, d=1),
            conv(64, 32, k=1, s=1, d=1),
            conv(32, 32, k=1, s=1, d=1)
        ]

        self.with_gyro_field = True
        self.flow_predictor = FlowMaskEstimator(4, (8, 16, 32, 16, 8), 2)
        self.mask_predictor = FlowMaskEstimator(64, (32, 32, 32, 16, 8), 1)

        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.msra_normal_(m.weight, a=math.sqrt(5))
                if m.bias is not None:
                    fan_in, _ = nn.init.calculate_fan_in_and_fan_out(m.weight)
                    bound = 1 / math.sqrt(fan_in)
                    nn.init.uniform_(m.bias, -bound, bound)
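    # Input width of the flow estimator: 32 reduced feature channels
    # + 81 cost-volume channels + 2 flow channels = 115 (self.ch_inp above).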
    def generate_fused_flow(self, x1, x2):
        input_feature = F.concat((x1, x2), axis=1)
        flow = self.flow_predictor(input_feature)[1]
        assert flow.shape[1] == 2
        return flow

    def generate_map(self, x1, x2):
        input_feature = F.concat((x1, x2), axis=1)
        out = self.mask_predictor(input_feature)[1]
        mask = F.sigmoid(out)
        assert mask.shape[1] == 1
        return mask

    def self_guided_fusion_module(self, flow, gyro_field_rsz, x1, x2_warp, layer):
        fuse_flow = self.generate_fused_flow(flow, gyro_field_rsz)
        mask = self.generate_map(self.conv_1x1[layer](x1), self.conv_1x1[layer](x2_warp))
        flow = fuse_flow * mask + gyro_field_rsz * (1 - mask)
        return flow
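    # The fusion mask is a per-pixel sigmoid gate: where it approaches 1 the
    # output follows the image-based fused flow, where it approaches 0 it
    # falls back to the resized gyro field.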
    def normalize_features(self, feature_list, normalize, center, moments_across_channels=True, moments_across_images=True):
        # Compute feature statistics.
        statistics = collections.defaultdict(list)
        axes = [1, 2, 3] if moments_across_channels else [2, 3]  # [b, c, h, w]
        for feature_image in feature_list:
            mean = F.mean(feature_image, axis=axes, keepdims=True)  # [b,1,1,1] or [b,c,1,1]
            variance = F.var(feature_image, axis=axes, keepdims=True)  # [b,1,1,1] or [b,c,1,1]
            statistics['mean'].append(mean)
            statistics['var'].append(variance)
        if moments_across_images:
            statistics['mean'] = [F.mean(F.stack(statistics['mean'], axis=0), axis=(0, ))] * len(feature_list)
            statistics['var'] = [F.var(F.stack(statistics['var'], axis=0), axis=(0, ))] * len(feature_list)
        statistics['std'] = [F.sqrt(v + 1e-16) for v in statistics['var']]
        # Center and normalize features.
        if center:
            feature_list = [f - mean for f, mean in zip(feature_list, statistics['mean'])]
        if normalize:
            feature_list = [f / std for f, std in zip(feature_list, statistics['std'])]
        return feature_list
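    # Example: with moments_across_channels=False and moments_across_images=False
    # (the setting used in predict_flow below), each feature map is standardized
    # per channel and per image, i.e. f -> (f - mean) / sqrt(var + 1e-16).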
    def predict_flow(self, x1_pyrs, x2_pyrs, gyro_field=None):
        flow_pyrs = []
        batch_size, _, h_x1, w_x1 = x1_pyrs[0].shape
        dtype = x1_pyrs[0].dtype
        flow = F.zeros((batch_size, 2, h_x1, w_x1), dtype=dtype)
        for layer, (x1, x2) in enumerate(zip(x1_pyrs, x2_pyrs)):
            if layer == 0:
                x2_warp = x2
            else:
                flow = self.upsampler(flow, self.conv_1x1[layer](x1), self.conv_1x1[layer](x2))
                gyro_field_rsz = upsample2d_flow_as(gyro_field, flow, if_rate=True)
                x2_warp = flow_warp(x2, gyro_field_rsz)
                flow = self.self_guided_fusion_module(flow, gyro_field_rsz, x1, x2_warp, layer)
                x2_warp = flow_warp(x2, flow)
            # normalize features before building the cost volume
            x1_normalized, x2_warp_normalized = self.normalize_features([x1, x2_warp],
                                                                        normalize=True,
                                                                        center=True,
                                                                        moments_across_channels=False,
                                                                        moments_across_images=False)
            _cv = self.cost_volume(x1_normalized, x2_warp_normalized)
            _cv_relu = self.leakyRELU(_cv)
            x1 = self.conv_1x1[layer](x1)
            _x_feat, flow_pred = self.flow_estimator(F.concat([_cv_relu, x1, flow], axis=1))
            flow += flow_pred
            flow_refine = self.context_net(F.concat([_x_feat, flow], axis=1))
            flow += flow_refine
            flow_pyrs.append(flow)
            if layer == self.output_level:
                break
        if self.upsample:
            flows = [F.vision.interpolate(flow * 4, scale_factor=4, mode='bilinear', align_corners=True) for flow in flow_pyrs]
        else:
            # guard against an undefined name when upsampling is disabled
            flows = flow_pyrs
        return flows[::-1]
    def forward(self, data_batch, with_bk=True):
        x = data_batch['imgs']
        imgs = [x[:, 3 * i:3 * i + 3] for i in range(2)]
        x = [self.feature_pyramid_extractor(img) + [img] for img in imgs]

        gyro_field = data_batch["gyro_field"]

        res = {}
        res['flow_fw'] = self.predict_flow(x[0], x[1], gyro_field)
        if with_bk:
            res['flow_bw'] = self.predict_flow(x[1], x[0], -1 * gyro_field)
        return res
class GyroFlowTestFlops(GyroFlow):
    def forward(self, data_batch, with_bk=True):
        x = data_batch
        imgs = [x[:, 3 * i:3 * i + 3] for i in range(2)]
        x = [self.feature_pyramid_extractor(img) + [img] for img in imgs]
        gyro_field = F.ones_like(data_batch)
        # The dump truncates here; the tail below is reconstructed to mirror
        # GyroFlow.forward (the original may further slice gyro_field down to
        # two channels before use).
        res = {}
        res['flow_fw'] = self.predict_flow(x[0], x[1], gyro_field)
        if with_bk:
            res['flow_bw'] = self.predict_flow(x[1], x[0], -1 * gyro_field)
        return res
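# Minimal smoke test (a sketch, not part of the original file). It assumes the
# sibling modules `model.nn_upsample` and `common.utils` are importable, that
# `params` only needs the `upsample` flag read in __init__, and uses
# hypothetical 256x256 inputs.
if __name__ == "__main__":
    from types import SimpleNamespace

    import numpy as np
    import megengine as mge

    net = GyroFlow(SimpleNamespace(upsample=True))
    batch = {
        "imgs": mge.tensor(np.zeros((1, 6, 256, 256), dtype=np.float32)),        # two stacked RGB frames
        "gyro_field": mge.tensor(np.zeros((1, 2, 256, 256), dtype=np.float32)),  # gyro-derived flow field
    }
    out = net(batch)
    print([f.shape for f in out["flow_fw"]])  # finest-first list of flow maps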
# -*- coding: utf-8 -*-
# MIT License
#
# Copyright (c) 2020 <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
# This repo is licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import math
import collections
import megengine.module as nn
import megengine.functional as F
from model.nn_upsample import NeuralUpsampler, FlowMaskEstimator
from common.utils import flow_warp, upsample2d_flow_as
def conv(inp, out, k=3, s=1, d=1, isReLU=True):
if isReLU:
ret = nn.Sequential(nn.Conv2d(inp, out, k, s, padding=((k - 1) * d) // 2, dilation=d, bias=True), nn.LeakyReLU(0.1))
else:
ret = nn.Sequential(nn.Conv2d(inp, out, k, s, padding=((k - 1) * d) // 2, dilation=d, bias=True))
return ret
class ContextNetwork(nn.Module):
def __init__(self, ch_in):
super(ContextNetwork, self).__init__()
self.convs = nn.Sequential(conv(ch_in, 128, 3, 1, 1), conv(128, 128, 3, 1, 2), conv(128, 128, 3, 1, 4), conv(128, 96, 3, 1, 8),
conv(96, 64, 3, 1, 16), conv(64, 32, 3, 1, 1), conv(32, 2, isReLU=False))
def forward(self, x):
return self.convs(x)
class FlowEstimator(nn.Module):
def __init__(self, ch_in):
super(FlowEstimator, self).__init__()
self.conv1 = conv(ch_in, 128)
self.conv2 = conv(128, 128)
self.conv3 = conv(128 + 128, 96)
self.conv4 = conv(96 + 128, 64)
self.conv5 = conv(96 + 64, 32)
# channels of the second last layer
self.feat_dim = 32
self.predict_flow = conv(64 + 32, 2, isReLU=False)
def forward(self, x):
x1 = self.conv1(x)
x2 = self.conv2(x1)
x3 = self.conv3(F.concat([x1, x2], axis=1))
x4 = self.conv4(F.concat([x2, x3], axis=1))
x5 = self.conv5(F.concat([x3, x4], axis=1))
flow = self.predict_flow(F.concat([x4, x5], axis=1))
return x5, flow
class CostVolume(nn.Module):
def __init__(self, d=4, *args, **kwargs):
super(CostVolume, self).__init__()
self.d = d
self.out_dim = 2 * self.d + 1
self.pad_size = self.d
def forward(self, x1, x2):
_, _, H, W = x1.shape
x2 = F.nn.pad(x2, ((0, 0), (0, 0), (self.pad_size, self.pad_size), (self.pad_size, self.pad_size)))
cv = []
for i in range(self.out_dim):
for j in range(self.out_dim):
cost = x1 * x2[:, :, i:(i + H), j:(j + W)]
cost = | F.mean(cost, 1, keepdims=True) | megengine.functional.mean |
# -*- coding: utf-8 -*-
# MIT License
#
# Copyright (c) 2020 <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
# This repo is licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import math
import collections
import megengine.module as nn
import megengine.functional as F
from model.nn_upsample import NeuralUpsampler, FlowMaskEstimator
from common.utils import flow_warp, upsample2d_flow_as
def conv(inp, out, k=3, s=1, d=1, isReLU=True):
if isReLU:
ret = nn.Sequential(nn.Conv2d(inp, out, k, s, padding=((k - 1) * d) // 2, dilation=d, bias=True), nn.LeakyReLU(0.1))
else:
ret = nn.Sequential(nn.Conv2d(inp, out, k, s, padding=((k - 1) * d) // 2, dilation=d, bias=True))
return ret
class ContextNetwork(nn.Module):
def __init__(self, ch_in):
super(ContextNetwork, self).__init__()
self.convs = nn.Sequential(conv(ch_in, 128, 3, 1, 1), conv(128, 128, 3, 1, 2), conv(128, 128, 3, 1, 4), conv(128, 96, 3, 1, 8),
conv(96, 64, 3, 1, 16), conv(64, 32, 3, 1, 1), conv(32, 2, isReLU=False))
def forward(self, x):
return self.convs(x)
class FlowEstimator(nn.Module):
def __init__(self, ch_in):
super(FlowEstimator, self).__init__()
self.conv1 = conv(ch_in, 128)
self.conv2 = conv(128, 128)
self.conv3 = conv(128 + 128, 96)
self.conv4 = conv(96 + 128, 64)
self.conv5 = conv(96 + 64, 32)
# channels of the second last layer
self.feat_dim = 32
self.predict_flow = conv(64 + 32, 2, isReLU=False)
def forward(self, x):
x1 = self.conv1(x)
x2 = self.conv2(x1)
x3 = self.conv3(F.concat([x1, x2], axis=1))
x4 = self.conv4(F.concat([x2, x3], axis=1))
x5 = self.conv5(F.concat([x3, x4], axis=1))
flow = self.predict_flow(F.concat([x4, x5], axis=1))
return x5, flow
class CostVolume(nn.Module):
def __init__(self, d=4, *args, **kwargs):
super(CostVolume, self).__init__()
self.d = d
self.out_dim = 2 * self.d + 1
self.pad_size = self.d
def forward(self, x1, x2):
_, _, H, W = x1.shape
x2 = F.nn.pad(x2, ((0, 0), (0, 0), (self.pad_size, self.pad_size), (self.pad_size, self.pad_size)))
cv = []
for i in range(self.out_dim):
for j in range(self.out_dim):
cost = x1 * x2[:, :, i:(i + H), j:(j + W)]
cost = F.mean(cost, 1, keepdims=True)
cv.append(cost)
return F.concat(cv, 1)
class FeaturePyramidExtractor(nn.Module):
def __init__(self, pyr_chans):
super(FeaturePyramidExtractor, self).__init__()
self.pyr_chans = pyr_chans
self.convs = []
for _, (ch_in, ch_out) in enumerate(zip(pyr_chans[:-1], pyr_chans[1:])):
layer = nn.Sequential(conv(ch_in, ch_out, s=2), conv(ch_out, ch_out))
self.convs.append(layer)
def forward(self, x):
feature_pyramid = []
for conv in self.convs:
x = conv(x)
feature_pyramid.append(x)
return feature_pyramid[::-1]
class GyroFlow(nn.Module):
def __init__(self, params):
super(GyroFlow, self).__init__()
self.leakyRELU = nn.LeakyReLU(0.1)
self.upsample = params.upsample
self.with_bk = True
self.pyr_chans = [3, 16, 32, 64, 96, 128, 192]
self.feature_pyramid_extractor = FeaturePyramidExtractor(self.pyr_chans)
# correlation range
self.d = 4
self.output_level = 4
# cost volume
self.cost_volume = CostVolume(d=self.d)
self.cv_dim = (self.d * 2 + 1)**2
self.upsampler = NeuralUpsampler()
self.ch_inp = 32 + self.cv_dim + 2
self.flow_estimator = FlowEstimator(self.ch_inp)
self.context_net = ContextNetwork(self.flow_estimator.feat_dim + 2)
self.conv_1x1 = list([
conv(192, 32, k=1, s=1, d=1),
conv(128, 32, k=1, s=1, d=1),
conv(96, 32, k=1, s=1, d=1),
conv(64, 32, k=1, s=1, d=1),
conv(32, 32, k=1, s=1, d=1)
])
self.with_gyro_field = True
self.flow_predictor = FlowMaskEstimator(4, (8, 16, 32, 16, 8), 2)
self.mask_predictor = FlowMaskEstimator(64, (32, 32, 32, 16, 8), 1)
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.msra_normal_(m.weight, a=math.sqrt(5))
if m.bias is not None:
fan_in, _ = nn.init.calculate_fan_in_and_fan_out(m.weight)
bound = 1 / math.sqrt(fan_in)
nn.init.uniform_(m.bias, -bound, bound)
def generate_fused_flow(self, x1, x2):
input_feature = F.concat((x1, x2), axis=1)
flow = self.flow_predictor(input_feature)[1]
assert flow.shape[1] == 2
return flow
def generate_map(self, x1, x2):
input_feature = F.concat((x1, x2), axis=1)
out = self.mask_predictor(input_feature)[1]
mask = F.sigmoid(out)
assert mask.shape[1] == 1
return mask
def self_guided_fusion_module(self, flow, gyro_field_rsz, x1, x2_warp, layer):
fuse_flow = self.generate_fused_flow(flow, gyro_field_rsz)
mask = self.generate_map(self.conv_1x1[layer](x1), self.conv_1x1[layer](x2_warp))
flow = fuse_flow * mask + gyro_field_rsz * (1 - mask)
return flow
def normalize_features(self, feature_list, normalize, center, moments_across_channels=True, moments_across_images=True):
# Compute feature statistics.
statistics = collections.defaultdict(list)
axes = [1, 2, 3] if moments_across_channels else [2, 3] # [b, c, h, w]
for feature_image in feature_list:
mean = F.mean(feature_image, axis=axes, keepdims=True) # [b,1,1,1] or [b,c,1,1]
variance = F.var(feature_image, axis=axes, keepdims=True) # [b,1,1,1] or [b,c,1,1]
statistics['mean'].append(mean)
statistics['var'].append(variance)
if moments_across_images:
statistics['mean'] = ([F.mean(F.stack(statistics['mean'], axis=0), axis=(0, ))] * len(feature_list))
statistics['var'] = ([F.var(F.stack(statistics['var'], axis=0), axis=(0, ))] * len(feature_list))
statistics['std'] = [F.sqrt(v + 1e-16) for v in statistics['var']]
# Center and normalize features.
if center:
feature_list = [f - mean for f, mean in zip(feature_list, statistics['mean'])]
if normalize:
feature_list = [f / std for f, std in zip(feature_list, statistics['std'])]
return feature_list
def predict_flow(self, x1_pyrs, x2_pyrs, gyro_field=None):
flow_pyrs = []
batch_size, _, h_x1, w_x1 = x1_pyrs[0].shape
dtype = x1_pyrs[0].dtype
flow = F.zeros((batch_size, 2, h_x1, w_x1), dtype=dtype)
for layer, (x1, x2) in enumerate(zip(x1_pyrs, x2_pyrs)):
if layer == 0:
x2_warp = x2
else:
flow = self.upsampler(flow, self.conv_1x1[layer](x1), self.conv_1x1[layer](x2))
gyro_field_rsz = upsample2d_flow_as(gyro_field, flow, if_rate=True)
x2_warp = flow_warp(x2, gyro_field_rsz)
flow = self.self_guided_fusion_module(flow, gyro_field_rsz, x1, x2_warp, layer)
x2_warp = flow_warp(x2, flow)
# cost volume normalized
x1_normalized, x2_warp_normalized = self.normalize_features([x1, x2_warp],
normalize=True,
center=True,
moments_across_channels=False,
moments_across_images=False)
_cv = self.cost_volume(x1_normalized, x2_warp_normalized)
_cv_relu = self.leakyRELU(_cv)
x1 = self.conv_1x1[layer](x1)
_x_feat, flow_pred = self.flow_estimator( | F.concat([_cv_relu, x1, flow], axis=1) | megengine.functional.concat |
# -*- coding: utf-8 -*-
# MIT License
#
# Copyright (c) 2020 <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
# This repo is licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import math
import collections
import megengine.module as nn
import megengine.functional as F
from model.nn_upsample import NeuralUpsampler, FlowMaskEstimator
from common.utils import flow_warp, upsample2d_flow_as
def conv(inp, out, k=3, s=1, d=1, isReLU=True):
if isReLU:
ret = nn.Sequential(nn.Conv2d(inp, out, k, s, padding=((k - 1) * d) // 2, dilation=d, bias=True), nn.LeakyReLU(0.1))
else:
ret = nn.Sequential(nn.Conv2d(inp, out, k, s, padding=((k - 1) * d) // 2, dilation=d, bias=True))
return ret
class ContextNetwork(nn.Module):
def __init__(self, ch_in):
super(ContextNetwork, self).__init__()
self.convs = nn.Sequential(conv(ch_in, 128, 3, 1, 1), conv(128, 128, 3, 1, 2), conv(128, 128, 3, 1, 4), conv(128, 96, 3, 1, 8),
conv(96, 64, 3, 1, 16), conv(64, 32, 3, 1, 1), conv(32, 2, isReLU=False))
def forward(self, x):
return self.convs(x)
class FlowEstimator(nn.Module):
def __init__(self, ch_in):
super(FlowEstimator, self).__init__()
self.conv1 = conv(ch_in, 128)
self.conv2 = conv(128, 128)
self.conv3 = conv(128 + 128, 96)
self.conv4 = conv(96 + 128, 64)
self.conv5 = conv(96 + 64, 32)
# channels of the second last layer
self.feat_dim = 32
self.predict_flow = conv(64 + 32, 2, isReLU=False)
def forward(self, x):
x1 = self.conv1(x)
x2 = self.conv2(x1)
x3 = self.conv3(F.concat([x1, x2], axis=1))
x4 = self.conv4(F.concat([x2, x3], axis=1))
x5 = self.conv5(F.concat([x3, x4], axis=1))
flow = self.predict_flow(F.concat([x4, x5], axis=1))
return x5, flow
class CostVolume(nn.Module):
def __init__(self, d=4, *args, **kwargs):
super(CostVolume, self).__init__()
self.d = d
self.out_dim = 2 * self.d + 1
self.pad_size = self.d
def forward(self, x1, x2):
_, _, H, W = x1.shape
x2 = F.nn.pad(x2, ((0, 0), (0, 0), (self.pad_size, self.pad_size), (self.pad_size, self.pad_size)))
cv = []
for i in range(self.out_dim):
for j in range(self.out_dim):
cost = x1 * x2[:, :, i:(i + H), j:(j + W)]
cost = F.mean(cost, 1, keepdims=True)
cv.append(cost)
return F.concat(cv, 1)
class FeaturePyramidExtractor(nn.Module):
def __init__(self, pyr_chans):
super(FeaturePyramidExtractor, self).__init__()
self.pyr_chans = pyr_chans
self.convs = []
for _, (ch_in, ch_out) in enumerate(zip(pyr_chans[:-1], pyr_chans[1:])):
layer = nn.Sequential(conv(ch_in, ch_out, s=2), conv(ch_out, ch_out))
self.convs.append(layer)
def forward(self, x):
feature_pyramid = []
for conv in self.convs:
x = conv(x)
feature_pyramid.append(x)
return feature_pyramid[::-1]
class GyroFlow(nn.Module):
def __init__(self, params):
super(GyroFlow, self).__init__()
self.leakyRELU = nn.LeakyReLU(0.1)
self.upsample = params.upsample
self.with_bk = True
self.pyr_chans = [3, 16, 32, 64, 96, 128, 192]
self.feature_pyramid_extractor = FeaturePyramidExtractor(self.pyr_chans)
# correlation range
self.d = 4
self.output_level = 4
# cost volume
self.cost_volume = CostVolume(d=self.d)
self.cv_dim = (self.d * 2 + 1)**2
self.upsampler = NeuralUpsampler()
self.ch_inp = 32 + self.cv_dim + 2
self.flow_estimator = FlowEstimator(self.ch_inp)
self.context_net = ContextNetwork(self.flow_estimator.feat_dim + 2)
self.conv_1x1 = list([
conv(192, 32, k=1, s=1, d=1),
conv(128, 32, k=1, s=1, d=1),
conv(96, 32, k=1, s=1, d=1),
conv(64, 32, k=1, s=1, d=1),
conv(32, 32, k=1, s=1, d=1)
])
self.with_gyro_field = True
self.flow_predictor = FlowMaskEstimator(4, (8, 16, 32, 16, 8), 2)
self.mask_predictor = FlowMaskEstimator(64, (32, 32, 32, 16, 8), 1)
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.msra_normal_(m.weight, a=math.sqrt(5))
if m.bias is not None:
fan_in, _ = nn.init.calculate_fan_in_and_fan_out(m.weight)
bound = 1 / math.sqrt(fan_in)
nn.init.uniform_(m.bias, -bound, bound)
def generate_fused_flow(self, x1, x2):
input_feature = F.concat((x1, x2), axis=1)
flow = self.flow_predictor(input_feature)[1]
assert flow.shape[1] == 2
return flow
def generate_map(self, x1, x2):
input_feature = F.concat((x1, x2), axis=1)
out = self.mask_predictor(input_feature)[1]
mask = F.sigmoid(out)
assert mask.shape[1] == 1
return mask
def self_guided_fusion_module(self, flow, gyro_field_rsz, x1, x2_warp, layer):
fuse_flow = self.generate_fused_flow(flow, gyro_field_rsz)
mask = self.generate_map(self.conv_1x1[layer](x1), self.conv_1x1[layer](x2_warp))
flow = fuse_flow * mask + gyro_field_rsz * (1 - mask)
return flow
def normalize_features(self, feature_list, normalize, center, moments_across_channels=True, moments_across_images=True):
# Compute feature statistics.
statistics = collections.defaultdict(list)
axes = [1, 2, 3] if moments_across_channels else [2, 3] # [b, c, h, w]
for feature_image in feature_list:
mean = F.mean(feature_image, axis=axes, keepdims=True) # [b,1,1,1] or [b,c,1,1]
variance = F.var(feature_image, axis=axes, keepdims=True) # [b,1,1,1] or [b,c,1,1]
statistics['mean'].append(mean)
statistics['var'].append(variance)
if moments_across_images:
statistics['mean'] = ([F.mean(F.stack(statistics['mean'], axis=0), axis=(0, ))] * len(feature_list))
statistics['var'] = ([F.var(F.stack(statistics['var'], axis=0), axis=(0, ))] * len(feature_list))
statistics['std'] = [F.sqrt(v + 1e-16) for v in statistics['var']]
# Center and normalize features.
if center:
feature_list = [f - mean for f, mean in zip(feature_list, statistics['mean'])]
if normalize:
feature_list = [f / std for f, std in zip(feature_list, statistics['std'])]
return feature_list
def predict_flow(self, x1_pyrs, x2_pyrs, gyro_field=None):
flow_pyrs = []
batch_size, _, h_x1, w_x1 = x1_pyrs[0].shape
dtype = x1_pyrs[0].dtype
flow = F.zeros((batch_size, 2, h_x1, w_x1), dtype=dtype)
for layer, (x1, x2) in enumerate(zip(x1_pyrs, x2_pyrs)):
if layer == 0:
x2_warp = x2
else:
flow = self.upsampler(flow, self.conv_1x1[layer](x1), self.conv_1x1[layer](x2))
gyro_field_rsz = upsample2d_flow_as(gyro_field, flow, if_rate=True)
x2_warp = flow_warp(x2, gyro_field_rsz)
flow = self.self_guided_fusion_module(flow, gyro_field_rsz, x1, x2_warp, layer)
x2_warp = flow_warp(x2, flow)
# cost volume normalized
x1_normalized, x2_warp_normalized = self.normalize_features([x1, x2_warp],
normalize=True,
center=True,
moments_across_channels=False,
moments_across_images=False)
_cv = self.cost_volume(x1_normalized, x2_warp_normalized)
_cv_relu = self.leakyRELU(_cv)
x1 = self.conv_1x1[layer](x1)
_x_feat, flow_pred = self.flow_estimator(F.concat([_cv_relu, x1, flow], axis=1))
flow += flow_pred
flow_refine = self.context_net( | F.concat([_x_feat, flow], axis=1) | megengine.functional.concat |
# -*- coding: utf-8 -*-
# MIT License
#
# Copyright (c) 2020 <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
# This repo is licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import math
import collections
import megengine.module as nn
import megengine.functional as F
from model.nn_upsample import NeuralUpsampler, FlowMaskEstimator
from common.utils import flow_warp, upsample2d_flow_as
def conv(inp, out, k=3, s=1, d=1, isReLU=True):
if isReLU:
ret = nn.Sequential(nn.Conv2d(inp, out, k, s, padding=((k - 1) * d) // 2, dilation=d, bias=True), nn.LeakyReLU(0.1))
else:
ret = nn.Sequential(nn.Conv2d(inp, out, k, s, padding=((k - 1) * d) // 2, dilation=d, bias=True))
return ret
class ContextNetwork(nn.Module):
def __init__(self, ch_in):
super(ContextNetwork, self).__init__()
self.convs = nn.Sequential(conv(ch_in, 128, 3, 1, 1), conv(128, 128, 3, 1, 2), conv(128, 128, 3, 1, 4), conv(128, 96, 3, 1, 8),
conv(96, 64, 3, 1, 16), conv(64, 32, 3, 1, 1), conv(32, 2, isReLU=False))
def forward(self, x):
return self.convs(x)
class FlowEstimator(nn.Module):
def __init__(self, ch_in):
super(FlowEstimator, self).__init__()
self.conv1 = conv(ch_in, 128)
self.conv2 = conv(128, 128)
self.conv3 = conv(128 + 128, 96)
self.conv4 = conv(96 + 128, 64)
self.conv5 = conv(96 + 64, 32)
# channels of the second last layer
self.feat_dim = 32
self.predict_flow = conv(64 + 32, 2, isReLU=False)
def forward(self, x):
x1 = self.conv1(x)
x2 = self.conv2(x1)
x3 = self.conv3(F.concat([x1, x2], axis=1))
x4 = self.conv4(F.concat([x2, x3], axis=1))
x5 = self.conv5(F.concat([x3, x4], axis=1))
flow = self.predict_flow(F.concat([x4, x5], axis=1))
return x5, flow
class CostVolume(nn.Module):
def __init__(self, d=4, *args, **kwargs):
super(CostVolume, self).__init__()
self.d = d
self.out_dim = 2 * self.d + 1
self.pad_size = self.d
def forward(self, x1, x2):
_, _, H, W = x1.shape
x2 = F.nn.pad(x2, ((0, 0), (0, 0), (self.pad_size, self.pad_size), (self.pad_size, self.pad_size)))
cv = []
for i in range(self.out_dim):
for j in range(self.out_dim):
cost = x1 * x2[:, :, i:(i + H), j:(j + W)]
cost = F.mean(cost, 1, keepdims=True)
cv.append(cost)
return F.concat(cv, 1)
class FeaturePyramidExtractor(nn.Module):
def __init__(self, pyr_chans):
super(FeaturePyramidExtractor, self).__init__()
self.pyr_chans = pyr_chans
self.convs = []
for _, (ch_in, ch_out) in enumerate(zip(pyr_chans[:-1], pyr_chans[1:])):
layer = nn.Sequential(conv(ch_in, ch_out, s=2), conv(ch_out, ch_out))
self.convs.append(layer)
def forward(self, x):
feature_pyramid = []
for conv in self.convs:
x = conv(x)
feature_pyramid.append(x)
return feature_pyramid[::-1]
class GyroFlow(nn.Module):
def __init__(self, params):
super(GyroFlow, self).__init__()
self.leakyRELU = nn.LeakyReLU(0.1)
self.upsample = params.upsample
self.with_bk = True
self.pyr_chans = [3, 16, 32, 64, 96, 128, 192]
self.feature_pyramid_extractor = FeaturePyramidExtractor(self.pyr_chans)
# correlation range
self.d = 4
self.output_level = 4
# cost volume
self.cost_volume = CostVolume(d=self.d)
self.cv_dim = (self.d * 2 + 1)**2
self.upsampler = NeuralUpsampler()
self.ch_inp = 32 + self.cv_dim + 2
self.flow_estimator = FlowEstimator(self.ch_inp)
self.context_net = ContextNetwork(self.flow_estimator.feat_dim + 2)
self.conv_1x1 = list([
conv(192, 32, k=1, s=1, d=1),
conv(128, 32, k=1, s=1, d=1),
conv(96, 32, k=1, s=1, d=1),
conv(64, 32, k=1, s=1, d=1),
conv(32, 32, k=1, s=1, d=1)
])
self.with_gyro_field = True
self.flow_predictor = FlowMaskEstimator(4, (8, 16, 32, 16, 8), 2)
self.mask_predictor = FlowMaskEstimator(64, (32, 32, 32, 16, 8), 1)
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.msra_normal_(m.weight, a=math.sqrt(5))
if m.bias is not None:
fan_in, _ = nn.init.calculate_fan_in_and_fan_out(m.weight)
bound = 1 / math.sqrt(fan_in)
nn.init.uniform_(m.bias, -bound, bound)
def generate_fused_flow(self, x1, x2):
input_feature = F.concat((x1, x2), axis=1)
flow = self.flow_predictor(input_feature)[1]
assert flow.shape[1] == 2
return flow
def generate_map(self, x1, x2):
input_feature = F.concat((x1, x2), axis=1)
out = self.mask_predictor(input_feature)[1]
mask = F.sigmoid(out)
assert mask.shape[1] == 1
return mask
def self_guided_fusion_module(self, flow, gyro_field_rsz, x1, x2_warp, layer):
fuse_flow = self.generate_fused_flow(flow, gyro_field_rsz)
mask = self.generate_map(self.conv_1x1[layer](x1), self.conv_1x1[layer](x2_warp))
flow = fuse_flow * mask + gyro_field_rsz * (1 - mask)
return flow
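    # A hedged reading of the fusion above: with mask in (0, 1) from the
    # sigmoid,
    #     flow = mask * fuse_flow + (1 - mask) * gyro_field_rsz
    # so pixels the network trusts follow the learned fusion (mask -> 1),
    # while low-texture or occluded pixels fall back to the gyro field
    # (mask -> 0).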
def normalize_features(self, feature_list, normalize, center, moments_across_channels=True, moments_across_images=True):
# Compute feature statistics.
statistics = collections.defaultdict(list)
axes = [1, 2, 3] if moments_across_channels else [2, 3] # [b, c, h, w]
for feature_image in feature_list:
mean = F.mean(feature_image, axis=axes, keepdims=True) # [b,1,1,1] or [b,c,1,1]
variance = F.var(feature_image, axis=axes, keepdims=True) # [b,1,1,1] or [b,c,1,1]
statistics['mean'].append(mean)
statistics['var'].append(variance)
if moments_across_images:
statistics['mean'] = ([F.mean(F.stack(statistics['mean'], axis=0), axis=(0, ))] * len(feature_list))
statistics['var'] = ([F.var(F.stack(statistics['var'], axis=0), axis=(0, ))] * len(feature_list))
statistics['std'] = [F.sqrt(v + 1e-16) for v in statistics['var']]
# Center and normalize features.
if center:
feature_list = [f - mean for f, mean in zip(feature_list, statistics['mean'])]
if normalize:
feature_list = [f / std for f, std in zip(feature_list, statistics['std'])]
return feature_list
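    # Numeric summary of the method above: with center=True and
    # normalize=True each feature map is standardized as
    #     f_hat = (f - mean(f)) / sqrt(var(f) + 1e-16)
    # which keeps the correlation-based cost volume comparable across
    # pyramid levels and across the two input images.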
def predict_flow(self, x1_pyrs, x2_pyrs, gyro_field=None):
flow_pyrs = []
batch_size, _, h_x1, w_x1 = x1_pyrs[0].shape
dtype = x1_pyrs[0].dtype
flow = F.zeros((batch_size, 2, h_x1, w_x1), dtype=dtype)
for layer, (x1, x2) in enumerate(zip(x1_pyrs, x2_pyrs)):
if layer == 0:
x2_warp = x2
else:
flow = self.upsampler(flow, self.conv_1x1[layer](x1), self.conv_1x1[layer](x2))
gyro_field_rsz = upsample2d_flow_as(gyro_field, flow, if_rate=True)
x2_warp = flow_warp(x2, gyro_field_rsz)
flow = self.self_guided_fusion_module(flow, gyro_field_rsz, x1, x2_warp, layer)
x2_warp = flow_warp(x2, flow)
# cost volume normalized
x1_normalized, x2_warp_normalized = self.normalize_features([x1, x2_warp],
normalize=True,
center=True,
moments_across_channels=False,
moments_across_images=False)
_cv = self.cost_volume(x1_normalized, x2_warp_normalized)
_cv_relu = self.leakyRELU(_cv)
x1 = self.conv_1x1[layer](x1)
_x_feat, flow_pred = self.flow_estimator(F.concat([_cv_relu, x1, flow], axis=1))
flow += flow_pred
flow_refine = self.context_net(F.concat([_x_feat, flow], axis=1))
flow += flow_refine
flow_pyrs.append(flow)
if layer == self.output_level:
break
if self.upsample:
flows = [ | F.vision.interpolate(flow * 4, scale_factor=4, mode='bilinear', align_corners=True) | megengine.functional.vision.interpolate |
# -*- coding: utf-8 -*-
# MIT License
#
# Copyright (c) 2020 <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
# This repo is licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import math
import collections
import megengine.module as nn
import megengine.functional as F
from model.nn_upsample import NeuralUpsampler, FlowMaskEstimator
from common.utils import flow_warp, upsample2d_flow_as
def conv(inp, out, k=3, s=1, d=1, isReLU=True):
if isReLU:
ret = nn.Sequential(nn.Conv2d(inp, out, k, s, padding=((k - 1) * d) // 2, dilation=d, bias=True), nn.LeakyReLU(0.1))
else:
ret = nn.Sequential(nn.Conv2d(inp, out, k, s, padding=((k - 1) * d) // 2, dilation=d, bias=True))
return ret
class ContextNetwork(nn.Module):
def __init__(self, ch_in):
super(ContextNetwork, self).__init__()
self.convs = nn.Sequential(conv(ch_in, 128, 3, 1, 1), conv(128, 128, 3, 1, 2), conv(128, 128, 3, 1, 4), conv(128, 96, 3, 1, 8),
conv(96, 64, 3, 1, 16), conv(64, 32, 3, 1, 1), conv(32, 2, isReLU=False))
def forward(self, x):
return self.convs(x)
class FlowEstimator(nn.Module):
def __init__(self, ch_in):
super(FlowEstimator, self).__init__()
self.conv1 = conv(ch_in, 128)
self.conv2 = conv(128, 128)
self.conv3 = conv(128 + 128, 96)
self.conv4 = conv(96 + 128, 64)
self.conv5 = conv(96 + 64, 32)
# channels of the second last layer
self.feat_dim = 32
self.predict_flow = conv(64 + 32, 2, isReLU=False)
def forward(self, x):
x1 = self.conv1(x)
x2 = self.conv2(x1)
x3 = self.conv3(F.concat([x1, x2], axis=1))
x4 = self.conv4(F.concat([x2, x3], axis=1))
x5 = self.conv5(F.concat([x3, x4], axis=1))
flow = self.predict_flow(F.concat([x4, x5], axis=1))
return x5, flow
class CostVolume(nn.Module):
def __init__(self, d=4, *args, **kwargs):
super(CostVolume, self).__init__()
self.d = d
self.out_dim = 2 * self.d + 1
self.pad_size = self.d
def forward(self, x1, x2):
_, _, H, W = x1.shape
x2 = F.nn.pad(x2, ((0, 0), (0, 0), (self.pad_size, self.pad_size), (self.pad_size, self.pad_size)))
cv = []
for i in range(self.out_dim):
for j in range(self.out_dim):
cost = x1 * x2[:, :, i:(i + H), j:(j + W)]
cost = F.mean(cost, 1, keepdims=True)
cv.append(cost)
return F.concat(cv, 1)
class FeaturePyramidExtractor(nn.Module):
def __init__(self, pyr_chans):
super(FeaturePyramidExtractor, self).__init__()
self.pyr_chans = pyr_chans
self.convs = []
        for ch_in, ch_out in zip(pyr_chans[:-1], pyr_chans[1:]):
layer = nn.Sequential(conv(ch_in, ch_out, s=2), conv(ch_out, ch_out))
self.convs.append(layer)
def forward(self, x):
feature_pyramid = []
for conv in self.convs:
x = conv(x)
feature_pyramid.append(x)
return feature_pyramid[::-1]
class GyroFlow(nn.Module):
def __init__(self, params):
super(GyroFlow, self).__init__()
self.leakyRELU = nn.LeakyReLU(0.1)
self.upsample = params.upsample
self.with_bk = True
self.pyr_chans = [3, 16, 32, 64, 96, 128, 192]
self.feature_pyramid_extractor = FeaturePyramidExtractor(self.pyr_chans)
# correlation range
self.d = 4
self.output_level = 4
# cost volume
self.cost_volume = CostVolume(d=self.d)
self.cv_dim = (self.d * 2 + 1)**2
self.upsampler = NeuralUpsampler()
self.ch_inp = 32 + self.cv_dim + 2
self.flow_estimator = FlowEstimator(self.ch_inp)
self.context_net = ContextNetwork(self.flow_estimator.feat_dim + 2)
        self.conv_1x1 = [
            conv(192, 32, k=1, s=1, d=1),
            conv(128, 32, k=1, s=1, d=1),
            conv(96, 32, k=1, s=1, d=1),
            conv(64, 32, k=1, s=1, d=1),
            conv(32, 32, k=1, s=1, d=1),
        ]
self.with_gyro_field = True
self.flow_predictor = FlowMaskEstimator(4, (8, 16, 32, 16, 8), 2)
self.mask_predictor = FlowMaskEstimator(64, (32, 32, 32, 16, 8), 1)
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.msra_normal_(m.weight, a=math.sqrt(5))
if m.bias is not None:
fan_in, _ = | nn.init.calculate_fan_in_and_fan_out(m.weight) | megengine.module.init.calculate_fan_in_and_fan_out |
# -*- coding: utf-8 -*-
# MIT License
#
# Copyright (c) 2020 <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
# This repo is licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import math
import collections
import megengine.module as nn
import megengine.functional as F
from model.nn_upsample import NeuralUpsampler, FlowMaskEstimator
from common.utils import flow_warp, upsample2d_flow_as
def conv(inp, out, k=3, s=1, d=1, isReLU=True):
if isReLU:
ret = nn.Sequential(nn.Conv2d(inp, out, k, s, padding=((k - 1) * d) // 2, dilation=d, bias=True), nn.LeakyReLU(0.1))
else:
ret = nn.Sequential(nn.Conv2d(inp, out, k, s, padding=((k - 1) * d) // 2, dilation=d, bias=True))
return ret
class ContextNetwork(nn.Module):
def __init__(self, ch_in):
super(ContextNetwork, self).__init__()
self.convs = nn.Sequential(conv(ch_in, 128, 3, 1, 1), conv(128, 128, 3, 1, 2), conv(128, 128, 3, 1, 4), conv(128, 96, 3, 1, 8),
conv(96, 64, 3, 1, 16), conv(64, 32, 3, 1, 1), conv(32, 2, isReLU=False))
def forward(self, x):
return self.convs(x)
class FlowEstimator(nn.Module):
def __init__(self, ch_in):
super(FlowEstimator, self).__init__()
self.conv1 = conv(ch_in, 128)
self.conv2 = conv(128, 128)
self.conv3 = conv(128 + 128, 96)
self.conv4 = conv(96 + 128, 64)
self.conv5 = conv(96 + 64, 32)
# channels of the second last layer
self.feat_dim = 32
self.predict_flow = conv(64 + 32, 2, isReLU=False)
def forward(self, x):
x1 = self.conv1(x)
x2 = self.conv2(x1)
x3 = self.conv3(F.concat([x1, x2], axis=1))
x4 = self.conv4(F.concat([x2, x3], axis=1))
x5 = self.conv5(F.concat([x3, x4], axis=1))
flow = self.predict_flow(F.concat([x4, x5], axis=1))
return x5, flow
class CostVolume(nn.Module):
def __init__(self, d=4, *args, **kwargs):
super(CostVolume, self).__init__()
self.d = d
self.out_dim = 2 * self.d + 1
self.pad_size = self.d
def forward(self, x1, x2):
_, _, H, W = x1.shape
x2 = F.nn.pad(x2, ((0, 0), (0, 0), (self.pad_size, self.pad_size), (self.pad_size, self.pad_size)))
cv = []
for i in range(self.out_dim):
for j in range(self.out_dim):
cost = x1 * x2[:, :, i:(i + H), j:(j + W)]
cost = F.mean(cost, 1, keepdims=True)
cv.append(cost)
return F.concat(cv, 1)
class FeaturePyramidExtractor(nn.Module):
def __init__(self, pyr_chans):
super(FeaturePyramidExtractor, self).__init__()
self.pyr_chans = pyr_chans
self.convs = []
        for ch_in, ch_out in zip(pyr_chans[:-1], pyr_chans[1:]):
layer = nn.Sequential(conv(ch_in, ch_out, s=2), conv(ch_out, ch_out))
self.convs.append(layer)
def forward(self, x):
feature_pyramid = []
for conv in self.convs:
x = conv(x)
feature_pyramid.append(x)
return feature_pyramid[::-1]
class GyroFlow(nn.Module):
def __init__(self, params):
super(GyroFlow, self).__init__()
self.leakyRELU = nn.LeakyReLU(0.1)
self.upsample = params.upsample
self.with_bk = True
self.pyr_chans = [3, 16, 32, 64, 96, 128, 192]
self.feature_pyramid_extractor = FeaturePyramidExtractor(self.pyr_chans)
# correlation range
self.d = 4
self.output_level = 4
# cost volume
self.cost_volume = CostVolume(d=self.d)
self.cv_dim = (self.d * 2 + 1)**2
self.upsampler = NeuralUpsampler()
self.ch_inp = 32 + self.cv_dim + 2
self.flow_estimator = FlowEstimator(self.ch_inp)
self.context_net = ContextNetwork(self.flow_estimator.feat_dim + 2)
        self.conv_1x1 = [
            conv(192, 32, k=1, s=1, d=1),
            conv(128, 32, k=1, s=1, d=1),
            conv(96, 32, k=1, s=1, d=1),
            conv(64, 32, k=1, s=1, d=1),
            conv(32, 32, k=1, s=1, d=1),
        ]
self.with_gyro_field = True
self.flow_predictor = FlowMaskEstimator(4, (8, 16, 32, 16, 8), 2)
self.mask_predictor = FlowMaskEstimator(64, (32, 32, 32, 16, 8), 1)
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.msra_normal_(m.weight, a=math.sqrt(5))
if m.bias is not None:
fan_in, _ = nn.init.calculate_fan_in_and_fan_out(m.weight)
bound = 1 / math.sqrt(fan_in)
| nn.init.uniform_(m.bias, -bound, bound) | megengine.module.init.uniform_ |
# -*- coding: utf-8 -*-
# MIT License
#
# Copyright (c) 2020 <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
# This repo is licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import math
import collections
import megengine.module as nn
import megengine.functional as F
from model.nn_upsample import NeuralUpsampler, FlowMaskEstimator
from common.utils import flow_warp, upsample2d_flow_as
def conv(inp, out, k=3, s=1, d=1, isReLU=True):
if isReLU:
ret = nn.Sequential(nn.Conv2d(inp, out, k, s, padding=((k - 1) * d) // 2, dilation=d, bias=True), nn.LeakyReLU(0.1))
else:
ret = nn.Sequential(nn.Conv2d(inp, out, k, s, padding=((k - 1) * d) // 2, dilation=d, bias=True))
return ret
class ContextNetwork(nn.Module):
def __init__(self, ch_in):
super(ContextNetwork, self).__init__()
self.convs = nn.Sequential(conv(ch_in, 128, 3, 1, 1), conv(128, 128, 3, 1, 2), conv(128, 128, 3, 1, 4), conv(128, 96, 3, 1, 8),
conv(96, 64, 3, 1, 16), conv(64, 32, 3, 1, 1), conv(32, 2, isReLU=False))
def forward(self, x):
return self.convs(x)
class FlowEstimator(nn.Module):
def __init__(self, ch_in):
super(FlowEstimator, self).__init__()
self.conv1 = conv(ch_in, 128)
self.conv2 = conv(128, 128)
self.conv3 = conv(128 + 128, 96)
self.conv4 = conv(96 + 128, 64)
self.conv5 = conv(96 + 64, 32)
# channels of the second last layer
self.feat_dim = 32
self.predict_flow = conv(64 + 32, 2, isReLU=False)
def forward(self, x):
x1 = self.conv1(x)
x2 = self.conv2(x1)
x3 = self.conv3(F.concat([x1, x2], axis=1))
x4 = self.conv4(F.concat([x2, x3], axis=1))
x5 = self.conv5(F.concat([x3, x4], axis=1))
flow = self.predict_flow(F.concat([x4, x5], axis=1))
return x5, flow
class CostVolume(nn.Module):
def __init__(self, d=4, *args, **kwargs):
super(CostVolume, self).__init__()
self.d = d
self.out_dim = 2 * self.d + 1
self.pad_size = self.d
def forward(self, x1, x2):
_, _, H, W = x1.shape
x2 = F.nn.pad(x2, ((0, 0), (0, 0), (self.pad_size, self.pad_size), (self.pad_size, self.pad_size)))
cv = []
for i in range(self.out_dim):
for j in range(self.out_dim):
cost = x1 * x2[:, :, i:(i + H), j:(j + W)]
cost = F.mean(cost, 1, keepdims=True)
cv.append(cost)
return F.concat(cv, 1)
class FeaturePyramidExtractor(nn.Module):
def __init__(self, pyr_chans):
super(FeaturePyramidExtractor, self).__init__()
self.pyr_chans = pyr_chans
self.convs = []
        for ch_in, ch_out in zip(pyr_chans[:-1], pyr_chans[1:]):
layer = nn.Sequential(conv(ch_in, ch_out, s=2), conv(ch_out, ch_out))
self.convs.append(layer)
def forward(self, x):
feature_pyramid = []
for conv in self.convs:
x = conv(x)
feature_pyramid.append(x)
return feature_pyramid[::-1]
class GyroFlow(nn.Module):
def __init__(self, params):
super(GyroFlow, self).__init__()
self.leakyRELU = nn.LeakyReLU(0.1)
self.upsample = params.upsample
self.with_bk = True
self.pyr_chans = [3, 16, 32, 64, 96, 128, 192]
self.feature_pyramid_extractor = FeaturePyramidExtractor(self.pyr_chans)
# correlation range
self.d = 4
self.output_level = 4
# cost volume
self.cost_volume = CostVolume(d=self.d)
self.cv_dim = (self.d * 2 + 1)**2
self.upsampler = NeuralUpsampler()
self.ch_inp = 32 + self.cv_dim + 2
self.flow_estimator = FlowEstimator(self.ch_inp)
self.context_net = ContextNetwork(self.flow_estimator.feat_dim + 2)
        self.conv_1x1 = [
            conv(192, 32, k=1, s=1, d=1),
            conv(128, 32, k=1, s=1, d=1),
            conv(96, 32, k=1, s=1, d=1),
            conv(64, 32, k=1, s=1, d=1),
            conv(32, 32, k=1, s=1, d=1),
        ]
self.with_gyro_field = True
self.flow_predictor = FlowMaskEstimator(4, (8, 16, 32, 16, 8), 2)
self.mask_predictor = FlowMaskEstimator(64, (32, 32, 32, 16, 8), 1)
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.msra_normal_(m.weight, a=math.sqrt(5))
if m.bias is not None:
fan_in, _ = nn.init.calculate_fan_in_and_fan_out(m.weight)
bound = 1 / math.sqrt(fan_in)
nn.init.uniform_(m.bias, -bound, bound)
def generate_fused_flow(self, x1, x2):
input_feature = F.concat((x1, x2), axis=1)
flow = self.flow_predictor(input_feature)[1]
assert flow.shape[1] == 2
return flow
def generate_map(self, x1, x2):
input_feature = F.concat((x1, x2), axis=1)
out = self.mask_predictor(input_feature)[1]
mask = F.sigmoid(out)
assert mask.shape[1] == 1
return mask
def self_guided_fusion_module(self, flow, gyro_field_rsz, x1, x2_warp, layer):
fuse_flow = self.generate_fused_flow(flow, gyro_field_rsz)
mask = self.generate_map(self.conv_1x1[layer](x1), self.conv_1x1[layer](x2_warp))
flow = fuse_flow * mask + gyro_field_rsz * (1 - mask)
return flow
def normalize_features(self, feature_list, normalize, center, moments_across_channels=True, moments_across_images=True):
# Compute feature statistics.
statistics = collections.defaultdict(list)
axes = [1, 2, 3] if moments_across_channels else [2, 3] # [b, c, h, w]
for feature_image in feature_list:
mean = F.mean(feature_image, axis=axes, keepdims=True) # [b,1,1,1] or [b,c,1,1]
variance = F.var(feature_image, axis=axes, keepdims=True) # [b,1,1,1] or [b,c,1,1]
statistics['mean'].append(mean)
statistics['var'].append(variance)
if moments_across_images:
statistics['mean'] = ([F.mean( | F.stack(statistics['mean'], axis=0) | megengine.functional.stack |
# -*- coding: utf-8 -*-
# MIT License
#
# Copyright (c) 2020 <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
# This repo is licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import math
import collections
import megengine.module as nn
import megengine.functional as F
from model.nn_upsample import NeuralUpsampler, FlowMaskEstimator
from common.utils import flow_warp, upsample2d_flow_as
def conv(inp, out, k=3, s=1, d=1, isReLU=True):
if isReLU:
ret = nn.Sequential(nn.Conv2d(inp, out, k, s, padding=((k - 1) * d) // 2, dilation=d, bias=True), nn.LeakyReLU(0.1))
else:
ret = nn.Sequential(nn.Conv2d(inp, out, k, s, padding=((k - 1) * d) // 2, dilation=d, bias=True))
return ret
class ContextNetwork(nn.Module):
def __init__(self, ch_in):
super(ContextNetwork, self).__init__()
self.convs = nn.Sequential(conv(ch_in, 128, 3, 1, 1), conv(128, 128, 3, 1, 2), conv(128, 128, 3, 1, 4), conv(128, 96, 3, 1, 8),
conv(96, 64, 3, 1, 16), conv(64, 32, 3, 1, 1), conv(32, 2, isReLU=False))
def forward(self, x):
return self.convs(x)
class FlowEstimator(nn.Module):
def __init__(self, ch_in):
super(FlowEstimator, self).__init__()
self.conv1 = conv(ch_in, 128)
self.conv2 = conv(128, 128)
self.conv3 = conv(128 + 128, 96)
self.conv4 = conv(96 + 128, 64)
self.conv5 = conv(96 + 64, 32)
# channels of the second last layer
self.feat_dim = 32
self.predict_flow = conv(64 + 32, 2, isReLU=False)
def forward(self, x):
x1 = self.conv1(x)
x2 = self.conv2(x1)
x3 = self.conv3(F.concat([x1, x2], axis=1))
x4 = self.conv4(F.concat([x2, x3], axis=1))
x5 = self.conv5(F.concat([x3, x4], axis=1))
flow = self.predict_flow(F.concat([x4, x5], axis=1))
return x5, flow
class CostVolume(nn.Module):
def __init__(self, d=4, *args, **kwargs):
super(CostVolume, self).__init__()
self.d = d
self.out_dim = 2 * self.d + 1
self.pad_size = self.d
def forward(self, x1, x2):
_, _, H, W = x1.shape
x2 = F.nn.pad(x2, ((0, 0), (0, 0), (self.pad_size, self.pad_size), (self.pad_size, self.pad_size)))
cv = []
for i in range(self.out_dim):
for j in range(self.out_dim):
cost = x1 * x2[:, :, i:(i + H), j:(j + W)]
cost = F.mean(cost, 1, keepdims=True)
cv.append(cost)
return F.concat(cv, 1)
class FeaturePyramidExtractor(nn.Module):
def __init__(self, pyr_chans):
super(FeaturePyramidExtractor, self).__init__()
self.pyr_chans = pyr_chans
self.convs = []
        for ch_in, ch_out in zip(pyr_chans[:-1], pyr_chans[1:]):
layer = nn.Sequential(conv(ch_in, ch_out, s=2), conv(ch_out, ch_out))
self.convs.append(layer)
def forward(self, x):
feature_pyramid = []
for conv in self.convs:
x = conv(x)
feature_pyramid.append(x)
return feature_pyramid[::-1]
class GyroFlow(nn.Module):
def __init__(self, params):
super(GyroFlow, self).__init__()
self.leakyRELU = nn.LeakyReLU(0.1)
self.upsample = params.upsample
self.with_bk = True
self.pyr_chans = [3, 16, 32, 64, 96, 128, 192]
self.feature_pyramid_extractor = FeaturePyramidExtractor(self.pyr_chans)
# correlation range
self.d = 4
self.output_level = 4
# cost volume
self.cost_volume = CostVolume(d=self.d)
self.cv_dim = (self.d * 2 + 1)**2
self.upsampler = NeuralUpsampler()
self.ch_inp = 32 + self.cv_dim + 2
self.flow_estimator = FlowEstimator(self.ch_inp)
self.context_net = ContextNetwork(self.flow_estimator.feat_dim + 2)
        self.conv_1x1 = [
            conv(192, 32, k=1, s=1, d=1),
            conv(128, 32, k=1, s=1, d=1),
            conv(96, 32, k=1, s=1, d=1),
            conv(64, 32, k=1, s=1, d=1),
            conv(32, 32, k=1, s=1, d=1),
        ]
self.with_gyro_field = True
self.flow_predictor = FlowMaskEstimator(4, (8, 16, 32, 16, 8), 2)
self.mask_predictor = FlowMaskEstimator(64, (32, 32, 32, 16, 8), 1)
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.msra_normal_(m.weight, a=math.sqrt(5))
if m.bias is not None:
fan_in, _ = nn.init.calculate_fan_in_and_fan_out(m.weight)
bound = 1 / math.sqrt(fan_in)
nn.init.uniform_(m.bias, -bound, bound)
def generate_fused_flow(self, x1, x2):
input_feature = F.concat((x1, x2), axis=1)
flow = self.flow_predictor(input_feature)[1]
assert flow.shape[1] == 2
return flow
def generate_map(self, x1, x2):
input_feature = F.concat((x1, x2), axis=1)
out = self.mask_predictor(input_feature)[1]
mask = F.sigmoid(out)
assert mask.shape[1] == 1
return mask
def self_guided_fusion_module(self, flow, gyro_field_rsz, x1, x2_warp, layer):
fuse_flow = self.generate_fused_flow(flow, gyro_field_rsz)
mask = self.generate_map(self.conv_1x1[layer](x1), self.conv_1x1[layer](x2_warp))
flow = fuse_flow * mask + gyro_field_rsz * (1 - mask)
return flow
def normalize_features(self, feature_list, normalize, center, moments_across_channels=True, moments_across_images=True):
# Compute feature statistics.
statistics = collections.defaultdict(list)
axes = [1, 2, 3] if moments_across_channels else [2, 3] # [b, c, h, w]
for feature_image in feature_list:
mean = F.mean(feature_image, axis=axes, keepdims=True) # [b,1,1,1] or [b,c,1,1]
variance = F.var(feature_image, axis=axes, keepdims=True) # [b,1,1,1] or [b,c,1,1]
statistics['mean'].append(mean)
statistics['var'].append(variance)
if moments_across_images:
statistics['mean'] = ([F.mean(F.stack(statistics['mean'], axis=0), axis=(0, ))] * len(feature_list))
statistics['var'] = ([F.var( | F.stack(statistics['var'], axis=0) | megengine.functional.stack |
#!/usr/bin/env python3
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
import argparse
import importlib
import os
import sys
import megengine as mge
import megengine.distributed as dist
from basecore.config import ConfigDict
from loguru import logger
from basecls.models import build_model, load_model, sync_model
from basecls.utils import registers, set_nccl_env, set_num_threads, setup_logger
def default_parser() -> argparse.ArgumentParser:
"""Build args parser for training script.
Returns:
The args parser.
"""
parser = argparse.ArgumentParser()
parser.add_argument("-f", "--file", type=str, help="training process description file")
parser.add_argument(
"--resume", action="store_true", help="resume training from saved checkpoint or not"
)
parser.add_argument(
"opts",
default=None,
help="Modify config options using the command-line",
nargs=argparse.REMAINDER,
)
return parser
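# Example invocation (the config path is illustrative, not taken from this
# repo):
#     python train.py -f configs/resnet50.py --resume
# Anything after the named flags is captured into `opts` by
# argparse.REMAINDER and merged into the config via `cfg.merge(args.opts)`.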
@logger.catch
def worker(args: argparse.Namespace):
"""Worker function for training script.
Args:
args: args for training script.
"""
logger.info(f"Init process group for gpu{dist.get_rank()} done")
sys.path.append(os.path.dirname(args.file))
module_name = os.path.splitext(os.path.basename(args.file))[0]
current_network = importlib.import_module(module_name)
cfg = current_network.Cfg()
cfg.merge(args.opts)
cfg.resume = args.resume
if cfg.output_dir is None:
cfg.output_dir = f"./logs_{module_name}"
cfg.output_dir = os.path.abspath(cfg.output_dir)
cfg.set_mode("freeze")
if dist.get_rank() == 0 and not os.path.exists(cfg.output_dir):
os.makedirs(cfg.output_dir)
| dist.group_barrier() | megengine.distributed.group_barrier |
#!/usr/bin/env python3
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
import argparse
import importlib
import os
import sys
import megengine as mge
import megengine.distributed as dist
from basecore.config import ConfigDict
from loguru import logger
from basecls.models import build_model, load_model, sync_model
from basecls.utils import registers, set_nccl_env, set_num_threads, setup_logger
def default_parser() -> argparse.ArgumentParser:
"""Build args parser for training script.
Returns:
The args parser.
"""
parser = argparse.ArgumentParser()
parser.add_argument("-f", "--file", type=str, help="training process description file")
parser.add_argument(
"--resume", action="store_true", help="resume training from saved checkpoint or not"
)
parser.add_argument(
"opts",
default=None,
help="Modify config options using the command-line",
nargs=argparse.REMAINDER,
)
return parser
@logger.catch
def worker(args: argparse.Namespace):
"""Worker function for training script.
Args:
args: args for training script.
"""
logger.info(f"Init process group for gpu{dist.get_rank()} done")
sys.path.append(os.path.dirname(args.file))
module_name = os.path.splitext(os.path.basename(args.file))[0]
current_network = importlib.import_module(module_name)
cfg = current_network.Cfg()
cfg.merge(args.opts)
cfg.resume = args.resume
if cfg.output_dir is None:
cfg.output_dir = f"./logs_{module_name}"
cfg.output_dir = os.path.abspath(cfg.output_dir)
cfg.set_mode("freeze")
if dist.get_rank() == 0 and not os.path.exists(cfg.output_dir):
os.makedirs(cfg.output_dir)
dist.group_barrier()
# FIXME: will hang in fork mode, however spawn mode meets other issues
# try:
# from clearml import Task
# if dist.get_rank() == 0:
# Task.current_task().connect_configuration(cfg)
# except Exception as e:
# logger.warning(e)
setup_logger(cfg.output_dir, "train_log.txt", to_loguru=True)
logger.info(f"args: {args}")
if cfg.fastrun:
logger.info("Using fastrun mode...")
mge.functional.debug_param.set_execution_strategy("PROFILE")
if cfg.dtr:
logger.info("Enabling DTR...")
mge.dtr.enable()
trainer = build(cfg)
trainer.train()
def build(cfg: ConfigDict):
"""Build function for training script.
Args:
cfg: config for training.
Returns:
A trainer.
"""
model = build_model(cfg)
if getattr(cfg, "weights", None) is not None:
load_model(model, cfg.weights, strict=False)
sync_model(model)
model.train()
logger.info(f"Using augments named {cfg.augments.name}")
augments = registers.augments.get(cfg.augments.name).build(cfg)
logger.info(f"Using dataloader named {cfg.data.name}")
dataloader = registers.dataloaders.get(cfg.data.name).build(cfg, True, augments)
logger.info(f"Using solver named {cfg.solver.name}")
solver = registers.solvers.get(cfg.solver.name).build(cfg, model)
logger.info(f"Using hooks named {cfg.hooks_name}")
hooks = registers.hooks.get(cfg.hooks_name).build(cfg)
logger.info(f"Using trainer named {cfg.trainer_name}")
TrainerClass = registers.trainers.get(cfg.trainer_name)
return TrainerClass(cfg, model, dataloader, solver, hooks=hooks)
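# A hedged sketch of the registry pattern used above: every component is
# looked up by the name stored in the config, so swapping e.g. the solver is
# a config edit rather than a code edit (the name below is hypothetical):
#     cfg.solver.name = "sgd"
#     solver = registers.solvers.get(cfg.solver.name).build(cfg, model)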
def main():
"""Main function for training script."""
parser = default_parser()
args = parser.parse_args()
set_nccl_env()
set_num_threads()
# FIXME: will hang in fork mode, however spawn mode meets other issues
# try:
# import getpass
# from clearml import Task
# task_name = f"{getpass.getuser()}-{os.path.splitext(os.path.basename(args.file))[0]}"
# task = Task.init(project_name="basecls", task_name=task_name) # noqa: F841
# except Exception as e:
# logger.warning(e)
device_count = | mge.device.get_device_count("gpu") | megengine.device.get_device_count |
#!/usr/bin/env python3
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
import argparse
import importlib
import os
import sys
import megengine as mge
import megengine.distributed as dist
from basecore.config import ConfigDict
from loguru import logger
from basecls.models import build_model, load_model, sync_model
from basecls.utils import registers, set_nccl_env, set_num_threads, setup_logger
def default_parser() -> argparse.ArgumentParser:
"""Build args parser for training script.
Returns:
The args parser.
"""
parser = argparse.ArgumentParser()
parser.add_argument("-f", "--file", type=str, help="training process description file")
parser.add_argument(
"--resume", action="store_true", help="resume training from saved checkpoint or not"
)
parser.add_argument(
"opts",
default=None,
help="Modify config options using the command-line",
nargs=argparse.REMAINDER,
)
return parser
@logger.catch
def worker(args: argparse.Namespace):
"""Worker function for training script.
Args:
args: args for training script.
"""
logger.info(f"Init process group for gpu{dist.get_rank()} done")
sys.path.append(os.path.dirname(args.file))
module_name = os.path.splitext(os.path.basename(args.file))[0]
current_network = importlib.import_module(module_name)
cfg = current_network.Cfg()
cfg.merge(args.opts)
cfg.resume = args.resume
if cfg.output_dir is None:
cfg.output_dir = f"./logs_{module_name}"
cfg.output_dir = os.path.abspath(cfg.output_dir)
cfg.set_mode("freeze")
if dist.get_rank() == 0 and not os.path.exists(cfg.output_dir):
os.makedirs(cfg.output_dir)
dist.group_barrier()
# FIXME: will hang in fork mode, however spawn mode meets other issues
# try:
# from clearml import Task
# if dist.get_rank() == 0:
# Task.current_task().connect_configuration(cfg)
# except Exception as e:
# logger.warning(e)
setup_logger(cfg.output_dir, "train_log.txt", to_loguru=True)
logger.info(f"args: {args}")
if cfg.fastrun:
logger.info("Using fastrun mode...")
| mge.functional.debug_param.set_execution_strategy("PROFILE") | megengine.functional.debug_param.set_execution_strategy |
#!/usr/bin/env python3
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
import argparse
import importlib
import os
import sys
import megengine as mge
import megengine.distributed as dist
from basecore.config import ConfigDict
from loguru import logger
from basecls.models import build_model, load_model, sync_model
from basecls.utils import registers, set_nccl_env, set_num_threads, setup_logger
def default_parser() -> argparse.ArgumentParser:
"""Build args parser for training script.
Returns:
The args parser.
"""
parser = argparse.ArgumentParser()
parser.add_argument("-f", "--file", type=str, help="training process description file")
parser.add_argument(
"--resume", action="store_true", help="resume training from saved checkpoint or not"
)
parser.add_argument(
"opts",
default=None,
help="Modify config options using the command-line",
nargs=argparse.REMAINDER,
)
return parser
@logger.catch
def worker(args: argparse.Namespace):
"""Worker function for training script.
Args:
args: args for training script.
"""
logger.info(f"Init process group for gpu{dist.get_rank()} done")
sys.path.append(os.path.dirname(args.file))
module_name = os.path.splitext(os.path.basename(args.file))[0]
current_network = importlib.import_module(module_name)
cfg = current_network.Cfg()
cfg.merge(args.opts)
cfg.resume = args.resume
if cfg.output_dir is None:
cfg.output_dir = f"./logs_{module_name}"
cfg.output_dir = os.path.abspath(cfg.output_dir)
cfg.set_mode("freeze")
if dist.get_rank() == 0 and not os.path.exists(cfg.output_dir):
os.makedirs(cfg.output_dir)
dist.group_barrier()
# FIXME: will hang in fork mode, however spawn mode meets other issues
# try:
# from clearml import Task
# if dist.get_rank() == 0:
# Task.current_task().connect_configuration(cfg)
# except Exception as e:
# logger.warning(e)
setup_logger(cfg.output_dir, "train_log.txt", to_loguru=True)
logger.info(f"args: {args}")
if cfg.fastrun:
logger.info("Using fastrun mode...")
mge.functional.debug_param.set_execution_strategy("PROFILE")
if cfg.dtr:
logger.info("Enabling DTR...")
| mge.dtr.enable() | megengine.dtr.enable |
#!/usr/bin/env python3
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
import argparse
import importlib
import os
import sys
import megengine as mge
import megengine.distributed as dist
from basecore.config import ConfigDict
from loguru import logger
from basecls.models import build_model, load_model, sync_model
from basecls.utils import registers, set_nccl_env, set_num_threads, setup_logger
def default_parser() -> argparse.ArgumentParser:
"""Build args parser for training script.
Returns:
The args parser.
"""
parser = argparse.ArgumentParser()
parser.add_argument("-f", "--file", type=str, help="training process description file")
parser.add_argument(
"--resume", action="store_true", help="resume training from saved checkpoint or not"
)
parser.add_argument(
"opts",
default=None,
help="Modify config options using the command-line",
nargs=argparse.REMAINDER,
)
return parser
@logger.catch
def worker(args: argparse.Namespace):
"""Worker function for training script.
Args:
args: args for training script.
"""
logger.info(f"Init process group for gpu{dist.get_rank()} done")
sys.path.append(os.path.dirname(args.file))
module_name = os.path.splitext(os.path.basename(args.file))[0]
current_network = importlib.import_module(module_name)
cfg = current_network.Cfg()
cfg.merge(args.opts)
cfg.resume = args.resume
if cfg.output_dir is None:
cfg.output_dir = f"./logs_{module_name}"
cfg.output_dir = os.path.abspath(cfg.output_dir)
cfg.set_mode("freeze")
if | dist.get_rank() | megengine.distributed.get_rank |
#!/usr/bin/env python3
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
import argparse
import importlib
import os
import sys
import megengine as mge
import megengine.distributed as dist
from basecore.config import ConfigDict
from loguru import logger
from basecls.models import build_model, load_model, sync_model
from basecls.utils import registers, set_nccl_env, set_num_threads, setup_logger
def default_parser() -> argparse.ArgumentParser:
"""Build args parser for training script.
Returns:
The args parser.
"""
parser = argparse.ArgumentParser()
parser.add_argument("-f", "--file", type=str, help="training process description file")
parser.add_argument(
"--resume", action="store_true", help="resume training from saved checkpoint or not"
)
parser.add_argument(
"opts",
default=None,
help="Modify config options using the command-line",
nargs=argparse.REMAINDER,
)
return parser
@logger.catch
def worker(args: argparse.Namespace):
"""Worker function for training script.
Args:
args: args for training script.
"""
logger.info(f"Init process group for gpu{ | dist.get_rank() | megengine.distributed.get_rank |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import numpy as np
import pytest
import megengine
import megengine.autodiff as ad
import megengine.optimizer as optimizer
from megengine import Parameter, tensor
from megengine.module import BatchNorm2d
def test_frozen_bn():
nchannel = 3
m = | BatchNorm2d(nchannel, freeze=True) | megengine.module.BatchNorm2d |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import numpy as np
import pytest
import megengine
import megengine.autodiff as ad
import megengine.optimizer as optimizer
from megengine import Parameter, tensor
from megengine.module import BatchNorm2d
def test_frozen_bn():
nchannel = 3
m = BatchNorm2d(nchannel, freeze=True)
saved_var = m.running_var.numpy()
saved_mean = m.running_mean.numpy()
saved_wt = m.weight.numpy()
saved_bias = m.bias.numpy()
gm = ad.GradManager().attach(m.parameters())
optim = optimizer.SGD(m.parameters(), lr=1.0)
optim.clear_grad()
data = np.random.random((6, nchannel, 2, 2)).astype("float32")
with gm:
loss = m(data).mean()
gm.backward(loss)
optim.step()
np.testing.assert_equal(m.running_var.numpy(), saved_var)
np.testing.assert_equal(m.running_mean.numpy(), saved_mean)
np.testing.assert_equal(m.weight.numpy(), saved_wt)
np.testing.assert_equal(m.bias.numpy(), saved_bias)
np.testing.assert_almost_equal(loss.numpy(), data.mean(), 5)
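# Why the last check holds (hedged on defaults): a freshly built BatchNorm2d
# starts with running_mean = 0, running_var = 1, weight = 1, bias = 0, and
# freeze=True keeps them fixed, so the layer computes roughly
# (x - 0) / sqrt(1 + eps) * 1 + 0 ~ x and the loss reduces to data.mean().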
def test_bn_no_track_stat():
nchannel = 3
m = | BatchNorm2d(nchannel, track_running_stats=False) | megengine.module.BatchNorm2d |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import numpy as np
import pytest
import megengine
import megengine.autodiff as ad
import megengine.optimizer as optimizer
from megengine import Parameter, tensor
from megengine.module import BatchNorm2d
def test_frozen_bn():
nchannel = 3
m = BatchNorm2d(nchannel, freeze=True)
saved_var = m.running_var.numpy()
saved_mean = m.running_mean.numpy()
saved_wt = m.weight.numpy()
saved_bias = m.bias.numpy()
gm = ad.GradManager().attach(m.parameters())
optim = optimizer.SGD(m.parameters(), lr=1.0)
optim.clear_grad()
data = np.random.random((6, nchannel, 2, 2)).astype("float32")
with gm:
loss = m(data).mean()
gm.backward(loss)
optim.step()
np.testing.assert_equal(m.running_var.numpy(), saved_var)
np.testing.assert_equal(m.running_mean.numpy(), saved_mean)
np.testing.assert_equal(m.weight.numpy(), saved_wt)
np.testing.assert_equal(m.bias.numpy(), saved_bias)
np.testing.assert_almost_equal(loss.numpy(), data.mean(), 5)
def test_bn_no_track_stat():
nchannel = 3
m = BatchNorm2d(nchannel, track_running_stats=False)
gm = ad.GradManager().attach(m.parameters())
optim = optimizer.SGD(m.parameters(), lr=1.0)
optim.clear_grad()
data = np.random.random((6, nchannel, 2, 2)).astype("float32")
with gm:
loss = m(data).sum()
gm.backward(loss)
optim.step()
def test_bn_no_track_stat2():
nchannel = 3
m = | BatchNorm2d(nchannel) | megengine.module.BatchNorm2d |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import numpy as np
import pytest
import megengine
import megengine.autodiff as ad
import megengine.optimizer as optimizer
from megengine import Parameter, tensor
from megengine.module import BatchNorm2d
def test_frozen_bn():
nchannel = 3
m = BatchNorm2d(nchannel, freeze=True)
saved_var = m.running_var.numpy()
saved_mean = m.running_mean.numpy()
saved_wt = m.weight.numpy()
saved_bias = m.bias.numpy()
gm = ad.GradManager().attach(m.parameters())
optim = optimizer.SGD(m.parameters(), lr=1.0)
optim.clear_grad()
data = np.random.random((6, nchannel, 2, 2)).astype("float32")
with gm:
loss = m(data).mean()
gm.backward(loss)
optim.step()
np.testing.assert_equal(m.running_var.numpy(), saved_var)
np.testing.assert_equal(m.running_mean.numpy(), saved_mean)
np.testing.assert_equal(m.weight.numpy(), saved_wt)
np.testing.assert_equal(m.bias.numpy(), saved_bias)
np.testing.assert_almost_equal(loss.numpy(), data.mean(), 5)
def test_bn_no_track_stat():
nchannel = 3
m = BatchNorm2d(nchannel, track_running_stats=False)
gm = ad.GradManager().attach(m.parameters())
optim = optimizer.SGD(m.parameters(), lr=1.0)
optim.clear_grad()
data = np.random.random((6, nchannel, 2, 2)).astype("float32")
with gm:
loss = m(data).sum()
gm.backward(loss)
optim.step()
def test_bn_no_track_stat2():
nchannel = 3
    m = BatchNorm2d(nchannel)  # Init with track_running_stats=True
m.track_running_stats = False
# m.running_var and m.running_mean created during init time
saved_var = m.running_var.numpy()
assert saved_var is not None
saved_mean = m.running_mean.numpy()
assert saved_mean is not None
gm = ad.GradManager().attach(m.parameters())
optim = optimizer.SGD(m.parameters(), lr=1.0)
optim.clear_grad()
data = np.random.random((6, nchannel, 2, 2)).astype("float32")
with gm:
loss = m(data).sum()
gm.backward(loss)
optim.step()
np.testing.assert_equal(m.running_var.numpy(), saved_var)
np.testing.assert_equal(m.running_mean.numpy(), saved_mean)
def test_bn_no_track_stat3():
nchannel = 3
m = | BatchNorm2d(nchannel, track_running_stats=False) | megengine.module.BatchNorm2d |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import numpy as np
import pytest
import megengine
import megengine.autodiff as ad
import megengine.optimizer as optimizer
from megengine import Parameter, tensor
from megengine.module import BatchNorm2d
def test_frozen_bn():
nchannel = 3
m = BatchNorm2d(nchannel, freeze=True)
saved_var = m.running_var.numpy()
saved_mean = m.running_mean.numpy()
saved_wt = m.weight.numpy()
saved_bias = m.bias.numpy()
gm = | ad.GradManager() | megengine.autodiff.GradManager |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import numpy as np
import pytest
import megengine
import megengine.autodiff as ad
import megengine.optimizer as optimizer
from megengine import Parameter, tensor
from megengine.module import BatchNorm2d
def test_frozen_bn():
nchannel = 3
m = BatchNorm2d(nchannel, freeze=True)
saved_var = m.running_var.numpy()
saved_mean = m.running_mean.numpy()
saved_wt = m.weight.numpy()
saved_bias = m.bias.numpy()
gm = ad.GradManager().attach(m.parameters())
optim = optimizer.SGD(m.parameters(), lr=1.0)
optim.clear_grad()
data = np.random.random((6, nchannel, 2, 2)).astype("float32")
with gm:
loss = m(data).mean()
gm.backward(loss)
optim.step()
np.testing.assert_equal(m.running_var.numpy(), saved_var)
np.testing.assert_equal(m.running_mean.numpy(), saved_mean)
np.testing.assert_equal(m.weight.numpy(), saved_wt)
np.testing.assert_equal(m.bias.numpy(), saved_bias)
np.testing.assert_almost_equal(loss.numpy(), data.mean(), 5)
def test_bn_no_track_stat():
nchannel = 3
m = BatchNorm2d(nchannel, track_running_stats=False)
gm = | ad.GradManager() | megengine.autodiff.GradManager |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import numpy as np
import pytest
import megengine
import megengine.autodiff as ad
import megengine.optimizer as optimizer
from megengine import Parameter, tensor
from megengine.module import BatchNorm2d
def test_frozen_bn():
nchannel = 3
m = BatchNorm2d(nchannel, freeze=True)
saved_var = m.running_var.numpy()
saved_mean = m.running_mean.numpy()
saved_wt = m.weight.numpy()
saved_bias = m.bias.numpy()
gm = ad.GradManager().attach(m.parameters())
optim = optimizer.SGD(m.parameters(), lr=1.0)
optim.clear_grad()
data = np.random.random((6, nchannel, 2, 2)).astype("float32")
with gm:
loss = m(data).mean()
gm.backward(loss)
optim.step()
np.testing.assert_equal(m.running_var.numpy(), saved_var)
np.testing.assert_equal(m.running_mean.numpy(), saved_mean)
np.testing.assert_equal(m.weight.numpy(), saved_wt)
np.testing.assert_equal(m.bias.numpy(), saved_bias)
np.testing.assert_almost_equal(loss.numpy(), data.mean(), 5)
def test_bn_no_track_stat():
nchannel = 3
m = BatchNorm2d(nchannel, track_running_stats=False)
gm = ad.GradManager().attach(m.parameters())
optim = optimizer.SGD(m.parameters(), lr=1.0)
optim.clear_grad()
data = np.random.random((6, nchannel, 2, 2)).astype("float32")
with gm:
loss = m(data).sum()
gm.backward(loss)
optim.step()
def test_bn_no_track_stat2():
nchannel = 3
    m = BatchNorm2d(nchannel)  # Init with track_running_stats=True
m.track_running_stats = False
# m.running_var and m.running_mean created during init time
saved_var = m.running_var.numpy()
assert saved_var is not None
saved_mean = m.running_mean.numpy()
assert saved_mean is not None
gm = | ad.GradManager() | megengine.autodiff.GradManager |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import os
import argparse
import datetime
import multiprocessing as mp
import time
import megengine as mge
import megengine.amp as amp
import megengine.autodiff as autodiff
import megengine.distributed as dist
import megengine.functional as F
import megengine.jit as jit
import megengine.optimizer as optim
from basecore.utils import log_every_n_seconds
from loguru import logger
from basecls.data.fake_data import FakeDataLoader
from basecls.layers import Preprocess
from basecls.utils import registers, set_nccl_env, set_num_threads
def main():
parser = argparse.ArgumentParser()
parser.add_argument("-m", "--model", default="resnet50", type=str)
parser.add_argument("--mode", default="eval", type=str)
parser.add_argument("-d", "--device", default="gpu", type=str)
parser.add_argument("--amp", default=0, type=int)
parser.add_argument("--fastrun", action="store_true")
parser.add_argument("--trace", action="store_true")
parser.add_argument("--dtr", action="store_true")
parser.add_argument("-b", "--batch-size", default=32, type=int)
parser.add_argument("--channel", default=3, type=int)
parser.add_argument("--height", default=224, type=int)
parser.add_argument("--width", default=224, type=int)
parser.add_argument("-n", "--world-size", default=8, type=int)
parser.add_argument("--warm-iters", default=50, type=int)
parser.add_argument("-t", "--total-iters", default=200, type=int)
parser.add_argument("--log-seconds", default=2, type=int)
args = parser.parse_args()
mp.set_start_method("spawn")
set_nccl_env()
set_num_threads()
if args.world_size == 1:
worker(args)
else:
dist.launcher(worker, n_gpus=args.world_size)(args)
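    # Hedged note: dist.launcher wraps `worker` so that n_gpus processes are
    # spawned, each with its rank and device preset; calling the returned
    # wrapper with (args) forwards the same namespace to every replica.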
@logger.catch
def worker(args: argparse.Namespace):
if | dist.get_rank() | megengine.distributed.get_rank |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import os
import argparse
import datetime
import multiprocessing as mp
import time
import megengine as mge
import megengine.amp as amp
import megengine.autodiff as autodiff
import megengine.distributed as dist
import megengine.functional as F
import megengine.jit as jit
import megengine.optimizer as optim
from basecore.utils import log_every_n_seconds
from loguru import logger
from basecls.data.fake_data import FakeDataLoader
from basecls.layers import Preprocess
from basecls.utils import registers, set_nccl_env, set_num_threads
def main():
parser = argparse.ArgumentParser()
parser.add_argument("-m", "--model", default="resnet50", type=str)
parser.add_argument("--mode", default="eval", type=str)
parser.add_argument("-d", "--device", default="gpu", type=str)
parser.add_argument("--amp", default=0, type=int)
parser.add_argument("--fastrun", action="store_true")
parser.add_argument("--trace", action="store_true")
parser.add_argument("--dtr", action="store_true")
parser.add_argument("-b", "--batch-size", default=32, type=int)
parser.add_argument("--channel", default=3, type=int)
parser.add_argument("--height", default=224, type=int)
parser.add_argument("--width", default=224, type=int)
parser.add_argument("-n", "--world-size", default=8, type=int)
parser.add_argument("--warm-iters", default=50, type=int)
parser.add_argument("-t", "--total-iters", default=200, type=int)
parser.add_argument("--log-seconds", default=2, type=int)
args = parser.parse_args()
mp.set_start_method("spawn")
set_nccl_env()
set_num_threads()
if args.world_size == 1:
worker(args)
else:
dist.launcher(worker, n_gpus=args.world_size)(args)
@logger.catch
def worker(args: argparse.Namespace):
if dist.get_rank() != 0:
logger.remove()
logger.info(f"args: {args}")
if args.fastrun:
logger.info("Using fastrun mode...")
| mge.functional.debug_param.set_execution_strategy("PROFILE") | megengine.functional.debug_param.set_execution_strategy |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import os
import argparse
import datetime
import multiprocessing as mp
import time
import megengine as mge
import megengine.amp as amp
import megengine.autodiff as autodiff
import megengine.distributed as dist
import megengine.functional as F
import megengine.jit as jit
import megengine.optimizer as optim
from basecore.utils import log_every_n_seconds
from loguru import logger
from basecls.data.fake_data import FakeDataLoader
from basecls.layers import Preprocess
from basecls.utils import registers, set_nccl_env, set_num_threads
def main():
parser = argparse.ArgumentParser()
parser.add_argument("-m", "--model", default="resnet50", type=str)
parser.add_argument("--mode", default="eval", type=str)
parser.add_argument("-d", "--device", default="gpu", type=str)
parser.add_argument("--amp", default=0, type=int)
parser.add_argument("--fastrun", action="store_true")
parser.add_argument("--trace", action="store_true")
parser.add_argument("--dtr", action="store_true")
parser.add_argument("-b", "--batch-size", default=32, type=int)
parser.add_argument("--channel", default=3, type=int)
parser.add_argument("--height", default=224, type=int)
parser.add_argument("--width", default=224, type=int)
parser.add_argument("-n", "--world-size", default=8, type=int)
parser.add_argument("--warm-iters", default=50, type=int)
parser.add_argument("-t", "--total-iters", default=200, type=int)
parser.add_argument("--log-seconds", default=2, type=int)
args = parser.parse_args()
mp.set_start_method("spawn")
set_nccl_env()
set_num_threads()
if args.world_size == 1:
worker(args)
else:
dist.launcher(worker, n_gpus=args.world_size)(args)
@logger.catch
def worker(args: argparse.Namespace):
if dist.get_rank() != 0:
logger.remove()
logger.info(f"args: {args}")
if args.fastrun:
logger.info("Using fastrun mode...")
mge.functional.debug_param.set_execution_strategy("PROFILE")
if args.dtr:
logger.info("Enabling DTR...")
| mge.dtr.enable() | megengine.dtr.enable |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import os
import argparse
import datetime
import multiprocessing as mp
import time
import megengine as mge
import megengine.amp as amp
import megengine.autodiff as autodiff
import megengine.distributed as dist
import megengine.functional as F
import megengine.jit as jit
import megengine.optimizer as optim
from basecore.utils import log_every_n_seconds
from loguru import logger
from basecls.data.fake_data import FakeDataLoader
from basecls.layers import Preprocess
from basecls.utils import registers, set_nccl_env, set_num_threads
def main():
parser = argparse.ArgumentParser()
parser.add_argument("-m", "--model", default="resnet50", type=str)
parser.add_argument("--mode", default="eval", type=str)
parser.add_argument("-d", "--device", default="gpu", type=str)
parser.add_argument("--amp", default=0, type=int)
parser.add_argument("--fastrun", action="store_true")
parser.add_argument("--trace", action="store_true")
parser.add_argument("--dtr", action="store_true")
parser.add_argument("-b", "--batch-size", default=32, type=int)
parser.add_argument("--channel", default=3, type=int)
parser.add_argument("--height", default=224, type=int)
parser.add_argument("--width", default=224, type=int)
parser.add_argument("-n", "--world-size", default=8, type=int)
parser.add_argument("--warm-iters", default=50, type=int)
parser.add_argument("-t", "--total-iters", default=200, type=int)
parser.add_argument("--log-seconds", default=2, type=int)
args = parser.parse_args()
mp.set_start_method("spawn")
set_nccl_env()
set_num_threads()
if args.world_size == 1:
worker(args)
else:
dist.launcher(worker, n_gpus=args.world_size)(args)
@logger.catch
def worker(args: argparse.Namespace):
if dist.get_rank() != 0:
logger.remove()
logger.info(f"args: {args}")
if args.fastrun:
logger.info("Using fastrun mode...")
mge.functional.debug_param.set_execution_strategy("PROFILE")
if args.dtr:
logger.info("Enabling DTR...")
mge.dtr.enable()
mge.set_default_device(f"{args.device}{dist.get_rank()}")
model = registers.models.get(args.model)(head=dict(w_out=1000))
dataloader = FakeDataLoader(
args.batch_size,
(args.height, args.width),
args.channel,
length=args.warm_iters + args.total_iters,
num_classes=1000,
)
if args.mode == "train":
BenchCls = TrainBench
elif args.mode == "eval":
BenchCls = EvalBench
else:
raise NotImplementedError(f"Benchmark mode '{args.mode}' not supported")
bench = BenchCls(model, dataloader, args.trace, args.amp)
bench.benchmark(args.warm_iters, args.log_seconds)
class ClsBench:
def __init__(self, model, dataloader, trace: bool = False):
self.model = model
self.dataloader = dataloader
self.preprocess = Preprocess(mean=127, std=128)
if trace:
self.model_step = jit.trace(self.model_step, symbolic=True)
def benchmark(self, warm_iters=50, log_seconds=2):
total_iters = len(self.dataloader) - warm_iters
total_time = 0
for i, data in enumerate(self.dataloader, 1):
if i == warm_iters + 1:
total_time = 0
samples, targets = self.preprocess(data)
mge._full_sync()
t = time.perf_counter()
self.model_step(samples, targets)
mge._full_sync()
total_time += time.perf_counter() - t
if log_seconds > 0:
cnt = i - warm_iters if i > warm_iters else i
tot = total_iters if i > warm_iters else warm_iters
cycle = total_time / cnt
eta = (tot - cnt) * cycle
log_every_n_seconds(
"{} process {}/{}, average speed:{:0.3f}ms/iters. ETA:{}".format(
"Benchmark" if i > warm_iters else "Warmup",
cnt,
tot,
cycle * 1000,
datetime.timedelta(seconds=int(eta)),
),
n=log_seconds,
)
avg_speed_ms = total_time / total_iters * 1000
logger.info(
"Benchmark total time:{}, average speed:{:0.3f}ms/iters.".format(
datetime.timedelta(seconds=int(total_time)), avg_speed_ms
)
)
return avg_speed_ms
def model_step(self, samples, targets):
raise NotImplementedError
class TrainBench(ClsBench):
def __init__(self, model, dataloader, trace: bool = False, amp_version: int = 0):
model.train()
super().__init__(model, dataloader, trace)
self.opt = optim.SGD(model.parameters(), lr=0.1, momentum=0.9, weight_decay=0.0001)
self.gm = | autodiff.GradManager() | megengine.autodiff.GradManager |
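A self-contained, hedged sketch of the GradManager pattern this row completes: attach parameters once, record the forward pass inside a with-block, then backward and step. The tiny Linear model and random data are illustrative.

import numpy as np
import megengine as mge
import megengine.autodiff as autodiff
import megengine.functional as F
import megengine.optimizer as optim
from megengine.module import Linear

net = Linear(4, 2)
gm = autodiff.GradManager().attach(net.parameters())
opt = optim.SGD(net.parameters(), lr=0.1)
x = mge.tensor(np.random.randn(8, 4).astype("float32"))
y = mge.tensor(np.random.randint(0, 2, size=(8,)).astype("int32"))
with gm:  # record the computation for autodiff
    loss = F.loss.cross_entropy(net(x), y)
    gm.backward(loss)  # populate .grad on the attached parameters
opt.step().clear_grad()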
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import os
import argparse
import datetime
import multiprocessing as mp
import time
import megengine as mge
import megengine.amp as amp
import megengine.autodiff as autodiff
import megengine.distributed as dist
import megengine.functional as F
import megengine.jit as jit
import megengine.optimizer as optim
from basecore.utils import log_every_n_seconds
from loguru import logger
from basecls.data.fake_data import FakeDataLoader
from basecls.layers import Preprocess
from basecls.utils import registers, set_nccl_env, set_num_threads
def main():
parser = argparse.ArgumentParser()
parser.add_argument("-m", "--model", default="resnet50", type=str)
parser.add_argument("--mode", default="eval", type=str)
parser.add_argument("-d", "--device", default="gpu", type=str)
parser.add_argument("--amp", default=0, type=int)
parser.add_argument("--fastrun", action="store_true")
parser.add_argument("--trace", action="store_true")
parser.add_argument("--dtr", action="store_true")
parser.add_argument("-b", "--batch-size", default=32, type=int)
parser.add_argument("--channel", default=3, type=int)
parser.add_argument("--height", default=224, type=int)
parser.add_argument("--width", default=224, type=int)
parser.add_argument("-n", "--world-size", default=8, type=int)
parser.add_argument("--warm-iters", default=50, type=int)
parser.add_argument("-t", "--total-iters", default=200, type=int)
parser.add_argument("--log-seconds", default=2, type=int)
args = parser.parse_args()
mp.set_start_method("spawn")
set_nccl_env()
set_num_threads()
if args.world_size == 1:
worker(args)
else:
| dist.launcher(worker, n_gpus=args.world_size) | megengine.distributed.launcher |
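A hedged sketch of dist.launcher as completed above: it spawns one worker process per GPU, initializes the process group, then calls the wrapped function with the given arguments. It assumes a machine with at least two visible GPUs.

import megengine.distributed as dist

def hello(msg):
    print(f"rank {dist.get_rank()} of {dist.get_world_size()}: {msg}")

if __name__ == "__main__":
    dist.launcher(hello, n_gpus=2)("hi")  # same call shape as worker(args) above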
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import os
import argparse
import datetime
import multiprocessing as mp
import time
import megengine as mge
import megengine.amp as amp
import megengine.autodiff as autodiff
import megengine.distributed as dist
import megengine.functional as F
import megengine.jit as jit
import megengine.optimizer as optim
from basecore.utils import log_every_n_seconds
from loguru import logger
from basecls.data.fake_data import FakeDataLoader
from basecls.layers import Preprocess
from basecls.utils import registers, set_nccl_env, set_num_threads
def main():
parser = argparse.ArgumentParser()
parser.add_argument("-m", "--model", default="resnet50", type=str)
parser.add_argument("--mode", default="eval", type=str)
parser.add_argument("-d", "--device", default="gpu", type=str)
parser.add_argument("--amp", default=0, type=int)
parser.add_argument("--fastrun", action="store_true")
parser.add_argument("--trace", action="store_true")
parser.add_argument("--dtr", action="store_true")
parser.add_argument("-b", "--batch-size", default=32, type=int)
parser.add_argument("--channel", default=3, type=int)
parser.add_argument("--height", default=224, type=int)
parser.add_argument("--width", default=224, type=int)
parser.add_argument("-n", "--world-size", default=8, type=int)
parser.add_argument("--warm-iters", default=50, type=int)
parser.add_argument("-t", "--total-iters", default=200, type=int)
parser.add_argument("--log-seconds", default=2, type=int)
args = parser.parse_args()
mp.set_start_method("spawn")
set_nccl_env()
set_num_threads()
if args.world_size == 1:
worker(args)
else:
dist.launcher(worker, n_gpus=args.world_size)(args)
@logger.catch
def worker(args: argparse.Namespace):
if dist.get_rank() != 0:
logger.remove()
logger.info(f"args: {args}")
if args.fastrun:
logger.info("Using fastrun mode...")
mge.functional.debug_param.set_execution_strategy("PROFILE")
if args.dtr:
logger.info("Enabling DTR...")
mge.dtr.enable()
mge.set_default_device(f"{args.device}{dist.get_rank()}")
model = registers.models.get(args.model)(head=dict(w_out=1000))
dataloader = FakeDataLoader(
args.batch_size,
(args.height, args.width),
args.channel,
length=args.warm_iters + args.total_iters,
num_classes=1000,
)
if args.mode == "train":
BenchCls = TrainBench
elif args.mode == "eval":
BenchCls = EvalBench
else:
raise NotImplementedError(f"Benchmark mode '{args.mode}' not supported")
bench = BenchCls(model, dataloader, args.trace, args.amp)
bench.benchmark(args.warm_iters, args.log_seconds)
class ClsBench:
def __init__(self, model, dataloader, trace: bool = False):
self.model = model
self.dataloader = dataloader
self.preprocess = Preprocess(mean=127, std=128)
if trace:
self.model_step = | jit.trace(self.model_step, symbolic=True) | megengine.jit.trace |
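A hedged sketch of jit.trace as used in the row above: with symbolic=True the function is compiled into a static graph on its first call and reused for later calls with same-shaped inputs. The toy function is illustrative.

import numpy as np
import megengine as mge
import megengine.functional as F
import megengine.jit as jit

@jit.trace(symbolic=True)
def scaled_relu(x):
    return F.relu(x) * 2.0

out = scaled_relu(mge.tensor(np.random.randn(4).astype("float32")))
print(out.numpy())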
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import os
import argparse
import datetime
import multiprocessing as mp
import time
import megengine as mge
import megengine.amp as amp
import megengine.autodiff as autodiff
import megengine.distributed as dist
import megengine.functional as F
import megengine.jit as jit
import megengine.optimizer as optim
from basecore.utils import log_every_n_seconds
from loguru import logger
from basecls.data.fake_data import FakeDataLoader
from basecls.layers import Preprocess
from basecls.utils import registers, set_nccl_env, set_num_threads
def main():
parser = argparse.ArgumentParser()
parser.add_argument("-m", "--model", default="resnet50", type=str)
parser.add_argument("--mode", default="eval", type=str)
parser.add_argument("-d", "--device", default="gpu", type=str)
parser.add_argument("--amp", default=0, type=int)
parser.add_argument("--fastrun", action="store_true")
parser.add_argument("--trace", action="store_true")
parser.add_argument("--dtr", action="store_true")
parser.add_argument("-b", "--batch-size", default=32, type=int)
parser.add_argument("--channel", default=3, type=int)
parser.add_argument("--height", default=224, type=int)
parser.add_argument("--width", default=224, type=int)
parser.add_argument("-n", "--world-size", default=8, type=int)
parser.add_argument("--warm-iters", default=50, type=int)
parser.add_argument("-t", "--total-iters", default=200, type=int)
parser.add_argument("--log-seconds", default=2, type=int)
args = parser.parse_args()
mp.set_start_method("spawn")
set_nccl_env()
set_num_threads()
if args.world_size == 1:
worker(args)
else:
dist.launcher(worker, n_gpus=args.world_size)(args)
@logger.catch
def worker(args: argparse.Namespace):
if dist.get_rank() != 0:
logger.remove()
logger.info(f"args: {args}")
if args.fastrun:
logger.info("Using fastrun mode...")
mge.functional.debug_param.set_execution_strategy("PROFILE")
if args.dtr:
logger.info("Enabling DTR...")
mge.dtr.enable()
mge.set_default_device(f"{args.device}{dist.get_rank()}")
model = registers.models.get(args.model)(head=dict(w_out=1000))
dataloader = FakeDataLoader(
args.batch_size,
(args.height, args.width),
args.channel,
length=args.warm_iters + args.total_iters,
num_classes=1000,
)
if args.mode == "train":
BenchCls = TrainBench
elif args.mode == "eval":
BenchCls = EvalBench
else:
raise NotImplementedError(f"Benchmark mode '{args.mode}' not supported")
bench = BenchCls(model, dataloader, args.trace, args.amp)
bench.benchmark(args.warm_iters, args.log_seconds)
class ClsBench:
def __init__(self, model, dataloader, trace: bool = False):
self.model = model
self.dataloader = dataloader
self.preprocess = Preprocess(mean=127, std=128)
if trace:
self.model_step = jit.trace(self.model_step, symbolic=True)
def benchmark(self, warm_iters=50, log_seconds=2):
total_iters = len(self.dataloader) - warm_iters
total_time = 0
for i, data in enumerate(self.dataloader, 1):
if i == warm_iters + 1:
total_time = 0
samples, targets = self.preprocess(data)
| mge._full_sync() | megengine._full_sync |
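A hedged sketch of why the benchmark brackets each step with mge._full_sync(): MegEngine dispatches kernels asynchronously, so wall-clock timing is only meaningful after a full device sync. The leading underscore marks a private API that may change between releases.

import time
import numpy as np
import megengine as mge
import megengine.functional as F

x = mge.tensor(np.random.randn(1024, 1024).astype("float32"))
mge._full_sync()  # drain queued work before starting the clock
t0 = time.perf_counter()
y = F.matmul(x, x)
mge._full_sync()  # wait until the matmul has actually finished
print(f"matmul: {(time.perf_counter() - t0) * 1000:.3f} ms")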
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import os
import argparse
import datetime
import multiprocessing as mp
import time
import megengine as mge
import megengine.amp as amp
import megengine.autodiff as autodiff
import megengine.distributed as dist
import megengine.functional as F
import megengine.jit as jit
import megengine.optimizer as optim
from basecore.utils import log_every_n_seconds
from loguru import logger
from basecls.data.fake_data import FakeDataLoader
from basecls.layers import Preprocess
from basecls.utils import registers, set_nccl_env, set_num_threads
def main():
parser = argparse.ArgumentParser()
parser.add_argument("-m", "--model", default="resnet50", type=str)
parser.add_argument("--mode", default="eval", type=str)
parser.add_argument("-d", "--device", default="gpu", type=str)
parser.add_argument("--amp", default=0, type=int)
parser.add_argument("--fastrun", action="store_true")
parser.add_argument("--trace", action="store_true")
parser.add_argument("--dtr", action="store_true")
parser.add_argument("-b", "--batch-size", default=32, type=int)
parser.add_argument("--channel", default=3, type=int)
parser.add_argument("--height", default=224, type=int)
parser.add_argument("--width", default=224, type=int)
parser.add_argument("-n", "--world-size", default=8, type=int)
parser.add_argument("--warm-iters", default=50, type=int)
parser.add_argument("-t", "--total-iters", default=200, type=int)
parser.add_argument("--log-seconds", default=2, type=int)
args = parser.parse_args()
mp.set_start_method("spawn")
set_nccl_env()
set_num_threads()
if args.world_size == 1:
worker(args)
else:
dist.launcher(worker, n_gpus=args.world_size)(args)
@logger.catch
def worker(args: argparse.Namespace):
if dist.get_rank() != 0:
logger.remove()
logger.info(f"args: {args}")
if args.fastrun:
logger.info("Using fastrun mode...")
mge.functional.debug_param.set_execution_strategy("PROFILE")
if args.dtr:
logger.info("Enabling DTR...")
mge.dtr.enable()
mge.set_default_device(f"{args.device}{dist.get_rank()}")
model = registers.models.get(args.model)(head=dict(w_out=1000))
dataloader = FakeDataLoader(
args.batch_size,
(args.height, args.width),
args.channel,
length=args.warm_iters + args.total_iters,
num_classes=1000,
)
if args.mode == "train":
BenchCls = TrainBench
elif args.mode == "eval":
BenchCls = EvalBench
else:
raise NotImplementedError(f"Benchmark mode '{args.mode}' not supported")
bench = BenchCls(model, dataloader, args.trace, args.amp)
bench.benchmark(args.warm_iters, args.log_seconds)
class ClsBench:
def __init__(self, model, dataloader, trace: bool = False):
self.model = model
self.dataloader = dataloader
self.preprocess = Preprocess(mean=127, std=128)
if trace:
self.model_step = jit.trace(self.model_step, symbolic=True)
def benchmark(self, warm_iters=50, log_seconds=2):
total_iters = len(self.dataloader) - warm_iters
total_time = 0
for i, data in enumerate(self.dataloader, 1):
if i == warm_iters + 1:
total_time = 0
samples, targets = self.preprocess(data)
mge._full_sync()
t = time.perf_counter()
self.model_step(samples, targets)
| mge._full_sync() | megengine._full_sync |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import os
import argparse
import datetime
import multiprocessing as mp
import time
import megengine as mge
import megengine.amp as amp
import megengine.autodiff as autodiff
import megengine.distributed as dist
import megengine.functional as F
import megengine.jit as jit
import megengine.optimizer as optim
from basecore.utils import log_every_n_seconds
from loguru import logger
from basecls.data.fake_data import FakeDataLoader
from basecls.layers import Preprocess
from basecls.utils import registers, set_nccl_env, set_num_threads
def main():
parser = argparse.ArgumentParser()
parser.add_argument("-m", "--model", default="resnet50", type=str)
parser.add_argument("--mode", default="eval", type=str)
parser.add_argument("-d", "--device", default="gpu", type=str)
parser.add_argument("--amp", default=0, type=int)
parser.add_argument("--fastrun", action="store_true")
parser.add_argument("--trace", action="store_true")
parser.add_argument("--dtr", action="store_true")
parser.add_argument("-b", "--batch-size", default=32, type=int)
parser.add_argument("--channel", default=3, type=int)
parser.add_argument("--height", default=224, type=int)
parser.add_argument("--width", default=224, type=int)
parser.add_argument("-n", "--world-size", default=8, type=int)
parser.add_argument("--warm-iters", default=50, type=int)
parser.add_argument("-t", "--total-iters", default=200, type=int)
parser.add_argument("--log-seconds", default=2, type=int)
args = parser.parse_args()
mp.set_start_method("spawn")
set_nccl_env()
set_num_threads()
if args.world_size == 1:
worker(args)
else:
dist.launcher(worker, n_gpus=args.world_size)(args)
@logger.catch
def worker(args: argparse.Namespace):
if dist.get_rank() != 0:
logger.remove()
logger.info(f"args: {args}")
if args.fastrun:
logger.info("Using fastrun mode...")
mge.functional.debug_param.set_execution_strategy("PROFILE")
if args.dtr:
logger.info("Enabling DTR...")
mge.dtr.enable()
mge.set_default_device(f"{args.device}{dist.get_rank()}")
model = registers.models.get(args.model)(head=dict(w_out=1000))
dataloader = FakeDataLoader(
args.batch_size,
(args.height, args.width),
args.channel,
length=args.warm_iters + args.total_iters,
num_classes=1000,
)
if args.mode == "train":
BenchCls = TrainBench
elif args.mode == "eval":
BenchCls = EvalBench
else:
raise NotImplementedError(f"Benchmark mode '{args.mode}' not supported")
bench = BenchCls(model, dataloader, args.trace, args.amp)
bench.benchmark(args.warm_iters, args.log_seconds)
class ClsBench:
def __init__(self, model, dataloader, trace: bool = False):
self.model = model
self.dataloader = dataloader
self.preprocess = Preprocess(mean=127, std=128)
if trace:
self.model_step = jit.trace(self.model_step, symbolic=True)
def benchmark(self, warm_iters=50, log_seconds=2):
total_iters = len(self.dataloader) - warm_iters
total_time = 0
for i, data in enumerate(self.dataloader, 1):
if i == warm_iters + 1:
total_time = 0
samples, targets = self.preprocess(data)
mge._full_sync()
t = time.perf_counter()
self.model_step(samples, targets)
mge._full_sync()
total_time += time.perf_counter() - t
if log_seconds > 0:
cnt = i - warm_iters if i > warm_iters else i
tot = total_iters if i > warm_iters else warm_iters
cycle = total_time / cnt
eta = (tot - cnt) * cycle
log_every_n_seconds(
"{} process {}/{}, average speed:{:0.3f}ms/iters. ETA:{}".format(
"Benchmark" if i > warm_iters else "Warmup",
cnt,
tot,
cycle * 1000,
datetime.timedelta(seconds=int(eta)),
),
n=log_seconds,
)
avg_speed_ms = total_time / total_iters * 1000
logger.info(
"Benchmark total time:{}, average speed:{:0.3f}ms/iters.".format(
datetime.timedelta(seconds=int(total_time)), avg_speed_ms
)
)
return avg_speed_ms
def model_step(self, samples, targets):
raise NotImplementedError
class TrainBench(ClsBench):
def __init__(self, model, dataloader, trace: bool = False, amp_version: int = 0):
model.train()
super().__init__(model, dataloader, trace)
self.opt = optim.SGD(model.parameters(), lr=0.1, momentum=0.9, weight_decay=0.0001)
self.gm = autodiff.GradManager()
callbacks = (
[dist.make_allreduce_cb("mean", dist.WORLD)] if dist.get_world_size() > 1 else None
)
self.gm.attach(model.parameters(), callbacks=callbacks)
self.amp_version = amp_version
self.scaler = (
| amp.GradScaler(init_scale=65536.0, growth_interval=2000) | megengine.amp.GradScaler |
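A hedged sketch contrasting the two GradScaler configurations chosen here: growth_interval=2000 gives a dynamic loss scale (the amp_version == 2 branch), while growth_interval=0 keeps the initial scale fixed. The constructor arguments mirror the benchmark; the pairing with GradManager appears in TrainBench.model_step below.

import megengine.amp as amp

dynamic_scaler = amp.GradScaler(init_scale=65536.0, growth_interval=2000)  # scale adapts over time
fixed_scaler = amp.GradScaler(init_scale=128.0, growth_interval=0)  # scale stays at 128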
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import os
import argparse
import datetime
import multiprocessing as mp
import time
import megengine as mge
import megengine.amp as amp
import megengine.autodiff as autodiff
import megengine.distributed as dist
import megengine.functional as F
import megengine.jit as jit
import megengine.optimizer as optim
from basecore.utils import log_every_n_seconds
from loguru import logger
from basecls.data.fake_data import FakeDataLoader
from basecls.layers import Preprocess
from basecls.utils import registers, set_nccl_env, set_num_threads
def main():
parser = argparse.ArgumentParser()
parser.add_argument("-m", "--model", default="resnet50", type=str)
parser.add_argument("--mode", default="eval", type=str)
parser.add_argument("-d", "--device", default="gpu", type=str)
parser.add_argument("--amp", default=0, type=int)
parser.add_argument("--fastrun", action="store_true")
parser.add_argument("--trace", action="store_true")
parser.add_argument("--dtr", action="store_true")
parser.add_argument("-b", "--batch-size", default=32, type=int)
parser.add_argument("--channel", default=3, type=int)
parser.add_argument("--height", default=224, type=int)
parser.add_argument("--width", default=224, type=int)
parser.add_argument("-n", "--world-size", default=8, type=int)
parser.add_argument("--warm-iters", default=50, type=int)
parser.add_argument("-t", "--total-iters", default=200, type=int)
parser.add_argument("--log-seconds", default=2, type=int)
args = parser.parse_args()
mp.set_start_method("spawn")
set_nccl_env()
set_num_threads()
if args.world_size == 1:
worker(args)
else:
dist.launcher(worker, n_gpus=args.world_size)(args)
@logger.catch
def worker(args: argparse.Namespace):
if dist.get_rank() != 0:
logger.remove()
logger.info(f"args: {args}")
if args.fastrun:
logger.info("Using fastrun mode...")
mge.functional.debug_param.set_execution_strategy("PROFILE")
if args.dtr:
logger.info("Enabling DTR...")
mge.dtr.enable()
mge.set_default_device(f"{args.device}{dist.get_rank()}")
model = registers.models.get(args.model)(head=dict(w_out=1000))
dataloader = FakeDataLoader(
args.batch_size,
(args.height, args.width),
args.channel,
length=args.warm_iters + args.total_iters,
num_classes=1000,
)
if args.mode == "train":
BenchCls = TrainBench
elif args.mode == "eval":
BenchCls = EvalBench
else:
raise NotImplementedError(f"Benchmark mode '{args.mode}' not supported")
bench = BenchCls(model, dataloader, args.trace, args.amp)
bench.benchmark(args.warm_iters, args.log_seconds)
class ClsBench:
def __init__(self, model, dataloader, trace: bool = False):
self.model = model
self.dataloader = dataloader
self.preprocess = Preprocess(mean=127, std=128)
if trace:
self.model_step = jit.trace(self.model_step, symbolic=True)
def benchmark(self, warm_iters=50, log_seconds=2):
total_iters = len(self.dataloader) - warm_iters
total_time = 0
for i, data in enumerate(self.dataloader, 1):
if i == warm_iters + 1:
total_time = 0
samples, targets = self.preprocess(data)
mge._full_sync()
t = time.perf_counter()
self.model_step(samples, targets)
mge._full_sync()
total_time += time.perf_counter() - t
if log_seconds > 0:
cnt = i - warm_iters if i > warm_iters else i
tot = total_iters if i > warm_iters else warm_iters
cycle = total_time / cnt
eta = (tot - cnt) * cycle
log_every_n_seconds(
"{} process {}/{}, average speed:{:0.3f}ms/iters. ETA:{}".format(
"Benchmark" if i > warm_iters else "Warmup",
cnt,
tot,
cycle * 1000,
datetime.timedelta(seconds=int(eta)),
),
n=log_seconds,
)
avg_speed_ms = total_time / total_iters * 1000
logger.info(
"Benchmark total time:{}, average speed:{:0.3f}ms/iters.".format(
datetime.timedelta(seconds=int(total_time)), avg_speed_ms
)
)
return avg_speed_ms
def model_step(self, samples, targets):
raise NotImplementedError
class TrainBench(ClsBench):
def __init__(self, model, dataloader, trace: bool = False, amp_version: int = 0):
model.train()
super().__init__(model, dataloader, trace)
self.opt = optim.SGD(model.parameters(), lr=0.1, momentum=0.9, weight_decay=0.0001)
self.gm = autodiff.GradManager()
callbacks = (
[dist.make_allreduce_cb("mean", dist.WORLD)] if dist.get_world_size() > 1 else None
)
self.gm.attach(model.parameters(), callbacks=callbacks)
self.amp_version = amp_version
self.scaler = (
amp.GradScaler(init_scale=65536.0, growth_interval=2000)
if amp_version == 2
else | amp.GradScaler(init_scale=128.0, growth_interval=0) | megengine.amp.GradScaler |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import os
import argparse
import datetime
import multiprocessing as mp
import time
import megengine as mge
import megengine.amp as amp
import megengine.autodiff as autodiff
import megengine.distributed as dist
import megengine.functional as F
import megengine.jit as jit
import megengine.optimizer as optim
from basecore.utils import log_every_n_seconds
from loguru import logger
from basecls.data.fake_data import FakeDataLoader
from basecls.layers import Preprocess
from basecls.utils import registers, set_nccl_env, set_num_threads
def main():
parser = argparse.ArgumentParser()
parser.add_argument("-m", "--model", default="resnet50", type=str)
parser.add_argument("--mode", default="eval", type=str)
parser.add_argument("-d", "--device", default="gpu", type=str)
parser.add_argument("--amp", default=0, type=int)
parser.add_argument("--fastrun", action="store_true")
parser.add_argument("--trace", action="store_true")
parser.add_argument("--dtr", action="store_true")
parser.add_argument("-b", "--batch-size", default=32, type=int)
parser.add_argument("--channel", default=3, type=int)
parser.add_argument("--height", default=224, type=int)
parser.add_argument("--width", default=224, type=int)
parser.add_argument("-n", "--world-size", default=8, type=int)
parser.add_argument("--warm-iters", default=50, type=int)
parser.add_argument("-t", "--total-iters", default=200, type=int)
parser.add_argument("--log-seconds", default=2, type=int)
args = parser.parse_args()
mp.set_start_method("spawn")
set_nccl_env()
set_num_threads()
if args.world_size == 1:
worker(args)
else:
dist.launcher(worker, n_gpus=args.world_size)(args)
@logger.catch
def worker(args: argparse.Namespace):
if dist.get_rank() != 0:
logger.remove()
logger.info(f"args: {args}")
if args.fastrun:
logger.info("Using fastrun mode...")
mge.functional.debug_param.set_execution_strategy("PROFILE")
if args.dtr:
logger.info("Enabling DTR...")
mge.dtr.enable()
mge.set_default_device(f"{args.device}{dist.get_rank()}")
model = registers.models.get(args.model)(head=dict(w_out=1000))
dataloader = FakeDataLoader(
args.batch_size,
(args.height, args.width),
args.channel,
length=args.warm_iters + args.total_iters,
num_classes=1000,
)
if args.mode == "train":
BenchCls = TrainBench
elif args.mode == "eval":
BenchCls = EvalBench
else:
raise NotImplementedError(f"Benchmark mode '{args.mode}' not supported")
bench = BenchCls(model, dataloader, args.trace, args.amp)
bench.benchmark(args.warm_iters, args.log_seconds)
class ClsBench:
def __init__(self, model, dataloader, trace: bool = False):
self.model = model
self.dataloader = dataloader
self.preprocess = Preprocess(mean=127, std=128)
if trace:
self.model_step = jit.trace(self.model_step, symbolic=True)
def benchmark(self, warm_iters=50, log_seconds=2):
total_iters = len(self.dataloader) - warm_iters
total_time = 0
for i, data in enumerate(self.dataloader, 1):
if i == warm_iters + 1:
total_time = 0
samples, targets = self.preprocess(data)
mge._full_sync()
t = time.perf_counter()
self.model_step(samples, targets)
mge._full_sync()
total_time += time.perf_counter() - t
if log_seconds > 0:
cnt = i - warm_iters if i > warm_iters else i
tot = total_iters if i > warm_iters else warm_iters
cycle = total_time / cnt
eta = (tot - cnt) * cycle
log_every_n_seconds(
"{} process {}/{}, average speed:{:0.3f}ms/iters. ETA:{}".format(
"Benchmark" if i > warm_iters else "Warmup",
cnt,
tot,
cycle * 1000,
datetime.timedelta(seconds=int(eta)),
),
n=log_seconds,
)
avg_speed_ms = total_time / total_iters * 1000
logger.info(
"Benchmark total time:{}, average speed:{:0.3f}ms/iters.".format(
datetime.timedelta(seconds=int(total_time)), avg_speed_ms
)
)
return avg_speed_ms
def model_step(self, samples, targets):
raise NotImplementedError
class TrainBench(ClsBench):
def __init__(self, model, dataloader, trace: bool = False, amp_version: int = 0):
model.train()
super().__init__(model, dataloader, trace)
self.opt = optim.SGD(model.parameters(), lr=0.1, momentum=0.9, weight_decay=0.0001)
self.gm = autodiff.GradManager()
callbacks = (
[dist.make_allreduce_cb("mean", dist.WORLD)] if dist.get_world_size() > 1 else None
)
self.gm.attach(model.parameters(), callbacks=callbacks)
self.amp_version = amp_version
self.scaler = (
amp.GradScaler(init_scale=65536.0, growth_interval=2000)
if amp_version == 2
else amp.GradScaler(init_scale=128.0, growth_interval=0)
)
def model_step(self, samples, targets):
with self.gm:
with amp.autocast(enabled=self.amp_version > 0):
pred = self.model(samples)
loss = F.loss.cross_entropy(pred, targets)
if self.amp_version > 0:
self.scaler.backward(self.gm, loss, update_scale=False)
self.scaler.update()
else:
self.gm.backward(loss)
self.opt.step().clear_grad()
class EvalBench(ClsBench):
def __init__(self, model, dataloader, trace: bool = False, amp_version: int = 0):
model.eval()
super().__init__(model, dataloader, trace)
self.amp_version = amp_version
def model_step(self, samples, targets):
with | amp.autocast(enabled=self.amp_version > 0) | megengine.amp.autocast |
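A hedged sketch of amp.autocast as completed above: inside the context, matmul-like ops typically run in float16, and enabled=False makes the context a no-op, which is how this benchmark toggles AMP. Assumes a current MegEngine release.

import numpy as np
import megengine as mge
import megengine.amp as amp
from megengine.module import Linear

net = Linear(8, 4)
x = mge.tensor(np.random.randn(2, 8).astype("float32"))
with amp.autocast(enabled=True):
    y = net(x)  # computed under mixed precision
print(y.dtype)  # typically float16 here; float32 with enabled=False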
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import os
import argparse
import datetime
import multiprocessing as mp
import time
import megengine as mge
import megengine.amp as amp
import megengine.autodiff as autodiff
import megengine.distributed as dist
import megengine.functional as F
import megengine.jit as jit
import megengine.optimizer as optim
from basecore.utils import log_every_n_seconds
from loguru import logger
from basecls.data.fake_data import FakeDataLoader
from basecls.layers import Preprocess
from basecls.utils import registers, set_nccl_env, set_num_threads
def main():
parser = argparse.ArgumentParser()
parser.add_argument("-m", "--model", default="resnet50", type=str)
parser.add_argument("--mode", default="eval", type=str)
parser.add_argument("-d", "--device", default="gpu", type=str)
parser.add_argument("--amp", default=0, type=int)
parser.add_argument("--fastrun", action="store_true")
parser.add_argument("--trace", action="store_true")
parser.add_argument("--dtr", action="store_true")
parser.add_argument("-b", "--batch-size", default=32, type=int)
parser.add_argument("--channel", default=3, type=int)
parser.add_argument("--height", default=224, type=int)
parser.add_argument("--width", default=224, type=int)
parser.add_argument("-n", "--world-size", default=8, type=int)
parser.add_argument("--warm-iters", default=50, type=int)
parser.add_argument("-t", "--total-iters", default=200, type=int)
parser.add_argument("--log-seconds", default=2, type=int)
args = parser.parse_args()
mp.set_start_method("spawn")
set_nccl_env()
set_num_threads()
if args.world_size == 1:
worker(args)
else:
dist.launcher(worker, n_gpus=args.world_size)(args)
@logger.catch
def worker(args: argparse.Namespace):
if dist.get_rank() != 0:
logger.remove()
logger.info(f"args: {args}")
if args.fastrun:
logger.info("Using fastrun mode...")
mge.functional.debug_param.set_execution_strategy("PROFILE")
if args.dtr:
logger.info("Enabling DTR...")
mge.dtr.enable()
mge.set_default_device(f"{args.device}{ | dist.get_rank() | megengine.distributed.get_rank |
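A hedged sketch of the per-process device binding completed above: inside a dist.launcher worker, dist.get_rank() is the process index, so "gpu{rank}" pins each worker to its own card; outside any process group the rank is simply 0.

import megengine as mge
import megengine.distributed as dist

rank = dist.get_rank()  # 0 when no distributed group is active
mge.set_default_device(f"gpu{rank}")  # use "cpu0" instead on a machine without CUDA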
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import os
import argparse
import datetime
import multiprocessing as mp
import time
import megengine as mge
import megengine.amp as amp
import megengine.autodiff as autodiff
import megengine.distributed as dist
import megengine.functional as F
import megengine.jit as jit
import megengine.optimizer as optim
from basecore.utils import log_every_n_seconds
from loguru import logger
from basecls.data.fake_data import FakeDataLoader
from basecls.layers import Preprocess
from basecls.utils import registers, set_nccl_env, set_num_threads
def main():
parser = argparse.ArgumentParser()
parser.add_argument("-m", "--model", default="resnet50", type=str)
parser.add_argument("--mode", default="eval", type=str)
parser.add_argument("-d", "--device", default="gpu", type=str)
parser.add_argument("--amp", default=0, type=int)
parser.add_argument("--fastrun", action="store_true")
parser.add_argument("--trace", action="store_true")
parser.add_argument("--dtr", action="store_true")
parser.add_argument("-b", "--batch-size", default=32, type=int)
parser.add_argument("--channel", default=3, type=int)
parser.add_argument("--height", default=224, type=int)
parser.add_argument("--width", default=224, type=int)
parser.add_argument("-n", "--world-size", default=8, type=int)
parser.add_argument("--warm-iters", default=50, type=int)
parser.add_argument("-t", "--total-iters", default=200, type=int)
parser.add_argument("--log-seconds", default=2, type=int)
args = parser.parse_args()
mp.set_start_method("spawn")
set_nccl_env()
set_num_threads()
if args.world_size == 1:
worker(args)
else:
dist.launcher(worker, n_gpus=args.world_size)(args)
@logger.catch
def worker(args: argparse.Namespace):
if dist.get_rank() != 0:
logger.remove()
logger.info(f"args: {args}")
if args.fastrun:
logger.info("Using fastrun mode...")
mge.functional.debug_param.set_execution_strategy("PROFILE")
if args.dtr:
logger.info("Enabling DTR...")
mge.dtr.enable()
mge.set_default_device(f"{args.device}{dist.get_rank()}")
model = registers.models.get(args.model)(head=dict(w_out=1000))
dataloader = FakeDataLoader(
args.batch_size,
(args.height, args.width),
args.channel,
length=args.warm_iters + args.total_iters,
num_classes=1000,
)
if args.mode == "train":
BenchCls = TrainBench
elif args.mode == "eval":
BenchCls = EvalBench
else:
raise NotImplementedError(f"Benchmark mode '{args.mode}' not supported")
bench = BenchCls(model, dataloader, args.trace, args.amp)
bench.benchmark(args.warm_iters, args.log_seconds)
class ClsBench:
def __init__(self, model, dataloader, trace: bool = False):
self.model = model
self.dataloader = dataloader
self.preprocess = Preprocess(mean=127, std=128)
if trace:
self.model_step = jit.trace(self.model_step, symbolic=True)
def benchmark(self, warm_iters=50, log_seconds=2):
total_iters = len(self.dataloader) - warm_iters
total_time = 0
for i, data in enumerate(self.dataloader, 1):
if i == warm_iters + 1:
total_time = 0
samples, targets = self.preprocess(data)
mge._full_sync()
t = time.perf_counter()
self.model_step(samples, targets)
mge._full_sync()
total_time += time.perf_counter() - t
if log_seconds > 0:
cnt = i - warm_iters if i > warm_iters else i
tot = total_iters if i > warm_iters else warm_iters
cycle = total_time / cnt
eta = (tot - cnt) * cycle
log_every_n_seconds(
"{} process {}/{}, average speed:{:0.3f}ms/iters. ETA:{}".format(
"Benchmark" if i > warm_iters else "Warmup",
cnt,
tot,
cycle * 1000,
datetime.timedelta(seconds=int(eta)),
),
n=log_seconds,
)
avg_speed_ms = total_time / total_iters * 1000
logger.info(
"Benchmark total time:{}, average speed:{:0.3f}ms/iters.".format(
datetime.timedelta(seconds=int(total_time)), avg_speed_ms
)
)
return avg_speed_ms
def model_step(self, samples, targets):
raise NotImplementedError
class TrainBench(ClsBench):
def __init__(self, model, dataloader, trace: bool = False, amp_version: int = 0):
model.train()
super().__init__(model, dataloader, trace)
self.opt = optim.SGD(model.parameters(), lr=0.1, momentum=0.9, weight_decay=0.0001)
self.gm = autodiff.GradManager()
callbacks = (
[dist.make_allreduce_cb("mean", dist.WORLD)] if | dist.get_world_size() | megengine.distributed.get_world_size |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import os
import argparse
import datetime
import multiprocessing as mp
import time
import megengine as mge
import megengine.amp as amp
import megengine.autodiff as autodiff
import megengine.distributed as dist
import megengine.functional as F
import megengine.jit as jit
import megengine.optimizer as optim
from basecore.utils import log_every_n_seconds
from loguru import logger
from basecls.data.fake_data import FakeDataLoader
from basecls.layers import Preprocess
from basecls.utils import registers, set_nccl_env, set_num_threads
def main():
parser = argparse.ArgumentParser()
parser.add_argument("-m", "--model", default="resnet50", type=str)
parser.add_argument("--mode", default="eval", type=str)
parser.add_argument("-d", "--device", default="gpu", type=str)
parser.add_argument("--amp", default=0, type=int)
parser.add_argument("--fastrun", action="store_true")
parser.add_argument("--trace", action="store_true")
parser.add_argument("--dtr", action="store_true")
parser.add_argument("-b", "--batch-size", default=32, type=int)
parser.add_argument("--channel", default=3, type=int)
parser.add_argument("--height", default=224, type=int)
parser.add_argument("--width", default=224, type=int)
parser.add_argument("-n", "--world-size", default=8, type=int)
parser.add_argument("--warm-iters", default=50, type=int)
parser.add_argument("-t", "--total-iters", default=200, type=int)
parser.add_argument("--log-seconds", default=2, type=int)
args = parser.parse_args()
mp.set_start_method("spawn")
set_nccl_env()
set_num_threads()
if args.world_size == 1:
worker(args)
else:
dist.launcher(worker, n_gpus=args.world_size)(args)
@logger.catch
def worker(args: argparse.Namespace):
if dist.get_rank() != 0:
logger.remove()
logger.info(f"args: {args}")
if args.fastrun:
logger.info("Using fastrun mode...")
mge.functional.debug_param.set_execution_strategy("PROFILE")
if args.dtr:
logger.info("Enabling DTR...")
mge.dtr.enable()
mge.set_default_device(f"{args.device}{dist.get_rank()}")
model = registers.models.get(args.model)(head=dict(w_out=1000))
dataloader = FakeDataLoader(
args.batch_size,
(args.height, args.width),
args.channel,
length=args.warm_iters + args.total_iters,
num_classes=1000,
)
if args.mode == "train":
BenchCls = TrainBench
elif args.mode == "eval":
BenchCls = EvalBench
else:
raise NotImplementedError(f"Benchmark mode '{args.mode}' not supported")
bench = BenchCls(model, dataloader, args.trace, args.amp)
bench.benchmark(args.warm_iters, args.log_seconds)
class ClsBench:
def __init__(self, model, dataloader, trace: bool = False):
self.model = model
self.dataloader = dataloader
self.preprocess = Preprocess(mean=127, std=128)
if trace:
self.model_step = jit.trace(self.model_step, symbolic=True)
def benchmark(self, warm_iters=50, log_seconds=2):
total_iters = len(self.dataloader) - warm_iters
total_time = 0
for i, data in enumerate(self.dataloader, 1):
if i == warm_iters + 1:
total_time = 0
samples, targets = self.preprocess(data)
mge._full_sync()
t = time.perf_counter()
self.model_step(samples, targets)
mge._full_sync()
total_time += time.perf_counter() - t
if log_seconds > 0:
cnt = i - warm_iters if i > warm_iters else i
tot = total_iters if i > warm_iters else warm_iters
cycle = total_time / cnt
eta = (tot - cnt) * cycle
log_every_n_seconds(
"{} process {}/{}, average speed:{:0.3f}ms/iters. ETA:{}".format(
"Benchmark" if i > warm_iters else "Warmup",
cnt,
tot,
cycle * 1000,
datetime.timedelta(seconds=int(eta)),
),
n=log_seconds,
)
avg_speed_ms = total_time / total_iters * 1000
logger.info(
"Benchmark total time:{}, average speed:{:0.3f}ms/iters.".format(
datetime.timedelta(seconds=int(total_time)), avg_speed_ms
)
)
return avg_speed_ms
def model_step(self, samples, targets):
raise NotImplementedError
class TrainBench(ClsBench):
def __init__(self, model, dataloader, trace: bool = False, amp_version: int = 0):
model.train()
super().__init__(model, dataloader, trace)
self.opt = optim.SGD(model.parameters(), lr=0.1, momentum=0.9, weight_decay=0.0001)
self.gm = autodiff.GradManager()
callbacks = (
[ | dist.make_allreduce_cb("mean", dist.WORLD) | megengine.distributed.make_allreduce_cb |
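A hedged sketch of the data-parallel gradient hook built above: dist.make_allreduce_cb("mean", dist.WORLD) returns a callback that all-reduces and averages each gradient across the WORLD group as it is produced, and it is only attached when dist.get_world_size() > 1, exactly as in TrainBench.

import megengine.autodiff as autodiff
import megengine.distributed as dist
from megengine.module import Linear

net = Linear(4, 2)
cbs = [dist.make_allreduce_cb("mean", dist.WORLD)] if dist.get_world_size() > 1 else None
gm = autodiff.GradManager().attach(net.parameters(), callbacks=cbs)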
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import os
import argparse
import datetime
import multiprocessing as mp
import time
import megengine as mge
import megengine.amp as amp
import megengine.autodiff as autodiff
import megengine.distributed as dist
import megengine.functional as F
import megengine.jit as jit
import megengine.optimizer as optim
from basecore.utils import log_every_n_seconds
from loguru import logger
from basecls.data.fake_data import FakeDataLoader
from basecls.layers import Preprocess
from basecls.utils import registers, set_nccl_env, set_num_threads
def main():
parser = argparse.ArgumentParser()
parser.add_argument("-m", "--model", default="resnet50", type=str)
parser.add_argument("--mode", default="eval", type=str)
parser.add_argument("-d", "--device", default="gpu", type=str)
parser.add_argument("--amp", default=0, type=int)
parser.add_argument("--fastrun", action="store_true")
parser.add_argument("--trace", action="store_true")
parser.add_argument("--dtr", action="store_true")
parser.add_argument("-b", "--batch-size", default=32, type=int)
parser.add_argument("--channel", default=3, type=int)
parser.add_argument("--height", default=224, type=int)
parser.add_argument("--width", default=224, type=int)
parser.add_argument("-n", "--world-size", default=8, type=int)
parser.add_argument("--warm-iters", default=50, type=int)
parser.add_argument("-t", "--total-iters", default=200, type=int)
parser.add_argument("--log-seconds", default=2, type=int)
args = parser.parse_args()
mp.set_start_method("spawn")
set_nccl_env()
set_num_threads()
if args.world_size == 1:
worker(args)
else:
dist.launcher(worker, n_gpus=args.world_size)(args)
@logger.catch
def worker(args: argparse.Namespace):
if dist.get_rank() != 0:
logger.remove()
logger.info(f"args: {args}")
if args.fastrun:
logger.info("Using fastrun mode...")
mge.functional.debug_param.set_execution_strategy("PROFILE")
if args.dtr:
logger.info("Enabling DTR...")
mge.dtr.enable()
mge.set_default_device(f"{args.device}{dist.get_rank()}")
model = registers.models.get(args.model)(head=dict(w_out=1000))
dataloader = FakeDataLoader(
args.batch_size,
(args.height, args.width),
args.channel,
length=args.warm_iters + args.total_iters,
num_classes=1000,
)
if args.mode == "train":
BenchCls = TrainBench
elif args.mode == "eval":
BenchCls = EvalBench
else:
raise NotImplementedError(f"Benchmark mode '{args.mode}' not supported")
bench = BenchCls(model, dataloader, args.trace, args.amp)
bench.benchmark(args.warm_iters, args.log_seconds)
class ClsBench:
def __init__(self, model, dataloader, trace: bool = False):
self.model = model
self.dataloader = dataloader
self.preprocess = Preprocess(mean=127, std=128)
if trace:
self.model_step = jit.trace(self.model_step, symbolic=True)
def benchmark(self, warm_iters=50, log_seconds=2):
total_iters = len(self.dataloader) - warm_iters
total_time = 0
for i, data in enumerate(self.dataloader, 1):
if i == warm_iters + 1:
total_time = 0
samples, targets = self.preprocess(data)
mge._full_sync()
t = time.perf_counter()
self.model_step(samples, targets)
mge._full_sync()
total_time += time.perf_counter() - t
if log_seconds > 0:
cnt = i - warm_iters if i > warm_iters else i
tot = total_iters if i > warm_iters else warm_iters
cycle = total_time / cnt
eta = (tot - cnt) * cycle
log_every_n_seconds(
"{} process {}/{}, average speed:{:0.3f}ms/iters. ETA:{}".format(
"Benchmark" if i > warm_iters else "Warmup",
cnt,
tot,
cycle * 1000,
datetime.timedelta(seconds=int(eta)),
),
n=log_seconds,
)
avg_speed_ms = total_time / total_iters * 1000
logger.info(
"Benchmark total time:{}, average speed:{:0.3f}ms/iters.".format(
datetime.timedelta(seconds=int(total_time)), avg_speed_ms
)
)
return avg_speed_ms
def model_step(self, samples, targets):
raise NotImplementedError
class TrainBench(ClsBench):
def __init__(self, model, dataloader, trace: bool = False, amp_version: int = 0):
model.train()
super().__init__(model, dataloader, trace)
self.opt = optim.SGD(model.parameters(), lr=0.1, momentum=0.9, weight_decay=0.0001)
self.gm = autodiff.GradManager()
callbacks = (
[dist.make_allreduce_cb("mean", dist.WORLD)] if dist.get_world_size() > 1 else None
)
self.gm.attach(model.parameters(), callbacks=callbacks)
self.amp_version = amp_version
self.scaler = (
amp.GradScaler(init_scale=65536.0, growth_interval=2000)
if amp_version == 2
else amp.GradScaler(init_scale=128.0, growth_interval=0)
)
def model_step(self, samples, targets):
with self.gm:
with | amp.autocast(enabled=self.amp_version > 0) | megengine.amp.autocast |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import os
import argparse
import datetime
import multiprocessing as mp
import time
import megengine as mge
import megengine.amp as amp
import megengine.autodiff as autodiff
import megengine.distributed as dist
import megengine.functional as F
import megengine.jit as jit
import megengine.optimizer as optim
from basecore.utils import log_every_n_seconds
from loguru import logger
from basecls.data.fake_data import FakeDataLoader
from basecls.layers import Preprocess
from basecls.utils import registers, set_nccl_env, set_num_threads
def main():
parser = argparse.ArgumentParser()
parser.add_argument("-m", "--model", default="resnet50", type=str)
parser.add_argument("--mode", default="eval", type=str)
parser.add_argument("-d", "--device", default="gpu", type=str)
parser.add_argument("--amp", default=0, type=int)
parser.add_argument("--fastrun", action="store_true")
parser.add_argument("--trace", action="store_true")
parser.add_argument("--dtr", action="store_true")
parser.add_argument("-b", "--batch-size", default=32, type=int)
parser.add_argument("--channel", default=3, type=int)
parser.add_argument("--height", default=224, type=int)
parser.add_argument("--width", default=224, type=int)
parser.add_argument("-n", "--world-size", default=8, type=int)
parser.add_argument("--warm-iters", default=50, type=int)
parser.add_argument("-t", "--total-iters", default=200, type=int)
parser.add_argument("--log-seconds", default=2, type=int)
args = parser.parse_args()
mp.set_start_method("spawn")
set_nccl_env()
set_num_threads()
if args.world_size == 1:
worker(args)
else:
dist.launcher(worker, n_gpus=args.world_size)(args)
@logger.catch
def worker(args: argparse.Namespace):
if dist.get_rank() != 0:
logger.remove()
logger.info(f"args: {args}")
if args.fastrun:
logger.info("Using fastrun mode...")
mge.functional.debug_param.set_execution_strategy("PROFILE")
if args.dtr:
logger.info("Enabling DTR...")
mge.dtr.enable()
mge.set_default_device(f"{args.device}{dist.get_rank()}")
model = registers.models.get(args.model)(head=dict(w_out=1000))
dataloader = FakeDataLoader(
args.batch_size,
(args.height, args.width),
args.channel,
length=args.warm_iters + args.total_iters,
num_classes=1000,
)
if args.mode == "train":
BenchCls = TrainBench
elif args.mode == "eval":
BenchCls = EvalBench
else:
raise NotImplementedError(f"Benchmark mode '{args.mode}' not supported")
bench = BenchCls(model, dataloader, args.trace, args.amp)
bench.benchmark(args.warm_iters, args.log_seconds)
class ClsBench:
def __init__(self, model, dataloader, trace: bool = False):
self.model = model
self.dataloader = dataloader
self.preprocess = Preprocess(mean=127, std=128)
if trace:
self.model_step = jit.trace(self.model_step, symbolic=True)
def benchmark(self, warm_iters=50, log_seconds=2):
total_iters = len(self.dataloader) - warm_iters
total_time = 0
for i, data in enumerate(self.dataloader, 1):
if i == warm_iters + 1:
total_time = 0
samples, targets = self.preprocess(data)
mge._full_sync()
t = time.perf_counter()
self.model_step(samples, targets)
mge._full_sync()
total_time += time.perf_counter() - t
if log_seconds > 0:
cnt = i - warm_iters if i > warm_iters else i
tot = total_iters if i > warm_iters else warm_iters
cycle = total_time / cnt
eta = (tot - cnt) * cycle
log_every_n_seconds(
"{} process {}/{}, average speed:{:0.3f}ms/iters. ETA:{}".format(
"Benchmark" if i > warm_iters else "Warmup",
cnt,
tot,
cycle * 1000,
datetime.timedelta(seconds=int(eta)),
),
n=log_seconds,
)
avg_speed_ms = total_time / total_iters * 1000
logger.info(
"Benchmark total time:{}, average speed:{:0.3f}ms/iters.".format(
datetime.timedelta(seconds=int(total_time)), avg_speed_ms
)
)
return avg_speed_ms
def model_step(self, samples, targets):
raise NotImplementedError
class TrainBench(ClsBench):
def __init__(self, model, dataloader, trace: bool = False, amp_version: int = 0):
model.train()
super().__init__(model, dataloader, trace)
self.opt = optim.SGD(model.parameters(), lr=0.1, momentum=0.9, weight_decay=0.0001)
self.gm = autodiff.GradManager()
callbacks = (
[dist.make_allreduce_cb("mean", dist.WORLD)] if dist.get_world_size() > 1 else None
)
self.gm.attach(model.parameters(), callbacks=callbacks)
self.amp_version = amp_version
self.scaler = (
amp.GradScaler(init_scale=65536.0, growth_interval=2000)
if amp_version == 2
else amp.GradScaler(init_scale=128.0, growth_interval=0)
)
def model_step(self, samples, targets):
with self.gm:
with amp.autocast(enabled=self.amp_version > 0):
pred = self.model(samples)
loss = | F.loss.cross_entropy(pred, targets) | megengine.functional.loss.cross_entropy |
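A hedged sketch of the loss call completed above: F.loss.cross_entropy takes raw (N, C) logits and integer labels of shape (N,), applies log-softmax internally, and returns the mean loss over the batch.

import numpy as np
import megengine as mge
import megengine.functional as F

logits = mge.tensor(np.random.randn(4, 10).astype("float32"))
labels = mge.tensor(np.array([1, 0, 3, 9], dtype="int32"))
loss = F.loss.cross_entropy(logits, labels)  # scalar mean loss
print(loss.numpy())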
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
from typing import List, Tuple
import numpy as np
import megengine._internal as mgb
import megengine.functional as F
from megengine import Graph, jit
from megengine.module import Linear, Module
from megengine.test import assertTensorClose
from .env import modified_environ
class MLP(Module):
def __init__(self):
super().__init__()
self.dense0 = | Linear(28, 50) | megengine.module.Linear |
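A hedged sketch of megengine.module.Linear as instantiated above, written against a current (1.x) MegEngine release rather than the legacy 0.x API this file targets: Linear(in_features, out_features) holds a weight of shape (out, in) plus a bias and computes y = x @ W.T + b.

import numpy as np
import megengine as mge
from megengine.module import Linear

fc = Linear(28, 50)
x = mge.tensor(np.random.randn(3, 28).astype("float32"))
print(fc(x).shape)  # (3, 50)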
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
from typing import List, Tuple
import numpy as np
import megengine._internal as mgb
import megengine.functional as F
from megengine import Graph, jit
from megengine.module import Linear, Module
from megengine.test import assertTensorClose
from .env import modified_environ
class MLP(Module):
def __init__(self):
super().__init__()
self.dense0 = Linear(28, 50)
self.dense1 = | Linear(50, 20) | megengine.module.Linear |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
from typing import List, Tuple
import numpy as np
import megengine._internal as mgb
import megengine.functional as F
from megengine import Graph, jit
from megengine.module import Linear, Module
from megengine.test import assertTensorClose
from .env import modified_environ
class MLP(Module):
def __init__(self):
super().__init__()
self.dense0 = Linear(28, 50)
self.dense1 = Linear(50, 20)
def forward(self, x):
x = self.dense0(x)
x = | F.relu(x) | megengine.functional.relu |
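A hedged sketch of the activation completed above, again using the current mge.tensor constructor: F.relu is an elementwise max(x, 0) that preserves shape and dtype.

import numpy as np
import megengine as mge
import megengine.functional as F

x = mge.tensor(np.array([-1.0, 0.0, 2.5], dtype="float32"))
print(F.relu(x).numpy())  # [0.  0.  2.5]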
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
from typing import List, Tuple
import numpy as np
import megengine._internal as mgb
import megengine.functional as F
from megengine import Graph, jit
from megengine.module import Linear, Module
from megengine.test import assertTensorClose
from .env import modified_environ
class MLP(Module):
def __init__(self):
super().__init__()
self.dense0 = Linear(28, 50)
self.dense1 = Linear(50, 20)
def forward(self, x):
x = self.dense0(x)
x = F.relu(x)
x = self.dense1(x)
return x
def has_gpu(num=1):
try:
mgb.comp_node("gpu{}".format(num - 1))
except mgb.MegBrainError:
return False
return True
def randomNp(*args):
for arg in args:
assert isinstance(arg, int)
return np.random.random(args)
def randomTorch(*args):
import torch # pylint: disable=import-outside-toplevel
for arg in args:
assert isinstance(arg, int)
return torch.tensor(randomNp(*args), dtype=torch.float32)
def graph_mode(*modes):
if not set(modes).issubset({"eager", "static"}):
raise ValueError("graph mode must be in (eager, static)")
def decorator(func):
def wrapper(*args, **kwargs):
if "eager" in set(modes):
func(*args, **kwargs)
if "static" in set(modes):
with Graph() as cg:
cg.set_option("eager_evaluation", False)
func(*args, **kwargs)
return wrapper
return decorator
def _default_compare_fn(x, y):
assertTensorClose(x.numpy(), y)
def opr_test(
cases,
func,
mode=("eager", "static", "dynamic_shape"),
compare_fn=_default_compare_fn,
ref_fn=None,
**kwargs
):
"""
mode: the list of test modes (eager, static, dynamic_shape);
    all modes are tested if None.
func: the function that runs the opr.
compare_fn: the function that compares the result with the expected value;
    assertTensorClose is used if None.
ref_fn: the function that generates the expected data; each case must
    provide its own output if None.
cases: a list of dict elements; the list length should be 2 for the
    dynamic-shape test. Each dict should have an "input" key, and an
    "output" key if ref_fn is None. Use lists for multiple inputs and
    outputs in each case.
kwargs: additional kwargs for the opr func.
simple examples:
dtype = np.float32
cases = [{"input": [10, 20]}, {"input": [20, 30]}]
opr_test(cases,
F.eye,
ref_fn=lambda n, m: np.eye(n, m).astype(dtype),
dtype=dtype)
"""
def check_results(results, expected):
if not isinstance(results, Tuple):
results = (results,)
for r, e in zip(results, expected):
compare_fn(r, e)
def get_trace_fn(func, enabled, symbolic):
jit.trace.enabled = enabled
return | jit.trace(func, symbolic=symbolic) | megengine.jit.trace |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
from typing import List, Tuple
import numpy as np
import megengine._internal as mgb
import megengine.functional as F
from megengine import Graph, jit
from megengine.module import Linear, Module
from megengine.test import assertTensorClose
from .env import modified_environ
class MLP(Module):
def __init__(self):
super().__init__()
self.dense0 = Linear(28, 50)
self.dense1 = Linear(50, 20)
def forward(self, x):
x = self.dense0(x)
x = F.relu(x)
x = self.dense1(x)
return x
def has_gpu(num=1):
try:
mgb.comp_node("gpu{}".format(num - 1))
except mgb.MegBrainError:
return False
return True
def randomNp(*args):
for arg in args:
assert isinstance(arg, int)
return np.random.random(args)
def randomTorch(*args):
import torch # pylint: disable=import-outside-toplevel
for arg in args:
assert isinstance(arg, int)
return torch.tensor(randomNp(*args), dtype=torch.float32)
def graph_mode(*modes):
if not set(modes).issubset({"eager", "static"}):
raise ValueError("graph mode must be in (eager, static)")
def decorator(func):
def wrapper(*args, **kwargs):
if "eager" in set(modes):
func(*args, **kwargs)
if "static" in set(modes):
with | Graph() | megengine.Graph |
# -*- coding: utf-8 -*-
# Copyright 2018-2019 Open-MMLab.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ---------------------------------------------------------------------
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
# This file has been modified by Megvii ("Megvii Modifications").
# All Megvii Modifications are Copyright (C) 2014-2019 Megvii Inc. All rights reserved.
# ---------------------------------------------------------------------
from abc import ABCMeta, abstractmethod
import megengine.functional as F
import numpy as np
from megengine.core import tensor, Tensor
class BaseAnchorGenerator(metaclass=ABCMeta):
"""base class for anchor generator.
"""
def __init__(self):
pass
@abstractmethod
def get_anchors_by_feature(self) -> Tensor:
pass
class DefaultAnchorGenerator(BaseAnchorGenerator):
"""default retinanet anchor generator.
This class generates anchors for a feature map at each level.
Args:
base_size (int): anchor base size.
anchor_scales (np.ndarray): anchor scales based on stride.
The practical anchor scale is anchor_scale * stride.
anchor_ratios (np.ndarray): anchor aspect ratios.
offset (float): center point offset. Default is 0.
"""
def __init__(
self,
base_size=8,
anchor_scales: np.ndarray = np.array([2, 3, 4]),
anchor_ratios: np.ndarray = np.array([0.5, 1, 2]),
offset: float = 0,
):
super().__init__()
self.base_size = base_size
self.anchor_scales = anchor_scales
self.anchor_ratios = anchor_ratios
self.offset = offset
def _whctrs(self, anchor):
"""convert anchor box into (w, h, ctr_x, ctr_y)
"""
w = anchor[:, 2] - anchor[:, 0] + 1
h = anchor[:, 3] - anchor[:, 1] + 1
x_ctr = anchor[:, 0] + 0.5 * (w - 1)
y_ctr = anchor[:, 1] + 0.5 * (h - 1)
return w, h, x_ctr, y_ctr
def get_plane_anchors(self, anchor_scales: np.ndarray):
"""get anchors per location on feature map.
The anchor number is anchor_scales x anchor_ratios
"""
base_anchor = | tensor([0, 0, self.base_size - 1, self.base_size - 1]) | megengine.core.tensor |
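# Worked example (NumPy sketch) of _whctrs on the base anchor above with
# base_size=8: the box [0, 0, 7, 7] has w = h = 8 and center (3.5, 3.5)
# under the "+1" pixel convention used in this file.
import numpy as np
anchor = np.array([[0, 0, 7, 7]], dtype=np.float32)
w = anchor[:, 2] - anchor[:, 0] + 1      # -> [8.]
h = anchor[:, 3] - anchor[:, 1] + 1      # -> [8.]
x_ctr = anchor[:, 0] + 0.5 * (w - 1)     # -> [3.5]
y_ctr = anchor[:, 1] + 0.5 * (h - 1)     # -> [3.5]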
# -*- coding: utf-8 -*-
# Copyright 2018-2019 Open-MMLab.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ---------------------------------------------------------------------
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
# This file has been modified by Megvii ("Megvii Modifications").
# All Megvii Modifications are Copyright (C) 2014-2019 Megvii Inc. All rights reserved.
# ---------------------------------------------------------------------
from abc import ABCMeta, abstractmethod
import megengine.functional as F
import numpy as np
from megengine.core import tensor, Tensor
class BaseAnchorGenerator(metaclass=ABCMeta):
"""base class for anchor generator.
"""
def __init__(self):
pass
@abstractmethod
def get_anchors_by_feature(self) -> Tensor:
pass
class DefaultAnchorGenerator(BaseAnchorGenerator):
"""default retinanet anchor generator.
This class generates anchors for a feature map at each level.
Args:
base_size (int): anchor base size.
anchor_scales (np.ndarray): anchor scales based on stride.
The practical anchor scale is anchor_scale * stride.
anchor_ratios (np.ndarray): anchor aspect ratios.
offset (float): center point offset. Default is 0.
"""
def __init__(
self,
base_size=8,
anchor_scales: np.ndarray = np.array([2, 3, 4]),
anchor_ratios: np.ndarray = np.array([0.5, 1, 2]),
offset: float = 0,
):
super().__init__()
self.base_size = base_size
self.anchor_scales = anchor_scales
self.anchor_ratios = anchor_ratios
self.offset = offset
def _whctrs(self, anchor):
"""convert anchor box into (w, h, ctr_x, ctr_y)
"""
w = anchor[:, 2] - anchor[:, 0] + 1
h = anchor[:, 3] - anchor[:, 1] + 1
x_ctr = anchor[:, 0] + 0.5 * (w - 1)
y_ctr = anchor[:, 1] + 0.5 * (h - 1)
return w, h, x_ctr, y_ctr
def get_plane_anchors(self, anchor_scales: np.ndarray):
"""get anchors per location on feature map.
The anchor number is anchor_scales x anchor_ratios
"""
base_anchor = tensor([0, 0, self.base_size - 1, self.base_size - 1])
base_anchor = | F.add_axis(base_anchor, 0) | megengine.functional.add_axis |
# -*- coding: utf-8 -*-
# Copyright 2018-2019 Open-MMLab.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ---------------------------------------------------------------------
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
# This file has been modified by Megvii ("Megvii Modifications").
# All Megvii Modifications are Copyright (C) 2014-2019 Megvii Inc. All rights reserved.
# ---------------------------------------------------------------------
from abc import ABCMeta, abstractmethod
import megengine.functional as F
import numpy as np
from megengine.core import tensor, Tensor
class BaseAnchorGenerator(metaclass=ABCMeta):
"""base class for anchor generator.
"""
def __init__(self):
pass
@abstractmethod
def get_anchors_by_feature(self) -> Tensor:
pass
class DefaultAnchorGenerator(BaseAnchorGenerator):
"""default retinanet anchor generator.
This class generates anchors for a feature map at each level.
Args:
base_size (int): anchor base size.
anchor_scales (np.ndarray): anchor scales based on stride.
The practical anchor scale is anchor_scale * stride.
anchor_ratios (np.ndarray): anchor aspect ratios.
offset (float): center point offset. Default is 0.
"""
def __init__(
self,
base_size=8,
anchor_scales: np.ndarray = np.array([2, 3, 4]),
anchor_ratios: np.ndarray = np.array([0.5, 1, 2]),
offset: float = 0,
):
super().__init__()
self.base_size = base_size
self.anchor_scales = anchor_scales
self.anchor_ratios = anchor_ratios
self.offset = offset
def _whctrs(self, anchor):
"""convert anchor box into (w, h, ctr_x, ctr_y)
"""
w = anchor[:, 2] - anchor[:, 0] + 1
h = anchor[:, 3] - anchor[:, 1] + 1
x_ctr = anchor[:, 0] + 0.5 * (w - 1)
y_ctr = anchor[:, 1] + 0.5 * (h - 1)
return w, h, x_ctr, y_ctr
def get_plane_anchors(self, anchor_scales: np.ndarray):
"""get anchors per location on feature map.
The anchor number is anchor_scales x anchor_ratios
"""
base_anchor = tensor([0, 0, self.base_size - 1, self.base_size - 1])
base_anchor = F.add_axis(base_anchor, 0)
w, h, x_ctr, y_ctr = self._whctrs(base_anchor)
# ratio enumerate
size = w * h
size_ratios = size / self.anchor_ratios
ws = size_ratios.sqrt().round()
hs = (ws * self.anchor_ratios).round()
# scale enumerate
anchor_scales = anchor_scales[None, ...]
ws = | F.add_axis(ws, 1) | megengine.functional.add_axis |
# -*- coding: utf-8 -*-
# Copyright 2018-2019 Open-MMLab.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ---------------------------------------------------------------------
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
# This file has been modified by Megvii ("Megvii Modifications").
# All Megvii Modifications are Copyright (C) 2014-2019 Megvii Inc. All rights reserved.
# ---------------------------------------------------------------------
from abc import ABCMeta, abstractmethod
import megengine.functional as F
import numpy as np
from megengine.core import tensor, Tensor
class BaseAnchorGenerator(metaclass=ABCMeta):
"""base class for anchor generator.
"""
def __init__(self):
pass
@abstractmethod
def get_anchors_by_feature(self) -> Tensor:
pass
class DefaultAnchorGenerator(BaseAnchorGenerator):
"""default retinanet anchor generator.
This class generates anchors for a feature map at each level.
Args:
base_size (int): anchor base size.
anchor_scales (np.ndarray): anchor scales based on stride.
The practical anchor scale is anchor_scale * stride.
anchor_ratios (np.ndarray): anchor aspect ratios.
offset (float): center point offset. Default is 0.
"""
def __init__(
self,
base_size=8,
anchor_scales: np.ndarray = np.array([2, 3, 4]),
anchor_ratios: np.ndarray = np.array([0.5, 1, 2]),
offset: float = 0,
):
super().__init__()
self.base_size = base_size
self.anchor_scales = anchor_scales
self.anchor_ratios = anchor_ratios
self.offset = offset
def _whctrs(self, anchor):
"""convert anchor box into (w, h, ctr_x, ctr_y)
"""
w = anchor[:, 2] - anchor[:, 0] + 1
h = anchor[:, 3] - anchor[:, 1] + 1
x_ctr = anchor[:, 0] + 0.5 * (w - 1)
y_ctr = anchor[:, 1] + 0.5 * (h - 1)
return w, h, x_ctr, y_ctr
def get_plane_anchors(self, anchor_scales: np.ndarray):
"""get anchors per location on feature map.
The anchor number is anchor_scales x anchor_ratios
"""
base_anchor = tensor([0, 0, self.base_size - 1, self.base_size - 1])
base_anchor = F.add_axis(base_anchor, 0)
w, h, x_ctr, y_ctr = self._whctrs(base_anchor)
# ratio enumerate
size = w * h
size_ratios = size / self.anchor_ratios
ws = size_ratios.sqrt().round()
hs = (ws * self.anchor_ratios).round()
# scale enumerate
anchor_scales = anchor_scales[None, ...]
ws = F.add_axis(ws, 1)
hs = | F.add_axis(hs, 1) | megengine.functional.add_axis |
# -*- coding: utf-8 -*-
# Copyright 2018-2019 Open-MMLab.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ---------------------------------------------------------------------
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
# This file has been modified by Megvii ("Megvii Modifications").
# All Megvii Modifications are Copyright (C) 2014-2019 Megvii Inc. All rights reserved.
# ---------------------------------------------------------------------
from abc import ABCMeta, abstractmethod
import megengine.functional as F
import numpy as np
from megengine.core import tensor, Tensor
class BaseAnchorGenerator(metaclass=ABCMeta):
"""base class for anchor generator.
"""
def __init__(self):
pass
@abstractmethod
def get_anchors_by_feature(self) -> Tensor:
pass
class DefaultAnchorGenerator(BaseAnchorGenerator):
"""default retinanet anchor generator.
This class generates anchors for a feature map at each level.
Args:
base_size (int): anchor base size.
anchor_scales (np.ndarray): anchor scales based on stride.
The practical anchor scale is anchor_scale * stride.
anchor_ratios (np.ndarray): anchor aspect ratios.
offset (float): center point offset. Default is 0.
"""
def __init__(
self,
base_size=8,
anchor_scales: np.ndarray = np.array([2, 3, 4]),
anchor_ratios: np.ndarray = np.array([0.5, 1, 2]),
offset: float = 0,
):
super().__init__()
self.base_size = base_size
self.anchor_scales = anchor_scales
self.anchor_ratios = anchor_ratios
self.offset = offset
def _whctrs(self, anchor):
"""convert anchor box into (w, h, ctr_x, ctr_y)
"""
w = anchor[:, 2] - anchor[:, 0] + 1
h = anchor[:, 3] - anchor[:, 1] + 1
x_ctr = anchor[:, 0] + 0.5 * (w - 1)
y_ctr = anchor[:, 1] + 0.5 * (h - 1)
return w, h, x_ctr, y_ctr
def get_plane_anchors(self, anchor_scales: np.ndarray):
"""get anchors per location on feature map.
The anchor number is anchor_scales x anchor_ratios
"""
base_anchor = tensor([0, 0, self.base_size - 1, self.base_size - 1])
base_anchor = F.add_axis(base_anchor, 0)
w, h, x_ctr, y_ctr = self._whctrs(base_anchor)
# ratio enumerate
size = w * h
size_ratios = size / self.anchor_ratios
ws = size_ratios.sqrt().round()
hs = (ws * self.anchor_ratios).round()
# scale enumerate
anchor_scales = anchor_scales[None, ...]
ws = F.add_axis(ws, 1)
hs = F.add_axis(hs, 1)
ws = (ws * anchor_scales).reshape(-1, 1)
hs = (hs * anchor_scales).reshape(-1, 1)
anchors = F.concat(
[
x_ctr - 0.5 * (ws - 1),
y_ctr - 0.5 * (hs - 1),
x_ctr + 0.5 * (ws - 1),
y_ctr + 0.5 * (hs - 1),
],
axis=1,
)
return anchors.astype(np.float32)
def get_center_offsets(self, featmap, stride):
f_shp = featmap.shape
fm_height, fm_width = f_shp[-2], f_shp[-1]
shift_x = | F.linspace(0, fm_width - 1, fm_width) | megengine.functional.linspace |
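# NumPy sketch of the ratio/scale enumeration inside get_plane_anchors
# above, using the default base_size=8, ratios [0.5, 1, 2] and scales
# [2, 3, 4] (in the real call the scales are multiplied by the stride):
import numpy as np
ratios = np.array([0.5, 1, 2])
scales = np.array([2, 3, 4])
size = 8.0 * 8.0
ws = np.round(np.sqrt(size / ratios))   # widths per ratio  -> [11., 8., 6.]
hs = np.round(ws * ratios)              # heights per ratio -> [6., 8., 12.]
# scale enumeration: 3 ratios x 3 scales = 9 anchors per location
ws = (ws[:, None] * scales[None, :]).reshape(-1)
hs = (hs[:, None] * scales[None, :]).reshape(-1)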
# -*- coding: utf-8 -*-
# Copyright 2018-2019 Open-MMLab.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ---------------------------------------------------------------------
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
# This file has been modified by Megvii ("Megvii Modifications").
# All Megvii Modifications are Copyright (C) 2014-2019 Megvii Inc. All rights reserved.
# ---------------------------------------------------------------------
from abc import ABCMeta, abstractmethod
import megengine.functional as F
import numpy as np
from megengine.core import tensor, Tensor
class BaseAnchorGenerator(metaclass=ABCMeta):
"""base class for anchor generator.
"""
def __init__(self):
pass
@abstractmethod
def get_anchors_by_feature(self) -> Tensor:
pass
class DefaultAnchorGenerator(BaseAnchorGenerator):
"""default retinanet anchor generator.
This class generates anchors for a feature map at each level.
Args:
base_size (int): anchor base size.
anchor_scales (np.ndarray): anchor scales based on stride.
The practical anchor scale is anchor_scale * stride.
anchor_ratios (np.ndarray): anchor aspect ratios.
offset (float): center point offset. Default is 0.
"""
def __init__(
self,
base_size=8,
anchor_scales: np.ndarray = np.array([2, 3, 4]),
anchor_ratios: np.ndarray = np.array([0.5, 1, 2]),
offset: float = 0,
):
super().__init__()
self.base_size = base_size
self.anchor_scales = anchor_scales
self.anchor_ratios = anchor_ratios
self.offset = offset
def _whctrs(self, anchor):
"""convert anchor box into (w, h, ctr_x, ctr_y)
"""
w = anchor[:, 2] - anchor[:, 0] + 1
h = anchor[:, 3] - anchor[:, 1] + 1
x_ctr = anchor[:, 0] + 0.5 * (w - 1)
y_ctr = anchor[:, 1] + 0.5 * (h - 1)
return w, h, x_ctr, y_ctr
def get_plane_anchors(self, anchor_scales: np.ndarray):
"""get anchors per location on feature map.
The anchor number is anchor_scales x anchor_ratios
"""
base_anchor = tensor([0, 0, self.base_size - 1, self.base_size - 1])
base_anchor = F.add_axis(base_anchor, 0)
w, h, x_ctr, y_ctr = self._whctrs(base_anchor)
# ratio enumerate
size = w * h
size_ratios = size / self.anchor_ratios
ws = size_ratios.sqrt().round()
hs = (ws * self.anchor_ratios).round()
# scale enumerate
anchor_scales = anchor_scales[None, ...]
ws = F.add_axis(ws, 1)
hs = F.add_axis(hs, 1)
ws = (ws * anchor_scales).reshape(-1, 1)
hs = (hs * anchor_scales).reshape(-1, 1)
anchors = F.concat(
[
x_ctr - 0.5 * (ws - 1),
y_ctr - 0.5 * (hs - 1),
x_ctr + 0.5 * (ws - 1),
y_ctr + 0.5 * (hs - 1),
],
axis=1,
)
return anchors.astype(np.float32)
def get_center_offsets(self, featmap, stride):
f_shp = featmap.shape
fm_height, fm_width = f_shp[-2], f_shp[-1]
shift_x = F.linspace(0, fm_width - 1, fm_width) * stride
shift_y = | F.linspace(0, fm_height - 1, fm_height) | megengine.functional.linspace |
# -*- coding: utf-8 -*-
# Copyright 2018-2019 Open-MMLab.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ---------------------------------------------------------------------
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
# This file has been modified by Megvii ("Megvii Modifications").
# All Megvii Modifications are Copyright (C) 2014-2019 Megvii Inc. All rights reserved.
# ---------------------------------------------------------------------
from abc import ABCMeta, abstractmethod
import megengine.functional as F
import numpy as np
from megengine.core import tensor, Tensor
class BaseAnchorGenerator(metaclass=ABCMeta):
"""base class for anchor generator.
"""
def __init__(self):
pass
@abstractmethod
def get_anchors_by_feature(self) -> Tensor:
pass
class DefaultAnchorGenerator(BaseAnchorGenerator):
"""default retinanet anchor generator.
This class generates anchors for a feature map at each level.
Args:
base_size (int): anchor base size.
anchor_scales (np.ndarray): anchor scales based on stride.
The practical anchor scale is anchor_scale * stride.
anchor_ratios (np.ndarray): anchor aspect ratios.
offset (float): center point offset. Default is 0.
"""
def __init__(
self,
base_size=8,
anchor_scales: np.ndarray = np.array([2, 3, 4]),
anchor_ratios: np.ndarray = np.array([0.5, 1, 2]),
offset: float = 0,
):
super().__init__()
self.base_size = base_size
self.anchor_scales = anchor_scales
self.anchor_ratios = anchor_ratios
self.offset = offset
def _whctrs(self, anchor):
"""convert anchor box into (w, h, ctr_x, ctr_y)
"""
w = anchor[:, 2] - anchor[:, 0] + 1
h = anchor[:, 3] - anchor[:, 1] + 1
x_ctr = anchor[:, 0] + 0.5 * (w - 1)
y_ctr = anchor[:, 1] + 0.5 * (h - 1)
return w, h, x_ctr, y_ctr
def get_plane_anchors(self, anchor_scales: np.ndarray):
"""get anchors per location on feature map.
The anchor number is anchor_scales x anchor_ratios
"""
base_anchor = tensor([0, 0, self.base_size - 1, self.base_size - 1])
base_anchor = F.add_axis(base_anchor, 0)
w, h, x_ctr, y_ctr = self._whctrs(base_anchor)
# ratio enumerate
size = w * h
size_ratios = size / self.anchor_ratios
ws = size_ratios.sqrt().round()
hs = (ws * self.anchor_ratios).round()
# scale enumerate
anchor_scales = anchor_scales[None, ...]
ws = F.add_axis(ws, 1)
hs = F.add_axis(hs, 1)
ws = (ws * anchor_scales).reshape(-1, 1)
hs = (hs * anchor_scales).reshape(-1, 1)
anchors = F.concat(
[
x_ctr - 0.5 * (ws - 1),
y_ctr - 0.5 * (hs - 1),
x_ctr + 0.5 * (ws - 1),
y_ctr + 0.5 * (hs - 1),
],
axis=1,
)
return anchors.astype(np.float32)
def get_center_offsets(self, featmap, stride):
f_shp = featmap.shape
fm_height, fm_width = f_shp[-2], f_shp[-1]
shift_x = F.linspace(0, fm_width - 1, fm_width) * stride
shift_y = F.linspace(0, fm_height - 1, fm_height) * stride
# make the mesh grid of shift_x and shift_y
mesh_shape = (fm_height, fm_width)
broad_shift_x = shift_x.reshape(-1, shift_x.shape[0]).broadcast(*mesh_shape)
broad_shift_y = shift_y.reshape(shift_y.shape[0], -1).broadcast(*mesh_shape)
flatten_shift_x = F.add_axis(broad_shift_x.reshape(-1), 1)
flatten_shift_y = F.add_axis(broad_shift_y.reshape(-1), 1)
centers = F.concat(
[flatten_shift_x, flatten_shift_y, flatten_shift_x, flatten_shift_y,],
axis=1,
)
if self.offset > 0:
centers = centers + self.offset * stride
return centers
def get_anchors_by_feature(self, featmap, stride):
# shifts shape: [A, 4]
shifts = self.get_center_offsets(featmap, stride)
# plane_anchors shape: [B, 4], e.g. B=9
plane_anchors = self.get_plane_anchors(self.anchor_scales * stride)
all_anchors = | F.add_axis(plane_anchors, 0) | megengine.functional.add_axis |
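# NumPy sketch of the shift grid built in get_center_offsets above:
# a row of x-offsets and a column of y-offsets are broadcast to the full
# (fm_height, fm_width) mesh (stride=4 and a 2x3 feature map, made up):
import numpy as np
stride, fm_height, fm_width = 4, 2, 3
shift_x = np.arange(fm_width, dtype=np.float32) * stride    # [0, 4, 8]
shift_y = np.arange(fm_height, dtype=np.float32) * stride   # [0, 4]
broad_x = np.broadcast_to(shift_x.reshape(1, -1), (fm_height, fm_width))
broad_y = np.broadcast_to(shift_y.reshape(-1, 1), (fm_height, fm_width))
centers = np.stack([broad_x.ravel(), broad_y.ravel(),
                    broad_x.ravel(), broad_y.ravel()], axis=1)   # (H*W, 4)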
# -*- coding: utf-8 -*-
# Copyright 2018-2019 Open-MMLab.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ---------------------------------------------------------------------
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
# This file has been modified by Megvii ("Megvii Modifications").
# All Megvii Modifications are Copyright (C) 2014-2019 Megvii Inc. All rights reserved.
# ---------------------------------------------------------------------
from abc import ABCMeta, abstractmethod
import megengine.functional as F
import numpy as np
from megengine.core import tensor, Tensor
class BaseAnchorGenerator(metaclass=ABCMeta):
"""base class for anchor generator.
"""
def __init__(self):
pass
@abstractmethod
def get_anchors_by_feature(self) -> Tensor:
pass
class DefaultAnchorGenerator(BaseAnchorGenerator):
"""default retinanet anchor generator.
This class generates anchors for a feature map at each level.
Args:
base_size (int): anchor base size.
anchor_scales (np.ndarray): anchor scales based on stride.
The practical anchor scale is anchor_scale * stride.
anchor_ratios (np.ndarray): anchor aspect ratios.
offset (float): center point offset. Default is 0.
"""
def __init__(
self,
base_size=8,
anchor_scales: np.ndarray = np.array([2, 3, 4]),
anchor_ratios: np.ndarray = np.array([0.5, 1, 2]),
offset: float = 0,
):
super().__init__()
self.base_size = base_size
self.anchor_scales = anchor_scales
self.anchor_ratios = anchor_ratios
self.offset = offset
def _whctrs(self, anchor):
"""convert anchor box into (w, h, ctr_x, ctr_y)
"""
w = anchor[:, 2] - anchor[:, 0] + 1
h = anchor[:, 3] - anchor[:, 1] + 1
x_ctr = anchor[:, 0] + 0.5 * (w - 1)
y_ctr = anchor[:, 1] + 0.5 * (h - 1)
return w, h, x_ctr, y_ctr
def get_plane_anchors(self, anchor_scales: np.ndarray):
"""get anchors per location on feature map.
The anchor number is anchor_scales x anchor_ratios
"""
base_anchor = tensor([0, 0, self.base_size - 1, self.base_size - 1])
base_anchor = F.add_axis(base_anchor, 0)
w, h, x_ctr, y_ctr = self._whctrs(base_anchor)
# ratio enumerate
size = w * h
size_ratios = size / self.anchor_ratios
ws = size_ratios.sqrt().round()
hs = (ws * self.anchor_ratios).round()
# scale enumerate
anchor_scales = anchor_scales[None, ...]
ws = F.add_axis(ws, 1)
hs = F.add_axis(hs, 1)
ws = (ws * anchor_scales).reshape(-1, 1)
hs = (hs * anchor_scales).reshape(-1, 1)
anchors = F.concat(
[
x_ctr - 0.5 * (ws - 1),
y_ctr - 0.5 * (hs - 1),
x_ctr + 0.5 * (ws - 1),
y_ctr + 0.5 * (hs - 1),
],
axis=1,
)
return anchors.astype(np.float32)
def get_center_offsets(self, featmap, stride):
f_shp = featmap.shape
fm_height, fm_width = f_shp[-2], f_shp[-1]
shift_x = F.linspace(0, fm_width - 1, fm_width) * stride
shift_y = F.linspace(0, fm_height - 1, fm_height) * stride
# make the mesh grid of shift_x and shift_y
mesh_shape = (fm_height, fm_width)
broad_shift_x = shift_x.reshape(-1, shift_x.shape[0]).broadcast(*mesh_shape)
broad_shift_y = shift_y.reshape(shift_y.shape[0], -1).broadcast(*mesh_shape)
flatten_shift_x = F.add_axis(broad_shift_x.reshape(-1), 1)
flatten_shift_y = F.add_axis(broad_shift_y.reshape(-1), 1)
centers = F.concat(
[flatten_shift_x, flatten_shift_y, flatten_shift_x, flatten_shift_y,],
axis=1,
)
if self.offset > 0:
centers = centers + self.offset * stride
return centers
def get_anchors_by_feature(self, featmap, stride):
# shifts shape: [A, 4]
shifts = self.get_center_offsets(featmap, stride)
# plane_anchors shape: [B, 4], e.g. B=9
plane_anchors = self.get_plane_anchors(self.anchor_scales * stride)
all_anchors = F.add_axis(plane_anchors, 0) + | F.add_axis(shifts, 1) | megengine.functional.add_axis |
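# NumPy sketch of the broadcast in get_anchors_by_feature above: shifts of
# shape [A, 4] and plane anchors of shape [B, 4] combine into [A, B, 4]
# (A = H*W grid cells, B = anchors per cell), then flatten to [A*B, 4]:
import numpy as np
A, B = 6, 9                                    # e.g. a 2x3 feature map, 9 anchors
shifts = np.zeros((A, 4), dtype=np.float32)    # per-cell center offsets
plane_anchors = np.zeros((B, 4), dtype=np.float32)
all_anchors = plane_anchors[None, :, :] + shifts[:, None, :]   # (A, B, 4)
all_anchors = all_anchors.reshape(-1, 4)                       # (A*B, 4)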
import os
import sys
import pytest
from megengine.core._imperative_rt.imperative import sync
sys.path.append(os.path.join(os.path.dirname(__file__), "helpers"))
def pytest_runtest_teardown():
| sync() | megengine.core._imperative_rt.imperative.sync |
# Some code is modified from pytorch
# pytorch is licensed under BSD
# From PyTorch:
# Copyright (c) 2016- Facebook, Inc (<NAME>)
# Copyright (c) 2014- Facebook, Inc (<NAME>)
# Copyright (c) 2011-2014 Idiap Research Institute (<NAME>)
# Copyright (c) 2012-2014 Deepmind Technologies (Koray Kavukcuoglu)
# Copyright (c) 2011-2012 NEC Laboratories America (Koray Kavukcuoglu)
# Copyright (c) 2011-2013 NYU (<NAME>)
# Copyright (c) 2006-2010 NEC Laboratories America (<NAME>, <NAME>, <NAME>, <NAME>)
# Copyright (c) 2006 Idiap Research Institute (<NAME>)
# Copyright (c) 2001-2004 Idiap Research Institute (<NAME>, <NAME>, <NAME>)
# From Caffe2:
# Copyright (c) 2016-present, Facebook Inc. All rights reserved.
# All contributions by Facebook:
# Copyright (c) 2016 Facebook Inc.
# All contributions by Google:
# Copyright (c) 2015 Google Inc.
# All rights reserved.
# All contributions by Yang<NAME>:
# Copyright (c) 2015 Yang<NAME>
# All rights reserved.
# All contributions by Kakao Brain:
# Copyright 2019-2020 Kakao Brain
# All contributions from Caffe:
# Copyright(c) 2013, 2014, 2015, the respective contributors
# All rights reserved.
# All other contributions:
# Copyright(c) 2015, 2016 the respective contributors
# All rights reserved.
# Caffe2 uses a copyright model similar to Caffe: each contributor holds
# copyright over their contributions to Caffe2. The project versioning records
# all such contribution and copyright details. If a contributor wants to further
# mark their specific copyright on a particular contribution, they should
# indicate their copyright solely in the commit message of the change when it is
# committed.
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the names of Facebook, Deepmind Technologies, NYU, NEC Laboratories America
# and IDIAP Research Institute nor the names of its contributors may be
# used to endorse or promote products derived from this software without
# specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import megengine as mge
import megengine.module as M
from megengine import functional as F
from megengine import Parameter
from megengine.module.init import xavier_uniform_, zeros_
from typing import List, Tuple, Dict, Optional
import numpy as np
from .utility import safe_masked_fill, has_nan_or_inf
def multi_head_attention_forward(
query: mge.Tensor,
key: mge.Tensor,
value: mge.Tensor,
embed_dim_to_check: int,
num_heads: int,
in_proj_weight: mge.Tensor,
in_proj_bias: Optional[mge.Tensor],
bias_k: Optional[mge.Tensor],
bias_v: Optional[mge.Tensor],
add_zero_attn: bool,
dropout_p: float,
out_proj_weight: mge.Tensor,
out_proj_bias: Optional[mge.Tensor],
training: bool = True,
key_padding_mask: Optional[mge.Tensor] = None,
need_weights: bool = True,
attn_mask: Optional[mge.Tensor] = None,
use_separate_proj_weight: bool = False,
q_proj_weight: Optional[mge.Tensor] = None,
k_proj_weight: Optional[mge.Tensor] = None,
v_proj_weight: Optional[mge.Tensor] = None,
static_k: Optional[mge.Tensor] = None,
static_v: Optional[mge.Tensor] = None,
proj_only: bool = False
) -> Tuple[mge.Tensor, Optional[mge.Tensor]]:
r"""
Args:
query, key, value: map a query and a set of key-value pairs to an output.
See "Attention Is All You Need" for more details.
embed_dim_to_check: total dimension of the model.
num_heads: parallel attention heads.
in_proj_weight, in_proj_bias: input projection weight and bias.
bias_k, bias_v: bias of the key and value sequences to be added at dim=0.
add_zero_attn: add a new batch of zeros to the key and
value sequences at dim=1.
dropout_p: probability of an element to be zeroed.
out_proj_weight, out_proj_bias: the output projection weight and bias.
training: apply dropout if ``True``.
key_padding_mask: if provided, specified padding elements in the key will
be ignored by the attention. This is a binary mask. When the value is True,
the corresponding value on the attention layer will be filled with -inf.
need_weights: output attn_output_weights.
attn_mask: 2D or 3D mask that prevents attention to certain positions. A 2D mask will be broadcast for all
the batches while a 3D mask allows a different mask to be specified for the entries of each batch.
use_separate_proj_weight: the function accepts the proj. weights for query, key,
and value in different forms. If false, in_proj_weight will be used, which is
a combination of q_proj_weight, k_proj_weight, v_proj_weight.
q_proj_weight, k_proj_weight, v_proj_weight, in_proj_bias: input projection weight and bias.
static_k, static_v: static key and value used for attention operators.
Shape:
Inputs:
- query: :math:`(L, N, E)` where L is the target sequence length, N is the batch size, E is
the embedding dimension.
- key: :math:`(S, N, E)`, where S is the source sequence length, N is the batch size, E is
the embedding dimension.
- value: :math:`(S, N, E)` where S is the source sequence length, N is the batch size, E is
the embedding dimension.
- key_padding_mask: :math:`(N, S)` where N is the batch size, S is the source sequence length.
If a ByteTensor is provided, the non-zero positions will be ignored while the zero positions
will be unchanged. If a BoolTensor is provided, the positions with the
value of ``True`` will be ignored while the position with the value of ``False`` will be unchanged.
- attn_mask: 2D mask :math:`(L, S)` where L is the target sequence length, S is the source sequence length.
3D mask :math:`(N*num_heads, L, S)` where N is the batch size, L is the target sequence length,
S is the source sequence length. attn_mask ensures that position i is allowed to attend the unmasked
positions. If a ByteTensor is provided, the non-zero positions are not allowed to attend
while the zero positions will be unchanged. If a BoolTensor is provided, positions with ``True``
are not allowed to attend while ``False`` values will be unchanged. If a FloatTensor
is provided, it will be added to the attention weight.
- static_k: :math:`(N*num_heads, S, E/num_heads)`, where S is the source sequence length,
N is the batch size, E is the embedding dimension. E/num_heads is the head dimension.
- static_v: :math:`(N*num_heads, S, E/num_heads)`, where S is the source sequence length,
N is the batch size, E is the embedding dimension. E/num_heads is the head dimension.
Outputs:
- attn_output: :math:`(L, N, E)` where L is the target sequence length, N is the batch size,
E is the embedding dimension.
- attn_output_weights: :math:`(N, L, S)` where N is the batch size,
L is the target sequence length, S is the source sequence length.
"""
tgt_len, bsz, embed_dim = query.shape
assert embed_dim == embed_dim_to_check
# allow MHA to have different sizes for the feature dimension
assert key.shape[0] == value.shape[0] and key.shape[1] == value.shape[1]
if isinstance(embed_dim, mge.Tensor):
# embed_dim can be a tensor when JIT tracing
#head_dim = embed_dim.div(num_heads, rounding_mode='trunc')
#NOTE: for positive numbers, floor_div is equivalent to trunc_div (only floor_div is available in megengine)
head_dim = F.floor_div(embed_dim, num_heads)
else:
head_dim = embed_dim // num_heads
assert head_dim * num_heads == embed_dim, "embed_dim must be divisible by num_heads"
scaling = float(head_dim) ** -0.5
assert not use_separate_proj_weight
assert need_weights
assert attn_mask is None
assert bias_k is None and bias_v is None
assert not add_zero_attn
# This is an inline in_proj function with in_proj_weight and in_proj_bias
_b = in_proj_bias
_start = 0
_end = embed_dim
_w = in_proj_weight[_start:_end, :]
if _b is not None:
_b = _b[_start:_end]
q = | F.linear(query, _w, _b) | megengine.functional.linear |
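# NumPy sketch of what the `scaling` computed above is for: the attention
# logits are softmax(q @ k.T / sqrt(d_head)), and multiplying q by
# head_dim ** -0.5 up front is equivalent (presumably applied to q later
# in the full function). Shapes and values below are made up.
import numpy as np
d_head = 8
q = np.random.rand(5, d_head).astype(np.float32)   # (L, d_head)
k = np.random.rand(7, d_head).astype(np.float32)   # (S, d_head)
logits = (q * d_head ** -0.5) @ k.T                # (L, S) scaled dot product
attn = np.exp(logits) / np.exp(logits).sum(axis=1, keepdims=True)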
# Some code is modified from pytorch
# pytorch is licensed under BSD
# From PyTorch:
# Copyright (c) 2016- Facebook, Inc (<NAME>)
# Copyright (c) 2014- Facebook, Inc (<NAME>)
# Copyright (c) 2011-2014 Idiap Research Institute (<NAME>)
# Copyright (c) 2012-2014 Deepmind Technologies (Koray Kavukcuoglu)
# Copyright (c) 2011-2012 NEC Laboratories America (Koray Kavukcuoglu)
# Copyright (c) 2011-2013 NYU (<NAME>)
# Copyright (c) 2006-2010 NEC Laboratories America (<NAME>, <NAME>, <NAME>, <NAME>)
# Copyright (c) 2006 Idiap Research Institute (<NAME>)
# Copyright (c) 2001-2004 Idiap Research Institute (<NAME>, <NAME>, <NAME>)
# From Caffe2:
# Copyright (c) 2016-present, Facebook Inc. All rights reserved.
# All contributions by Facebook:
# Copyright (c) 2016 Facebook Inc.
# All contributions by Google:
# Copyright (c) 2015 Google Inc.
# All rights reserved.
# All contributions by Yang<NAME>:
# Copyright (c) 2015 Yang<NAME>
# All rights reserved.
# All contributions by Kakao Brain:
# Copyright 2019-2020 Kakao Brain
# All contributions from Caffe:
# Copyright(c) 2013, 2014, 2015, the respective contributors
# All rights reserved.
# All other contributions:
# Copyright(c) 2015, 2016 the respective contributors
# All rights reserved.
# Caffe2 uses a copyright model similar to Caffe: each contributor holds
# copyright over their contributions to Caffe2. The project versioning records
# all such contribution and copyright details. If a contributor wants to further
# mark their specific copyright on a particular contribution, they should
# indicate their copyright solely in the commit message of the change when it is
# committed.
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the names of Facebook, Deepmind Technologies, NYU, NEC Laboratories America
# and IDIAP Research Institute nor the names of its contributors may be
# used to endorse or promote products derived from this software without
# specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import megengine as mge
import megengine.module as M
from megengine import functional as F
from megengine import Parameter
from megengine.module.init import xavier_uniform_, zeros_
from typing import List, Tuple, Dict, Optional
import numpy as np
from .utility import safe_masked_fill, has_nan_or_inf
def multi_head_attention_forward(
query: mge.Tensor,
key: mge.Tensor,
value: mge.Tensor,
embed_dim_to_check: int,
num_heads: int,
in_proj_weight: mge.Tensor,
in_proj_bias: Optional[mge.Tensor],
bias_k: Optional[mge.Tensor],
bias_v: Optional[mge.Tensor],
add_zero_attn: bool,
dropout_p: float,
out_proj_weight: mge.Tensor,
out_proj_bias: Optional[mge.Tensor],
training: bool = True,
key_padding_mask: Optional[mge.Tensor] = None,
need_weights: bool = True,
attn_mask: Optional[mge.Tensor] = None,
use_separate_proj_weight: bool = False,
q_proj_weight: Optional[mge.Tensor] = None,
k_proj_weight: Optional[mge.Tensor] = None,
v_proj_weight: Optional[mge.Tensor] = None,
static_k: Optional[mge.Tensor] = None,
static_v: Optional[mge.Tensor] = None,
proj_only: bool = False
) -> Tuple[mge.Tensor, Optional[mge.Tensor]]:
r"""
Args:
query, key, value: map a query and a set of key-value pairs to an output.
See "Attention Is All You Need" for more details.
embed_dim_to_check: total dimension of the model.
num_heads: parallel attention heads.
in_proj_weight, in_proj_bias: input projection weight and bias.
bias_k, bias_v: bias of the key and value sequences to be added at dim=0.
add_zero_attn: add a new batch of zeros to the key and
value sequences at dim=1.
dropout_p: probability of an element to be zeroed.
out_proj_weight, out_proj_bias: the output projection weight and bias.
training: apply dropout if ``True``.
key_padding_mask: if provided, specified padding elements in the key will
be ignored by the attention. This is a binary mask. When the value is True,
the corresponding value on the attention layer will be filled with -inf.
need_weights: output attn_output_weights.
attn_mask: 2D or 3D mask that prevents attention to certain positions. A 2D mask will be broadcast for all
the batches while a 3D mask allows a different mask to be specified for the entries of each batch.
use_separate_proj_weight: the function accepts the proj. weights for query, key,
and value in different forms. If false, in_proj_weight will be used, which is
a combination of q_proj_weight, k_proj_weight, v_proj_weight.
q_proj_weight, k_proj_weight, v_proj_weight, in_proj_bias: input projection weight and bias.
static_k, static_v: static key and value used for attention operators.
Shape:
Inputs:
- query: :math:`(L, N, E)` where L is the target sequence length, N is the batch size, E is
the embedding dimension.
- key: :math:`(S, N, E)`, where S is the source sequence length, N is the batch size, E is
the embedding dimension.
- value: :math:`(S, N, E)` where S is the source sequence length, N is the batch size, E is
the embedding dimension.
- key_padding_mask: :math:`(N, S)` where N is the batch size, S is the source sequence length.
If a ByteTensor is provided, the non-zero positions will be ignored while the zero positions
will be unchanged. If a BoolTensor is provided, the positions with the
value of ``True`` will be ignored while the position with the value of ``False`` will be unchanged.
- attn_mask: 2D mask :math:`(L, S)` where L is the target sequence length, S is the source sequence length.
3D mask :math:`(N*num_heads, L, S)` where N is the batch size, L is the target sequence length,
S is the source sequence length. attn_mask ensures that position i is allowed to attend the unmasked
positions. If a ByteTensor is provided, the non-zero positions are not allowed to attend
while the zero positions will be unchanged. If a BoolTensor is provided, positions with ``True``
are not allowed to attend while ``False`` values will be unchanged. If a FloatTensor
is provided, it will be added to the attention weight.
- static_k: :math:`(N*num_heads, S, E/num_heads)`, where S is the source sequence length,
N is the batch size, E is the embedding dimension. E/num_heads is the head dimension.
- static_v: :math:`(N*num_heads, S, E/num_heads)`, where S is the source sequence length,
N is the batch size, E is the embedding dimension. E/num_heads is the head dimension.
Outputs:
- attn_output: :math:`(L, N, E)` where L is the target sequence length, N is the batch size,
E is the embedding dimension.
- attn_output_weights: :math:`(N, L, S)` where N is the batch size,
L is the target sequence length, S is the source sequence length.
"""
tgt_len, bsz, embed_dim = query.shape
assert embed_dim == embed_dim_to_check
# allow MHA to have different sizes for the feature dimension
assert key.shape[0] == value.shape[0] and key.shape[1] == value.shape[1]
if isinstance(embed_dim, mge.Tensor):
# embed_dim can be a tensor when JIT tracing
#head_dim = embed_dim.div(num_heads, rounding_mode='trunc')
#NOTE: for positive numbers, floor_div is equivalent to trunc_div (only floor_div is available in megengine)
head_dim = F.floor_div(embed_dim, num_heads)
else:
head_dim = embed_dim // num_heads
assert head_dim * num_heads == embed_dim, "embed_dim must be divisible by num_heads"
scaling = float(head_dim) ** -0.5
assert not use_separate_proj_weight
assert need_weights
assert attn_mask is None
assert bias_k is None and bias_v is None
assert not add_zero_attn
# This is an inline in_proj function with in_proj_weight and in_proj_bias
_b = in_proj_bias
_start = 0
_end = embed_dim
_w = in_proj_weight[_start:_end, :]
if _b is not None:
_b = _b[_start:_end]
q = F.linear(query, _w, _b)
# This is an inline in_proj function with in_proj_weight and in_proj_bias
_b = in_proj_bias
_start = embed_dim
_end = embed_dim * 2
_w = in_proj_weight[_start:_end, :]
if _b is not None:
_b = _b[_start:_end]
k = | F.linear(key, _w, _b) | megengine.functional.linear |
# Some code is modified from pytorch
# pytorch is licensed under BSD
# From PyTorch:
# Copyright (c) 2016- Facebook, Inc (<NAME>)
# Copyright (c) 2014- Facebook, Inc (<NAME>)
# Copyright (c) 2011-2014 Idiap Research Institute (<NAME>)
# Copyright (c) 2012-2014 Deepmind Technologies (Koray Kavukcuoglu)
# Copyright (c) 2011-2012 NEC Laboratories America (Koray Kavukcuoglu)
# Copyright (c) 2011-2013 NYU (<NAME>)
# Copyright (c) 2006-2010 NEC Laboratories America (<NAME>, <NAME>, <NAME>, <NAME>)
# Copyright (c) 2006 Idiap Research Institute (<NAME>)
# Copyright (c) 2001-2004 Idiap Research Institute (<NAME>, <NAME>, <NAME>)
# From Caffe2:
# Copyright (c) 2016-present, Facebook Inc. All rights reserved.
# All contributions by Facebook:
# Copyright (c) 2016 Facebook Inc.
# All contributions by Google:
# Copyright (c) 2015 Google Inc.
# All rights reserved.
# All contributions by Yang<NAME>:
# Copyright (c) 2015 Yang<NAME>
# All rights reserved.
# All contributions by Kakao Brain:
# Copyright 2019-2020 Kakao Brain
# All contributions from Caffe:
# Copyright(c) 2013, 2014, 2015, the respective contributors
# All rights reserved.
# All other contributions:
# Copyright(c) 2015, 2016 the respective contributors
# All rights reserved.
# Caffe2 uses a copyright model similar to Caffe: each contributor holds
# copyright over their contributions to Caffe2. The project versioning records
# all such contribution and copyright details. If a contributor wants to further
# mark their specific copyright on a particular contribution, they should
# indicate their copyright solely in the commit message of the change when it is
# committed.
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the names of Facebook, Deepmind Technologies, NYU, NEC Laboratories America
# and IDIAP Research Institute nor the names of its contributors may be
# used to endorse or promote products derived from this software without
# specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import megengine as mge
import megengine.module as M
from megengine import functional as F
from megengine import Parameter
from megengine.module.init import xavier_uniform_, zeros_
from typing import List, Tuple, Dict, Optional
import numpy as np
from .utility import safe_masked_fill, has_nan_or_inf
def multi_head_attention_forward(
query: mge.Tensor,
key: mge.Tensor,
value: mge.Tensor,
embed_dim_to_check: int,
num_heads: int,
in_proj_weight: mge.Tensor,
in_proj_bias: Optional[mge.Tensor],
bias_k: Optional[mge.Tensor],
bias_v: Optional[mge.Tensor],
add_zero_attn: bool,
dropout_p: float,
out_proj_weight: mge.Tensor,
out_proj_bias: Optional[mge.Tensor],
training: bool = True,
key_padding_mask: Optional[mge.Tensor] = None,
need_weights: bool = True,
attn_mask: Optional[mge.Tensor] = None,
use_separate_proj_weight: bool = False,
q_proj_weight: Optional[mge.Tensor] = None,
k_proj_weight: Optional[mge.Tensor] = None,
v_proj_weight: Optional[mge.Tensor] = None,
static_k: Optional[mge.Tensor] = None,
static_v: Optional[mge.Tensor] = None,
proj_only: bool = False
) -> Tuple[mge.Tensor, Optional[mge.Tensor]]:
r"""
Args:
query, key, value: map a query and a set of key-value pairs to an output.
See "Attention Is All You Need" for more details.
embed_dim_to_check: total dimension of the model.
num_heads: parallel attention heads.
in_proj_weight, in_proj_bias: input projection weight and bias.
bias_k, bias_v: bias of the key and value sequences to be added at dim=0.
add_zero_attn: add a new batch of zeros to the key and
value sequences at dim=1.
dropout_p: probability of an element to be zeroed.
out_proj_weight, out_proj_bias: the output projection weight and bias.
training: apply dropout if ``True``.
key_padding_mask: if provided, specified padding elements in the key will
be ignored by the attention. This is a binary mask. When the value is True,
the corresponding value on the attention layer will be filled with -inf.
need_weights: output attn_output_weights.
attn_mask: 2D or 3D mask that prevents attention to certain positions. A 2D mask will be broadcast for all
the batches while a 3D mask allows a different mask to be specified for the entries of each batch.
use_separate_proj_weight: the function accepts the proj. weights for query, key,
and value in different forms. If false, in_proj_weight will be used, which is
a combination of q_proj_weight, k_proj_weight, v_proj_weight.
q_proj_weight, k_proj_weight, v_proj_weight, in_proj_bias: input projection weight and bias.
static_k, static_v: static key and value used for attention operators.
Shape:
Inputs:
- query: :math:`(L, N, E)` where L is the target sequence length, N is the batch size, E is
the embedding dimension.
- key: :math:`(S, N, E)`, where S is the source sequence length, N is the batch size, E is
the embedding dimension.
- value: :math:`(S, N, E)` where S is the source sequence length, N is the batch size, E is
the embedding dimension.
- key_padding_mask: :math:`(N, S)` where N is the batch size, S is the source sequence length.
If a ByteTensor is provided, the non-zero positions will be ignored while the zero positions
will be unchanged. If a BoolTensor is provided, the positions with the
value of ``True`` will be ignored while the position with the value of ``False`` will be unchanged.
- attn_mask: 2D mask :math:`(L, S)` where L is the target sequence length, S is the source sequence length.
3D mask :math:`(N*num_heads, L, S)` where N is the batch size, L is the target sequence length,
S is the source sequence length. attn_mask ensures that position i is allowed to attend the unmasked
positions. If a ByteTensor is provided, the non-zero positions are not allowed to attend
while the zero positions will be unchanged. If a BoolTensor is provided, positions with ``True``
are not allowed to attend while ``False`` values will be unchanged. If a FloatTensor
is provided, it will be added to the attention weight.
- static_k: :math:`(N*num_heads, S, E/num_heads)`, where S is the source sequence length,
N is the batch size, E is the embedding dimension. E/num_heads is the head dimension.
- static_v: :math:`(N*num_heads, S, E/num_heads)`, where S is the source sequence length,
N is the batch size, E is the embedding dimension. E/num_heads is the head dimension.
Outputs:
- attn_output: :math:`(L, N, E)` where L is the target sequence length, N is the batch size,
E is the embedding dimension.
- attn_output_weights: :math:`(N, L, S)` where N is the batch size,
L is the target sequence length, S is the source sequence length.
"""
tgt_len, bsz, embed_dim = query.shape
assert embed_dim == embed_dim_to_check
# allow MHA to have different sizes for the feature dimension
assert key.shape[0] == value.shape[0] and key.shape[1] == value.shape[1]
if isinstance(embed_dim, mge.Tensor):
# embed_dim can be a tensor when JIT tracing
#head_dim = embed_dim.div(num_heads, rounding_mode='trunc')
#NOTE: for positive numbers, floor_div is equivalent to trunc_div (only floor_div is available in megengine)
head_dim = F.floor_div(embed_dim, num_heads)
else:
head_dim = embed_dim // num_heads
assert head_dim * num_heads == embed_dim, "embed_dim must be divisible by num_heads"
scaling = float(head_dim) ** -0.5
assert not use_separate_proj_weight
assert need_weights
assert attn_mask is None
assert bias_k is None and bias_v is None
assert not add_zero_attn
# This is an inline in_proj function with in_proj_weight and in_proj_bias
_b = in_proj_bias
_start = 0
_end = embed_dim
_w = in_proj_weight[_start:_end, :]
if _b is not None:
_b = _b[_start:_end]
q = F.linear(query, _w, _b)
# This is an inline in_proj function with in_proj_weight and in_proj_bias
_b = in_proj_bias
_start = embed_dim
_end = embed_dim * 2
_w = in_proj_weight[_start:_end, :]
if _b is not None:
_b = _b[_start:_end]
k = F.linear(key, _w, _b)
# This is an inline in_proj function with in_proj_weight and in_proj_bias
_b = in_proj_bias
_start = embed_dim * 2
_end = None
_w = in_proj_weight[_start:, :]
if _b is not None:
_b = _b[_start:]
v = | F.linear(value, _w, _b) | megengine.functional.linear |
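# NumPy sketch of the packed in_proj slicing used above: in_proj_weight
# stacks the q/k/v projection weights along dim 0, so rows [0, E), [E, 2E)
# and [2E, 3E) project query, key and value respectively (E = embed_dim;
# sizes below are made up, and x @ w.T mirrors what F.linear computes).
import numpy as np
E = 4
in_proj_weight = np.arange(3 * E * E, dtype=np.float32).reshape(3 * E, E)
q_w = in_proj_weight[0:E, :]        # rows [0, E)   -> query projection
k_w = in_proj_weight[E:2 * E, :]    # rows [E, 2E)  -> key projection
v_w = in_proj_weight[2 * E:, :]     # rows [2E, 3E) -> value projection
x = np.ones((2, E), dtype=np.float32)
q = x @ q_w.T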
# Some code is modified from pytorch
# pytorch is licensed under BSD
# From PyTorch:
# Copyright (c) 2016- Facebook, Inc (<NAME>)
# Copyright (c) 2014- Facebook, Inc (<NAME>)
# Copyright (c) 2011-2014 Idiap Research Institute (<NAME>)
# Copyright (c) 2012-2014 Deepmind Technologies (Koray Kavukcuoglu)
# Copyright (c) 2011-2012 NEC Laboratories America (Koray Kavukcuoglu)
# Copyright (c) 2011-2013 NYU (<NAME>)
# Copyright (c) 2006-2010 NEC Laboratories America (<NAME>, <NAME>, <NAME>, <NAME>)
# Copyright (c) 2006 Idiap Research Institute (<NAME>)
# Copyright (c) 2001-2004 Idiap Research Institute (<NAME>, <NAME>, <NAME>)
# From Caffe2:
# Copyright (c) 2016-present, Facebook Inc. All rights reserved.
# All contributions by Facebook:
# Copyright (c) 2016 Facebook Inc.
# All contributions by Google:
# Copyright (c) 2015 Google Inc.
# All rights reserved.
# All contributions by Yang<NAME>:
# Copyright (c) 2015 Yang<NAME>
# All rights reserved.
# All contributions by Kakao Brain:
# Copyright 2019-2020 Kakao Brain
# All contributions from Caffe:
# Copyright(c) 2013, 2014, 2015, the respective contributors
# All rights reserved.
# All other contributions:
# Copyright(c) 2015, 2016 the respective contributors
# All rights reserved.
# Caffe2 uses a copyright model similar to Caffe: each contributor holds
# copyright over their contributions to Caffe2. The project versioning records
# all such contribution and copyright details. If a contributor wants to further
# mark their specific copyright on a particular contribution, they should
# indicate their copyright solely in the commit message of the change when it is
# committed.
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the names of Facebook, Deepmind Technologies, NYU, NEC Laboratories America
# and IDIAP Research Institute nor the names of its contributors may be
# used to endorse or promote products derived from this software without
# specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import warnings  # used by the key_padding_mask dtype deprecation warning below
import megengine as mge
import megengine.module as M
from megengine import functional as F
from megengine import Parameter
from megengine.module.init import xavier_uniform_, zeros_
from typing import List, Tuple, Dict, Optional
import numpy as np
from .utility import safe_masked_fill, has_nan_or_inf
def multi_head_attention_forward(
query: mge.Tensor,
key: mge.Tensor,
value: mge.Tensor,
embed_dim_to_check: int,
num_heads: int,
in_proj_weight: mge.Tensor,
in_proj_bias: Optional[mge.Tensor],
bias_k: Optional[mge.Tensor],
bias_v: Optional[mge.Tensor],
add_zero_attn: bool,
dropout_p: float,
out_proj_weight: mge.Tensor,
out_proj_bias: Optional[mge.Tensor],
training: bool = True,
key_padding_mask: Optional[mge.Tensor] = None,
need_weights: bool = True,
attn_mask: Optional[mge.Tensor] = None,
use_separate_proj_weight: bool = False,
q_proj_weight: Optional[mge.Tensor] = None,
k_proj_weight: Optional[mge.Tensor] = None,
v_proj_weight: Optional[mge.Tensor] = None,
static_k: Optional[mge.Tensor] = None,
static_v: Optional[mge.Tensor] = None,
proj_only: bool = False
) -> Tuple[mge.Tensor, Optional[mge.Tensor], mge.Tensor]:  # (attn_output, attn_weights, raw_v)
r"""
Args:
query, key, value: map a query and a set of key-value pairs to an output.
See "Attention Is All You Need" for more details.
embed_dim_to_check: total dimension of the model.
num_heads: parallel attention heads.
in_proj_weight, in_proj_bias: input projection weight and bias.
bias_k, bias_v: bias of the key and value sequences to be added at dim=0.
add_zero_attn: add a new batch of zeros to the key and
value sequences at dim=1.
dropout_p: probability of an element to be zeroed.
out_proj_weight, out_proj_bias: the output projection weight and bias.
        training: apply dropout if ``True``.
        key_padding_mask: if provided, specified padding elements in the key will
            be ignored by the attention. This is a binary mask. Where the value is
            ``True``, the corresponding attention weights will be filled with ``-inf``.
need_weights: output attn_output_weights.
attn_mask: 2D or 3D mask that prevents attention to certain positions. A 2D mask will be broadcasted for all
the batches while a 3D mask allows to specify a different mask for the entries of each batch.
        use_separate_proj_weight: the function accepts the projection weights for
            query, key, and value in different forms. If ``False``, in_proj_weight
            will be used, which is a concatenation of q_proj_weight, k_proj_weight,
            and v_proj_weight.
q_proj_weight, k_proj_weight, v_proj_weight, in_proj_bias: input projection weight and bias.
        static_k, static_v: static key and value used for attention operators.
        proj_only: if ``True``, skip the attention computation and return
            ``(query, None, raw_v)`` right after the input projections.
Shape:
Inputs:
- query: :math:`(L, N, E)` where L is the target sequence length, N is the batch size, E is
the embedding dimension.
- key: :math:`(S, N, E)`, where S is the source sequence length, N is the batch size, E is
the embedding dimension.
- value: :math:`(S, N, E)` where S is the source sequence length, N is the batch size, E is
the embedding dimension.
- key_padding_mask: :math:`(N, S)` where N is the batch size, S is the source sequence length.
If a ByteTensor is provided, the non-zero positions will be ignored while the zero positions
will be unchanged. If a BoolTensor is provided, the positions with the
value of ``True`` will be ignored while the position with the value of ``False`` will be unchanged.
- attn_mask: 2D mask :math:`(L, S)` where L is the target sequence length, S is the source sequence length.
3D mask :math:`(N*num_heads, L, S)` where N is the batch size, L is the target sequence length,
S is the source sequence length. attn_mask ensures that position i is allowed to attend the unmasked
positions. If a ByteTensor is provided, the non-zero positions are not allowed to attend
while the zero positions will be unchanged. If a BoolTensor is provided, positions with ``True``
are not allowed to attend while ``False`` values will be unchanged. If a FloatTensor
is provided, it will be added to the attention weight.
- static_k: :math:`(N*num_heads, S, E/num_heads)`, where S is the source sequence length,
N is the batch size, E is the embedding dimension. E/num_heads is the head dimension.
- static_v: :math:`(N*num_heads, S, E/num_heads)`, where S is the source sequence length,
N is the batch size, E is the embedding dimension. E/num_heads is the head dimension.
Outputs:
- attn_output: :math:`(L, N, E)` where L is the target sequence length, N is the batch size,
E is the embedding dimension.
        - attn_output_weights: :math:`(N, L, S)` where N is the batch size,
          L is the target sequence length, S is the source sequence length.
        - raw_v: :math:`(S, N, num_heads, E/num_heads)`, the per-head projected
          values, returned as the third element of the output tuple.
"""
tgt_len, bsz, embed_dim = query.shape
assert embed_dim == embed_dim_to_check
# allow MHA to have different sizes for the feature dimension
assert key.shape[0] == value.shape[0] and key.shape[1] == value.shape[1]
if isinstance(embed_dim, mge.Tensor):
# embed_dim can be a tensor when JIT tracing
#head_dim = embed_dim.div(num_heads, rounding_mode='trunc')
        # NOTE: for positive operands, floor_div is equivalent to trunc_div
        # (MegEngine provides only floor_div).
head_dim = F.floor_div(embed_dim, num_heads)
else:
head_dim = embed_dim // num_heads
assert head_dim * num_heads == embed_dim, "embed_dim must be divisible by num_heads"
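    # Pre-compute the 1/sqrt(head_dim) scaling factor from "Attention Is All
    # You Need"; it is folded into q below so the q @ k^T logits keep unit variance.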
scaling = float(head_dim) ** -0.5
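    # This port implements only the code path exercised by the asserts below:
    # packed in-projection weights, attention weights always returned, and no
    # support for attn_mask, bias_k/bias_v, or add_zero_attn.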
assert not use_separate_proj_weight
assert need_weights
assert attn_mask is None
assert bias_k is None and bias_v is None
assert not add_zero_attn
    # Inlined in_proj for the query: rows [0:embed_dim) of in_proj_weight/in_proj_bias
_b = in_proj_bias
_start = 0
_end = embed_dim
_w = in_proj_weight[_start:_end, :]
if _b is not None:
_b = _b[_start:_end]
q = F.linear(query, _w, _b)
    # Inlined in_proj for the key: rows [embed_dim:2*embed_dim) of in_proj_weight/in_proj_bias
_b = in_proj_bias
_start = embed_dim
_end = embed_dim * 2
_w = in_proj_weight[_start:_end, :]
if _b is not None:
_b = _b[_start:_end]
k = F.linear(key, _w, _b)
    # Inlined in_proj for the value: rows [2*embed_dim:) of in_proj_weight/in_proj_bias
_b = in_proj_bias
_start = embed_dim * 2
_end = None
_w = in_proj_weight[_start:, :]
if _b is not None:
_b = _b[_start:]
v = F.linear(value, _w, _b)
q = q * scaling
raw_v = v
raw_v = raw_v.reshape(-1, bsz, num_heads, head_dim)
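    # Keep the projected values in per-head layout (S, N, num_heads, head_dim);
    # the proj_only fast path below returns them without running attention.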
if proj_only:
return query, None, raw_v
# convert ByteTensor key_padding_mask to bool
if key_padding_mask is not None and key_padding_mask.dtype == np.uint8:
warnings.warn(
"Byte tensor for key_padding_mask in nn.MultiheadAttention is deprecated. Use bool tensor instead."
)
        key_padding_mask = key_padding_mask.astype(bool)  # np.bool was removed in NumPy >= 1.24
#def _pad_last_dim_right_only(tensor):
# '''
# To replace with torch.nn.functional.pad(tensor, (0, 1))
# '''
# return F.concat([tensor, F.expand_dims(F.zeros(tensor.shape[:-1]), axis=-1)], axis=-1)
#q = q.contiguous().view(tgt_len, bsz * num_heads, head_dim).transpose(0, 1)
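    # Fold the heads into the batch dimension: (L, N, E) -> (L, N*H, head_dim)
    # -> (N*H, L, head_dim), so a single batched matmul evaluates every head.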
q = q.reshape(tgt_len, bsz * num_heads, head_dim).transpose(1, 0, 2)
if k is not None:
#k = k.contiguous().view(-1, bsz * num_heads, head_dim).transpose(0, 1)
k = k.reshape(-1, bsz * num_heads, head_dim).transpose(1, 0, 2)
if v is not None:
#v = v.contiguous().view(-1, bsz * num_heads, head_dim).transpose(0, 1)
v = v.reshape(-1, bsz * num_heads, head_dim).transpose(1, 0, 2)
if static_k is not None:
assert static_k.shape[0] == bsz * num_heads
assert static_k.shape[2] == head_dim
k = static_k
if static_v is not None:
assert static_v.shape[0] == bsz * num_heads
assert static_v.shape[2] == head_dim
v = static_v
src_len = k.shape[1]
if key_padding_mask is not None:
assert key_padding_mask.shape[1] == src_len
#attn_output_weights = torch.bmm(q, k.transpose(1, 2))
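    # Raw attention logits for all heads:
    # (N*H, L, head_dim) @ (N*H, head_dim, S) -> (N*H, L, S).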
attn_output_weights = F.matmul(q, k.transpose(0, 2, 1))
assert list(attn_output_weights.shape) == [bsz * num_heads, tgt_len, src_len]
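    # Broadcast the (N, S) padding mask over heads and query positions,
    # (N, S) -> (N, 1, 1, S), and fill masked scores with -inf so softmax
    # assigns them zero weight.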
if key_padding_mask is not None:
attn_output_weights = attn_output_weights.reshape(bsz, num_heads, tgt_len, src_len)
key_padding_mask = F.expand_dims(F.expand_dims(key_padding_mask, axis=1), axis=2)
attn_output_weights = safe_masked_fill(attn_output_weights, key_padding_mask, float("-inf"))
attn_output_weights = attn_output_weights.reshape(bsz * num_heads, tgt_len, src_len)
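    # Keep the pre-softmax logits as well; callers can reuse them, e.g. for
    # losses computed on raw attention scores.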
attn_output_weights_no_softmax = attn_output_weights
    attn_output_weights = F.softmax(attn_output_weights, axis=-1)
    attn_output_weights = F.dropout(attn_output_weights, dropout_p, training=training)
    attn_output = F.matmul(attn_output_weights, v)
assert attn_output.shape == (bsz * num_heads, tgt_len, head_dim)
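    # Un-fold the heads and merge them back into the embedding dimension:
    # (N*H, L, head_dim) -> (L, N*H, head_dim) -> (L, N, E), then apply the
    # output projection.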
attn_output = F.transpose(attn_output, (1, 0, 2)).reshape(tgt_len, bsz, embed_dim)
    attn_output = F.nn.linear(attn_output, out_proj_weight, out_proj_bias)
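# ---------------------------------------------------------------------------
# Minimal smoke test -- an illustrative sketch, not part of the original port.
# It assumes the full code path mirrors the ``proj_only`` branch and returns a
# ``(attn_output, attn_weights, raw_v)`` triple; the final ``return`` statement
# falls outside this excerpt, so the result is guarded against ``None``.
# Shapes follow the docstring: query (L, N, E), key/value (S, N, E), packed
# in_proj_weight (3*E, E), out_proj_weight (E, E). Note the module uses a
# relative import (``.utility``), so this block only runs when the file is
# importable as part of its package.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    rng = np.random.default_rng(0)
    L_tgt, S_src, N, E, H = 4, 5, 2, 16, 4

    def _t(a):
        # hypothetical helper: numpy array -> float32 MegEngine tensor
        return mge.Tensor(a.astype(np.float32))

    out = multi_head_attention_forward(
        query=_t(rng.standard_normal((L_tgt, N, E))),
        key=_t(rng.standard_normal((S_src, N, E))),
        value=_t(rng.standard_normal((S_src, N, E))),
        embed_dim_to_check=E,
        num_heads=H,
        in_proj_weight=_t(0.02 * rng.standard_normal((3 * E, E))),
        in_proj_bias=_t(np.zeros(3 * E)),
        bias_k=None,
        bias_v=None,
        add_zero_attn=False,
        dropout_p=0.0,
        out_proj_weight=_t(0.02 * rng.standard_normal((E, E))),
        out_proj_bias=_t(np.zeros(E)),
        training=False,
    )
    if out is not None:
        # under the stated assumption: attn_output (L, N, E), attn_weights, raw_v (S, N, H, E//H)
        print([getattr(t, "shape", None) for t in out])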
# Some code is modified from pytorch
# pytorch is licensed under BSD
# From PyTorch:
# Copyright (c) 2016- Facebook, Inc (<NAME>)
# Copyright (c) 2014- Facebook, Inc (<NAME>)
# Copyright (c) 2011-2014 Idiap Research Institute (<NAME>)
# Copyright (c) 2012-2014 Deepmind Technologies (Koray Kavukcuoglu)
# Copyright (c) 2011-2012 NEC Laboratories America (Koray Kavukcuoglu)
# Copyright (c) 2011-2013 NYU (<NAME>)
# Copyright (c) 2006-2010 NEC Laboratories America (<NAME>, <NAME>, <NAME>, <NAME>)
# Copyright (c) 2006 Idiap Research Institute (<NAME>)
# Copyright (c) 2001-2004 Idiap Research Institute (<NAME>, <NAME>, <NAME>)
# From Caffe2:
# Copyright (c) 2016-present, Facebook Inc. All rights reserved.
# All contributions by Facebook:
# Copyright (c) 2016 Facebook Inc.
# All contributions by Google:
# Copyright (c) 2015 Google Inc.
# All rights reserved.
# All contributions by Yang<NAME>:
# Copyright (c) 2015 Yang<NAME>
# All rights reserved.
# All contributions by Kakao Brain:
# Copyright 2019-2020 Kakao Brain
# All contributions from Caffe:
# Copyright(c) 2013, 2014, 2015, the respective contributors
# All rights reserved.
# All other contributions:
# Copyright(c) 2015, 2016 the respective contributors
# All rights reserved.
# Caffe2 uses a copyright model similar to Caffe: each contributor holds
# copyright over their contributions to Caffe2. The project versioning records
# all such contribution and copyright details. If a contributor wants to further
# mark their specific copyright on a particular contribution, they should
# indicate their copyright solely in the commit message of the change when it is
# committed.
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the names of Facebook, Deepmind Technologies, NYU, NEC Laboratories America
# and IDIAP Research Institute nor the names of its contributors may be
# used to endorse or promote products derived from this software without
# specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import megengine as mge
import megengine.module as M
from megengine import functional as F
from megengine import Parameter
from megengine.module.init import xavier_uniform_, zeros_
from typing import List, Tuple, Dict, Optional
import numpy as np
from .utility import safe_masked_fill, has_nan_or_inf
def multi_head_attention_forward(
query: mge.Tensor,
key: mge.Tensor,
value: mge.Tensor,
embed_dim_to_check: int,
num_heads: int,
in_proj_weight: mge.Tensor,
in_proj_bias: Optional[mge.Tensor],
bias_k: Optional[mge.Tensor],
bias_v: Optional[mge.Tensor],
add_zero_attn: bool,
dropout_p: float,
out_proj_weight: mge.Tensor,
out_proj_bias: Optional[mge.Tensor],
training: bool = True,
key_padding_mask: Optional[mge.Tensor] = None,
need_weights: bool = True,
attn_mask: Optional[mge.Tensor] = None,
use_separate_proj_weight: bool = False,
q_proj_weight: Optional[mge.Tensor] = None,
k_proj_weight: Optional[mge.Tensor] = None,
v_proj_weight: Optional[mge.Tensor] = None,
static_k: Optional[mge.Tensor] = None,
static_v: Optional[mge.Tensor] = None,
proj_only: bool = False
) -> Tuple[mge.Tensor, Optional[mge.Tensor]]:
r"""
Args:
query, key, value: map a query and a set of key-value pairs to an output.
See "Attention Is All You Need" for more details.
embed_dim_to_check: total dimension of the model.
num_heads: parallel attention heads.
in_proj_weight, in_proj_bias: input projection weight and bias.
bias_k, bias_v: bias of the key and value sequences to be added at dim=0.
add_zero_attn: add a new batch of zeros to the key and
value sequences at dim=1.
dropout_p: probability of an element to be zeroed.
out_proj_weight, out_proj_bias: the output projection weight and bias.
training: apply dropout if is ``True``.
key_padding_mask: if provided, specified padding elements in the key will
be ignored by the attention. This is an binary mask. When the value is True,
the corresponding value on the attention layer will be filled with -inf.
need_weights: output attn_output_weights.
attn_mask: 2D or 3D mask that prevents attention to certain positions. A 2D mask will be broadcasted for all
the batches while a 3D mask allows to specify a different mask for the entries of each batch.
    use_separate_proj_weight: the function accepts the projection weights for query, key,
        and value as separate tensors. If ``False``, in_proj_weight is used, which is
        a concatenation of q_proj_weight, k_proj_weight and v_proj_weight.
q_proj_weight, k_proj_weight, v_proj_weight, in_proj_bias: input projection weight and bias.
static_k, static_v: static key and value used for attention operators.
Shape:
Inputs:
- query: :math:`(L, N, E)` where L is the target sequence length, N is the batch size, E is
the embedding dimension.
- key: :math:`(S, N, E)`, where S is the source sequence length, N is the batch size, E is
the embedding dimension.
- value: :math:`(S, N, E)` where S is the source sequence length, N is the batch size, E is
the embedding dimension.
- key_padding_mask: :math:`(N, S)` where N is the batch size, S is the source sequence length.
If a ByteTensor is provided, the non-zero positions will be ignored while the zero positions
will be unchanged. If a BoolTensor is provided, the positions with the
value of ``True`` will be ignored while the position with the value of ``False`` will be unchanged.
- attn_mask: 2D mask :math:`(L, S)` where L is the target sequence length, S is the source sequence length.
3D mask :math:`(N*num_heads, L, S)` where N is the batch size, L is the target sequence length,
        S is the source sequence length. attn_mask specifies, for each query position, which key
        positions it may attend. If a ByteTensor is provided, the non-zero positions are not allowed to attend
while the zero positions will be unchanged. If a BoolTensor is provided, positions with ``True``
are not allowed to attend while ``False`` values will be unchanged. If a FloatTensor
is provided, it will be added to the attention weight.
- static_k: :math:`(N*num_heads, S, E/num_heads)`, where S is the source sequence length,
N is the batch size, E is the embedding dimension. E/num_heads is the head dimension.
- static_v: :math:`(N*num_heads, S, E/num_heads)`, where S is the source sequence length,
N is the batch size, E is the embedding dimension. E/num_heads is the head dimension.
Outputs:
- attn_output: :math:`(L, N, E)` where L is the target sequence length, N is the batch size,
E is the embedding dimension.
        - attn_output_weights: :math:`(N, num\_heads, L, S)` where N is the batch size, num_heads is the
          number of attention heads, L is the target sequence length, S is the source sequence length.
          Note that this implementation returns the per-head attention weights before softmax.
"""
tgt_len, bsz, embed_dim = query.shape
assert embed_dim == embed_dim_to_check
# allow MHA to have different sizes for the feature dimension
assert key.shape[0] == value.shape[0] and key.shape[1] == value.shape[1]
if isinstance(embed_dim, mge.Tensor):
# embed_dim can be a tensor when JIT tracing
#head_dim = embed_dim.div(num_heads, rounding_mode='trunc')
#NOTE: when positive number, floor_div is equivalent to trunc_div (in megengine only floor_div is available)
head_dim = F.floor_div(embed_dim, num_heads)
else:
head_dim = embed_dim // num_heads
assert head_dim * num_heads == embed_dim, "embed_dim must be divisible by num_heads"
scaling = float(head_dim) ** -0.5
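    # e.g. with embed_dim=512 and num_heads=8 (illustrative numbers), head_dim=64
    # and the q.k dot products below are scaled by 1/sqrt(64) = 0.125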
assert not use_separate_proj_weight
assert need_weights
assert attn_mask is None
assert bias_k is None and bias_v is None
assert not add_zero_attn
# This is inline in_proj function with in_proj_weight and in_proj_bias
_b = in_proj_bias
_start = 0
_end = embed_dim
_w = in_proj_weight[_start:_end, :]
if _b is not None:
_b = _b[_start:_end]
q = F.linear(query, _w, _b)
# This is inline in_proj function with in_proj_weight and in_proj_bias
_b = in_proj_bias
_start = embed_dim
_end = embed_dim * 2
_w = in_proj_weight[_start:_end, :]
if _b is not None:
_b = _b[_start:_end]
k = F.linear(key, _w, _b)
# This is inline in_proj function with in_proj_weight and in_proj_bias
_b = in_proj_bias
_start = embed_dim * 2
_end = None
_w = in_proj_weight[_start:, :]
if _b is not None:
_b = _b[_start:]
v = F.linear(value, _w, _b)
q = q * scaling
raw_v = v
raw_v = raw_v.reshape(-1, bsz, num_heads, head_dim)
if proj_only:
return query, None, raw_v
# convert ByteTensor key_padding_mask to bool
if key_padding_mask is not None and key_padding_mask.dtype == np.uint8:
warnings.warn(
"Byte tensor for key_padding_mask in nn.MultiheadAttention is deprecated. Use bool tensor instead."
)
        key_padding_mask = key_padding_mask.astype(bool)  # np.bool is deprecated in modern NumPy
#def _pad_last_dim_right_only(tensor):
# '''
# To replace with torch.nn.functional.pad(tensor, (0, 1))
# '''
# return F.concat([tensor, F.expand_dims(F.zeros(tensor.shape[:-1]), axis=-1)], axis=-1)
#q = q.contiguous().view(tgt_len, bsz * num_heads, head_dim).transpose(0, 1)
q = q.reshape(tgt_len, bsz * num_heads, head_dim).transpose(1, 0, 2)
if k is not None:
#k = k.contiguous().view(-1, bsz * num_heads, head_dim).transpose(0, 1)
k = k.reshape(-1, bsz * num_heads, head_dim).transpose(1, 0, 2)
if v is not None:
#v = v.contiguous().view(-1, bsz * num_heads, head_dim).transpose(0, 1)
v = v.reshape(-1, bsz * num_heads, head_dim).transpose(1, 0, 2)
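    # q/k/v are now (N*num_heads, L_or_S, head_dim): the batch and head axes are
    # fused so a single batched matmul scores every head at once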
if static_k is not None:
assert static_k.shape[0] == bsz * num_heads
assert static_k.shape[2] == head_dim
k = static_k
if static_v is not None:
assert static_v.shape[0] == bsz * num_heads
assert static_v.shape[2] == head_dim
v = static_v
src_len = k.shape[1]
if key_padding_mask is not None:
assert key_padding_mask.shape[1] == src_len
#attn_output_weights = torch.bmm(q, k.transpose(1, 2))
attn_output_weights = F.matmul(q, k.transpose(0, 2, 1))
assert list(attn_output_weights.shape) == [bsz * num_heads, tgt_len, src_len]
if key_padding_mask is not None:
attn_output_weights = attn_output_weights.reshape(bsz, num_heads, tgt_len, src_len)
key_padding_mask = F.expand_dims(F.expand_dims(key_padding_mask, axis=1), axis=2)
attn_output_weights = safe_masked_fill(attn_output_weights, key_padding_mask, float("-inf"))
attn_output_weights = attn_output_weights.reshape(bsz * num_heads, tgt_len, src_len)
attn_output_weights_no_softmax = attn_output_weights
attn_output_weights = F.softmax(attn_output_weights, axis=-1)
attn_output_weights = F.dropout(attn_output_weights, dropout_p, training=training)
attn_output = F.matmul(attn_output_weights, v)
assert attn_output.shape == (bsz * num_heads, tgt_len, head_dim)
attn_output = F.transpose(attn_output, (1, 0, 2)).reshape(tgt_len, bsz, embed_dim)
attn_output = F.nn.linear(attn_output, out_proj_weight, out_proj_bias)
    # reshape the pre-softmax attention weights to (N, num_heads, L, S); unlike the
    # PyTorch reference, they are returned per head rather than averaged over heads
    attn_output_weights = attn_output_weights_no_softmax.reshape(bsz, num_heads, tgt_len, src_len)
return attn_output, attn_output_weights, raw_v
class MultiheadAttention(M.Module):
r"""Allows the model to jointly attend to information
from different representation subspaces.
See `Attention Is All You Need <https://arxiv.org/abs/1706.03762>`_
.. math::
\text{MultiHead}(Q, K, V) = \text{Concat}(head_1,\dots,head_h)W^O
where :math:`head_i = \text{Attention}(QW_i^Q, KW_i^K, VW_i^V)`.
Args:
embed_dim: total dimension of the model.
num_heads: parallel attention heads.
dropout: a Dropout layer on attn_output_weights. Default: 0.0.
bias: add bias as module parameter. Default: True.
add_bias_kv: add bias to the key and value sequences at dim=0.
add_zero_attn: add a new batch of zeros to the key and
value sequences at dim=1.
kdim: total number of features in key. Default: None.
vdim: total number of features in value. Default: None.
batch_first: If ``True``, then the input and output tensors are provided
as (batch, seq, feature). Default: ``False`` (seq, batch, feature).
Note that if :attr:`kdim` and :attr:`vdim` are None, they will be set
to :attr:`embed_dim` such that query, key, and value have the same
number of features.
Examples::
        >>> multihead_attn = MultiheadAttention(embed_dim, num_heads)
>>> attn_output, attn_output_weights = multihead_attn(query, key, value)
"""
__constants__ = ['batch_first']
bias_k: Optional[mge.Tensor]
bias_v: Optional[mge.Tensor]
def __init__(self, embed_dim, num_heads, dropout=0., bias=True, add_bias_kv=False, add_zero_attn=False,
kdim=None, vdim=None, batch_first=False):
super(MultiheadAttention, self).__init__()
self.embed_dim = embed_dim
self.kdim = kdim if kdim is not None else embed_dim
self.vdim = vdim if vdim is not None else embed_dim
# True By default
self._qkv_same_embed_dim = self.kdim == embed_dim and self.vdim == embed_dim
self.num_heads = num_heads
self.dropout = dropout
# False By default
self.batch_first = batch_first
self.head_dim = embed_dim // num_heads
assert self.head_dim * num_heads == self.embed_dim, "embed_dim must be divisible by num_heads"
self.in_proj_weight = Parameter(F.zeros((3 * embed_dim, embed_dim)))
if bias:
self.in_proj_bias = Parameter(F.zeros((3 * embed_dim,)))
else:
self.in_proj_bias = None
self.out_proj = | M.Linear(embed_dim, embed_dim, bias=bias) | megengine.module.Linear |
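A minimal driver for the row above, as a sketch: it wires the module's packed parameters into multi_head_attention_forward. The sizes and the `attention` import path are assumptions for illustration.

import megengine as mge
# hypothetical import path; adjust to wherever this file lives in the project
from attention import MultiheadAttention, multi_head_attention_forward

embed_dim, num_heads = 512, 8                              # assumed sizes
mha = MultiheadAttention(embed_dim, num_heads)
query = mge.random.normal(size=(10, 4, embed_dim))         # (L, N, E)
key = value = mge.random.normal(size=(12, 4, embed_dim))   # (S, N, E)
attn_output, attn_weights, raw_v = multi_head_attention_forward(
    query, key, value, embed_dim, num_heads,
    mha.in_proj_weight, mha.in_proj_bias,
    None, None, False, 0.0,                                # bias_k, bias_v, add_zero_attn, dropout_p
    mha.out_proj.weight, mha.out_proj.bias, training=False)
print(attn_output.shape)    # (10, 4, 512)
print(attn_weights.shape)   # (4, 8, 10, 12): per-head, pre-softmax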
# Some code is modified from pytorch
# pytorch is licensed under BSD
# From PyTorch:
# Copyright (c) 2016- Facebook, Inc (<NAME>)
# Copyright (c) 2014- Facebook, Inc (<NAME>)
# Copyright (c) 2011-2014 Idiap Research Institute (<NAME>)
# Copyright (c) 2012-2014 Deepmind Technologies (Koray Kavukcuoglu)
# Copyright (c) 2011-2012 NEC Laboratories America (Koray Kavukcuoglu)
# Copyright (c) 2011-2013 NYU (<NAME>)
# Copyright (c) 2006-2010 NEC Laboratories America (<NAME>, <NAME>, <NAME>, <NAME>)
# Copyright (c) 2006 Idiap Research Institute (<NAME>)
# Copyright (c) 2001-2004 Idiap Research Institute (<NAME>, <NAME>, <NAME>)
# From Caffe2:
# Copyright (c) 2016-present, Facebook Inc. All rights reserved.
# All contributions by Facebook:
# Copyright (c) 2016 Facebook Inc.
# All contributions by Google:
# Copyright (c) 2015 Google Inc.
# All rights reserved.
# All contributions by Yang<NAME>:
# Copyright (c) 2015 Yang<NAME>
# All rights reserved.
# All contributions by Kakao Brain:
# Copyright 2019-2020 Kakao Brain
# All contributions from Caffe:
# Copyright(c) 2013, 2014, 2015, the respective contributors
# All rights reserved.
# All other contributions:
# Copyright(c) 2015, 2016 the respective contributors
# All rights reserved.
# Caffe2 uses a copyright model similar to Caffe: each contributor holds
# copyright over their contributions to Caffe2. The project versioning records
# all such contribution and copyright details. If a contributor wants to further
# mark their specific copyright on a particular contribution, they should
# indicate their copyright solely in the commit message of the change when it is
# committed.
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the names of Facebook, Deepmind Technologies, NYU, NEC Laboratories America
# and IDIAP Research Institute nor the names of its contributors may be
# used to endorse or promote products derived from this software without
# specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import megengine as mge
import megengine.module as M
from megengine import functional as F
from megengine import Parameter
from megengine.module.init import xavier_uniform_, zeros_
from typing import List, Tuple, Dict, Optional
import warnings
import numpy as np
from .utility import safe_masked_fill, has_nan_or_inf
def multi_head_attention_forward(
query: mge.Tensor,
key: mge.Tensor,
value: mge.Tensor,
embed_dim_to_check: int,
num_heads: int,
in_proj_weight: mge.Tensor,
in_proj_bias: Optional[mge.Tensor],
bias_k: Optional[mge.Tensor],
bias_v: Optional[mge.Tensor],
add_zero_attn: bool,
dropout_p: float,
out_proj_weight: mge.Tensor,
out_proj_bias: Optional[mge.Tensor],
training: bool = True,
key_padding_mask: Optional[mge.Tensor] = None,
need_weights: bool = True,
attn_mask: Optional[mge.Tensor] = None,
use_separate_proj_weight: bool = False,
q_proj_weight: Optional[mge.Tensor] = None,
k_proj_weight: Optional[mge.Tensor] = None,
v_proj_weight: Optional[mge.Tensor] = None,
static_k: Optional[mge.Tensor] = None,
static_v: Optional[mge.Tensor] = None,
proj_only: bool = False
) -> Tuple[mge.Tensor, Optional[mge.Tensor], mge.Tensor]:
r"""
Args:
query, key, value: map a query and a set of key-value pairs to an output.
See "Attention Is All You Need" for more details.
embed_dim_to_check: total dimension of the model.
num_heads: parallel attention heads.
in_proj_weight, in_proj_bias: input projection weight and bias.
bias_k, bias_v: bias of the key and value sequences to be added at dim=0.
add_zero_attn: add a new batch of zeros to the key and
value sequences at dim=1.
dropout_p: probability of an element to be zeroed.
out_proj_weight, out_proj_bias: the output projection weight and bias.
    training: apply dropout if ``True``.
    key_padding_mask: if provided, specified padding elements in the key will
        be ignored by the attention. This is a binary mask. When the value is
        ``True``, the corresponding attention weight is filled with -inf.
need_weights: output attn_output_weights.
attn_mask: 2D or 3D mask that prevents attention to certain positions. A 2D mask will be broadcasted for all
the batches while a 3D mask allows to specify a different mask for the entries of each batch.
    use_separate_proj_weight: the function accepts the projection weights for query, key,
        and value as separate tensors. If ``False``, in_proj_weight is used, which is
        a concatenation of q_proj_weight, k_proj_weight and v_proj_weight.
q_proj_weight, k_proj_weight, v_proj_weight, in_proj_bias: input projection weight and bias.
static_k, static_v: static key and value used for attention operators.
Shape:
Inputs:
- query: :math:`(L, N, E)` where L is the target sequence length, N is the batch size, E is
the embedding dimension.
- key: :math:`(S, N, E)`, where S is the source sequence length, N is the batch size, E is
the embedding dimension.
- value: :math:`(S, N, E)` where S is the source sequence length, N is the batch size, E is
the embedding dimension.
- key_padding_mask: :math:`(N, S)` where N is the batch size, S is the source sequence length.
If a ByteTensor is provided, the non-zero positions will be ignored while the zero positions
will be unchanged. If a BoolTensor is provided, the positions with the
value of ``True`` will be ignored while the position with the value of ``False`` will be unchanged.
- attn_mask: 2D mask :math:`(L, S)` where L is the target sequence length, S is the source sequence length.
3D mask :math:`(N*num_heads, L, S)` where N is the batch size, L is the target sequence length,
        S is the source sequence length. attn_mask specifies, for each query position, which key
        positions it may attend. If a ByteTensor is provided, the non-zero positions are not allowed to attend
while the zero positions will be unchanged. If a BoolTensor is provided, positions with ``True``
are not allowed to attend while ``False`` values will be unchanged. If a FloatTensor
is provided, it will be added to the attention weight.
- static_k: :math:`(N*num_heads, S, E/num_heads)`, where S is the source sequence length,
N is the batch size, E is the embedding dimension. E/num_heads is the head dimension.
- static_v: :math:`(N*num_heads, S, E/num_heads)`, where S is the source sequence length,
N is the batch size, E is the embedding dimension. E/num_heads is the head dimension.
Outputs:
- attn_output: :math:`(L, N, E)` where L is the target sequence length, N is the batch size,
E is the embedding dimension.
        - attn_output_weights: :math:`(N, num\_heads, L, S)` where N is the batch size, num_heads is the
          number of attention heads, L is the target sequence length, S is the source sequence length.
          Note that this implementation returns the per-head attention weights before softmax.
"""
tgt_len, bsz, embed_dim = query.shape
assert embed_dim == embed_dim_to_check
# allow MHA to have different sizes for the feature dimension
assert key.shape[0] == value.shape[0] and key.shape[1] == value.shape[1]
if isinstance(embed_dim, mge.Tensor):
# embed_dim can be a tensor when JIT tracing
#head_dim = embed_dim.div(num_heads, rounding_mode='trunc')
#NOTE: when positive number, floor_div is equivalent to trunc_div (in megengine only floor_div is available)
head_dim = F.floor_div(embed_dim, num_heads)
else:
head_dim = embed_dim // num_heads
assert head_dim * num_heads == embed_dim, "embed_dim must be divisible by num_heads"
scaling = float(head_dim) ** -0.5
assert not use_separate_proj_weight
assert need_weights
assert attn_mask is None
assert bias_k is None and bias_v is None
assert not add_zero_attn
# This is inline in_proj function with in_proj_weight and in_proj_bias
_b = in_proj_bias
_start = 0
_end = embed_dim
_w = in_proj_weight[_start:_end, :]
if _b is not None:
_b = _b[_start:_end]
q = F.linear(query, _w, _b)
# This is inline in_proj function with in_proj_weight and in_proj_bias
_b = in_proj_bias
_start = embed_dim
_end = embed_dim * 2
_w = in_proj_weight[_start:_end, :]
if _b is not None:
_b = _b[_start:_end]
k = F.linear(key, _w, _b)
# This is inline in_proj function with in_proj_weight and in_proj_bias
_b = in_proj_bias
_start = embed_dim * 2
_end = None
_w = in_proj_weight[_start:, :]
if _b is not None:
_b = _b[_start:]
v = F.linear(value, _w, _b)
q = q * scaling
raw_v = v
raw_v = raw_v.reshape(-1, bsz, num_heads, head_dim)
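    # raw_v keeps the value projection unfused, shaped (S, N, num_heads, head_dim),
    # for callers that need it independently of the attention output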
if proj_only:
return query, None, raw_v
# convert ByteTensor key_padding_mask to bool
if key_padding_mask is not None and key_padding_mask.dtype == np.uint8:
warnings.warn(
"Byte tensor for key_padding_mask in nn.MultiheadAttention is deprecated. Use bool tensor instead."
)
        key_padding_mask = key_padding_mask.astype(bool)  # np.bool is deprecated in modern NumPy
#def _pad_last_dim_right_only(tensor):
# '''
# To replace with torch.nn.functional.pad(tensor, (0, 1))
# '''
# return F.concat([tensor, F.expand_dims(F.zeros(tensor.shape[:-1]), axis=-1)], axis=-1)
#q = q.contiguous().view(tgt_len, bsz * num_heads, head_dim).transpose(0, 1)
q = q.reshape(tgt_len, bsz * num_heads, head_dim).transpose(1, 0, 2)
if k is not None:
#k = k.contiguous().view(-1, bsz * num_heads, head_dim).transpose(0, 1)
k = k.reshape(-1, bsz * num_heads, head_dim).transpose(1, 0, 2)
if v is not None:
#v = v.contiguous().view(-1, bsz * num_heads, head_dim).transpose(0, 1)
v = v.reshape(-1, bsz * num_heads, head_dim).transpose(1, 0, 2)
if static_k is not None:
assert static_k.shape[0] == bsz * num_heads
assert static_k.shape[2] == head_dim
k = static_k
if static_v is not None:
assert static_v.shape[0] == bsz * num_heads
assert static_v.shape[2] == head_dim
v = static_v
src_len = k.shape[1]
if key_padding_mask is not None:
assert key_padding_mask.shape[1] == src_len
#attn_output_weights = torch.bmm(q, k.transpose(1, 2))
attn_output_weights = F.matmul(q, k.transpose(0, 2, 1))
assert list(attn_output_weights.shape) == [bsz * num_heads, tgt_len, src_len]
if key_padding_mask is not None:
attn_output_weights = attn_output_weights.reshape(bsz, num_heads, tgt_len, src_len)
key_padding_mask = F.expand_dims(F.expand_dims(key_padding_mask, axis=1), axis=2)
attn_output_weights = safe_masked_fill(attn_output_weights, key_padding_mask, float("-inf"))
attn_output_weights = attn_output_weights.reshape(bsz * num_heads, tgt_len, src_len)
attn_output_weights_no_softmax = attn_output_weights
attn_output_weights = F.softmax(attn_output_weights, axis=-1)
attn_output_weights = F.dropout(attn_output_weights, dropout_p, training=training)
attn_output = F.matmul(attn_output_weights, v)
assert attn_output.shape == (bsz * num_heads, tgt_len, head_dim)
attn_output = F.transpose(attn_output, (1, 0, 2)).reshape(tgt_len, bsz, embed_dim)
attn_output = F.nn.linear(attn_output, out_proj_weight, out_proj_bias)
    # reshape the pre-softmax attention weights to (N, num_heads, L, S); unlike the
    # PyTorch reference, they are returned per head rather than averaged over heads
    attn_output_weights = attn_output_weights_no_softmax.reshape(bsz, num_heads, tgt_len, src_len)
return attn_output, attn_output_weights, raw_v
class MultiheadAttention(M.Module):
r"""Allows the model to jointly attend to information
from different representation subspaces.
See `Attention Is All You Need <https://arxiv.org/abs/1706.03762>`_
.. math::
\text{MultiHead}(Q, K, V) = \text{Concat}(head_1,\dots,head_h)W^O
where :math:`head_i = \text{Attention}(QW_i^Q, KW_i^K, VW_i^V)`.
Args:
embed_dim: total dimension of the model.
num_heads: parallel attention heads.
dropout: a Dropout layer on attn_output_weights. Default: 0.0.
bias: add bias as module parameter. Default: True.
add_bias_kv: add bias to the key and value sequences at dim=0.
add_zero_attn: add a new batch of zeros to the key and
value sequences at dim=1.
kdim: total number of features in key. Default: None.
vdim: total number of features in value. Default: None.
batch_first: If ``True``, then the input and output tensors are provided
as (batch, seq, feature). Default: ``False`` (seq, batch, feature).
Note that if :attr:`kdim` and :attr:`vdim` are None, they will be set
to :attr:`embed_dim` such that query, key, and value have the same
number of features.
Examples::
        >>> multihead_attn = MultiheadAttention(embed_dim, num_heads)
>>> attn_output, attn_output_weights = multihead_attn(query, key, value)
"""
__constants__ = ['batch_first']
bias_k: Optional[mge.Tensor]
bias_v: Optional[mge.Tensor]
def __init__(self, embed_dim, num_heads, dropout=0., bias=True, add_bias_kv=False, add_zero_attn=False,
kdim=None, vdim=None, batch_first=False):
super(MultiheadAttention, self).__init__()
self.embed_dim = embed_dim
self.kdim = kdim if kdim is not None else embed_dim
self.vdim = vdim if vdim is not None else embed_dim
# True By default
self._qkv_same_embed_dim = self.kdim == embed_dim and self.vdim == embed_dim
self.num_heads = num_heads
self.dropout = dropout
# False By default
self.batch_first = batch_first
self.head_dim = embed_dim // num_heads
assert self.head_dim * num_heads == self.embed_dim, "embed_dim must be divisible by num_heads"
self.in_proj_weight = Parameter(F.zeros((3 * embed_dim, embed_dim)))
if bias:
self.in_proj_bias = Parameter(F.zeros((3 * embed_dim,)))
else:
self.in_proj_bias = None
self.out_proj = M.Linear(embed_dim, embed_dim, bias=bias)
if add_bias_kv:
self.bias_k = Parameter(F.zeros((1, 1, embed_dim)))
self.bias_v = Parameter(F.zeros((1, 1, embed_dim)))
else:
self.bias_k = self.bias_v = None
self.add_zero_attn = add_zero_attn
self._reset_parameters()
def _reset_parameters(self):
| xavier_uniform_(self.in_proj_weight) | megengine.module.init.xavier_uniform_ |
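A standalone sketch of the reset logic above; the embed size is an assumed example, and zeroing the bias mirrors the usual PyTorch default, which this port presumably follows in the elided lines.

import megengine.functional as F
from megengine import Parameter
from megengine.module.init import xavier_uniform_, zeros_

embed_dim = 512                                        # assumed size
in_proj_weight = Parameter(F.zeros((3 * embed_dim, embed_dim)))
in_proj_bias = Parameter(F.zeros((3 * embed_dim,)))
xavier_uniform_(in_proj_weight)                        # in-place Xavier-uniform fill
zeros_(in_proj_bias)                                   # in-place zero fill
# Xavier bound: |w| <= sqrt(6 / (fan_in + fan_out)) = sqrt(6 / (4 * embed_dim))
print(float(in_proj_weight.max()) <= (6.0 / (4 * embed_dim)) ** 0.5)  # True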
# Some code is modified from pytorch
# pytorch is licensed under BSD
# From PyTorch:
# Copyright (c) 2016- Facebook, Inc (<NAME>)
# Copyright (c) 2014- Facebook, Inc (<NAME>)
# Copyright (c) 2011-2014 Idiap Research Institute (<NAME>)
# Copyright (c) 2012-2014 Deepmind Technologies (Koray Kavukcuoglu)
# Copyright (c) 2011-2012 NEC Laboratories America (Koray Kavukcuoglu)
# Copyright (c) 2011-2013 NYU (<NAME>)
# Copyright (c) 2006-2010 NEC Laboratories America (<NAME>, <NAME>, <NAME>, <NAME>)
# Copyright (c) 2006 Idiap Research Institute (<NAME>)
# Copyright (c) 2001-2004 Idiap Research Institute (<NAME>, <NAME>, <NAME>)
# From Caffe2:
# Copyright (c) 2016-present, Facebook Inc. All rights reserved.
# All contributions by Facebook:
# Copyright (c) 2016 Facebook Inc.
# All contributions by Google:
# Copyright (c) 2015 Google Inc.
# All rights reserved.
# All contributions by Yang<NAME>:
# Copyright (c) 2015 Yang<NAME>
# All rights reserved.
# All contributions by Kakao Brain:
# Copyright 2019-2020 Kakao Brain
# All contributions from Caffe:
# Copyright(c) 2013, 2014, 2015, the respective contributors
# All rights reserved.
# All other contributions:
# Copyright(c) 2015, 2016 the respective contributors
# All rights reserved.
# Caffe2 uses a copyright model similar to Caffe: each contributor holds
# copyright over their contributions to Caffe2. The project versioning records
# all such contribution and copyright details. If a contributor wants to further
# mark their specific copyright on a particular contribution, they should
# indicate their copyright solely in the commit message of the change when it is
# committed.
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the names of Facebook, Deepmind Technologies, NYU, NEC Laboratories America
# and IDIAP Research Institute nor the names of its contributors may be
# used to endorse or promote products derived from this software without
# specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import megengine as mge
import megengine.module as M
from megengine import functional as F
from megengine import Parameter
from megengine.module.init import xavier_uniform_, zeros_
from typing import List, Tuple, Dict, Optional
import warnings
import numpy as np
from .utility import safe_masked_fill, has_nan_or_inf
def multi_head_attention_forward(
query: mge.Tensor,
key: mge.Tensor,
value: mge.Tensor,
embed_dim_to_check: int,
num_heads: int,
in_proj_weight: mge.Tensor,
in_proj_bias: Optional[mge.Tensor],
bias_k: Optional[mge.Tensor],
bias_v: Optional[mge.Tensor],
add_zero_attn: bool,
dropout_p: float,
out_proj_weight: mge.Tensor,
out_proj_bias: Optional[mge.Tensor],
training: bool = True,
key_padding_mask: Optional[mge.Tensor] = None,
need_weights: bool = True,
attn_mask: Optional[mge.Tensor] = None,
use_separate_proj_weight: bool = False,
q_proj_weight: Optional[mge.Tensor] = None,
k_proj_weight: Optional[mge.Tensor] = None,
v_proj_weight: Optional[mge.Tensor] = None,
static_k: Optional[mge.Tensor] = None,
static_v: Optional[mge.Tensor] = None,
proj_only: bool = False
) -> Tuple[mge.Tensor, Optional[mge.Tensor], mge.Tensor]:
r"""
Args:
query, key, value: map a query and a set of key-value pairs to an output.
See "Attention Is All You Need" for more details.
embed_dim_to_check: total dimension of the model.
num_heads: parallel attention heads.
in_proj_weight, in_proj_bias: input projection weight and bias.
bias_k, bias_v: bias of the key and value sequences to be added at dim=0.
add_zero_attn: add a new batch of zeros to the key and
value sequences at dim=1.
dropout_p: probability of an element to be zeroed.
out_proj_weight, out_proj_bias: the output projection weight and bias.
    training: apply dropout if ``True``.
    key_padding_mask: if provided, specified padding elements in the key will
        be ignored by the attention. This is a binary mask. When the value is
        ``True``, the corresponding attention weight is filled with -inf.
need_weights: output attn_output_weights.
attn_mask: 2D or 3D mask that prevents attention to certain positions. A 2D mask will be broadcasted for all
the batches while a 3D mask allows to specify a different mask for the entries of each batch.
    use_separate_proj_weight: the function accepts the projection weights for query, key,
        and value as separate tensors. If ``False``, in_proj_weight is used, which is
        a concatenation of q_proj_weight, k_proj_weight and v_proj_weight.
q_proj_weight, k_proj_weight, v_proj_weight, in_proj_bias: input projection weight and bias.
static_k, static_v: static key and value used for attention operators.
Shape:
Inputs:
- query: :math:`(L, N, E)` where L is the target sequence length, N is the batch size, E is
the embedding dimension.
- key: :math:`(S, N, E)`, where S is the source sequence length, N is the batch size, E is
the embedding dimension.
- value: :math:`(S, N, E)` where S is the source sequence length, N is the batch size, E is
the embedding dimension.
- key_padding_mask: :math:`(N, S)` where N is the batch size, S is the source sequence length.
If a ByteTensor is provided, the non-zero positions will be ignored while the zero positions
will be unchanged. If a BoolTensor is provided, the positions with the
value of ``True`` will be ignored while the position with the value of ``False`` will be unchanged.
- attn_mask: 2D mask :math:`(L, S)` where L is the target sequence length, S is the source sequence length.
3D mask :math:`(N*num_heads, L, S)` where N is the batch size, L is the target sequence length,
        S is the source sequence length. attn_mask specifies, for each query position, which key
        positions it may attend. If a ByteTensor is provided, the non-zero positions are not allowed to attend
while the zero positions will be unchanged. If a BoolTensor is provided, positions with ``True``
are not allowed to attend while ``False`` values will be unchanged. If a FloatTensor
is provided, it will be added to the attention weight.
- static_k: :math:`(N*num_heads, S, E/num_heads)`, where S is the source sequence length,
N is the batch size, E is the embedding dimension. E/num_heads is the head dimension.
- static_v: :math:`(N*num_heads, S, E/num_heads)`, where S is the source sequence length,
N is the batch size, E is the embedding dimension. E/num_heads is the head dimension.
Outputs:
- attn_output: :math:`(L, N, E)` where L is the target sequence length, N is the batch size,
E is the embedding dimension.
        - attn_output_weights: :math:`(N, num\_heads, L, S)` where N is the batch size, num_heads is the
          number of attention heads, L is the target sequence length, S is the source sequence length.
          Note that this implementation returns the per-head attention weights before softmax.
"""
tgt_len, bsz, embed_dim = query.shape
assert embed_dim == embed_dim_to_check
# allow MHA to have different sizes for the feature dimension
assert key.shape[0] == value.shape[0] and key.shape[1] == value.shape[1]
if isinstance(embed_dim, mge.Tensor):
# embed_dim can be a tensor when JIT tracing
#head_dim = embed_dim.div(num_heads, rounding_mode='trunc')
#NOTE: when positive number, floor_div is equivalent to trunc_div (in megengine only floor_div is available)
head_dim = F.floor_div(embed_dim, num_heads)
else:
head_dim = embed_dim // num_heads
assert head_dim * num_heads == embed_dim, "embed_dim must be divisible by num_heads"
scaling = float(head_dim) ** -0.5
assert not use_separate_proj_weight
assert need_weights
assert attn_mask is None
assert bias_k is None and bias_v is None
assert not add_zero_attn
# This is inline in_proj function with in_proj_weight and in_proj_bias
_b = in_proj_bias
_start = 0
_end = embed_dim
_w = in_proj_weight[_start:_end, :]
if _b is not None:
_b = _b[_start:_end]
q = F.linear(query, _w, _b)
# This is inline in_proj function with in_proj_weight and in_proj_bias
_b = in_proj_bias
_start = embed_dim
_end = embed_dim * 2
_w = in_proj_weight[_start:_end, :]
if _b is not None:
_b = _b[_start:_end]
k = F.linear(key, _w, _b)
# This is inline in_proj function with in_proj_weight and in_proj_bias
_b = in_proj_bias
_start = embed_dim * 2
_end = None
_w = in_proj_weight[_start:, :]
if _b is not None:
_b = _b[_start:]
v = F.linear(value, _w, _b)
q = q * scaling
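    # scaling q once here equals dividing the q @ k^T logits by sqrt(head_dim)
    # later, but avoids touching the full (N*num_heads, L, S) score matrix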
raw_v = v
raw_v = raw_v.reshape(-1, bsz, num_heads, head_dim)
if proj_only:
return query, None, raw_v
# convert ByteTensor key_padding_mask to bool
if key_padding_mask is not None and key_padding_mask.dtype == np.uint8:
warnings.warn(
"Byte tensor for key_padding_mask in nn.MultiheadAttention is deprecated. Use bool tensor instead."
)
        key_padding_mask = key_padding_mask.astype(bool)  # np.bool is deprecated in modern NumPy
#def _pad_last_dim_right_only(tensor):
# '''
# To replace with torch.nn.functional.pad(tensor, (0, 1))
# '''
# return F.concat([tensor, F.expand_dims(F.zeros(tensor.shape[:-1]), axis=-1)], axis=-1)
#q = q.contiguous().view(tgt_len, bsz * num_heads, head_dim).transpose(0, 1)
q = q.reshape(tgt_len, bsz * num_heads, head_dim).transpose(1, 0, 2)
if k is not None:
#k = k.contiguous().view(-1, bsz * num_heads, head_dim).transpose(0, 1)
k = k.reshape(-1, bsz * num_heads, head_dim).transpose(1, 0, 2)
if v is not None:
#v = v.contiguous().view(-1, bsz * num_heads, head_dim).transpose(0, 1)
v = v.reshape(-1, bsz * num_heads, head_dim).transpose(1, 0, 2)
if static_k is not None:
assert static_k.shape[0] == bsz * num_heads
assert static_k.shape[2] == head_dim
k = static_k
if static_v is not None:
assert static_v.shape[0] == bsz * num_heads
assert static_v.shape[2] == head_dim
v = static_v
src_len = k.shape[1]
if key_padding_mask is not None:
assert key_padding_mask.shape[1] == src_len
#attn_output_weights = torch.bmm(q, k.transpose(1, 2))
attn_output_weights = F.matmul(q, k.transpose(0, 2, 1))
assert list(attn_output_weights.shape) == [bsz * num_heads, tgt_len, src_len]
if key_padding_mask is not None:
attn_output_weights = attn_output_weights.reshape(bsz, num_heads, tgt_len, src_len)
key_padding_mask = F.expand_dims( | F.expand_dims(key_padding_mask, axis=1) | megengine.functional.expand_dims |
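A shape sketch of the masking step above; `F.where` stands in for the project's safe_masked_fill, and all sizes are assumed for illustration.

import numpy as np
import megengine as mge
import megengine.functional as F

mask = mge.tensor(np.array([[False, False, True]]))          # (N=1, S=3); True marks padding
mask4d = F.expand_dims(F.expand_dims(mask, axis=1), axis=2)  # -> (1, 1, 1, 3)
logits = F.zeros((1, 2, 4, 3))                               # (N, num_heads, L, S)
masked = F.where(F.broadcast_to(mask4d, logits.shape),
                 F.full_like(logits, -1e9), logits)          # stand-in for safe_masked_fill
print(masked[0, 0, 0])                                       # [0. 0. -1e9]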
# Some code is modified from pytorch
# pytorch is licensed under BSD
# From PyTorch:
# Copyright (c) 2016- Facebook, Inc (<NAME>)
# Copyright (c) 2014- Facebook, Inc (<NAME>)
# Copyright (c) 2011-2014 Idiap Research Institute (<NAME>)
# Copyright (c) 2012-2014 Deepmind Technologies (Koray Kavukcuoglu)
# Copyright (c) 2011-2012 NEC Laboratories America (Koray Kavukcuoglu)
# Copyright (c) 2011-2013 NYU (<NAME>)
# Copyright (c) 2006-2010 NEC Laboratories America (<NAME>, <NAME>, <NAME>, <NAME>)
# Copyright (c) 2006 Idiap Research Institute (<NAME>)
# Copyright (c) 2001-2004 Idiap Research Institute (<NAME>, <NAME>, <NAME>)
# From Caffe2:
# Copyright (c) 2016-present, Facebook Inc. All rights reserved.
# All contributions by Facebook:
# Copyright (c) 2016 Facebook Inc.
# All contributions by Google:
# Copyright (c) 2015 Google Inc.
# All rights reserved.
# All contributions by Yang<NAME>:
# Copyright (c) 2015 Yang<NAME>
# All rights reserved.
# All contributions by Kakao Brain:
# Copyright 2019-2020 Kakao Brain
# All contributions from Caffe:
# Copyright(c) 2013, 2014, 2015, the respective contributors
# All rights reserved.
# All other contributions:
# Copyright(c) 2015, 2016 the respective contributors
# All rights reserved.
# Caffe2 uses a copyright model similar to Caffe: each contributor holds
# copyright over their contributions to Caffe2. The project versioning records
# all such contribution and copyright details. If a contributor wants to further
# mark their specific copyright on a particular contribution, they should
# indicate their copyright solely in the commit message of the change when it is
# committed.
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the names of Facebook, Deepmind Technologies, NYU, NEC Laboratories America
# and IDIAP Research Institute nor the names of its contributors may be
# used to endorse or promote products derived from this software without
# specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import megengine as mge
import megengine.module as M
from megengine import functional as F
from megengine import Parameter
from megengine.module.init import xavier_uniform_, zeros_
from typing import List, Tuple, Dict, Optional
import warnings
import numpy as np
from .utility import safe_masked_fill, has_nan_or_inf
def multi_head_attention_forward(
query: mge.Tensor,
key: mge.Tensor,
value: mge.Tensor,
embed_dim_to_check: int,
num_heads: int,
in_proj_weight: mge.Tensor,
in_proj_bias: Optional[mge.Tensor],
bias_k: Optional[mge.Tensor],
bias_v: Optional[mge.Tensor],
add_zero_attn: bool,
dropout_p: float,
out_proj_weight: mge.Tensor,
out_proj_bias: Optional[mge.Tensor],
training: bool = True,
key_padding_mask: Optional[mge.Tensor] = None,
need_weights: bool = True,
attn_mask: Optional[mge.Tensor] = None,
use_separate_proj_weight: bool = False,
q_proj_weight: Optional[mge.Tensor] = None,
k_proj_weight: Optional[mge.Tensor] = None,
v_proj_weight: Optional[mge.Tensor] = None,
static_k: Optional[mge.Tensor] = None,
static_v: Optional[mge.Tensor] = None,
proj_only: bool = False
) -> Tuple[mge.Tensor, Optional[mge.Tensor], mge.Tensor]:
r"""
Args:
query, key, value: map a query and a set of key-value pairs to an output.
See "Attention Is All You Need" for more details.
embed_dim_to_check: total dimension of the model.
num_heads: parallel attention heads.
in_proj_weight, in_proj_bias: input projection weight and bias.
bias_k, bias_v: bias of the key and value sequences to be added at dim=0.
add_zero_attn: add a new batch of zeros to the key and
value sequences at dim=1.
dropout_p: probability of an element to be zeroed.
out_proj_weight, out_proj_bias: the output projection weight and bias.
    training: apply dropout if ``True``.
    key_padding_mask: if provided, specified padding elements in the key will
        be ignored by the attention. This is a binary mask. When the value is
        ``True``, the corresponding attention weight is filled with -inf.
need_weights: output attn_output_weights.
attn_mask: 2D or 3D mask that prevents attention to certain positions. A 2D mask will be broadcasted for all
the batches while a 3D mask allows to specify a different mask for the entries of each batch.
    use_separate_proj_weight: the function accepts the projection weights for query, key,
        and value as separate tensors. If ``False``, in_proj_weight is used, which is
        a concatenation of q_proj_weight, k_proj_weight and v_proj_weight.
q_proj_weight, k_proj_weight, v_proj_weight, in_proj_bias: input projection weight and bias.
static_k, static_v: static key and value used for attention operators.
Shape:
Inputs:
- query: :math:`(L, N, E)` where L is the target sequence length, N is the batch size, E is
the embedding dimension.
- key: :math:`(S, N, E)`, where S is the source sequence length, N is the batch size, E is
the embedding dimension.
- value: :math:`(S, N, E)` where S is the source sequence length, N is the batch size, E is
the embedding dimension.
- key_padding_mask: :math:`(N, S)` where N is the batch size, S is the source sequence length.
If a ByteTensor is provided, the non-zero positions will be ignored while the zero positions
will be unchanged. If a BoolTensor is provided, the positions with the
value of ``True`` will be ignored while the position with the value of ``False`` will be unchanged.
- attn_mask: 2D mask :math:`(L, S)` where L is the target sequence length, S is the source sequence length.
3D mask :math:`(N*num_heads, L, S)` where N is the batch size, L is the target sequence length,
        S is the source sequence length. attn_mask specifies, for each query position, which key
        positions it may attend. If a ByteTensor is provided, the non-zero positions are not allowed to attend
while the zero positions will be unchanged. If a BoolTensor is provided, positions with ``True``
are not allowed to attend while ``False`` values will be unchanged. If a FloatTensor
is provided, it will be added to the attention weight.
- static_k: :math:`(N*num_heads, S, E/num_heads)`, where S is the source sequence length,
N is the batch size, E is the embedding dimension. E/num_heads is the head dimension.
- static_v: :math:`(N*num_heads, S, E/num_heads)`, where S is the source sequence length,
N is the batch size, E is the embedding dimension. E/num_heads is the head dimension.
Outputs:
- attn_output: :math:`(L, N, E)` where L is the target sequence length, N is the batch size,
E is the embedding dimension.
        - attn_output_weights: :math:`(N, num\_heads, L, S)` where N is the batch size, num_heads is the
          number of attention heads, L is the target sequence length, S is the source sequence length.
          Note that this implementation returns the per-head attention weights before softmax.
"""
tgt_len, bsz, embed_dim = query.shape
assert embed_dim == embed_dim_to_check
# allow MHA to have different sizes for the feature dimension
assert key.shape[0] == value.shape[0] and key.shape[1] == value.shape[1]
if isinstance(embed_dim, mge.Tensor):
# embed_dim can be a tensor when JIT tracing
#head_dim = embed_dim.div(num_heads, rounding_mode='trunc')
#NOTE: when positive number, floor_div is equivalent to trunc_div (in megengine only floor_div is available)
head_dim = F.floor_div(embed_dim, num_heads)
else:
head_dim = embed_dim // num_heads
assert head_dim * num_heads == embed_dim, "embed_dim must be divisible by num_heads"
scaling = float(head_dim) ** -0.5
assert not use_separate_proj_weight
assert need_weights
assert attn_mask is None
assert bias_k is None and bias_v is None
assert not add_zero_attn
# This is inline in_proj function with in_proj_weight and in_proj_bias
_b = in_proj_bias
_start = 0
_end = embed_dim
_w = in_proj_weight[_start:_end, :]
if _b is not None:
_b = _b[_start:_end]
q = F.linear(query, _w, _b)
# This is inline in_proj function with in_proj_weight and in_proj_bias
_b = in_proj_bias
_start = embed_dim
_end = embed_dim * 2
_w = in_proj_weight[_start:_end, :]
if _b is not None:
_b = _b[_start:_end]
k = F.linear(key, _w, _b)
# This is inline in_proj function with in_proj_weight and in_proj_bias
_b = in_proj_bias
_start = embed_dim * 2
_end = None
_w = in_proj_weight[_start:, :]
if _b is not None:
_b = _b[_start:]
v = F.linear(value, _w, _b)
q = q * scaling
raw_v = v
raw_v = raw_v.reshape(-1, bsz, num_heads, head_dim)
if proj_only:
return query, None, raw_v
# convert ByteTensor key_padding_mask to bool
if key_padding_mask is not None and key_padding_mask.dtype == np.uint8:
warnings.warn(
"Byte tensor for key_padding_mask in nn.MultiheadAttention is deprecated. Use bool tensor instead."
)
        key_padding_mask = key_padding_mask.astype(bool)  # np.bool is deprecated in modern NumPy
#def _pad_last_dim_right_only(tensor):
# '''
# To replace with torch.nn.functional.pad(tensor, (0, 1))
# '''
# return F.concat([tensor, F.expand_dims(F.zeros(tensor.shape[:-1]), axis=-1)], axis=-1)
#q = q.contiguous().view(tgt_len, bsz * num_heads, head_dim).transpose(0, 1)
q = q.reshape(tgt_len, bsz * num_heads, head_dim).transpose(1, 0, 2)
if k is not None:
#k = k.contiguous().view(-1, bsz * num_heads, head_dim).transpose(0, 1)
k = k.reshape(-1, bsz * num_heads, head_dim).transpose(1, 0, 2)
if v is not None:
#v = v.contiguous().view(-1, bsz * num_heads, head_dim).transpose(0, 1)
v = v.reshape(-1, bsz * num_heads, head_dim).transpose(1, 0, 2)
if static_k is not None:
assert static_k.shape[0] == bsz * num_heads
assert static_k.shape[2] == head_dim
k = static_k
if static_v is not None:
assert static_v.shape[0] == bsz * num_heads
assert static_v.shape[2] == head_dim
v = static_v
src_len = k.shape[1]
if key_padding_mask is not None:
assert key_padding_mask.shape[1] == src_len
#attn_output_weights = torch.bmm(q, k.transpose(1, 2))
attn_output_weights = F.matmul(q, k.transpose(0, 2, 1))
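    # batched scores: (N*H, L, D) @ (N*H, D, S) -> (N*H, L, S), one logit per
    # (query position, key position) pair in every head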
assert list(attn_output_weights.shape) == [bsz * num_heads, tgt_len, src_len]
if key_padding_mask is not None:
attn_output_weights = attn_output_weights.reshape(bsz, num_heads, tgt_len, src_len)
key_padding_mask = F.expand_dims(F.expand_dims(key_padding_mask, axis=1), axis=2)
attn_output_weights = safe_masked_fill(attn_output_weights, key_padding_mask, float("-inf"))
attn_output_weights = attn_output_weights.reshape(bsz * num_heads, tgt_len, src_len)
attn_output_weights_no_softmax = attn_output_weights
attn_output_weights = F.softmax(attn_output_weights, axis=-1)
attn_output_weights = F.dropout(attn_output_weights, dropout_p, training=training)
attn_output = F.matmul(attn_output_weights, v)
assert attn_output.shape == (bsz * num_heads, tgt_len, head_dim)
attn_output = | F.transpose(attn_output, (1, 0, 2)) | megengine.functional.transpose |
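A shape-only sketch of the head-merge step above (N=4, H=8, L=10, D=64 are assumed sizes).

import megengine.functional as F

attn = F.zeros((32, 10, 64))                                # (N*H, L, D)
merged = F.transpose(attn, (1, 0, 2)).reshape(10, 4, 512)   # -> (L, N, E) with E = H * D
print(merged.shape)   # (10, 4, 512)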
# Some code is modified from pytorch
# pytorch is licensed under BSD
# From PyTorch:
# Copyright (c) 2016- Facebook, Inc (<NAME>)
# Copyright (c) 2014- Facebook, Inc (<NAME>)
# Copyright (c) 2011-2014 Idiap Research Institute (<NAME>)
# Copyright (c) 2012-2014 Deepmind Technologies (Koray Kavukcuoglu)
# Copyright (c) 2011-2012 NEC Laboratories America (Koray Kavukcuoglu)
# Copyright (c) 2011-2013 NYU (<NAME>)
# Copyright (c) 2006-2010 NEC Laboratories America (<NAME>, <NAME>, <NAME>, <NAME>)
# Copyright (c) 2006 Idiap Research Institute (<NAME>)
# Copyright (c) 2001-2004 Idiap Research Institute (<NAME>, <NAME>, <NAME>)
# From Caffe2:
# Copyright (c) 2016-present, Facebook Inc. All rights reserved.
# All contributions by Facebook:
# Copyright (c) 2016 Facebook Inc.
# All contributions by Google:
# Copyright (c) 2015 Google Inc.
# All rights reserved.
# All contributions by Yang<NAME>:
# Copyright (c) 2015 Yang<NAME>
# All rights reserved.
# All contributions by Kakao Brain:
# Copyright 2019-2020 Kakao Brain
# All contributions from Caffe:
# Copyright(c) 2013, 2014, 2015, the respective contributors
# All rights reserved.
# All other contributions:
# Copyright(c) 2015, 2016 the respective contributors
# All rights reserved.
# Caffe2 uses a copyright model similar to Caffe: each contributor holds
# copyright over their contributions to Caffe2. The project versioning records
# all such contribution and copyright details. If a contributor wants to further
# mark their specific copyright on a particular contribution, they should
# indicate their copyright solely in the commit message of the change when it is
# committed.
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the names of Facebook, Deepmind Technologies, NYU, NEC Laboratories America
# and IDIAP Research Institute nor the names of its contributors may be
# used to endorse or promote products derived from this software without
# specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import megengine as mge
import megengine.module as M
from megengine import functional as F
from megengine import Parameter
from megengine.module.init import xavier_uniform_, zeros_
from typing import List, Tuple, Dict, Optional
import warnings
import numpy as np
from .utility import safe_masked_fill, has_nan_or_inf
def multi_head_attention_forward(
query: mge.Tensor,
key: mge.Tensor,
value: mge.Tensor,
embed_dim_to_check: int,
num_heads: int,
in_proj_weight: mge.Tensor,
in_proj_bias: Optional[mge.Tensor],
bias_k: Optional[mge.Tensor],
bias_v: Optional[mge.Tensor],
add_zero_attn: bool,
dropout_p: float,
out_proj_weight: mge.Tensor,
out_proj_bias: Optional[mge.Tensor],
training: bool = True,
key_padding_mask: Optional[mge.Tensor] = None,
need_weights: bool = True,
attn_mask: Optional[mge.Tensor] = None,
use_separate_proj_weight: bool = False,
q_proj_weight: Optional[mge.Tensor] = None,
k_proj_weight: Optional[mge.Tensor] = None,
v_proj_weight: Optional[mge.Tensor] = None,
static_k: Optional[mge.Tensor] = None,
static_v: Optional[mge.Tensor] = None,
proj_only: bool = False
) -> Tuple[mge.Tensor, Optional[mge.Tensor], mge.Tensor]:
r"""
Args:
query, key, value: map a query and a set of key-value pairs to an output.
See "Attention Is All You Need" for more details.
embed_dim_to_check: total dimension of the model.
num_heads: parallel attention heads.
in_proj_weight, in_proj_bias: input projection weight and bias.
bias_k, bias_v: bias of the key and value sequences to be added at dim=0.
add_zero_attn: add a new batch of zeros to the key and
value sequences at dim=1.
dropout_p: probability of an element to be zeroed.
out_proj_weight, out_proj_bias: the output projection weight and bias.
    training: apply dropout if ``True``.
    key_padding_mask: if provided, specified padding elements in the key will
        be ignored by the attention. This is a binary mask. When the value is
        ``True``, the corresponding attention weight is filled with -inf.
need_weights: output attn_output_weights.
attn_mask: 2D or 3D mask that prevents attention to certain positions. A 2D mask will be broadcasted for all
the batches while a 3D mask allows to specify a different mask for the entries of each batch.
    use_separate_proj_weight: the function accepts the projection weights for query, key,
        and value as separate tensors. If ``False``, in_proj_weight is used, which is
        a concatenation of q_proj_weight, k_proj_weight and v_proj_weight.
q_proj_weight, k_proj_weight, v_proj_weight, in_proj_bias: input projection weight and bias.
static_k, static_v: static key and value used for attention operators.
Shape:
Inputs:
- query: :math:`(L, N, E)` where L is the target sequence length, N is the batch size, E is
the embedding dimension.
- key: :math:`(S, N, E)`, where S is the source sequence length, N is the batch size, E is
the embedding dimension.
- value: :math:`(S, N, E)` where S is the source sequence length, N is the batch size, E is
the embedding dimension.
- key_padding_mask: :math:`(N, S)` where N is the batch size, S is the source sequence length.
If a ByteTensor is provided, the non-zero positions will be ignored while the zero positions
will be unchanged. If a BoolTensor is provided, the positions with the
value of ``True`` will be ignored while the position with the value of ``False`` will be unchanged.
- attn_mask: 2D mask :math:`(L, S)` where L is the target sequence length, S is the source sequence length.
3D mask :math:`(N*num_heads, L, S)` where N is the batch size, L is the target sequence length,
        S is the source sequence length. attn_mask specifies, for each query position, which key
        positions it may attend. If a ByteTensor is provided, the non-zero positions are not allowed to attend
while the zero positions will be unchanged. If a BoolTensor is provided, positions with ``True``
are not allowed to attend while ``False`` values will be unchanged. If a FloatTensor
is provided, it will be added to the attention weight.
- static_k: :math:`(N*num_heads, S, E/num_heads)`, where S is the source sequence length,
N is the batch size, E is the embedding dimension. E/num_heads is the head dimension.
- static_v: :math:`(N*num_heads, S, E/num_heads)`, where S is the source sequence length,
N is the batch size, E is the embedding dimension. E/num_heads is the head dimension.
Outputs:
- attn_output: :math:`(L, N, E)` where L is the target sequence length, N is the batch size,
E is the embedding dimension.
        - attn_output_weights: :math:`(N, num\_heads, L, S)` where N is the batch size, num_heads is the
          number of attention heads, L is the target sequence length, S is the source sequence length.
          Note that this implementation returns the per-head attention weights before softmax.
"""
tgt_len, bsz, embed_dim = query.shape
assert embed_dim == embed_dim_to_check
# allow MHA to have different sizes for the feature dimension
assert key.shape[0] == value.shape[0] and key.shape[1] == value.shape[1]
if isinstance(embed_dim, mge.Tensor):
# embed_dim can be a tensor when JIT tracing
#head_dim = embed_dim.div(num_heads, rounding_mode='trunc')
#NOTE: when positive number, floor_div is equivalent to trunc_div (in megengine only floor_div is available)
head_dim = F.floor_div(embed_dim, num_heads)
else:
head_dim = embed_dim // num_heads
assert head_dim * num_heads == embed_dim, "embed_dim must be divisible by num_heads"
scaling = float(head_dim) ** -0.5
assert not use_separate_proj_weight
assert need_weights
assert attn_mask is None
assert bias_k is None and bias_v is None
assert not add_zero_attn
# This is inline in_proj function with in_proj_weight and in_proj_bias
_b = in_proj_bias
_start = 0
_end = embed_dim
_w = in_proj_weight[_start:_end, :]
if _b is not None:
_b = _b[_start:_end]
q = F.linear(query, _w, _b)
# This is inline in_proj function with in_proj_weight and in_proj_bias
_b = in_proj_bias
_start = embed_dim
_end = embed_dim * 2
_w = in_proj_weight[_start:_end, :]
if _b is not None:
_b = _b[_start:_end]
k = F.linear(key, _w, _b)
# This is inline in_proj function with in_proj_weight and in_proj_bias
_b = in_proj_bias
_start = embed_dim * 2
_end = None
_w = in_proj_weight[_start:, :]
if _b is not None:
_b = _b[_start:]
v = F.linear(value, _w, _b)
q = q * scaling
raw_v = v
raw_v = raw_v.reshape(-1, bsz, num_heads, head_dim)
if proj_only:
return query, None, raw_v
# convert ByteTensor key_padding_mask to bool
if key_padding_mask is not None and key_padding_mask.dtype == np.uint8:
warnings.warn(
"Byte tensor for key_padding_mask in nn.MultiheadAttention is deprecated. Use bool tensor instead."
)
        key_padding_mask = key_padding_mask.astype(bool)  # np.bool is deprecated in modern NumPy
#def _pad_last_dim_right_only(tensor):
# '''
# To replace with torch.nn.functional.pad(tensor, (0, 1))
# '''
# return F.concat([tensor, F.expand_dims(F.zeros(tensor.shape[:-1]), axis=-1)], axis=-1)
#q = q.contiguous().view(tgt_len, bsz * num_heads, head_dim).transpose(0, 1)
q = q.reshape(tgt_len, bsz * num_heads, head_dim).transpose(1, 0, 2)
if k is not None:
#k = k.contiguous().view(-1, bsz * num_heads, head_dim).transpose(0, 1)
k = k.reshape(-1, bsz * num_heads, head_dim).transpose(1, 0, 2)
if v is not None:
#v = v.contiguous().view(-1, bsz * num_heads, head_dim).transpose(0, 1)
v = v.reshape(-1, bsz * num_heads, head_dim).transpose(1, 0, 2)
if static_k is not None:
assert static_k.shape[0] == bsz * num_heads
assert static_k.shape[2] == head_dim
k = static_k
if static_v is not None:
assert static_v.shape[0] == bsz * num_heads
assert static_v.shape[2] == head_dim
v = static_v
src_len = k.shape[1]
if key_padding_mask is not None:
assert key_padding_mask.shape[1] == src_len
#attn_output_weights = torch.bmm(q, k.transpose(1, 2))
attn_output_weights = F.matmul(q, k.transpose(0, 2, 1))
assert list(attn_output_weights.shape) == [bsz * num_heads, tgt_len, src_len]
if key_padding_mask is not None:
attn_output_weights = attn_output_weights.reshape(bsz, num_heads, tgt_len, src_len)
key_padding_mask = F.expand_dims(F.expand_dims(key_padding_mask, axis=1), axis=2)
attn_output_weights = safe_masked_fill(attn_output_weights, key_padding_mask, float("-inf"))
attn_output_weights = attn_output_weights.reshape(bsz * num_heads, tgt_len, src_len)
attn_output_weights_no_softmax = attn_output_weights
attn_output_weights = F.softmax(attn_output_weights, axis=-1)
attn_output_weights = F.dropout(attn_output_weights, dropout_p, training=training)
attn_output = F.matmul(attn_output_weights, v)
assert attn_output.shape == (bsz * num_heads, tgt_len, head_dim)
attn_output = F.transpose(attn_output, (1, 0, 2)).reshape(tgt_len, bsz, embed_dim)
attn_output = F.nn.linear(attn_output, out_proj_weight, out_proj_bias)
    # expose per-head attention weights; note this port returns the pre-softmax scores without averaging over heads
attn_output_weights = attn_output_weights_no_softmax.reshape(bsz, num_heads, tgt_len, src_len)
return attn_output, attn_output_weights, raw_v
class MultiheadAttention(M.Module):
r"""Allows the model to jointly attend to information
from different representation subspaces.
See `Attention Is All You Need <https://arxiv.org/abs/1706.03762>`_
.. math::
\text{MultiHead}(Q, K, V) = \text{Concat}(head_1,\dots,head_h)W^O
where :math:`head_i = \text{Attention}(QW_i^Q, KW_i^K, VW_i^V)`.
Args:
embed_dim: total dimension of the model.
num_heads: parallel attention heads.
dropout: a Dropout layer on attn_output_weights. Default: 0.0.
bias: add bias as module parameter. Default: True.
add_bias_kv: add bias to the key and value sequences at dim=0.
add_zero_attn: add a new batch of zeros to the key and
value sequences at dim=1.
kdim: total number of features in key. Default: None.
vdim: total number of features in value. Default: None.
batch_first: If ``True``, then the input and output tensors are provided
as (batch, seq, feature). Default: ``False`` (seq, batch, feature).
Note that if :attr:`kdim` and :attr:`vdim` are None, they will be set
to :attr:`embed_dim` such that query, key, and value have the same
number of features.
Examples::
        >>> multihead_attn = MultiheadAttention(embed_dim, num_heads)
>>> attn_output, attn_output_weights = multihead_attn(query, key, value)
"""
__constants__ = ['batch_first']
bias_k: Optional[mge.Tensor]
bias_v: Optional[mge.Tensor]
def __init__(self, embed_dim, num_heads, dropout=0., bias=True, add_bias_kv=False, add_zero_attn=False,
kdim=None, vdim=None, batch_first=False):
super(MultiheadAttention, self).__init__()
self.embed_dim = embed_dim
self.kdim = kdim if kdim is not None else embed_dim
self.vdim = vdim if vdim is not None else embed_dim
        # True by default
self._qkv_same_embed_dim = self.kdim == embed_dim and self.vdim == embed_dim
self.num_heads = num_heads
self.dropout = dropout
        # False by default
self.batch_first = batch_first
self.head_dim = embed_dim // num_heads
assert self.head_dim * num_heads == self.embed_dim, "embed_dim must be divisible by num_heads"
self.in_proj_weight = Parameter( | F.zeros((3 * embed_dim, embed_dim)) | megengine.functional.zeros |
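# A minimal smoke-test sketch for multi_head_attention_forward as defined
# above; the sizes and weight tensors below are illustrative assumptions,
# not part of the original source.
import numpy as np
import megengine as mge

L, S, N, E, H = 5, 7, 2, 16, 4  # tgt len, src len, batch, embed dim, heads
def rand(*shape):
    return mge.Tensor(np.random.randn(*shape).astype("float32"))
attn_out, attn_w, _ = multi_head_attention_forward(
    rand(L, N, E), rand(S, N, E), rand(S, N, E),  # query, key, value
    E, H,
    rand(3 * E, E), rand(3 * E),                  # packed in-proj weight/bias
    bias_k=None, bias_v=None, add_zero_attn=False,
    dropout_p=0.0,
    out_proj_weight=rand(E, E), out_proj_bias=rand(E),
    training=False)
assert attn_out.shape == (L, N, E)
assert attn_w.shape == (N, H, L, S)  # per-head, pre-softmax scores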
# Some code is modified from pytorch
# pytorch is licensed under BSD
# From PyTorch:
# Copyright (c) 2016- Facebook, Inc (<NAME>)
# Copyright (c) 2014- Facebook, Inc (<NAME>)
# Copyright (c) 2011-2014 Idiap Research Institute (<NAME>)
# Copyright (c) 2012-2014 Deepmind Technologies (Koray Kavukcuoglu)
# Copyright (c) 2011-2012 NEC Laboratories America (Koray Kavukcuoglu)
# Copyright (c) 2011-2013 NYU (<NAME>)
# Copyright (c) 2006-2010 NEC Laboratories America (<NAME>, <NAME>, <NAME>, <NAME>)
# Copyright (c) 2006 Idiap Research Institute (<NAME>)
# Copyright (c) 2001-2004 Idiap Research Institute (<NAME>, <NAME>, <NAME>)
# From Caffe2:
# Copyright (c) 2016-present, Facebook Inc. All rights reserved.
# All contributions by Facebook:
# Copyright (c) 2016 Facebook Inc.
# All contributions by Google:
# Copyright (c) 2015 Google Inc.
# All rights reserved.
# All contributions by Yang<NAME>:
# Copyright (c) 2015 Yang<NAME>
# All rights reserved.
# All contributions by Kakao Brain:
# Copyright 2019-2020 Kakao Brain
# All contributions from Caffe:
# Copyright(c) 2013, 2014, 2015, the respective contributors
# All rights reserved.
# All other contributions:
# Copyright(c) 2015, 2016 the respective contributors
# All rights reserved.
# Caffe2 uses a copyright model similar to Caffe: each contributor holds
# copyright over their contributions to Caffe2. The project versioning records
# all such contribution and copyright details. If a contributor wants to further
# mark their specific copyright on a particular contribution, they should
# indicate their copyright solely in the commit message of the change when it is
# committed.
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the names of Facebook, Deepmind Technologies, NYU, NEC Laboratories America
# and IDIAP Research Institute nor the names of its contributors may be
# used to endorse or promote products derived from this software without
# specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import megengine as mge
import megengine.module as M
from megengine import functional as F
from megengine import Parameter
from megengine.module.init import xavier_uniform_, zeros_
from typing import List, Tuple, Dict, Optional
import numpy as np
import warnings  # needed by the deprecation warning below
from .utility import safe_masked_fill, has_nan_or_inf
def multi_head_attention_forward(
query: mge.Tensor,
key: mge.Tensor,
value: mge.Tensor,
embed_dim_to_check: int,
num_heads: int,
in_proj_weight: mge.Tensor,
in_proj_bias: Optional[mge.Tensor],
bias_k: Optional[mge.Tensor],
bias_v: Optional[mge.Tensor],
add_zero_attn: bool,
dropout_p: float,
out_proj_weight: mge.Tensor,
out_proj_bias: Optional[mge.Tensor],
training: bool = True,
key_padding_mask: Optional[mge.Tensor] = None,
need_weights: bool = True,
attn_mask: Optional[mge.Tensor] = None,
use_separate_proj_weight: bool = False,
q_proj_weight: Optional[mge.Tensor] = None,
k_proj_weight: Optional[mge.Tensor] = None,
v_proj_weight: Optional[mge.Tensor] = None,
static_k: Optional[mge.Tensor] = None,
static_v: Optional[mge.Tensor] = None,
proj_only: bool = False
) -> Tuple[mge.Tensor, Optional[mge.Tensor]]:
r"""
Args:
query, key, value: map a query and a set of key-value pairs to an output.
See "Attention Is All You Need" for more details.
embed_dim_to_check: total dimension of the model.
num_heads: parallel attention heads.
in_proj_weight, in_proj_bias: input projection weight and bias.
bias_k, bias_v: bias of the key and value sequences to be added at dim=0.
add_zero_attn: add a new batch of zeros to the key and
value sequences at dim=1.
dropout_p: probability of an element to be zeroed.
out_proj_weight, out_proj_bias: the output projection weight and bias.
        training: apply dropout if ``True``.
        key_padding_mask: if provided, specified padding elements in the key will
            be ignored by the attention. This is a binary mask. When the value is True,
            the corresponding value on the attention layer will be filled with -inf.
        need_weights: output attn_output_weights.
        attn_mask: 2D or 3D mask that prevents attention to certain positions. A 2D mask is broadcast across
            the batch while a 3D mask allows a different mask for each entry in the batch.
        use_separate_proj_weight: the function accepts the projection weights for query, key,
            and value in different forms. If ``False``, in_proj_weight will be used, which is
a combination of q_proj_weight, k_proj_weight, v_proj_weight.
q_proj_weight, k_proj_weight, v_proj_weight, in_proj_bias: input projection weight and bias.
static_k, static_v: static key and value used for attention operators.
Shape:
Inputs:
- query: :math:`(L, N, E)` where L is the target sequence length, N is the batch size, E is
the embedding dimension.
- key: :math:`(S, N, E)`, where S is the source sequence length, N is the batch size, E is
the embedding dimension.
- value: :math:`(S, N, E)` where S is the source sequence length, N is the batch size, E is
the embedding dimension.
- key_padding_mask: :math:`(N, S)` where N is the batch size, S is the source sequence length.
If a ByteTensor is provided, the non-zero positions will be ignored while the zero positions
will be unchanged. If a BoolTensor is provided, the positions with the
value of ``True`` will be ignored while the position with the value of ``False`` will be unchanged.
- attn_mask: 2D mask :math:`(L, S)` where L is the target sequence length, S is the source sequence length.
3D mask :math:`(N*num_heads, L, S)` where N is the batch size, L is the target sequence length,
S is the source sequence length. attn_mask ensures that position i is allowed to attend the unmasked
positions. If a ByteTensor is provided, the non-zero positions are not allowed to attend
while the zero positions will be unchanged. If a BoolTensor is provided, positions with ``True``
are not allowed to attend while ``False`` values will be unchanged. If a FloatTensor
is provided, it will be added to the attention weight.
- static_k: :math:`(N*num_heads, S, E/num_heads)`, where S is the source sequence length,
N is the batch size, E is the embedding dimension. E/num_heads is the head dimension.
- static_v: :math:`(N*num_heads, S, E/num_heads)`, where S is the source sequence length,
N is the batch size, E is the embedding dimension. E/num_heads is the head dimension.
Outputs:
- attn_output: :math:`(L, N, E)` where L is the target sequence length, N is the batch size,
E is the embedding dimension.
        - attn_output_weights: :math:`(N, num_heads, L, S)` where N is the batch size, num_heads is the
          number of attention heads, L is the target sequence length, S is the source sequence length.
          Note: this port returns the per-head, pre-softmax weights, not a head-averaged :math:`(N, L, S)` tensor.
"""
tgt_len, bsz, embed_dim = query.shape
assert embed_dim == embed_dim_to_check
# allow MHA to have different sizes for the feature dimension
assert key.shape[0] == value.shape[0] and key.shape[1] == value.shape[1]
if isinstance(embed_dim, mge.Tensor):
# embed_dim can be a tensor when JIT tracing
#head_dim = embed_dim.div(num_heads, rounding_mode='trunc')
        # NOTE: for positive operands, floor_div is equivalent to trunc_div (MegEngine provides only floor_div)
head_dim = F.floor_div(embed_dim, num_heads)
else:
head_dim = embed_dim // num_heads
assert head_dim * num_heads == embed_dim, "embed_dim must be divisible by num_heads"
scaling = float(head_dim) ** -0.5
assert not use_separate_proj_weight
assert need_weights
assert attn_mask is None
assert bias_k is None and bias_v is None
assert not add_zero_attn
    # Inline in_proj: take the query slice of in_proj_weight and in_proj_bias
_b = in_proj_bias
_start = 0
_end = embed_dim
_w = in_proj_weight[_start:_end, :]
if _b is not None:
_b = _b[_start:_end]
q = F.linear(query, _w, _b)
    # Inline in_proj: take the key slice of in_proj_weight and in_proj_bias
_b = in_proj_bias
_start = embed_dim
_end = embed_dim * 2
_w = in_proj_weight[_start:_end, :]
if _b is not None:
_b = _b[_start:_end]
k = F.linear(key, _w, _b)
    # Inline in_proj: take the value slice of in_proj_weight and in_proj_bias
_b = in_proj_bias
_start = embed_dim * 2
_end = None
_w = in_proj_weight[_start:, :]
if _b is not None:
_b = _b[_start:]
v = F.linear(value, _w, _b)
q = q * scaling
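    # Pre-scaling q by 1/sqrt(head_dim) makes the upcoming q @ k^T matmul
    # produce the scaled dot-product scores Q K^T / sqrt(d_k).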
raw_v = v
raw_v = raw_v.reshape(-1, bsz, num_heads, head_dim)
if proj_only:
return query, None, raw_v
# convert ByteTensor key_padding_mask to bool
if key_padding_mask is not None and key_padding_mask.dtype == np.uint8:
warnings.warn(
"Byte tensor for key_padding_mask in nn.MultiheadAttention is deprecated. Use bool tensor instead."
)
        key_padding_mask = key_padding_mask.astype(bool)  # np.bool is removed in NumPy >= 1.24
#def _pad_last_dim_right_only(tensor):
# '''
# To replace with torch.nn.functional.pad(tensor, (0, 1))
# '''
# return F.concat([tensor, F.expand_dims(F.zeros(tensor.shape[:-1]), axis=-1)], axis=-1)
#q = q.contiguous().view(tgt_len, bsz * num_heads, head_dim).transpose(0, 1)
q = q.reshape(tgt_len, bsz * num_heads, head_dim).transpose(1, 0, 2)
if k is not None:
#k = k.contiguous().view(-1, bsz * num_heads, head_dim).transpose(0, 1)
k = k.reshape(-1, bsz * num_heads, head_dim).transpose(1, 0, 2)
if v is not None:
#v = v.contiguous().view(-1, bsz * num_heads, head_dim).transpose(0, 1)
v = v.reshape(-1, bsz * num_heads, head_dim).transpose(1, 0, 2)
if static_k is not None:
assert static_k.shape[0] == bsz * num_heads
assert static_k.shape[2] == head_dim
k = static_k
if static_v is not None:
assert static_v.shape[0] == bsz * num_heads
assert static_v.shape[2] == head_dim
v = static_v
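    # When supplied, static_k/static_v replace the freshly projected k and v,
    # e.g. to reuse cached key/value tensors during incremental decoding.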
src_len = k.shape[1]
if key_padding_mask is not None:
assert key_padding_mask.shape[1] == src_len
#attn_output_weights = torch.bmm(q, k.transpose(1, 2))
attn_output_weights = F.matmul(q, k.transpose(0, 2, 1))
assert list(attn_output_weights.shape) == [bsz * num_heads, tgt_len, src_len]
if key_padding_mask is not None:
attn_output_weights = attn_output_weights.reshape(bsz, num_heads, tgt_len, src_len)
key_padding_mask = F.expand_dims(F.expand_dims(key_padding_mask, axis=1), axis=2)
attn_output_weights = safe_masked_fill(attn_output_weights, key_padding_mask, float("-inf"))
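        # The (N, S) mask was expanded to (N, 1, 1, S), so a True at (n, s)
        # masks source position s for every head and every target position.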
attn_output_weights = attn_output_weights.reshape(bsz * num_heads, tgt_len, src_len)
attn_output_weights_no_softmax = attn_output_weights
attn_output_weights = F.softmax(attn_output_weights, axis=-1)
attn_output_weights = F.dropout(attn_output_weights, dropout_p, training=training)
attn_output = F.matmul(attn_output_weights, v)
assert attn_output.shape == (bsz * num_heads, tgt_len, head_dim)
attn_output = F.transpose(attn_output, (1, 0, 2)).reshape(tgt_len, bsz, embed_dim)
attn_output = F.nn.linear(attn_output, out_proj_weight, out_proj_bias)
    # expose per-head attention weights; note this port returns the pre-softmax scores without averaging over heads
attn_output_weights = attn_output_weights_no_softmax.reshape(bsz, num_heads, tgt_len, src_len)
return attn_output, attn_output_weights, raw_v
class MultiheadAttention(M.Module):
r"""Allows the model to jointly attend to information
from different representation subspaces.
See `Attention Is All You Need <https://arxiv.org/abs/1706.03762>`_
.. math::
\text{MultiHead}(Q, K, V) = \text{Concat}(head_1,\dots,head_h)W^O
where :math:`head_i = \text{Attention}(QW_i^Q, KW_i^K, VW_i^V)`.
Args:
embed_dim: total dimension of the model.
num_heads: parallel attention heads.
dropout: a Dropout layer on attn_output_weights. Default: 0.0.
bias: add bias as module parameter. Default: True.
add_bias_kv: add bias to the key and value sequences at dim=0.
add_zero_attn: add a new batch of zeros to the key and
value sequences at dim=1.
kdim: total number of features in key. Default: None.
vdim: total number of features in value. Default: None.
batch_first: If ``True``, then the input and output tensors are provided
as (batch, seq, feature). Default: ``False`` (seq, batch, feature).
Note that if :attr:`kdim` and :attr:`vdim` are None, they will be set
to :attr:`embed_dim` such that query, key, and value have the same
number of features.
Examples::
        >>> multihead_attn = MultiheadAttention(embed_dim, num_heads)
>>> attn_output, attn_output_weights = multihead_attn(query, key, value)
"""
__constants__ = ['batch_first']
bias_k: Optional[mge.Tensor]
bias_v: Optional[mge.Tensor]
def __init__(self, embed_dim, num_heads, dropout=0., bias=True, add_bias_kv=False, add_zero_attn=False,
kdim=None, vdim=None, batch_first=False):
super(MultiheadAttention, self).__init__()
self.embed_dim = embed_dim
self.kdim = kdim if kdim is not None else embed_dim
self.vdim = vdim if vdim is not None else embed_dim
        # True by default
self._qkv_same_embed_dim = self.kdim == embed_dim and self.vdim == embed_dim
self.num_heads = num_heads
self.dropout = dropout
        # False by default
self.batch_first = batch_first
self.head_dim = embed_dim // num_heads
assert self.head_dim * num_heads == self.embed_dim, "embed_dim must be divisible by num_heads"
self.in_proj_weight = Parameter(F.zeros((3 * embed_dim, embed_dim)))
if bias:
self.in_proj_bias = Parameter(F.zeros((3 * embed_dim,)))
else:
self.in_proj_bias = None
self.out_proj = M.Linear(embed_dim, embed_dim, bias=bias)
if add_bias_kv:
self.bias_k = Parameter(F.zeros((1, 1, embed_dim)))
self.bias_v = Parameter(F.zeros((1, 1, embed_dim)))
else:
self.bias_k = self.bias_v = None
self.add_zero_attn = add_zero_attn
self._reset_parameters()
def _reset_parameters(self):
xavier_uniform_(self.in_proj_weight)
if self.in_proj_bias is not None:
| zeros_(self.in_proj_bias) | megengine.module.init.zeros_ |
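# For reference, xavier_uniform_ above samples from U(-a, a) with
# a = sqrt(6 / (fan_in + fan_out)); the packed (3E, E) in_proj_weight has
# fan_in = E and fan_out = 3E.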
# Some code is modified from pytorch
# pytorch is licensed under BSD
# From PyTorch:
# Copyright (c) 2016- Facebook, Inc (<NAME>)
# Copyright (c) 2014- Facebook, Inc (<NAME>)
# Copyright (c) 2011-2014 Idiap Research Institute (<NAME>)
# Copyright (c) 2012-2014 Deepmind Technologies (Koray Kavukcuoglu)
# Copyright (c) 2011-2012 NEC Laboratories America (Koray Kavukcuoglu)
# Copyright (c) 2011-2013 NYU (<NAME>)
# Copyright (c) 2006-2010 NEC Laboratories America (<NAME>, <NAME>, <NAME>, <NAME>)
# Copyright (c) 2006 Idiap Research Institute (<NAME>)
# Copyright (c) 2001-2004 Idiap Research Institute (<NAME>, <NAME>, <NAME>)
# From Caffe2:
# Copyright (c) 2016-present, Facebook Inc. All rights reserved.
# All contributions by Facebook:
# Copyright (c) 2016 Facebook Inc.
# All contributions by Google:
# Copyright (c) 2015 Google Inc.
# All rights reserved.
# All contributions by Yang<NAME>:
# Copyright (c) 2015 Yang<NAME>
# All rights reserved.
# All contributions by Kakao Brain:
# Copyright 2019-2020 Kakao Brain
# All contributions from Caffe:
# Copyright(c) 2013, 2014, 2015, the respective contributors
# All rights reserved.
# All other contributions:
# Copyright(c) 2015, 2016 the respective contributors
# All rights reserved.
# Caffe2 uses a copyright model similar to Caffe: each contributor holds
# copyright over their contributions to Caffe2. The project versioning records
# all such contribution and copyright details. If a contributor wants to further
# mark their specific copyright on a particular contribution, they should
# indicate their copyright solely in the commit message of the change when it is
# committed.
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the names of Facebook, Deepmind Technologies, NYU, NEC Laboratories America
# and IDIAP Research Institute nor the names of its contributors may be
# used to endorse or promote products derived from this software without
# specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import megengine as mge
import megengine.module as M
from megengine import functional as F
from megengine import Parameter
from megengine.module.init import xavier_uniform_, zeros_
from typing import List, Tuple, Dict, Optional
import numpy as np
import warnings  # needed by the deprecation warning below
from .utility import safe_masked_fill, has_nan_or_inf
def multi_head_attention_forward(
query: mge.Tensor,
key: mge.Tensor,
value: mge.Tensor,
embed_dim_to_check: int,
num_heads: int,
in_proj_weight: mge.Tensor,
in_proj_bias: Optional[mge.Tensor],
bias_k: Optional[mge.Tensor],
bias_v: Optional[mge.Tensor],
add_zero_attn: bool,
dropout_p: float,
out_proj_weight: mge.Tensor,
out_proj_bias: Optional[mge.Tensor],
training: bool = True,
key_padding_mask: Optional[mge.Tensor] = None,
need_weights: bool = True,
attn_mask: Optional[mge.Tensor] = None,
use_separate_proj_weight: bool = False,
q_proj_weight: Optional[mge.Tensor] = None,
k_proj_weight: Optional[mge.Tensor] = None,
v_proj_weight: Optional[mge.Tensor] = None,
static_k: Optional[mge.Tensor] = None,
static_v: Optional[mge.Tensor] = None,
proj_only: bool = False
) -> Tuple[mge.Tensor, Optional[mge.Tensor]]:
r"""
Args:
query, key, value: map a query and a set of key-value pairs to an output.
See "Attention Is All You Need" for more details.
embed_dim_to_check: total dimension of the model.
num_heads: parallel attention heads.
in_proj_weight, in_proj_bias: input projection weight and bias.
bias_k, bias_v: bias of the key and value sequences to be added at dim=0.
add_zero_attn: add a new batch of zeros to the key and
value sequences at dim=1.
dropout_p: probability of an element to be zeroed.
out_proj_weight, out_proj_bias: the output projection weight and bias.
        training: apply dropout if ``True``.
        key_padding_mask: if provided, specified padding elements in the key will
            be ignored by the attention. This is a binary mask. When the value is True,
            the corresponding value on the attention layer will be filled with -inf.
        need_weights: output attn_output_weights.
        attn_mask: 2D or 3D mask that prevents attention to certain positions. A 2D mask is broadcast across
            the batch while a 3D mask allows a different mask for each entry in the batch.
        use_separate_proj_weight: the function accepts the projection weights for query, key,
            and value in different forms. If ``False``, in_proj_weight will be used, which is
a combination of q_proj_weight, k_proj_weight, v_proj_weight.
q_proj_weight, k_proj_weight, v_proj_weight, in_proj_bias: input projection weight and bias.
static_k, static_v: static key and value used for attention operators.
Shape:
Inputs:
- query: :math:`(L, N, E)` where L is the target sequence length, N is the batch size, E is
the embedding dimension.
- key: :math:`(S, N, E)`, where S is the source sequence length, N is the batch size, E is
the embedding dimension.
- value: :math:`(S, N, E)` where S is the source sequence length, N is the batch size, E is
the embedding dimension.
- key_padding_mask: :math:`(N, S)` where N is the batch size, S is the source sequence length.
If a ByteTensor is provided, the non-zero positions will be ignored while the zero positions
will be unchanged. If a BoolTensor is provided, the positions with the
value of ``True`` will be ignored while the position with the value of ``False`` will be unchanged.
- attn_mask: 2D mask :math:`(L, S)` where L is the target sequence length, S is the source sequence length.
3D mask :math:`(N*num_heads, L, S)` where N is the batch size, L is the target sequence length,
S is the source sequence length. attn_mask ensures that position i is allowed to attend the unmasked
positions. If a ByteTensor is provided, the non-zero positions are not allowed to attend
while the zero positions will be unchanged. If a BoolTensor is provided, positions with ``True``
are not allowed to attend while ``False`` values will be unchanged. If a FloatTensor
is provided, it will be added to the attention weight.
- static_k: :math:`(N*num_heads, S, E/num_heads)`, where S is the source sequence length,
N is the batch size, E is the embedding dimension. E/num_heads is the head dimension.
- static_v: :math:`(N*num_heads, S, E/num_heads)`, where S is the source sequence length,
N is the batch size, E is the embedding dimension. E/num_heads is the head dimension.
Outputs:
- attn_output: :math:`(L, N, E)` where L is the target sequence length, N is the batch size,
E is the embedding dimension.
        - attn_output_weights: :math:`(N, num_heads, L, S)` where N is the batch size, num_heads is the
          number of attention heads, L is the target sequence length, S is the source sequence length.
          Note: this port returns the per-head, pre-softmax weights, not a head-averaged :math:`(N, L, S)` tensor.
"""
tgt_len, bsz, embed_dim = query.shape
assert embed_dim == embed_dim_to_check
# allow MHA to have different sizes for the feature dimension
assert key.shape[0] == value.shape[0] and key.shape[1] == value.shape[1]
if isinstance(embed_dim, mge.Tensor):
# embed_dim can be a tensor when JIT tracing
#head_dim = embed_dim.div(num_heads, rounding_mode='trunc')
        # NOTE: for positive operands, floor_div is equivalent to trunc_div (MegEngine provides only floor_div)
head_dim = F.floor_div(embed_dim, num_heads)
else:
head_dim = embed_dim // num_heads
assert head_dim * num_heads == embed_dim, "embed_dim must be divisible by num_heads"
scaling = float(head_dim) ** -0.5
assert not use_separate_proj_weight
assert need_weights
assert attn_mask is None
assert bias_k is None and bias_v is None
assert not add_zero_attn
    # Inline in_proj: take the query slice of in_proj_weight and in_proj_bias
_b = in_proj_bias
_start = 0
_end = embed_dim
_w = in_proj_weight[_start:_end, :]
if _b is not None:
_b = _b[_start:_end]
q = F.linear(query, _w, _b)
    # Inline in_proj: take the key slice of in_proj_weight and in_proj_bias
_b = in_proj_bias
_start = embed_dim
_end = embed_dim * 2
_w = in_proj_weight[_start:_end, :]
if _b is not None:
_b = _b[_start:_end]
k = F.linear(key, _w, _b)
    # Inline in_proj: take the value slice of in_proj_weight and in_proj_bias
_b = in_proj_bias
_start = embed_dim * 2
_end = None
_w = in_proj_weight[_start:, :]
if _b is not None:
_b = _b[_start:]
v = F.linear(value, _w, _b)
q = q * scaling
raw_v = v
raw_v = raw_v.reshape(-1, bsz, num_heads, head_dim)
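    # raw_v keeps the projected values in (src_len, bsz, num_heads, head_dim)
    # layout; it is returned untouched so callers can reuse the value
    # projection without recomputing it.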
if proj_only:
return query, None, raw_v
# convert ByteTensor key_padding_mask to bool
if key_padding_mask is not None and key_padding_mask.dtype == np.uint8:
warnings.warn(
"Byte tensor for key_padding_mask in nn.MultiheadAttention is deprecated. Use bool tensor instead."
)
        key_padding_mask = key_padding_mask.astype(bool)  # np.bool is removed in NumPy >= 1.24
#def _pad_last_dim_right_only(tensor):
# '''
# To replace with torch.nn.functional.pad(tensor, (0, 1))
# '''
# return F.concat([tensor, F.expand_dims(F.zeros(tensor.shape[:-1]), axis=-1)], axis=-1)
#q = q.contiguous().view(tgt_len, bsz * num_heads, head_dim).transpose(0, 1)
q = q.reshape(tgt_len, bsz * num_heads, head_dim).transpose(1, 0, 2)
if k is not None:
#k = k.contiguous().view(-1, bsz * num_heads, head_dim).transpose(0, 1)
k = k.reshape(-1, bsz * num_heads, head_dim).transpose(1, 0, 2)
if v is not None:
#v = v.contiguous().view(-1, bsz * num_heads, head_dim).transpose(0, 1)
v = v.reshape(-1, bsz * num_heads, head_dim).transpose(1, 0, 2)
if static_k is not None:
assert static_k.shape[0] == bsz * num_heads
assert static_k.shape[2] == head_dim
k = static_k
if static_v is not None:
assert static_v.shape[0] == bsz * num_heads
assert static_v.shape[2] == head_dim
v = static_v
src_len = k.shape[1]
if key_padding_mask is not None:
assert key_padding_mask.shape[1] == src_len
#attn_output_weights = torch.bmm(q, k.transpose(1, 2))
attn_output_weights = F.matmul(q, k.transpose(0, 2, 1))
assert list(attn_output_weights.shape) == [bsz * num_heads, tgt_len, src_len]
if key_padding_mask is not None:
attn_output_weights = attn_output_weights.reshape(bsz, num_heads, tgt_len, src_len)
key_padding_mask = F.expand_dims(F.expand_dims(key_padding_mask, axis=1), axis=2)
attn_output_weights = safe_masked_fill(attn_output_weights, key_padding_mask, float("-inf"))
attn_output_weights = attn_output_weights.reshape(bsz * num_heads, tgt_len, src_len)
attn_output_weights_no_softmax = attn_output_weights
attn_output_weights = F.softmax(attn_output_weights, axis=-1)
attn_output_weights = F.dropout(attn_output_weights, dropout_p, training=training)
attn_output = F.matmul(attn_output_weights, v)
assert attn_output.shape == (bsz * num_heads, tgt_len, head_dim)
attn_output = F.transpose(attn_output, (1, 0, 2)).reshape(tgt_len, bsz, embed_dim)
attn_output = F.nn.linear(attn_output, out_proj_weight, out_proj_bias)
    # expose per-head attention weights; note this port returns the pre-softmax scores without averaging over heads
attn_output_weights = attn_output_weights_no_softmax.reshape(bsz, num_heads, tgt_len, src_len)
return attn_output, attn_output_weights, raw_v
class MultiheadAttention(M.Module):
r"""Allows the model to jointly attend to information
from different representation subspaces.
See `Attention Is All You Need <https://arxiv.org/abs/1706.03762>`_
.. math::
\text{MultiHead}(Q, K, V) = \text{Concat}(head_1,\dots,head_h)W^O
where :math:`head_i = \text{Attention}(QW_i^Q, KW_i^K, VW_i^V)`.
Args:
embed_dim: total dimension of the model.
num_heads: parallel attention heads.
dropout: a Dropout layer on attn_output_weights. Default: 0.0.
bias: add bias as module parameter. Default: True.
add_bias_kv: add bias to the key and value sequences at dim=0.
add_zero_attn: add a new batch of zeros to the key and
value sequences at dim=1.
kdim: total number of features in key. Default: None.
vdim: total number of features in value. Default: None.
batch_first: If ``True``, then the input and output tensors are provided
as (batch, seq, feature). Default: ``False`` (seq, batch, feature).
Note that if :attr:`kdim` and :attr:`vdim` are None, they will be set
to :attr:`embed_dim` such that query, key, and value have the same
number of features.
Examples::
        >>> multihead_attn = MultiheadAttention(embed_dim, num_heads)
>>> attn_output, attn_output_weights = multihead_attn(query, key, value)
"""
__constants__ = ['batch_first']
bias_k: Optional[mge.Tensor]
bias_v: Optional[mge.Tensor]
def __init__(self, embed_dim, num_heads, dropout=0., bias=True, add_bias_kv=False, add_zero_attn=False,
kdim=None, vdim=None, batch_first=False):
super(MultiheadAttention, self).__init__()
self.embed_dim = embed_dim
self.kdim = kdim if kdim is not None else embed_dim
self.vdim = vdim if vdim is not None else embed_dim
        # True by default
self._qkv_same_embed_dim = self.kdim == embed_dim and self.vdim == embed_dim
self.num_heads = num_heads
self.dropout = dropout
        # False by default
self.batch_first = batch_first
self.head_dim = embed_dim // num_heads
assert self.head_dim * num_heads == self.embed_dim, "embed_dim must be divisible by num_heads"
self.in_proj_weight = Parameter(F.zeros((3 * embed_dim, embed_dim)))
if bias:
self.in_proj_bias = Parameter(F.zeros((3 * embed_dim,)))
else:
self.in_proj_bias = None
self.out_proj = M.Linear(embed_dim, embed_dim, bias=bias)
if add_bias_kv:
self.bias_k = Parameter(F.zeros((1, 1, embed_dim)))
self.bias_v = Parameter(F.zeros((1, 1, embed_dim)))
else:
self.bias_k = self.bias_v = None
self.add_zero_attn = add_zero_attn
self._reset_parameters()
def _reset_parameters(self):
xavier_uniform_(self.in_proj_weight)
if self.in_proj_bias is not None:
zeros_(self.in_proj_bias)
| zeros_(self.out_proj.bias) | megengine.module.init.zeros_ |
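# A short sketch (assuming the MultiheadAttention class defined above with
# its default arguments) that checks the parameter shapes created in
# __init__; the sizes are illustrative.
import megengine.functional as F

mha = MultiheadAttention(embed_dim=16, num_heads=4)
assert mha.in_proj_weight.shape == (48, 16)          # packed (3 * E, E) weight
assert F.sum(F.abs(mha.in_proj_bias)).item() == 0.0  # biases reset to zero
assert mha.bias_k is None and mha.bias_v is None     # add_bias_kv defaults to False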
# Some code is modified from pytorch
# pytorch is licensed under BSD
# From PyTorch:
# Copyright (c) 2016- Facebook, Inc (<NAME>)
# Copyright (c) 2014- Facebook, Inc (<NAME>)
# Copyright (c) 2011-2014 Idiap Research Institute (<NAME>)
# Copyright (c) 2012-2014 Deepmind Technologies (Koray Kavukcuoglu)
# Copyright (c) 2011-2012 NEC Laboratories America (Koray Kavukcuoglu)
# Copyright (c) 2011-2013 NYU (<NAME>)
# Copyright (c) 2006-2010 NEC Laboratories America (<NAME>, <NAME>, <NAME>, <NAME>)
# Copyright (c) 2006 Idiap Research Institute (<NAME>)
# Copyright (c) 2001-2004 Idiap Research Institute (<NAME>, <NAME>, <NAME>)
# From Caffe2:
# Copyright (c) 2016-present, Facebook Inc. All rights reserved.
# All contributions by Facebook:
# Copyright (c) 2016 Facebook Inc.
# All contributions by Google:
# Copyright (c) 2015 Google Inc.
# All rights reserved.
# All contributions by Yang<NAME>:
# Copyright (c) 2015 Yang<NAME>
# All rights reserved.
# All contributions by Kakao Brain:
# Copyright 2019-2020 Kakao Brain
# All contributions from Caffe:
# Copyright(c) 2013, 2014, 2015, the respective contributors
# All rights reserved.
# All other contributions:
# Copyright(c) 2015, 2016 the respective contributors
# All rights reserved.
# Caffe2 uses a copyright model similar to Caffe: each contributor holds
# copyright over their contributions to Caffe2. The project versioning records
# all such contribution and copyright details. If a contributor wants to further
# mark their specific copyright on a particular contribution, they should
# indicate their copyright solely in the commit message of the change when it is
# committed.
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the names of Facebook, Deepmind Technologies, NYU, NEC Laboratories America
# and IDIAP Research Institute nor the names of its contributors may be
# used to endorse or promote products derived from this software without
# specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import megengine as mge
import megengine.module as M
from megengine import functional as F
from megengine import Parameter
from megengine.module.init import xavier_uniform_, zeros_
from typing import List, Tuple, Dict, Optional
import numpy as np
import warnings  # needed by the deprecation warning below
from .utility import safe_masked_fill, has_nan_or_inf
def multi_head_attention_forward(
query: mge.Tensor,
key: mge.Tensor,
value: mge.Tensor,
embed_dim_to_check: int,
num_heads: int,
in_proj_weight: mge.Tensor,
in_proj_bias: Optional[mge.Tensor],
bias_k: Optional[mge.Tensor],
bias_v: Optional[mge.Tensor],
add_zero_attn: bool,
dropout_p: float,
out_proj_weight: mge.Tensor,
out_proj_bias: Optional[mge.Tensor],
training: bool = True,
key_padding_mask: Optional[mge.Tensor] = None,
need_weights: bool = True,
attn_mask: Optional[mge.Tensor] = None,
use_separate_proj_weight: bool = False,
q_proj_weight: Optional[mge.Tensor] = None,
k_proj_weight: Optional[mge.Tensor] = None,
v_proj_weight: Optional[mge.Tensor] = None,
static_k: Optional[mge.Tensor] = None,
static_v: Optional[mge.Tensor] = None,
proj_only: bool = False
) -> Tuple[mge.Tensor, Optional[mge.Tensor]]:
r"""
Args:
query, key, value: map a query and a set of key-value pairs to an output.
See "Attention Is All You Need" for more details.
embed_dim_to_check: total dimension of the model.
num_heads: parallel attention heads.
in_proj_weight, in_proj_bias: input projection weight and bias.
bias_k, bias_v: bias of the key and value sequences to be added at dim=0.
add_zero_attn: add a new batch of zeros to the key and
value sequences at dim=1.
dropout_p: probability of an element to be zeroed.
out_proj_weight, out_proj_bias: the output projection weight and bias.
        training: apply dropout if ``True``.
        key_padding_mask: if provided, specified padding elements in the key will
            be ignored by the attention. This is a binary mask. When the value is True,
            the corresponding value on the attention layer will be filled with -inf.
        need_weights: output attn_output_weights.
        attn_mask: 2D or 3D mask that prevents attention to certain positions. A 2D mask is broadcast across
            the batch while a 3D mask allows a different mask for each entry in the batch.
        use_separate_proj_weight: the function accepts the projection weights for query, key,
            and value in different forms. If ``False``, in_proj_weight will be used, which is
a combination of q_proj_weight, k_proj_weight, v_proj_weight.
q_proj_weight, k_proj_weight, v_proj_weight, in_proj_bias: input projection weight and bias.
static_k, static_v: static key and value used for attention operators.
Shape:
Inputs:
- query: :math:`(L, N, E)` where L is the target sequence length, N is the batch size, E is
the embedding dimension.
- key: :math:`(S, N, E)`, where S is the source sequence length, N is the batch size, E is
the embedding dimension.
- value: :math:`(S, N, E)` where S is the source sequence length, N is the batch size, E is
the embedding dimension.
- key_padding_mask: :math:`(N, S)` where N is the batch size, S is the source sequence length.
If a ByteTensor is provided, the non-zero positions will be ignored while the zero positions
will be unchanged. If a BoolTensor is provided, the positions with the
value of ``True`` will be ignored while the position with the value of ``False`` will be unchanged.
- attn_mask: 2D mask :math:`(L, S)` where L is the target sequence length, S is the source sequence length.
3D mask :math:`(N*num_heads, L, S)` where N is the batch size, L is the target sequence length,
S is the source sequence length. attn_mask ensures that position i is allowed to attend the unmasked
positions. If a ByteTensor is provided, the non-zero positions are not allowed to attend
while the zero positions will be unchanged. If a BoolTensor is provided, positions with ``True``
are not allowed to attend while ``False`` values will be unchanged. If a FloatTensor
is provided, it will be added to the attention weight.
- static_k: :math:`(N*num_heads, S, E/num_heads)`, where S is the source sequence length,
N is the batch size, E is the embedding dimension. E/num_heads is the head dimension.
- static_v: :math:`(N*num_heads, S, E/num_heads)`, where S is the source sequence length,
N is the batch size, E is the embedding dimension. E/num_heads is the head dimension.
Outputs:
- attn_output: :math:`(L, N, E)` where L is the target sequence length, N is the batch size,
E is the embedding dimension.
        - attn_output_weights: :math:`(N, num_heads, L, S)` where N is the batch size, num_heads is the
          number of attention heads, L is the target sequence length, S is the source sequence length.
          Note: this port returns the per-head, pre-softmax weights, not a head-averaged :math:`(N, L, S)` tensor.
"""
tgt_len, bsz, embed_dim = query.shape
assert embed_dim == embed_dim_to_check
# allow MHA to have different sizes for the feature dimension
assert key.shape[0] == value.shape[0] and key.shape[1] == value.shape[1]
if isinstance(embed_dim, mge.Tensor):
# embed_dim can be a tensor when JIT tracing
#head_dim = embed_dim.div(num_heads, rounding_mode='trunc')
        # NOTE: for positive operands, floor_div is equivalent to trunc_div (MegEngine provides only floor_div)
head_dim = F.floor_div(embed_dim, num_heads)
else:
head_dim = embed_dim // num_heads
assert head_dim * num_heads == embed_dim, "embed_dim must be divisible by num_heads"
scaling = float(head_dim) ** -0.5
assert not use_separate_proj_weight
assert need_weights
assert attn_mask is None
assert bias_k is None and bias_v is None
assert not add_zero_attn
    # Inline in_proj: take the query slice of in_proj_weight and in_proj_bias
_b = in_proj_bias
_start = 0
_end = embed_dim
_w = in_proj_weight[_start:_end, :]
if _b is not None:
_b = _b[_start:_end]
q = F.linear(query, _w, _b)
    # Inline in_proj: take the key slice of in_proj_weight and in_proj_bias
_b = in_proj_bias
_start = embed_dim
_end = embed_dim * 2
_w = in_proj_weight[_start:_end, :]
if _b is not None:
_b = _b[_start:_end]
k = F.linear(key, _w, _b)
    # Inline in_proj: take the value slice of in_proj_weight and in_proj_bias
_b = in_proj_bias
_start = embed_dim * 2
_end = None
_w = in_proj_weight[_start:, :]
if _b is not None:
_b = _b[_start:]
v = F.linear(value, _w, _b)
q = q * scaling
raw_v = v
raw_v = raw_v.reshape(-1, bsz, num_heads, head_dim)
if proj_only:
return query, None, raw_v
# convert ByteTensor key_padding_mask to bool
if key_padding_mask is not None and key_padding_mask.dtype == np.uint8:
warnings.warn(
"Byte tensor for key_padding_mask in nn.MultiheadAttention is deprecated. Use bool tensor instead."
)
        key_padding_mask = key_padding_mask.astype(bool)  # np.bool is removed in NumPy >= 1.24
#def _pad_last_dim_right_only(tensor):
# '''
# To replace with torch.nn.functional.pad(tensor, (0, 1))
# '''
# return F.concat([tensor, F.expand_dims(F.zeros(tensor.shape[:-1]), axis=-1)], axis=-1)
#q = q.contiguous().view(tgt_len, bsz * num_heads, head_dim).transpose(0, 1)
q = q.reshape(tgt_len, bsz * num_heads, head_dim).transpose(1, 0, 2)
if k is not None:
#k = k.contiguous().view(-1, bsz * num_heads, head_dim).transpose(0, 1)
k = k.reshape(-1, bsz * num_heads, head_dim).transpose(1, 0, 2)
if v is not None:
#v = v.contiguous().view(-1, bsz * num_heads, head_dim).transpose(0, 1)
v = v.reshape(-1, bsz * num_heads, head_dim).transpose(1, 0, 2)
if static_k is not None:
assert static_k.shape[0] == bsz * num_heads
assert static_k.shape[2] == head_dim
k = static_k
if static_v is not None:
assert static_v.shape[0] == bsz * num_heads
assert static_v.shape[2] == head_dim
v = static_v
src_len = k.shape[1]
if key_padding_mask is not None:
assert key_padding_mask.shape[1] == src_len
#attn_output_weights = torch.bmm(q, k.transpose(1, 2))
attn_output_weights = F.matmul(q, k.transpose(0, 2, 1))
assert list(attn_output_weights.shape) == [bsz * num_heads, tgt_len, src_len]
if key_padding_mask is not None:
attn_output_weights = attn_output_weights.reshape(bsz, num_heads, tgt_len, src_len)
key_padding_mask = F.expand_dims(F.expand_dims(key_padding_mask, axis=1), axis=2)
attn_output_weights = safe_masked_fill(attn_output_weights, key_padding_mask, float("-inf"))
attn_output_weights = attn_output_weights.reshape(bsz * num_heads, tgt_len, src_len)
attn_output_weights_no_softmax = attn_output_weights
attn_output_weights = F.softmax(attn_output_weights, axis=-1)
attn_output_weights = F.dropout(attn_output_weights, dropout_p, training=training)
attn_output = F.matmul(attn_output_weights, v)
assert attn_output.shape == (bsz * num_heads, tgt_len, head_dim)
attn_output = F.transpose(attn_output, (1, 0, 2)).reshape(tgt_len, bsz, embed_dim)
attn_output = F.nn.linear(attn_output, out_proj_weight, out_proj_bias)
    # expose per-head attention weights; note this port returns the pre-softmax scores without averaging over heads
attn_output_weights = attn_output_weights_no_softmax.reshape(bsz, num_heads, tgt_len, src_len)
return attn_output, attn_output_weights, raw_v
class MultiheadAttention(M.Module):
r"""Allows the model to jointly attend to information
from different representation subspaces.
See `Attention Is All You Need <https://arxiv.org/abs/1706.03762>`_
.. math::
\text{MultiHead}(Q, K, V) = \text{Concat}(head_1,\dots,head_h)W^O
where :math:`head_i = \text{Attention}(QW_i^Q, KW_i^K, VW_i^V)`.
Args:
embed_dim: total dimension of the model.
num_heads: parallel attention heads.
dropout: a Dropout layer on attn_output_weights. Default: 0.0.
bias: add bias as module parameter. Default: True.
add_bias_kv: add bias to the key and value sequences at dim=0.
add_zero_attn: add a new batch of zeros to the key and
value sequences at dim=1.
kdim: total number of features in key. Default: None.
vdim: total number of features in value. Default: None.
batch_first: If ``True``, then the input and output tensors are provided
as (batch, seq, feature). Default: ``False`` (seq, batch, feature).
Note that if :attr:`kdim` and :attr:`vdim` are None, they will be set
to :attr:`embed_dim` such that query, key, and value have the same
number of features.
Examples::
        >>> multihead_attn = MultiheadAttention(embed_dim, num_heads)
>>> attn_output, attn_output_weights = multihead_attn(query, key, value)
"""
__constants__ = ['batch_first']
bias_k: Optional[mge.Tensor]
bias_v: Optional[mge.Tensor]
def __init__(self, embed_dim, num_heads, dropout=0., bias=True, add_bias_kv=False, add_zero_attn=False,
kdim=None, vdim=None, batch_first=False):
super(MultiheadAttention, self).__init__()
self.embed_dim = embed_dim
self.kdim = kdim if kdim is not None else embed_dim
self.vdim = vdim if vdim is not None else embed_dim
        # True by default
self._qkv_same_embed_dim = self.kdim == embed_dim and self.vdim == embed_dim
self.num_heads = num_heads
self.dropout = dropout
        # False by default
self.batch_first = batch_first
self.head_dim = embed_dim // num_heads
assert self.head_dim * num_heads == self.embed_dim, "embed_dim must be divisible by num_heads"
self.in_proj_weight = Parameter(F.zeros((3 * embed_dim, embed_dim)))
if bias:
self.in_proj_bias = Parameter( | F.zeros((3 * embed_dim,)) | megengine.functional.zeros |
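# For example, with embed_dim = 512 the packed parameters above come out as
# in_proj_weight of shape (1536, 512) and in_proj_bias of shape (1536,).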
# Some code is modified from pytorch
# pytorch is licensed under BSD
# From PyTorch:
# Copyright (c) 2016- Facebook, Inc (<NAME>)
# Copyright (c) 2014- Facebook, Inc (<NAME>)
# Copyright (c) 2011-2014 Idiap Research Institute (<NAME>)
# Copyright (c) 2012-2014 Deepmind Technologies (Koray Kavukcuoglu)
# Copyright (c) 2011-2012 NEC Laboratories America (Koray Kavukcuoglu)
# Copyright (c) 2011-2013 NYU (<NAME>)
# Copyright (c) 2006-2010 NEC Laboratories America (<NAME>, <NAME>, <NAME>, <NAME>)
# Copyright (c) 2006 Idiap Research Institute (<NAME>)
# Copyright (c) 2001-2004 Idiap Research Institute (<NAME>, <NAME>, <NAME>)
# From Caffe2:
# Copyright (c) 2016-present, Facebook Inc. All rights reserved.
# All contributions by Facebook:
# Copyright (c) 2016 Facebook Inc.
# All contributions by Google:
# Copyright (c) 2015 Google Inc.
# All rights reserved.
# All contributions by Yang<NAME>:
# Copyright (c) 2015 Yang<NAME>
# All rights reserved.
# All contributions by Kakao Brain:
# Copyright 2019-2020 Kakao Brain
# All contributions from Caffe:
# Copyright(c) 2013, 2014, 2015, the respective contributors
# All rights reserved.
# All other contributions:
# Copyright(c) 2015, 2016 the respective contributors
# All rights reserved.
# Caffe2 uses a copyright model similar to Caffe: each contributor holds
# copyright over their contributions to Caffe2. The project versioning records
# all such contribution and copyright details. If a contributor wants to further
# mark their specific copyright on a particular contribution, they should
# indicate their copyright solely in the commit message of the change when it is
# committed.
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the names of Facebook, Deepmind Technologies, NYU, NEC Laboratories America
# and IDIAP Research Institute nor the names of its contributors may be
# used to endorse or promote products derived from this software without
# specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import megengine as mge
import megengine.module as M
from megengine import functional as F
from megengine import Parameter
from megengine.module.init import xavier_uniform_, zeros_
from typing import List, Tuple, Dict, Optional
import numpy as np
import warnings  # needed by the deprecation warning below
from .utility import safe_masked_fill, has_nan_or_inf
def multi_head_attention_forward(
query: mge.Tensor,
key: mge.Tensor,
value: mge.Tensor,
embed_dim_to_check: int,
num_heads: int,
in_proj_weight: mge.Tensor,
in_proj_bias: Optional[mge.Tensor],
bias_k: Optional[mge.Tensor],
bias_v: Optional[mge.Tensor],
add_zero_attn: bool,
dropout_p: float,
out_proj_weight: mge.Tensor,
out_proj_bias: Optional[mge.Tensor],
training: bool = True,
key_padding_mask: Optional[mge.Tensor] = None,
need_weights: bool = True,
attn_mask: Optional[mge.Tensor] = None,
use_separate_proj_weight: bool = False,
q_proj_weight: Optional[mge.Tensor] = None,
k_proj_weight: Optional[mge.Tensor] = None,
v_proj_weight: Optional[mge.Tensor] = None,
static_k: Optional[mge.Tensor] = None,
static_v: Optional[mge.Tensor] = None,
proj_only: bool = False
) -> Tuple[mge.Tensor, Optional[mge.Tensor]]:
r"""
Args:
query, key, value: map a query and a set of key-value pairs to an output.
See "Attention Is All You Need" for more details.
embed_dim_to_check: total dimension of the model.
num_heads: parallel attention heads.
in_proj_weight, in_proj_bias: input projection weight and bias.
bias_k, bias_v: bias of the key and value sequences to be added at dim=0.
add_zero_attn: add a new batch of zeros to the key and
value sequences at dim=1.
dropout_p: probability of an element to be zeroed.
out_proj_weight, out_proj_bias: the output projection weight and bias.
        training: apply dropout if ``True``.
        key_padding_mask: if provided, specified padding elements in the key will
            be ignored by the attention. This is a binary mask. When the value is True,
            the corresponding value on the attention layer will be filled with -inf.
        need_weights: output attn_output_weights.
        attn_mask: 2D or 3D mask that prevents attention to certain positions. A 2D mask is broadcast across
            the batch while a 3D mask allows a different mask for each entry in the batch.
        use_separate_proj_weight: the function accepts the projection weights for query, key,
            and value in different forms. If ``False``, in_proj_weight will be used, which is
a combination of q_proj_weight, k_proj_weight, v_proj_weight.
q_proj_weight, k_proj_weight, v_proj_weight, in_proj_bias: input projection weight and bias.
static_k, static_v: static key and value used for attention operators.
Shape:
Inputs:
- query: :math:`(L, N, E)` where L is the target sequence length, N is the batch size, E is
the embedding dimension.
- key: :math:`(S, N, E)`, where S is the source sequence length, N is the batch size, E is
the embedding dimension.
- value: :math:`(S, N, E)` where S is the source sequence length, N is the batch size, E is
the embedding dimension.
- key_padding_mask: :math:`(N, S)` where N is the batch size, S is the source sequence length.
If a ByteTensor is provided, the non-zero positions will be ignored while the zero positions
will be unchanged. If a BoolTensor is provided, the positions with the
value of ``True`` will be ignored while the position with the value of ``False`` will be unchanged.
- attn_mask: 2D mask :math:`(L, S)` where L is the target sequence length, S is the source sequence length.
3D mask :math:`(N*num_heads, L, S)` where N is the batch size, L is the target sequence length,
S is the source sequence length. attn_mask ensures that position i is allowed to attend the unmasked
positions. If a ByteTensor is provided, the non-zero positions are not allowed to attend
while the zero positions will be unchanged. If a BoolTensor is provided, positions with ``True``
are not allowed to attend while ``False`` values will be unchanged. If a FloatTensor
is provided, it will be added to the attention weight.
- static_k: :math:`(N*num_heads, S, E/num_heads)`, where S is the source sequence length,
N is the batch size, E is the embedding dimension. E/num_heads is the head dimension.
- static_v: :math:`(N*num_heads, S, E/num_heads)`, where S is the source sequence length,
N is the batch size, E is the embedding dimension. E/num_heads is the head dimension.
Outputs:
- attn_output: :math:`(L, N, E)` where L is the target sequence length, N is the batch size,
E is the embedding dimension.
        - attn_output_weights: :math:`(N, num_heads, L, S)` where N is the batch size, num_heads is the
          number of attention heads, L is the target sequence length, S is the source sequence length.
          Note: this port returns the per-head, pre-softmax weights, not a head-averaged :math:`(N, L, S)` tensor.
"""
tgt_len, bsz, embed_dim = query.shape
assert embed_dim == embed_dim_to_check
# allow MHA to have different sizes for the feature dimension
assert key.shape[0] == value.shape[0] and key.shape[1] == value.shape[1]
if isinstance(embed_dim, mge.Tensor):
# embed_dim can be a tensor when JIT tracing
#head_dim = embed_dim.div(num_heads, rounding_mode='trunc')
        # NOTE: for positive operands, floor_div is equivalent to trunc_div (MegEngine provides only floor_div)
head_dim = F.floor_div(embed_dim, num_heads)
else:
head_dim = embed_dim // num_heads
assert head_dim * num_heads == embed_dim, "embed_dim must be divisible by num_heads"
scaling = float(head_dim) ** -0.5
assert not use_separate_proj_weight
assert need_weights
assert attn_mask is None
assert bias_k is None and bias_v is None
assert not add_zero_attn
    # Inline in_proj: take the query slice of in_proj_weight and in_proj_bias
_b = in_proj_bias
_start = 0
_end = embed_dim
_w = in_proj_weight[_start:_end, :]
if _b is not None:
_b = _b[_start:_end]
q = F.linear(query, _w, _b)
    # Inline in_proj: take the key slice of in_proj_weight and in_proj_bias
_b = in_proj_bias
_start = embed_dim
_end = embed_dim * 2
_w = in_proj_weight[_start:_end, :]
if _b is not None:
_b = _b[_start:_end]
k = F.linear(key, _w, _b)
    # Inline in_proj: take the value slice of in_proj_weight and in_proj_bias
_b = in_proj_bias
_start = embed_dim * 2
_end = None
_w = in_proj_weight[_start:, :]
if _b is not None:
_b = _b[_start:]
v = F.linear(value, _w, _b)
q = q * scaling
raw_v = v
raw_v = raw_v.reshape(-1, bsz, num_heads, head_dim)
if proj_only:
return query, None, raw_v
# convert ByteTensor key_padding_mask to bool
if key_padding_mask is not None and key_padding_mask.dtype == np.uint8:
warnings.warn(
"Byte tensor for key_padding_mask in nn.MultiheadAttention is deprecated. Use bool tensor instead."
)
        key_padding_mask = key_padding_mask.astype(bool)  # np.bool is removed in NumPy >= 1.24
#def _pad_last_dim_right_only(tensor):
# '''
# To replace with torch.nn.functional.pad(tensor, (0, 1))
# '''
# return F.concat([tensor, F.expand_dims(F.zeros(tensor.shape[:-1]), axis=-1)], axis=-1)
#q = q.contiguous().view(tgt_len, bsz * num_heads, head_dim).transpose(0, 1)
q = q.reshape(tgt_len, bsz * num_heads, head_dim).transpose(1, 0, 2)
if k is not None:
#k = k.contiguous().view(-1, bsz * num_heads, head_dim).transpose(0, 1)
k = k.reshape(-1, bsz * num_heads, head_dim).transpose(1, 0, 2)
if v is not None:
#v = v.contiguous().view(-1, bsz * num_heads, head_dim).transpose(0, 1)
v = v.reshape(-1, bsz * num_heads, head_dim).transpose(1, 0, 2)
if static_k is not None:
assert static_k.shape[0] == bsz * num_heads
assert static_k.shape[2] == head_dim
k = static_k
if static_v is not None:
assert static_v.shape[0] == bsz * num_heads
assert static_v.shape[2] == head_dim
v = static_v
src_len = k.shape[1]
if key_padding_mask is not None:
assert key_padding_mask.shape[1] == src_len
#attn_output_weights = torch.bmm(q, k.transpose(1, 2))
attn_output_weights = F.matmul(q, k.transpose(0, 2, 1))
assert list(attn_output_weights.shape) == [bsz * num_heads, tgt_len, src_len]
if key_padding_mask is not None:
attn_output_weights = attn_output_weights.reshape(bsz, num_heads, tgt_len, src_len)
key_padding_mask = F.expand_dims(F.expand_dims(key_padding_mask, axis=1), axis=2)
attn_output_weights = safe_masked_fill(attn_output_weights, key_padding_mask, float("-inf"))
attn_output_weights = attn_output_weights.reshape(bsz * num_heads, tgt_len, src_len)
attn_output_weights_no_softmax = attn_output_weights
attn_output_weights = F.softmax(attn_output_weights, axis=-1)
attn_output_weights = F.dropout(attn_output_weights, dropout_p, training=training)
attn_output = F.matmul(attn_output_weights, v)
assert attn_output.shape == (bsz * num_heads, tgt_len, head_dim)
attn_output = F.transpose(attn_output, (1, 0, 2)).reshape(tgt_len, bsz, embed_dim)
attn_output = F.nn.linear(attn_output, out_proj_weight, out_proj_bias)
    # reshape pre-softmax attention logits to (N, H, L, S); note that heads are not averaged here
attn_output_weights = attn_output_weights_no_softmax.reshape(bsz, num_heads, tgt_len, src_len)
return attn_output, attn_output_weights, raw_v
class MultiheadAttention(M.Module):
r"""Allows the model to jointly attend to information
from different representation subspaces.
See `Attention Is All You Need <https://arxiv.org/abs/1706.03762>`_
.. math::
\text{MultiHead}(Q, K, V) = \text{Concat}(head_1,\dots,head_h)W^O
where :math:`head_i = \text{Attention}(QW_i^Q, KW_i^K, VW_i^V)`.
Args:
embed_dim: total dimension of the model.
num_heads: parallel attention heads.
dropout: a Dropout layer on attn_output_weights. Default: 0.0.
bias: add bias as module parameter. Default: True.
add_bias_kv: add bias to the key and value sequences at dim=0.
add_zero_attn: add a new batch of zeros to the key and
value sequences at dim=1.
kdim: total number of features in key. Default: None.
vdim: total number of features in value. Default: None.
batch_first: If ``True``, then the input and output tensors are provided
as (batch, seq, feature). Default: ``False`` (seq, batch, feature).
Note that if :attr:`kdim` and :attr:`vdim` are None, they will be set
to :attr:`embed_dim` such that query, key, and value have the same
number of features.
Examples::
        >>> multihead_attn = MultiheadAttention(embed_dim, num_heads)
>>> attn_output, attn_output_weights = multihead_attn(query, key, value)
"""
__constants__ = ['batch_first']
bias_k: Optional[mge.Tensor]
bias_v: Optional[mge.Tensor]
def __init__(self, embed_dim, num_heads, dropout=0., bias=True, add_bias_kv=False, add_zero_attn=False,
kdim=None, vdim=None, batch_first=False):
super(MultiheadAttention, self).__init__()
self.embed_dim = embed_dim
self.kdim = kdim if kdim is not None else embed_dim
self.vdim = vdim if vdim is not None else embed_dim
        # True by default
self._qkv_same_embed_dim = self.kdim == embed_dim and self.vdim == embed_dim
self.num_heads = num_heads
self.dropout = dropout
        # False by default
self.batch_first = batch_first
self.head_dim = embed_dim // num_heads
assert self.head_dim * num_heads == self.embed_dim, "embed_dim must be divisible by num_heads"
self.in_proj_weight = Parameter(F.zeros((3 * embed_dim, embed_dim)))
if bias:
self.in_proj_bias = Parameter(F.zeros((3 * embed_dim,)))
else:
self.in_proj_bias = None
self.out_proj = M.Linear(embed_dim, embed_dim, bias=bias)
if add_bias_kv:
self.bias_k = Parameter( | F.zeros((1, 1, embed_dim)) | megengine.functional.zeros |
# Some code is modified from pytorch
# pytorch is licensed under BSD
# From PyTorch:
# Copyright (c) 2016- Facebook, Inc (<NAME>)
# Copyright (c) 2014- Facebook, Inc (<NAME>)
# Copyright (c) 2011-2014 Idiap Research Institute (<NAME>)
# Copyright (c) 2012-2014 Deepmind Technologies (Koray Kavukcuoglu)
# Copyright (c) 2011-2012 NEC Laboratories America (Koray Kavukcuoglu)
# Copyright (c) 2011-2013 NYU (<NAME>)
# Copyright (c) 2006-2010 NEC Laboratories America (<NAME>, <NAME>, <NAME>, <NAME>)
# Copyright (c) 2006 Idiap Research Institute (<NAME>)
# Copyright (c) 2001-2004 Idiap Research Institute (<NAME>, <NAME>, <NAME>)
# From Caffe2:
# Copyright (c) 2016-present, Facebook Inc. All rights reserved.
# All contributions by Facebook:
# Copyright (c) 2016 Facebook Inc.
# All contributions by Google:
# Copyright (c) 2015 Google Inc.
# All rights reserved.
# All contributions by Yang<NAME>:
# Copyright (c) 2015 Yang<NAME>
# All rights reserved.
# All contributions by Kakao Brain:
# Copyright 2019-2020 Kakao Brain
# All contributions from Caffe:
# Copyright(c) 2013, 2014, 2015, the respective contributors
# All rights reserved.
# All other contributions:
# Copyright(c) 2015, 2016 the respective contributors
# All rights reserved.
# Caffe2 uses a copyright model similar to Caffe: each contributor holds
# copyright over their contributions to Caffe2. The project versioning records
# all such contribution and copyright details. If a contributor wants to further
# mark their specific copyright on a particular contribution, they should
# indicate their copyright solely in the commit message of the change when it is
# committed.
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the names of Facebook, Deepmind Technologies, NYU, NEC Laboratories America
# and IDIAP Research Institute nor the names of its contributors may be
# used to endorse or promote products derived from this software without
# specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import megengine as mge
import megengine.module as M
from megengine import functional as F
from megengine import Parameter
from megengine.module.init import xavier_uniform_, zeros_
from typing import List, Tuple, Dict, Optional
import numpy as np
import warnings
from .utility import safe_masked_fill, has_nan_or_inf
def multi_head_attention_forward(
query: mge.Tensor,
key: mge.Tensor,
value: mge.Tensor,
embed_dim_to_check: int,
num_heads: int,
in_proj_weight: mge.Tensor,
in_proj_bias: Optional[mge.Tensor],
bias_k: Optional[mge.Tensor],
bias_v: Optional[mge.Tensor],
add_zero_attn: bool,
dropout_p: float,
out_proj_weight: mge.Tensor,
out_proj_bias: Optional[mge.Tensor],
training: bool = True,
key_padding_mask: Optional[mge.Tensor] = None,
need_weights: bool = True,
attn_mask: Optional[mge.Tensor] = None,
use_separate_proj_weight: bool = False,
q_proj_weight: Optional[mge.Tensor] = None,
k_proj_weight: Optional[mge.Tensor] = None,
v_proj_weight: Optional[mge.Tensor] = None,
static_k: Optional[mge.Tensor] = None,
static_v: Optional[mge.Tensor] = None,
proj_only: bool = False
) -> Tuple[mge.Tensor, Optional[mge.Tensor]]:
r"""
Args:
query, key, value: map a query and a set of key-value pairs to an output.
See "Attention Is All You Need" for more details.
embed_dim_to_check: total dimension of the model.
num_heads: parallel attention heads.
in_proj_weight, in_proj_bias: input projection weight and bias.
bias_k, bias_v: bias of the key and value sequences to be added at dim=0.
add_zero_attn: add a new batch of zeros to the key and
value sequences at dim=1.
dropout_p: probability of an element to be zeroed.
out_proj_weight, out_proj_bias: the output projection weight and bias.
training: apply dropout if is ``True``.
key_padding_mask: if provided, specified padding elements in the key will
            be ignored by the attention. This is a binary mask. Where the value is True,
            the corresponding attention logits will be filled with -inf.
need_weights: output attn_output_weights.
attn_mask: 2D or 3D mask that prevents attention to certain positions. A 2D mask will be broadcasted for all
            the batches while a 3D mask allows specifying a different mask for the entries of each batch.
use_separate_proj_weight: the function accept the proj. weights for query, key,
and value in different forms. If false, in_proj_weight will be used, which is
a combination of q_proj_weight, k_proj_weight, v_proj_weight.
q_proj_weight, k_proj_weight, v_proj_weight, in_proj_bias: input projection weight and bias.
static_k, static_v: static key and value used for attention operators.
Shape:
Inputs:
- query: :math:`(L, N, E)` where L is the target sequence length, N is the batch size, E is
the embedding dimension.
- key: :math:`(S, N, E)`, where S is the source sequence length, N is the batch size, E is
the embedding dimension.
- value: :math:`(S, N, E)` where S is the source sequence length, N is the batch size, E is
the embedding dimension.
- key_padding_mask: :math:`(N, S)` where N is the batch size, S is the source sequence length.
If a ByteTensor is provided, the non-zero positions will be ignored while the zero positions
will be unchanged. If a BoolTensor is provided, the positions with the
value of ``True`` will be ignored while the position with the value of ``False`` will be unchanged.
- attn_mask: 2D mask :math:`(L, S)` where L is the target sequence length, S is the source sequence length.
3D mask :math:`(N*num_heads, L, S)` where N is the batch size, L is the target sequence length,
S is the source sequence length. attn_mask ensures that position i is allowed to attend the unmasked
positions. If a ByteTensor is provided, the non-zero positions are not allowed to attend
while the zero positions will be unchanged. If a BoolTensor is provided, positions with ``True``
are not allowed to attend while ``False`` values will be unchanged. If a FloatTensor
is provided, it will be added to the attention weight.
- static_k: :math:`(N*num_heads, S, E/num_heads)`, where S is the source sequence length,
N is the batch size, E is the embedding dimension. E/num_heads is the head dimension.
- static_v: :math:`(N*num_heads, S, E/num_heads)`, where S is the source sequence length,
N is the batch size, E is the embedding dimension. E/num_heads is the head dimension.
Outputs:
- attn_output: :math:`(L, N, E)` where L is the target sequence length, N is the batch size,
E is the embedding dimension.
- attn_output_weights: :math:`(N, L, S)` where N is the batch size,
L is the target sequence length, S is the source sequence length.
"""
tgt_len, bsz, embed_dim = query.shape
assert embed_dim == embed_dim_to_check
# allow MHA to have different sizes for the feature dimension
assert key.shape[0] == value.shape[0] and key.shape[1] == value.shape[1]
if isinstance(embed_dim, mge.Tensor):
# embed_dim can be a tensor when JIT tracing
        #head_dim = embed_dim.div(num_heads, rounding_mode='trunc')
        #NOTE: for positive numbers, floor_div is equivalent to trunc_div (MegEngine only provides floor_div)
head_dim = F.floor_div(embed_dim, num_heads)
else:
head_dim = embed_dim // num_heads
assert head_dim * num_heads == embed_dim, "embed_dim must be divisible by num_heads"
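    # scale queries by 1/sqrt(head_dim), the standard scaled dot-product attention factor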
scaling = float(head_dim) ** -0.5
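    # only the packed in_proj path with mandatory attention weights is supported; the asserts below guard the rest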
assert not use_separate_proj_weight
assert need_weights
assert attn_mask is None
assert bias_k is None and bias_v is None
assert not add_zero_attn
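    # the packed in_proj weight stacks the q, k and v projections as rows [0:E), [E:2E) and [2E:3E)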
# This is inline in_proj function with in_proj_weight and in_proj_bias
_b = in_proj_bias
_start = 0
_end = embed_dim
_w = in_proj_weight[_start:_end, :]
if _b is not None:
_b = _b[_start:_end]
q = F.linear(query, _w, _b)
# This is inline in_proj function with in_proj_weight and in_proj_bias
_b = in_proj_bias
_start = embed_dim
_end = embed_dim * 2
_w = in_proj_weight[_start:_end, :]
if _b is not None:
_b = _b[_start:_end]
k = F.linear(key, _w, _b)
# This is inline in_proj function with in_proj_weight and in_proj_bias
_b = in_proj_bias
_start = embed_dim * 2
_end = None
_w = in_proj_weight[_start:, :]
if _b is not None:
_b = _b[_start:]
v = F.linear(value, _w, _b)
q = q * scaling
raw_v = v
raw_v = raw_v.reshape(-1, bsz, num_heads, head_dim)
if proj_only:
return query, None, raw_v
# convert ByteTensor key_padding_mask to bool
if key_padding_mask is not None and key_padding_mask.dtype == np.uint8:
warnings.warn(
"Byte tensor for key_padding_mask in nn.MultiheadAttention is deprecated. Use bool tensor instead."
)
        key_padding_mask = key_padding_mask.astype(bool)
#def _pad_last_dim_right_only(tensor):
# '''
# To replace with torch.nn.functional.pad(tensor, (0, 1))
# '''
# return F.concat([tensor, F.expand_dims(F.zeros(tensor.shape[:-1]), axis=-1)], axis=-1)
#q = q.contiguous().view(tgt_len, bsz * num_heads, head_dim).transpose(0, 1)
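    # fold heads into the batch dimension: (seq, N, E) -> (N*H, seq, head_dim) for batched matmul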
q = q.reshape(tgt_len, bsz * num_heads, head_dim).transpose(1, 0, 2)
if k is not None:
#k = k.contiguous().view(-1, bsz * num_heads, head_dim).transpose(0, 1)
k = k.reshape(-1, bsz * num_heads, head_dim).transpose(1, 0, 2)
if v is not None:
#v = v.contiguous().view(-1, bsz * num_heads, head_dim).transpose(0, 1)
v = v.reshape(-1, bsz * num_heads, head_dim).transpose(1, 0, 2)
if static_k is not None:
assert static_k.shape[0] == bsz * num_heads
assert static_k.shape[2] == head_dim
k = static_k
if static_v is not None:
assert static_v.shape[0] == bsz * num_heads
assert static_v.shape[2] == head_dim
v = static_v
src_len = k.shape[1]
if key_padding_mask is not None:
assert key_padding_mask.shape[1] == src_len
#attn_output_weights = torch.bmm(q, k.transpose(1, 2))
attn_output_weights = F.matmul(q, k.transpose(0, 2, 1))
assert list(attn_output_weights.shape) == [bsz * num_heads, tgt_len, src_len]
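    # broadcast the (N, S) padding mask over heads and target positions; masked logits become -inf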
if key_padding_mask is not None:
attn_output_weights = attn_output_weights.reshape(bsz, num_heads, tgt_len, src_len)
key_padding_mask = F.expand_dims(F.expand_dims(key_padding_mask, axis=1), axis=2)
attn_output_weights = safe_masked_fill(attn_output_weights, key_padding_mask, float("-inf"))
attn_output_weights = attn_output_weights.reshape(bsz * num_heads, tgt_len, src_len)
attn_output_weights_no_softmax = attn_output_weights
attn_output_weights = F.softmax(attn_output_weights, axis=-1)
attn_output_weights = F.dropout(attn_output_weights, dropout_p, training=training)
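    # weighted sum of values per head: (N*H, L, S) @ (N*H, S, head_dim) -> (N*H, L, head_dim)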
attn_output = F.matmul(attn_output_weights, v)
assert attn_output.shape == (bsz * num_heads, tgt_len, head_dim)
attn_output = F.transpose(attn_output, (1, 0, 2)).reshape(tgt_len, bsz, embed_dim)
attn_output = F.nn.linear(attn_output, out_proj_weight, out_proj_bias)
    # reshape pre-softmax attention logits to (N, H, L, S); note that heads are not averaged here
attn_output_weights = attn_output_weights_no_softmax.reshape(bsz, num_heads, tgt_len, src_len)
return attn_output, attn_output_weights, raw_v
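# Usage sketch (illustrative only, not from the original repo): with L=tgt_len, S=src_len,
# N=batch, E=embed_dim, H=num_heads and W_in the packed (3E, E) projection weight:
#   out, logits, raw_v = multi_head_attention_forward(q, k, v, E, H, W_in, b_in,
#                                                     None, None, False, 0.1, W_out, b_out)
#   out: (L, N, E); logits: (N, H, L, S), pre-softmax; raw_v: (S, N, H, E // H)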
class MultiheadAttention(M.Module):
r"""Allows the model to jointly attend to information
from different representation subspaces.
See `Attention Is All You Need <https://arxiv.org/abs/1706.03762>`_
.. math::
\text{MultiHead}(Q, K, V) = \text{Concat}(head_1,\dots,head_h)W^O
where :math:`head_i = \text{Attention}(QW_i^Q, KW_i^K, VW_i^V)`.
Args:
embed_dim: total dimension of the model.
num_heads: parallel attention heads.
dropout: a Dropout layer on attn_output_weights. Default: 0.0.
bias: add bias as module parameter. Default: True.
add_bias_kv: add bias to the key and value sequences at dim=0.
add_zero_attn: add a new batch of zeros to the key and
value sequences at dim=1.
kdim: total number of features in key. Default: None.
vdim: total number of features in value. Default: None.
batch_first: If ``True``, then the input and output tensors are provided
as (batch, seq, feature). Default: ``False`` (seq, batch, feature).
Note that if :attr:`kdim` and :attr:`vdim` are None, they will be set
to :attr:`embed_dim` such that query, key, and value have the same
number of features.
Examples::
        >>> multihead_attn = MultiheadAttention(embed_dim, num_heads)
>>> attn_output, attn_output_weights = multihead_attn(query, key, value)
"""
__constants__ = ['batch_first']
bias_k: Optional[mge.Tensor]
bias_v: Optional[mge.Tensor]
def __init__(self, embed_dim, num_heads, dropout=0., bias=True, add_bias_kv=False, add_zero_attn=False,
kdim=None, vdim=None, batch_first=False):
super(MultiheadAttention, self).__init__()
self.embed_dim = embed_dim
self.kdim = kdim if kdim is not None else embed_dim
self.vdim = vdim if vdim is not None else embed_dim
        # True by default
self._qkv_same_embed_dim = self.kdim == embed_dim and self.vdim == embed_dim
self.num_heads = num_heads
self.dropout = dropout
        # False by default
self.batch_first = batch_first
self.head_dim = embed_dim // num_heads
assert self.head_dim * num_heads == self.embed_dim, "embed_dim must be divisible by num_heads"
self.in_proj_weight = Parameter(F.zeros((3 * embed_dim, embed_dim)))
if bias:
self.in_proj_bias = Parameter(F.zeros((3 * embed_dim,)))
else:
self.in_proj_bias = None
self.out_proj = M.Linear(embed_dim, embed_dim, bias=bias)
if add_bias_kv:
self.bias_k = Parameter(F.zeros((1, 1, embed_dim)))
self.bias_v = Parameter( | F.zeros((1, 1, embed_dim)) | megengine.functional.zeros |
import megengine as mge
import megengine.module as nn
import megengine.functional as F
from model.module import Encoder, Fusion, Decoder, Regression
from common import se3, quaternion
import math
class OMNet(nn.Module):
def __init__(self, params):
super(OMNet, self).__init__()
self.num_iter = params.titer
self.encoder = [Encoder() for _ in range(self.num_iter)]
self.fusion = [Fusion() for _ in range(self.num_iter)]
self.decoder = [Decoder() for _ in range(self.num_iter)]
self.regression = [Regression() for _ in range(self.num_iter)]
self.overlap_dist = params.overlap_dist
for m in self.modules():
if isinstance(m, nn.Conv1d) or isinstance(m, nn.Linear):
nn.init.msra_normal_(m.weight, a=math.sqrt(5))
if m.bias is not None:
fan_in, _ = nn.init.calculate_fan_in_and_fan_out(m.weight)
bound = 1 / math.sqrt(fan_in)
nn.init.uniform_(m.bias, -bound, bound)
# elif isinstance(m, nn.BatchNorm1d):
# nn.init.ones_(m.weight)
# nn.init.zeros_(m.bias)
def generate_overlap_mask(self, points_src, points_ref, mask_src, mask_ref, transform_gt):
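        # padded points are pushed to far-away sentinels (50 vs. 100) so they never count as overlapping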
points_src[F.logical_not(mask_src.astype("bool")), :] = 50.0
points_ref[F.logical_not(mask_ref.astype("bool")), :] = 100.0
points_src = se3.mge_transform(transform_gt, points_src)
points_src = | F.expand_dims(points_src, axis=2) | megengine.functional.expand_dims |
import megengine as mge
import megengine.module as nn
import megengine.functional as F
from model.module import Encoder, Fusion, Decoder, Regression
from common import se3, quaternion
import math
class OMNet(nn.Module):
def __init__(self, params):
super(OMNet, self).__init__()
self.num_iter = params.titer
self.encoder = [Encoder() for _ in range(self.num_iter)]
self.fusion = [Fusion() for _ in range(self.num_iter)]
self.decoder = [Decoder() for _ in range(self.num_iter)]
self.regression = [Regression() for _ in range(self.num_iter)]
self.overlap_dist = params.overlap_dist
for m in self.modules():
if isinstance(m, nn.Conv1d) or isinstance(m, nn.Linear):
nn.init.msra_normal_(m.weight, a=math.sqrt(5))
if m.bias is not None:
fan_in, _ = nn.init.calculate_fan_in_and_fan_out(m.weight)
bound = 1 / math.sqrt(fan_in)
nn.init.uniform_(m.bias, -bound, bound)
# elif isinstance(m, nn.BatchNorm1d):
# nn.init.ones_(m.weight)
# nn.init.zeros_(m.bias)
def generate_overlap_mask(self, points_src, points_ref, mask_src, mask_ref, transform_gt):
points_src[F.logical_not(mask_src.astype("bool")), :] = 50.0
points_ref[F.logical_not(mask_ref.astype("bool")), :] = 100.0
points_src = se3.mge_transform(transform_gt, points_src)
points_src = F.expand_dims(points_src, axis=2)
points_ref = | F.expand_dims(points_ref, axis=1) | megengine.functional.expand_dims |
import megengine as mge
import megengine.module as nn
import megengine.functional as F
from model.module import Encoder, Fusion, Decoder, Regression
from common import se3, quaternion
import math
class OMNet(nn.Module):
def __init__(self, params):
super(OMNet, self).__init__()
self.num_iter = params.titer
self.encoder = [Encoder() for _ in range(self.num_iter)]
self.fusion = [Fusion() for _ in range(self.num_iter)]
self.decoder = [Decoder() for _ in range(self.num_iter)]
self.regression = [Regression() for _ in range(self.num_iter)]
self.overlap_dist = params.overlap_dist
for m in self.modules():
if isinstance(m, nn.Conv1d) or isinstance(m, nn.Linear):
nn.init.msra_normal_(m.weight, a=math.sqrt(5))
if m.bias is not None:
fan_in, _ = nn.init.calculate_fan_in_and_fan_out(m.weight)
bound = 1 / math.sqrt(fan_in)
nn.init.uniform_(m.bias, -bound, bound)
# elif isinstance(m, nn.BatchNorm1d):
# nn.init.ones_(m.weight)
# nn.init.zeros_(m.bias)
def generate_overlap_mask(self, points_src, points_ref, mask_src, mask_ref, transform_gt):
points_src[F.logical_not(mask_src.astype("bool")), :] = 50.0
points_ref[F.logical_not(mask_ref.astype("bool")), :] = 100.0
points_src = se3.mge_transform(transform_gt, points_src)
points_src = F.expand_dims(points_src, axis=2)
points_ref = F.expand_dims(points_ref, axis=1)
dist_matrix = F.sqrt(F.sum(F.square(points_src - points_ref), axis=-1)) # (B, N, N)
dist_s2r = | F.min(dist_matrix, axis=2) | megengine.functional.min |
import megengine as mge
import megengine.module as nn
import megengine.functional as F
from model.module import Encoder, Fusion, Decoder, Regression
from common import se3, quaternion
import math
class OMNet(nn.Module):
def __init__(self, params):
super(OMNet, self).__init__()
self.num_iter = params.titer
self.encoder = [Encoder() for _ in range(self.num_iter)]
self.fusion = [Fusion() for _ in range(self.num_iter)]
self.decoder = [Decoder() for _ in range(self.num_iter)]
self.regression = [Regression() for _ in range(self.num_iter)]
self.overlap_dist = params.overlap_dist
for m in self.modules():
if isinstance(m, nn.Conv1d) or isinstance(m, nn.Linear):
nn.init.msra_normal_(m.weight, a=math.sqrt(5))
if m.bias is not None:
fan_in, _ = nn.init.calculate_fan_in_and_fan_out(m.weight)
bound = 1 / math.sqrt(fan_in)
nn.init.uniform_(m.bias, -bound, bound)
# elif isinstance(m, nn.BatchNorm1d):
# nn.init.ones_(m.weight)
# nn.init.zeros_(m.bias)
def generate_overlap_mask(self, points_src, points_ref, mask_src, mask_ref, transform_gt):
points_src[F.logical_not(mask_src.astype("bool")), :] = 50.0
points_ref[F.logical_not(mask_ref.astype("bool")), :] = 100.0
points_src = se3.mge_transform(transform_gt, points_src)
points_src = F.expand_dims(points_src, axis=2)
points_ref = F.expand_dims(points_ref, axis=1)
dist_matrix = F.sqrt(F.sum(F.square(points_src - points_ref), axis=-1)) # (B, N, N)
dist_s2r = F.min(dist_matrix, axis=2)
dist_r2s = | F.min(dist_matrix, axis=1) | megengine.functional.min |
import megengine as mge
import megengine.module as nn
import megengine.functional as F
from model.module import Encoder, Fusion, Decoder, Regression
from common import se3, quaternion
import math
class OMNet(nn.Module):
def __init__(self, params):
super(OMNet, self).__init__()
self.num_iter = params.titer
self.encoder = [Encoder() for _ in range(self.num_iter)]
self.fusion = [Fusion() for _ in range(self.num_iter)]
self.decoder = [Decoder() for _ in range(self.num_iter)]
self.regression = [Regression() for _ in range(self.num_iter)]
self.overlap_dist = params.overlap_dist
for m in self.modules():
if isinstance(m, nn.Conv1d) or isinstance(m, nn.Linear):
nn.init.msra_normal_(m.weight, a=math.sqrt(5))
if m.bias is not None:
fan_in, _ = nn.init.calculate_fan_in_and_fan_out(m.weight)
bound = 1 / math.sqrt(fan_in)
nn.init.uniform_(m.bias, -bound, bound)
# elif isinstance(m, nn.BatchNorm1d):
# nn.init.ones_(m.weight)
# nn.init.zeros_(m.bias)
def generate_overlap_mask(self, points_src, points_ref, mask_src, mask_ref, transform_gt):
points_src[F.logical_not(mask_src.astype("bool")), :] = 50.0
points_ref[F.logical_not(mask_ref.astype("bool")), :] = 100.0
points_src = se3.mge_transform(transform_gt, points_src)
points_src = F.expand_dims(points_src, axis=2)
points_ref = F.expand_dims(points_ref, axis=1)
dist_matrix = F.sqrt(F.sum(F.square(points_src - points_ref), axis=-1)) # (B, N, N)
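        # nearest-neighbor distance from each source point to the reference cloud, and vice versa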
dist_s2r = F.min(dist_matrix, axis=2)
dist_r2s = F.min(dist_matrix, axis=1)
overlap_src_mask = dist_s2r < self.overlap_dist # (B, N)
overlap_ref_mask = dist_r2s < self.overlap_dist # (B, N)
return overlap_src_mask, overlap_ref_mask
def forward(self, data_batch):
endpoints = {}
xyz_src = data_batch["points_src"]
xyz_ref = data_batch["points_ref"]
transform_gt = data_batch["transform_gt"]
pose_gt = data_batch["pose_gt"]
# init endpoints
all_src_cls_pair = []
all_ref_cls_pair = []
all_transform_pair = []
all_pose_pair = []
all_xyz_src_t = [xyz_src]
# init params
B, src_N, _ = xyz_src.shape
_, ref_N, _ = xyz_ref.shape
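        # start from the identity pose: unit quaternion (1, 0, 0, 0) and zero translation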
init_quat = F.tile(mge.tensor([1, 0, 0, 0], dtype="float32"), (B, 1)) # (B, 4)
init_translate = F.tile(mge.tensor([0, 0, 0], dtype="float32"), (B, 1)) # (B, 3)
pose_pred = | F.concat((init_quat, init_translate), axis=1) | megengine.functional.concat |
import megengine as mge
import megengine.module as nn
import megengine.functional as F
from model.module import Encoder, Fusion, Decoder, Regression
from common import se3, quaternion
import math
class OMNet(nn.Module):
def __init__(self, params):
super(OMNet, self).__init__()
self.num_iter = params.titer
self.encoder = [Encoder() for _ in range(self.num_iter)]
self.fusion = [Fusion() for _ in range(self.num_iter)]
self.decoder = [Decoder() for _ in range(self.num_iter)]
self.regression = [Regression() for _ in range(self.num_iter)]
self.overlap_dist = params.overlap_dist
for m in self.modules():
if isinstance(m, nn.Conv1d) or isinstance(m, nn.Linear):
nn.init.msra_normal_(m.weight, a=math.sqrt(5))
if m.bias is not None:
fan_in, _ = nn.init.calculate_fan_in_and_fan_out(m.weight)
bound = 1 / math.sqrt(fan_in)
nn.init.uniform_(m.bias, -bound, bound)
# elif isinstance(m, nn.BatchNorm1d):
# nn.init.ones_(m.weight)
# nn.init.zeros_(m.bias)
def generate_overlap_mask(self, points_src, points_ref, mask_src, mask_ref, transform_gt):
points_src[F.logical_not(mask_src.astype("bool")), :] = 50.0
points_ref[F.logical_not(mask_ref.astype("bool")), :] = 100.0
points_src = se3.mge_transform(transform_gt, points_src)
points_src = F.expand_dims(points_src, axis=2)
points_ref = F.expand_dims(points_ref, axis=1)
dist_matrix = F.sqrt(F.sum(F.square(points_src - points_ref), axis=-1)) # (B, N, N)
dist_s2r = F.min(dist_matrix, axis=2)
dist_r2s = F.min(dist_matrix, axis=1)
overlap_src_mask = dist_s2r < self.overlap_dist # (B, N)
overlap_ref_mask = dist_r2s < self.overlap_dist # (B, N)
return overlap_src_mask, overlap_ref_mask
def forward(self, data_batch):
endpoints = {}
xyz_src = data_batch["points_src"]
xyz_ref = data_batch["points_ref"]
transform_gt = data_batch["transform_gt"]
pose_gt = data_batch["pose_gt"]
# init endpoints
all_src_cls_pair = []
all_ref_cls_pair = []
all_transform_pair = []
all_pose_pair = []
all_xyz_src_t = [xyz_src]
# init params
B, src_N, _ = xyz_src.shape
_, ref_N, _ = xyz_ref.shape
init_quat = F.tile(mge.tensor([1, 0, 0, 0], dtype="float32"), (B, 1)) # (B, 4)
init_translate = F.tile(mge.tensor([0, 0, 0], dtype="float32"), (B, 1)) # (B, 3)
pose_pred = F.concat((init_quat, init_translate), axis=1) # (B, 7)
        # copy xyz_src; the copy is transformed at each iteration
xyz_src_iter = | F.copy(xyz_src, device=xyz_src.device) | megengine.functional.copy |
import megengine as mge
import megengine.module as nn
import megengine.functional as F
from model.module import Encoder, Fusion, Decoder, Regression
from common import se3, quaternion
import math
class OMNet(nn.Module):
def __init__(self, params):
super(OMNet, self).__init__()
self.num_iter = params.titer
self.encoder = [Encoder() for _ in range(self.num_iter)]
self.fusion = [Fusion() for _ in range(self.num_iter)]
self.decoder = [Decoder() for _ in range(self.num_iter)]
self.regression = [Regression() for _ in range(self.num_iter)]
self.overlap_dist = params.overlap_dist
for m in self.modules():
if isinstance(m, nn.Conv1d) or isinstance(m, nn.Linear):
nn.init.msra_normal_(m.weight, a=math.sqrt(5))
if m.bias is not None:
fan_in, _ = nn.init.calculate_fan_in_and_fan_out(m.weight)
bound = 1 / math.sqrt(fan_in)
nn.init.uniform_(m.bias, -bound, bound)
# elif isinstance(m, nn.BatchNorm1d):
# nn.init.ones_(m.weight)
# nn.init.zeros_(m.bias)
def generate_overlap_mask(self, points_src, points_ref, mask_src, mask_ref, transform_gt):
points_src[F.logical_not(mask_src.astype("bool")), :] = 50.0
points_ref[F.logical_not(mask_ref.astype("bool")), :] = 100.0
points_src = se3.mge_transform(transform_gt, points_src)
points_src = F.expand_dims(points_src, axis=2)
points_ref = F.expand_dims(points_ref, axis=1)
dist_matrix = F.sqrt(F.sum(F.square(points_src - points_ref), axis=-1)) # (B, N, N)
dist_s2r = F.min(dist_matrix, axis=2)
dist_r2s = F.min(dist_matrix, axis=1)
overlap_src_mask = dist_s2r < self.overlap_dist # (B, N)
overlap_ref_mask = dist_r2s < self.overlap_dist # (B, N)
return overlap_src_mask, overlap_ref_mask
def forward(self, data_batch):
endpoints = {}
xyz_src = data_batch["points_src"]
xyz_ref = data_batch["points_ref"]
transform_gt = data_batch["transform_gt"]
pose_gt = data_batch["pose_gt"]
# init endpoints
all_src_cls_pair = []
all_ref_cls_pair = []
all_transform_pair = []
all_pose_pair = []
all_xyz_src_t = [xyz_src]
# init params
B, src_N, _ = xyz_src.shape
_, ref_N, _ = xyz_ref.shape
init_quat = F.tile( | mge.tensor([1, 0, 0, 0], dtype="float32") | megengine.tensor |
import megengine as mge
import megengine.module as nn
import megengine.functional as F
from model.module import Encoder, Fusion, Decoder, Regression
from common import se3, quaternion
import math
class OMNet(nn.Module):
def __init__(self, params):
super(OMNet, self).__init__()
self.num_iter = params.titer
self.encoder = [Encoder() for _ in range(self.num_iter)]
self.fusion = [Fusion() for _ in range(self.num_iter)]
self.decoder = [Decoder() for _ in range(self.num_iter)]
self.regression = [Regression() for _ in range(self.num_iter)]
self.overlap_dist = params.overlap_dist
for m in self.modules():
if isinstance(m, nn.Conv1d) or isinstance(m, nn.Linear):
nn.init.msra_normal_(m.weight, a=math.sqrt(5))
if m.bias is not None:
fan_in, _ = nn.init.calculate_fan_in_and_fan_out(m.weight)
bound = 1 / math.sqrt(fan_in)
nn.init.uniform_(m.bias, -bound, bound)
# elif isinstance(m, nn.BatchNorm1d):
# nn.init.ones_(m.weight)
# nn.init.zeros_(m.bias)
def generate_overlap_mask(self, points_src, points_ref, mask_src, mask_ref, transform_gt):
points_src[F.logical_not(mask_src.astype("bool")), :] = 50.0
points_ref[F.logical_not(mask_ref.astype("bool")), :] = 100.0
points_src = se3.mge_transform(transform_gt, points_src)
points_src = F.expand_dims(points_src, axis=2)
points_ref = F.expand_dims(points_ref, axis=1)
dist_matrix = F.sqrt(F.sum(F.square(points_src - points_ref), axis=-1)) # (B, N, N)
dist_s2r = F.min(dist_matrix, axis=2)
dist_r2s = F.min(dist_matrix, axis=1)
overlap_src_mask = dist_s2r < self.overlap_dist # (B, N)
overlap_ref_mask = dist_r2s < self.overlap_dist # (B, N)
return overlap_src_mask, overlap_ref_mask
def forward(self, data_batch):
endpoints = {}
xyz_src = data_batch["points_src"]
xyz_ref = data_batch["points_ref"]
transform_gt = data_batch["transform_gt"]
pose_gt = data_batch["pose_gt"]
# init endpoints
all_src_cls_pair = []
all_ref_cls_pair = []
all_transform_pair = []
all_pose_pair = []
all_xyz_src_t = [xyz_src]
# init params
B, src_N, _ = xyz_src.shape
_, ref_N, _ = xyz_ref.shape
init_quat = F.tile(mge.tensor([1, 0, 0, 0], dtype="float32"), (B, 1)) # (B, 4)
init_translate = F.tile( | mge.tensor([0, 0, 0], dtype="float32") | megengine.tensor |
import megengine as mge
import megengine.module as nn
import megengine.functional as F
from model.module import Encoder, Fusion, Decoder, Regression
from common import se3, quaternion
import math
class OMNet(nn.Module):
def __init__(self, params):
super(OMNet, self).__init__()
self.num_iter = params.titer
self.encoder = [Encoder() for _ in range(self.num_iter)]
self.fusion = [Fusion() for _ in range(self.num_iter)]
self.decoder = [Decoder() for _ in range(self.num_iter)]
self.regression = [Regression() for _ in range(self.num_iter)]
self.overlap_dist = params.overlap_dist
for m in self.modules():
if isinstance(m, nn.Conv1d) or isinstance(m, nn.Linear):
nn.init.msra_normal_(m.weight, a=math.sqrt(5))
if m.bias is not None:
fan_in, _ = nn.init.calculate_fan_in_and_fan_out(m.weight)
bound = 1 / math.sqrt(fan_in)
nn.init.uniform_(m.bias, -bound, bound)
# elif isinstance(m, nn.BatchNorm1d):
# nn.init.ones_(m.weight)
# nn.init.zeros_(m.bias)
def generate_overlap_mask(self, points_src, points_ref, mask_src, mask_ref, transform_gt):
points_src[F.logical_not(mask_src.astype("bool")), :] = 50.0
points_ref[F.logical_not(mask_ref.astype("bool")), :] = 100.0
points_src = se3.mge_transform(transform_gt, points_src)
points_src = F.expand_dims(points_src, axis=2)
points_ref = F.expand_dims(points_ref, axis=1)
dist_matrix = F.sqrt(F.sum(F.square(points_src - points_ref), axis=-1)) # (B, N, N)
dist_s2r = F.min(dist_matrix, axis=2)
dist_r2s = F.min(dist_matrix, axis=1)
overlap_src_mask = dist_s2r < self.overlap_dist # (B, N)
overlap_ref_mask = dist_r2s < self.overlap_dist # (B, N)
return overlap_src_mask, overlap_ref_mask
def forward(self, data_batch):
endpoints = {}
xyz_src = data_batch["points_src"]
xyz_ref = data_batch["points_ref"]
transform_gt = data_batch["transform_gt"]
pose_gt = data_batch["pose_gt"]
# init endpoints
all_src_cls_pair = []
all_ref_cls_pair = []
all_transform_pair = []
all_pose_pair = []
all_xyz_src_t = [xyz_src]
# init params
B, src_N, _ = xyz_src.shape
_, ref_N, _ = xyz_ref.shape
init_quat = F.tile(mge.tensor([1, 0, 0, 0], dtype="float32"), (B, 1)) # (B, 4)
init_translate = F.tile(mge.tensor([0, 0, 0], dtype="float32"), (B, 1)) # (B, 3)
pose_pred = F.concat((init_quat, init_translate), axis=1) # (B, 7)
        # copy xyz_src; the copy is transformed at each iteration
xyz_src_iter = F.copy(xyz_src, device=xyz_src.device)
for i in range(self.num_iter):
            # delayed mask: use all-ones masks for the first two iterations
if i < 2:
src_pred_mask = F.ones((B, src_N), dtype=xyz_src.dtype)
ref_pred_mask = F.ones((B, ref_N), dtype=xyz_ref.dtype)
# encoder
src_encoder_feats, src_glob_feat = self.encoder[i](xyz_src_iter.transpose(0, 2, 1).detach(), F.expand_dims(src_pred_mask,
axis=1))
ref_encoder_feats, ref_glob_feat = self.encoder[i](xyz_ref.transpose(0, 2, 1).detach(), F.expand_dims(ref_pred_mask, axis=1))
# fusion
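            # per-point features are concatenated with both clouds' global descriptors (self + cross) along channels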
src_concat_feat = F.concat(
(src_encoder_feats[0], F.repeat(src_glob_feat, src_N, axis=2), F.repeat(ref_glob_feat, src_N, axis=2)), axis=1)
ref_concat_feat = F.concat(
(ref_encoder_feats[0], F.repeat(ref_glob_feat, ref_N, axis=2), F.repeat(src_glob_feat, ref_N, axis=2)), axis=1)
_, src_fused_feat = self.fusion[i](src_concat_feat, F.expand_dims(src_pred_mask, axis=1))
_, ref_fused_feat = self.fusion[i](ref_concat_feat, F.expand_dims(ref_pred_mask, axis=1))
# decoder
src_decoder_feats, src_cls_pred = self.decoder[i](src_fused_feat)
ref_decoder_feats, ref_cls_pred = self.decoder[i](ref_fused_feat)
# regression
src_feat = F.concat(src_decoder_feats, axis=1) * F.expand_dims(src_pred_mask, axis=1)
ref_feat = F.concat(ref_decoder_feats, axis=1) * F.expand_dims(ref_pred_mask, axis=1)
concat_feat = | F.concat((src_fused_feat, src_feat, ref_fused_feat, ref_feat), axis=1) | megengine.functional.concat |
import megengine as mge
import megengine.module as nn
import megengine.functional as F
from model.module import Encoder, Fusion, Decoder, Regression
from common import se3, quaternion
import math
class OMNet(nn.Module):
def __init__(self, params):
super(OMNet, self).__init__()
self.num_iter = params.titer
self.encoder = [Encoder() for _ in range(self.num_iter)]
self.fusion = [Fusion() for _ in range(self.num_iter)]
self.decoder = [Decoder() for _ in range(self.num_iter)]
self.regression = [Regression() for _ in range(self.num_iter)]
self.overlap_dist = params.overlap_dist
for m in self.modules():
if isinstance(m, nn.Conv1d) or isinstance(m, nn.Linear):
nn.init.msra_normal_(m.weight, a=math.sqrt(5))
if m.bias is not None:
fan_in, _ = nn.init.calculate_fan_in_and_fan_out(m.weight)
bound = 1 / math.sqrt(fan_in)
nn.init.uniform_(m.bias, -bound, bound)
# elif isinstance(m, nn.BatchNorm1d):
# nn.init.ones_(m.weight)
# nn.init.zeros_(m.bias)
def generate_overlap_mask(self, points_src, points_ref, mask_src, mask_ref, transform_gt):
points_src[F.logical_not(mask_src.astype("bool")), :] = 50.0
points_ref[F.logical_not(mask_ref.astype("bool")), :] = 100.0
points_src = se3.mge_transform(transform_gt, points_src)
points_src = F.expand_dims(points_src, axis=2)
points_ref = F.expand_dims(points_ref, axis=1)
dist_matrix = F.sqrt(F.sum(F.square(points_src - points_ref), axis=-1)) # (B, N, N)
dist_s2r = F.min(dist_matrix, axis=2)
dist_r2s = F.min(dist_matrix, axis=1)
overlap_src_mask = dist_s2r < self.overlap_dist # (B, N)
overlap_ref_mask = dist_r2s < self.overlap_dist # (B, N)
return overlap_src_mask, overlap_ref_mask
def forward(self, data_batch):
endpoints = {}
xyz_src = data_batch["points_src"]
xyz_ref = data_batch["points_ref"]
transform_gt = data_batch["transform_gt"]
pose_gt = data_batch["pose_gt"]
# init endpoints
all_src_cls_pair = []
all_ref_cls_pair = []
all_transform_pair = []
all_pose_pair = []
all_xyz_src_t = [xyz_src]
# init params
B, src_N, _ = xyz_src.shape
_, ref_N, _ = xyz_ref.shape
init_quat = F.tile(mge.tensor([1, 0, 0, 0], dtype="float32"), (B, 1)) # (B, 4)
init_translate = F.tile(mge.tensor([0, 0, 0], dtype="float32"), (B, 1)) # (B, 3)
pose_pred = F.concat((init_quat, init_translate), axis=1) # (B, 7)
        # copy xyz_src; the copy is transformed at each iteration
xyz_src_iter = F.copy(xyz_src, device=xyz_src.device)
for i in range(self.num_iter):
            # delayed mask: use all-ones masks for the first two iterations
if i < 2:
src_pred_mask = F.ones((B, src_N), dtype=xyz_src.dtype)
ref_pred_mask = F.ones((B, ref_N), dtype=xyz_ref.dtype)
# encoder
src_encoder_feats, src_glob_feat = self.encoder[i](xyz_src_iter.transpose(0, 2, 1).detach(), F.expand_dims(src_pred_mask,
axis=1))
ref_encoder_feats, ref_glob_feat = self.encoder[i](xyz_ref.transpose(0, 2, 1).detach(), F.expand_dims(ref_pred_mask, axis=1))
# fusion
src_concat_feat = F.concat(
(src_encoder_feats[0], F.repeat(src_glob_feat, src_N, axis=2), F.repeat(ref_glob_feat, src_N, axis=2)), axis=1)
ref_concat_feat = F.concat(
(ref_encoder_feats[0], F.repeat(ref_glob_feat, ref_N, axis=2), F.repeat(src_glob_feat, ref_N, axis=2)), axis=1)
_, src_fused_feat = self.fusion[i](src_concat_feat, F.expand_dims(src_pred_mask, axis=1))
_, ref_fused_feat = self.fusion[i](ref_concat_feat, F.expand_dims(ref_pred_mask, axis=1))
# decoder
src_decoder_feats, src_cls_pred = self.decoder[i](src_fused_feat)
ref_decoder_feats, ref_cls_pred = self.decoder[i](ref_fused_feat)
# regression
src_feat = F.concat(src_decoder_feats, axis=1) * F.expand_dims(src_pred_mask, axis=1)
ref_feat = F.concat(ref_decoder_feats, axis=1) * F.expand_dims(ref_pred_mask, axis=1)
concat_feat = F.concat((src_fused_feat, src_feat, ref_fused_feat, ref_feat), axis=1)
concat_feat = | F.max(concat_feat, axis=-1) | megengine.functional.max |
import megengine as mge
import megengine.module as nn
import megengine.functional as F
from model.module import Encoder, Fusion, Decoder, Regression
from common import se3, quaternion
import math
class OMNet(nn.Module):
def __init__(self, params):
super(OMNet, self).__init__()
self.num_iter = params.titer
self.encoder = [Encoder() for _ in range(self.num_iter)]
self.fusion = [Fusion() for _ in range(self.num_iter)]
self.decoder = [Decoder() for _ in range(self.num_iter)]
self.regression = [Regression() for _ in range(self.num_iter)]
self.overlap_dist = params.overlap_dist
for m in self.modules():
if isinstance(m, nn.Conv1d) or isinstance(m, nn.Linear):
nn.init.msra_normal_(m.weight, a=math.sqrt(5))
if m.bias is not None:
fan_in, _ = nn.init.calculate_fan_in_and_fan_out(m.weight)
bound = 1 / math.sqrt(fan_in)
nn.init.uniform_(m.bias, -bound, bound)
# elif isinstance(m, nn.BatchNorm1d):
# nn.init.ones_(m.weight)
# nn.init.zeros_(m.bias)
def generate_overlap_mask(self, points_src, points_ref, mask_src, mask_ref, transform_gt):
points_src[F.logical_not(mask_src.astype("bool")), :] = 50.0
points_ref[F.logical_not(mask_ref.astype("bool")), :] = 100.0
points_src = se3.mge_transform(transform_gt, points_src)
points_src = F.expand_dims(points_src, axis=2)
points_ref = F.expand_dims(points_ref, axis=1)
dist_matrix = F.sqrt(F.sum(F.square(points_src - points_ref), axis=-1)) # (B, N, N)
dist_s2r = F.min(dist_matrix, axis=2)
dist_r2s = F.min(dist_matrix, axis=1)
overlap_src_mask = dist_s2r < self.overlap_dist # (B, N)
overlap_ref_mask = dist_r2s < self.overlap_dist # (B, N)
return overlap_src_mask, overlap_ref_mask
def forward(self, data_batch):
endpoints = {}
xyz_src = data_batch["points_src"]
xyz_ref = data_batch["points_ref"]
transform_gt = data_batch["transform_gt"]
pose_gt = data_batch["pose_gt"]
# init endpoints
all_src_cls_pair = []
all_ref_cls_pair = []
all_transform_pair = []
all_pose_pair = []
all_xyz_src_t = [xyz_src]
# init params
B, src_N, _ = xyz_src.shape
_, ref_N, _ = xyz_ref.shape
init_quat = F.tile(mge.tensor([1, 0, 0, 0], dtype="float32"), (B, 1)) # (B, 4)
init_translate = F.tile(mge.tensor([0, 0, 0], dtype="float32"), (B, 1)) # (B, 3)
pose_pred = F.concat((init_quat, init_translate), axis=1) # (B, 7)
        # copy xyz_src; the copy is transformed at each iteration
xyz_src_iter = F.copy(xyz_src, device=xyz_src.device)
for i in range(self.num_iter):
            # delayed mask: use all-ones masks for the first two iterations
if i < 2:
src_pred_mask = F.ones((B, src_N), dtype=xyz_src.dtype)
ref_pred_mask = F.ones((B, ref_N), dtype=xyz_ref.dtype)
# encoder
src_encoder_feats, src_glob_feat = self.encoder[i](xyz_src_iter.transpose(0, 2, 1).detach(), F.expand_dims(src_pred_mask,
axis=1))
ref_encoder_feats, ref_glob_feat = self.encoder[i](xyz_ref.transpose(0, 2, 1).detach(), F.expand_dims(ref_pred_mask, axis=1))
# fusion
src_concat_feat = F.concat(
(src_encoder_feats[0], F.repeat(src_glob_feat, src_N, axis=2), F.repeat(ref_glob_feat, src_N, axis=2)), axis=1)
ref_concat_feat = F.concat(
(ref_encoder_feats[0], F.repeat(ref_glob_feat, ref_N, axis=2), F.repeat(src_glob_feat, ref_N, axis=2)), axis=1)
_, src_fused_feat = self.fusion[i](src_concat_feat, F.expand_dims(src_pred_mask, axis=1))
_, ref_fused_feat = self.fusion[i](ref_concat_feat, F.expand_dims(ref_pred_mask, axis=1))
# decoder
src_decoder_feats, src_cls_pred = self.decoder[i](src_fused_feat)
ref_decoder_feats, ref_cls_pred = self.decoder[i](ref_fused_feat)
# regression
src_feat = F.concat(src_decoder_feats, axis=1) * F.expand_dims(src_pred_mask, axis=1)
ref_feat = F.concat(ref_decoder_feats, axis=1) * F.expand_dims(ref_pred_mask, axis=1)
concat_feat = F.concat((src_fused_feat, src_feat, ref_fused_feat, ref_feat), axis=1)
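            # global max-pool over the point dimension, then regress a 7-D pose update (quaternion + translation)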
concat_feat = F.max(concat_feat, axis=-1)
pose_pred_iter = self.regression[i](concat_feat) # (B, 7)
xyz_src_iter = quaternion.mge_quat_transform(pose_pred_iter, xyz_src_iter.detach())
pose_pred = quaternion.mge_transform_pose(pose_pred.detach(), pose_pred_iter)
transform_pred = quaternion.mge_quat2mat(pose_pred)
# compute overlap and cls gt
overlap_src_mask, overlap_ref_mask = self.generate_overlap_mask(F.copy(xyz_src, device=xyz_src.device),
F.copy(xyz_ref, device=xyz_ref.device), src_pred_mask,
ref_pred_mask, transform_gt)
# overlap_src_mask, overlap_ref_mask = self.generate_overlap_mask(xyz_src, xyz_ref, src_pred_mask, ref_pred_mask, transform_gt)
src_cls_gt = F.ones((B, src_N)) * overlap_src_mask
ref_cls_gt = F.ones((B, ref_N)) * overlap_ref_mask
src_pred_mask = | F.argmax(src_cls_pred, axis=1) | megengine.functional.argmax |
import megengine as mge
import megengine.module as nn
import megengine.functional as F
from model.module import Encoder, Fusion, Decoder, Regression
from common import se3, quaternion
import math
class OMNet(nn.Module):
def __init__(self, params):
super(OMNet, self).__init__()
self.num_iter = params.titer
self.encoder = [Encoder() for _ in range(self.num_iter)]
self.fusion = [Fusion() for _ in range(self.num_iter)]
self.decoder = [Decoder() for _ in range(self.num_iter)]
self.regression = [Regression() for _ in range(self.num_iter)]
self.overlap_dist = params.overlap_dist
for m in self.modules():
if isinstance(m, nn.Conv1d) or isinstance(m, nn.Linear):
nn.init.msra_normal_(m.weight, a=math.sqrt(5))
if m.bias is not None:
fan_in, _ = nn.init.calculate_fan_in_and_fan_out(m.weight)
bound = 1 / math.sqrt(fan_in)
nn.init.uniform_(m.bias, -bound, bound)
# elif isinstance(m, nn.BatchNorm1d):
# nn.init.ones_(m.weight)
# nn.init.zeros_(m.bias)
def generate_overlap_mask(self, points_src, points_ref, mask_src, mask_ref, transform_gt):
points_src[F.logical_not(mask_src.astype("bool")), :] = 50.0
points_ref[F.logical_not(mask_ref.astype("bool")), :] = 100.0
points_src = se3.mge_transform(transform_gt, points_src)
points_src = F.expand_dims(points_src, axis=2)
points_ref = F.expand_dims(points_ref, axis=1)
dist_matrix = F.sqrt(F.sum(F.square(points_src - points_ref), axis=-1)) # (B, N, N)
dist_s2r = F.min(dist_matrix, axis=2)
dist_r2s = F.min(dist_matrix, axis=1)
overlap_src_mask = dist_s2r < self.overlap_dist # (B, N)
overlap_ref_mask = dist_r2s < self.overlap_dist # (B, N)
return overlap_src_mask, overlap_ref_mask
def forward(self, data_batch):
endpoints = {}
xyz_src = data_batch["points_src"]
xyz_ref = data_batch["points_ref"]
transform_gt = data_batch["transform_gt"]
pose_gt = data_batch["pose_gt"]
# init endpoints
all_src_cls_pair = []
all_ref_cls_pair = []
all_transform_pair = []
all_pose_pair = []
all_xyz_src_t = [xyz_src]
# init params
B, src_N, _ = xyz_src.shape
_, ref_N, _ = xyz_ref.shape
init_quat = F.tile(mge.tensor([1, 0, 0, 0], dtype="float32"), (B, 1)) # (B, 4)
init_translate = F.tile(mge.tensor([0, 0, 0], dtype="float32"), (B, 1)) # (B, 3)
pose_pred = F.concat((init_quat, init_translate), axis=1) # (B, 7)
        # copy xyz_src; the copy is transformed at each iteration
xyz_src_iter = F.copy(xyz_src, device=xyz_src.device)
for i in range(self.num_iter):
            # delayed mask: use all-ones masks for the first two iterations
if i < 2:
src_pred_mask = F.ones((B, src_N), dtype=xyz_src.dtype)
ref_pred_mask = F.ones((B, ref_N), dtype=xyz_ref.dtype)
# encoder
src_encoder_feats, src_glob_feat = self.encoder[i](xyz_src_iter.transpose(0, 2, 1).detach(), F.expand_dims(src_pred_mask,
axis=1))
ref_encoder_feats, ref_glob_feat = self.encoder[i](xyz_ref.transpose(0, 2, 1).detach(), F.expand_dims(ref_pred_mask, axis=1))
# fusion
src_concat_feat = F.concat(
(src_encoder_feats[0], F.repeat(src_glob_feat, src_N, axis=2), F.repeat(ref_glob_feat, src_N, axis=2)), axis=1)
ref_concat_feat = F.concat(
(ref_encoder_feats[0], F.repeat(ref_glob_feat, ref_N, axis=2), F.repeat(src_glob_feat, ref_N, axis=2)), axis=1)
_, src_fused_feat = self.fusion[i](src_concat_feat, F.expand_dims(src_pred_mask, axis=1))
_, ref_fused_feat = self.fusion[i](ref_concat_feat, F.expand_dims(ref_pred_mask, axis=1))
# decoder
src_decoder_feats, src_cls_pred = self.decoder[i](src_fused_feat)
ref_decoder_feats, ref_cls_pred = self.decoder[i](ref_fused_feat)
# regression
src_feat = F.concat(src_decoder_feats, axis=1) * F.expand_dims(src_pred_mask, axis=1)
ref_feat = F.concat(ref_decoder_feats, axis=1) * F.expand_dims(ref_pred_mask, axis=1)
concat_feat = F.concat((src_fused_feat, src_feat, ref_fused_feat, ref_feat), axis=1)
concat_feat = F.max(concat_feat, axis=-1)
pose_pred_iter = self.regression[i](concat_feat) # (B, 7)
xyz_src_iter = quaternion.mge_quat_transform(pose_pred_iter, xyz_src_iter.detach())
pose_pred = quaternion.mge_transform_pose(pose_pred.detach(), pose_pred_iter)
transform_pred = quaternion.mge_quat2mat(pose_pred)
# compute overlap and cls gt
overlap_src_mask, overlap_ref_mask = self.generate_overlap_mask(F.copy(xyz_src, device=xyz_src.device),
F.copy(xyz_ref, device=xyz_ref.device), src_pred_mask,
ref_pred_mask, transform_gt)
# overlap_src_mask, overlap_ref_mask = self.generate_overlap_mask(xyz_src, xyz_ref, src_pred_mask, ref_pred_mask, transform_gt)
src_cls_gt = F.ones((B, src_N)) * overlap_src_mask
ref_cls_gt = F.ones((B, ref_N)) * overlap_ref_mask
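            # refresh the per-point masks from the classifier logits for the next iteration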
src_pred_mask = F.argmax(src_cls_pred, axis=1)
ref_pred_mask = | F.argmax(ref_cls_pred, axis=1) | megengine.functional.argmax |
import megengine as mge
import megengine.module as nn
import megengine.functional as F
from model.module import Encoder, Fusion, Decoder, Regression
from common import se3, quaternion
import math
class OMNet(nn.Module):
def __init__(self, params):
super(OMNet, self).__init__()
self.num_iter = params.titer
self.encoder = [Encoder() for _ in range(self.num_iter)]
self.fusion = [Fusion() for _ in range(self.num_iter)]
self.decoder = [Decoder() for _ in range(self.num_iter)]
self.regression = [Regression() for _ in range(self.num_iter)]
self.overlap_dist = params.overlap_dist
for m in self.modules():
if isinstance(m, nn.Conv1d) or isinstance(m, nn.Linear):
nn.init.msra_normal_(m.weight, a=math.sqrt(5))
if m.bias is not None:
fan_in, _ = nn.init.calculate_fan_in_and_fan_out(m.weight)
bound = 1 / math.sqrt(fan_in)
nn.init.uniform_(m.bias, -bound, bound)
# elif isinstance(m, nn.BatchNorm1d):
# nn.init.ones_(m.weight)
# nn.init.zeros_(m.bias)
def generate_overlap_mask(self, points_src, points_ref, mask_src, mask_ref, transform_gt):
points_src[F.logical_not(mask_src.astype("bool")), :] = 50.0
points_ref[F.logical_not(mask_ref.astype("bool")), :] = 100.0
points_src = se3.mge_transform(transform_gt, points_src)
points_src = F.expand_dims(points_src, axis=2)
points_ref = F.expand_dims(points_ref, axis=1)
dist_matrix = F.sqrt(F.sum( | F.square(points_src - points_ref) | megengine.functional.square |
import megengine as mge
import megengine.module as nn
import megengine.functional as F
from model.module import Encoder, Fusion, Decoder, Regression
from common import se3, quaternion
import math
class OMNet(nn.Module):
def __init__(self, params):
super(OMNet, self).__init__()
self.num_iter = params.titer
self.encoder = [Encoder() for _ in range(self.num_iter)]
self.fusion = [Fusion() for _ in range(self.num_iter)]
self.decoder = [Decoder() for _ in range(self.num_iter)]
self.regression = [Regression() for _ in range(self.num_iter)]
self.overlap_dist = params.overlap_dist
for m in self.modules():
if isinstance(m, nn.Conv1d) or isinstance(m, nn.Linear):
nn.init.msra_normal_(m.weight, a=math.sqrt(5))
if m.bias is not None:
fan_in, _ = nn.init.calculate_fan_in_and_fan_out(m.weight)
bound = 1 / math.sqrt(fan_in)
nn.init.uniform_(m.bias, -bound, bound)
# elif isinstance(m, nn.BatchNorm1d):
# nn.init.ones_(m.weight)
# nn.init.zeros_(m.bias)
def generate_overlap_mask(self, points_src, points_ref, mask_src, mask_ref, transform_gt):
points_src[F.logical_not(mask_src.astype("bool")), :] = 50.0
points_ref[F.logical_not(mask_ref.astype("bool")), :] = 100.0
points_src = se3.mge_transform(transform_gt, points_src)
points_src = F.expand_dims(points_src, axis=2)
points_ref = F.expand_dims(points_ref, axis=1)
dist_matrix = F.sqrt(F.sum(F.square(points_src - points_ref), axis=-1)) # (B, N, N)
dist_s2r = F.min(dist_matrix, axis=2)
dist_r2s = F.min(dist_matrix, axis=1)
overlap_src_mask = dist_s2r < self.overlap_dist # (B, N)
overlap_ref_mask = dist_r2s < self.overlap_dist # (B, N)
return overlap_src_mask, overlap_ref_mask
def forward(self, data_batch):
endpoints = {}
xyz_src = data_batch["points_src"]
xyz_ref = data_batch["points_ref"]
transform_gt = data_batch["transform_gt"]
pose_gt = data_batch["pose_gt"]
# init endpoints
all_src_cls_pair = []
all_ref_cls_pair = []
all_transform_pair = []
all_pose_pair = []
all_xyz_src_t = [xyz_src]
# init params
B, src_N, _ = xyz_src.shape
_, ref_N, _ = xyz_ref.shape
init_quat = F.tile(mge.tensor([1, 0, 0, 0], dtype="float32"), (B, 1)) # (B, 4)
init_translate = F.tile(mge.tensor([0, 0, 0], dtype="float32"), (B, 1)) # (B, 3)
pose_pred = F.concat((init_quat, init_translate), axis=1) # (B, 7)
        # copy xyz_src; the copy is transformed at each iteration
xyz_src_iter = F.copy(xyz_src, device=xyz_src.device)
for i in range(self.num_iter):
            # delayed mask: use all-ones masks for the first two iterations
if i < 2:
src_pred_mask = | F.ones((B, src_N), dtype=xyz_src.dtype) | megengine.functional.ones |
import megengine as mge
import megengine.module as nn
import megengine.functional as F
from model.module import Encoder, Fusion, Decoder, Regression
from common import se3, quaternion
import math
class OMNet(nn.Module):
def __init__(self, params):
super(OMNet, self).__init__()
self.num_iter = params.titer
self.encoder = [Encoder() for _ in range(self.num_iter)]
self.fusion = [Fusion() for _ in range(self.num_iter)]
self.decoder = [Decoder() for _ in range(self.num_iter)]
self.regression = [Regression() for _ in range(self.num_iter)]
self.overlap_dist = params.overlap_dist
for m in self.modules():
if isinstance(m, nn.Conv1d) or isinstance(m, nn.Linear):
nn.init.msra_normal_(m.weight, a=math.sqrt(5))
if m.bias is not None:
fan_in, _ = nn.init.calculate_fan_in_and_fan_out(m.weight)
bound = 1 / math.sqrt(fan_in)
nn.init.uniform_(m.bias, -bound, bound)
# elif isinstance(m, nn.BatchNorm1d):
# nn.init.ones_(m.weight)
# nn.init.zeros_(m.bias)
def generate_overlap_mask(self, points_src, points_ref, mask_src, mask_ref, transform_gt):
points_src[F.logical_not(mask_src.astype("bool")), :] = 50.0
points_ref[F.logical_not(mask_ref.astype("bool")), :] = 100.0
points_src = se3.mge_transform(transform_gt, points_src)
points_src = F.expand_dims(points_src, axis=2)
points_ref = F.expand_dims(points_ref, axis=1)
dist_matrix = F.sqrt(F.sum(F.square(points_src - points_ref), axis=-1)) # (B, N, N)
dist_s2r = F.min(dist_matrix, axis=2)
dist_r2s = F.min(dist_matrix, axis=1)
overlap_src_mask = dist_s2r < self.overlap_dist # (B, N)
overlap_ref_mask = dist_r2s < self.overlap_dist # (B, N)
return overlap_src_mask, overlap_ref_mask
def forward(self, data_batch):
endpoints = {}
xyz_src = data_batch["points_src"]
xyz_ref = data_batch["points_ref"]
transform_gt = data_batch["transform_gt"]
pose_gt = data_batch["pose_gt"]
# init endpoints
all_src_cls_pair = []
all_ref_cls_pair = []
all_transform_pair = []
all_pose_pair = []
all_xyz_src_t = [xyz_src]
# init params
B, src_N, _ = xyz_src.shape
_, ref_N, _ = xyz_ref.shape
init_quat = F.tile(mge.tensor([1, 0, 0, 0], dtype="float32"), (B, 1)) # (B, 4)
init_translate = F.tile(mge.tensor([0, 0, 0], dtype="float32"), (B, 1)) # (B, 3)
pose_pred = F.concat((init_quat, init_translate), axis=1) # (B, 7)
        # copy xyz_src; the copy is transformed at each iteration
xyz_src_iter = F.copy(xyz_src, device=xyz_src.device)
for i in range(self.num_iter):
            # delayed mask: use all-ones masks for the first two iterations
if i < 2:
src_pred_mask = F.ones((B, src_N), dtype=xyz_src.dtype)
ref_pred_mask = | F.ones((B, ref_N), dtype=xyz_ref.dtype) | megengine.functional.ones |
import megengine as mge
import megengine.module as nn
import megengine.functional as F
from model.module import Encoder, Fusion, Decoder, Regression
from common import se3, quaternion
import math
class OMNet(nn.Module):
def __init__(self, params):
super(OMNet, self).__init__()
self.num_iter = params.titer
self.encoder = [Encoder() for _ in range(self.num_iter)]
self.fusion = [Fusion() for _ in range(self.num_iter)]
self.decoder = [Decoder() for _ in range(self.num_iter)]
self.regression = [Regression() for _ in range(self.num_iter)]
self.overlap_dist = params.overlap_dist
for m in self.modules():
if isinstance(m, nn.Conv1d) or isinstance(m, nn.Linear):
nn.init.msra_normal_(m.weight, a=math.sqrt(5))
if m.bias is not None:
fan_in, _ = nn.init.calculate_fan_in_and_fan_out(m.weight)
bound = 1 / math.sqrt(fan_in)
nn.init.uniform_(m.bias, -bound, bound)
# elif isinstance(m, nn.BatchNorm1d):
# nn.init.ones_(m.weight)
# nn.init.zeros_(m.bias)
def generate_overlap_mask(self, points_src, points_ref, mask_src, mask_ref, transform_gt):
points_src[F.logical_not(mask_src.astype("bool")), :] = 50.0
points_ref[F.logical_not(mask_ref.astype("bool")), :] = 100.0
points_src = se3.mge_transform(transform_gt, points_src)
points_src = F.expand_dims(points_src, axis=2)
points_ref = F.expand_dims(points_ref, axis=1)
dist_matrix = F.sqrt(F.sum(F.square(points_src - points_ref), axis=-1)) # (B, N, N)
dist_s2r = F.min(dist_matrix, axis=2)
dist_r2s = F.min(dist_matrix, axis=1)
overlap_src_mask = dist_s2r < self.overlap_dist # (B, N)
overlap_ref_mask = dist_r2s < self.overlap_dist # (B, N)
return overlap_src_mask, overlap_ref_mask
def forward(self, data_batch):
endpoints = {}
xyz_src = data_batch["points_src"]
xyz_ref = data_batch["points_ref"]
transform_gt = data_batch["transform_gt"]
pose_gt = data_batch["pose_gt"]
# init endpoints
all_src_cls_pair = []
all_ref_cls_pair = []
all_transform_pair = []
all_pose_pair = []
all_xyz_src_t = [xyz_src]
# init params
B, src_N, _ = xyz_src.shape
_, ref_N, _ = xyz_ref.shape
init_quat = F.tile(mge.tensor([1, 0, 0, 0], dtype="float32"), (B, 1)) # (B, 4)
init_translate = F.tile(mge.tensor([0, 0, 0], dtype="float32"), (B, 1)) # (B, 3)
pose_pred = F.concat((init_quat, init_translate), axis=1) # (B, 7)
        # copy xyz_src; the copy is transformed at each iteration
xyz_src_iter = F.copy(xyz_src, device=xyz_src.device)
for i in range(self.num_iter):
            # delayed mask: use all-ones masks for the first two iterations
            if i < 2:
                src_pred_mask = F.ones((B, src_N), dtype=xyz_src.dtype)
                ref_pred_mask = F.ones((B, ref_N), dtype=xyz_ref.dtype)
            # encoder
            src_encoder_feats, src_glob_feat = self.encoder[i](xyz_src_iter.transpose(0, 2, 1).detach(),
                                                               F.expand_dims(src_pred_mask, axis=1))
            ref_encoder_feats, ref_glob_feat = self.encoder[i](xyz_ref.transpose(0, 2, 1).detach(), | F.expand_dims(ref_pred_mask, axis=1) | megengine.functional.expand_dims |
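The overlap labels in generate_overlap_mask above come from a dense pairwise-distance matrix: a point counts as overlapping if its nearest neighbour in the other (ground-truth-aligned) cloud lies closer than overlap_dist. A minimal standalone sketch of that reduction, with an identity alignment and an illustrative threshold of 0.5 standing in for se3.mge_transform and params.overlap_dist:

import megengine as mge
import megengine.functional as F

points_src = mge.tensor([[[0.0, 0.0, 0.0], [5.0, 5.0, 5.0]]])  # (B=1, N=2, 3)
points_ref = mge.tensor([[[0.0, 0.0, 0.0], [9.0, 9.0, 9.0]]])  # (B=1, N=2, 3)

src = F.expand_dims(points_src, axis=2)             # (1, 2, 1, 3)
ref = F.expand_dims(points_ref, axis=1)             # (1, 1, 2, 3)
dist = F.sqrt(F.sum(F.square(src - ref), axis=-1))  # (1, 2, 2)

overlap_src = F.min(dist, axis=2) < 0.5  # nearest ref point per src point
overlap_ref = F.min(dist, axis=1) < 0.5  # nearest src point per ref point
print(overlap_src.numpy(), overlap_ref.numpy())  # [[ True False]] [[ True False]]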
import megengine as mge
import megengine.module as nn
import megengine.functional as F
from model.module import Encoder, Fusion, Decoder, Regression
from common import se3, quaternion
import math


class OMNet(nn.Module):
    def __init__(self, params):
        super(OMNet, self).__init__()
        self.num_iter = params.titer
        self.encoder = [Encoder() for _ in range(self.num_iter)]
        self.fusion = [Fusion() for _ in range(self.num_iter)]
        self.decoder = [Decoder() for _ in range(self.num_iter)]
        self.regression = [Regression() for _ in range(self.num_iter)]
        self.overlap_dist = params.overlap_dist
        for m in self.modules():
            if isinstance(m, nn.Conv1d) or isinstance(m, nn.Linear):
                nn.init.msra_normal_(m.weight, a=math.sqrt(5))
                if m.bias is not None:
                    fan_in, _ = nn.init.calculate_fan_in_and_fan_out(m.weight)
                    bound = 1 / math.sqrt(fan_in)
                    nn.init.uniform_(m.bias, -bound, bound)
            # elif isinstance(m, nn.BatchNorm1d):
            #     nn.init.ones_(m.weight)
            #     nn.init.zeros_(m.bias)

    def generate_overlap_mask(self, points_src, points_ref, mask_src, mask_ref, transform_gt):
        points_src[F.logical_not(mask_src.astype("bool")), :] = 50.0
        points_ref[F.logical_not(mask_ref.astype("bool")), :] = 100.0
        points_src = se3.mge_transform(transform_gt, points_src)
        points_src = F.expand_dims(points_src, axis=2)
        points_ref = F.expand_dims(points_ref, axis=1)
        dist_matrix = F.sqrt(F.sum(F.square(points_src - points_ref), axis=-1))  # (B, N, N)
        dist_s2r = F.min(dist_matrix, axis=2)
        dist_r2s = F.min(dist_matrix, axis=1)
        overlap_src_mask = dist_s2r < self.overlap_dist  # (B, N)
        overlap_ref_mask = dist_r2s < self.overlap_dist  # (B, N)
        return overlap_src_mask, overlap_ref_mask

    def forward(self, data_batch):
        endpoints = {}
        xyz_src = data_batch["points_src"]
        xyz_ref = data_batch["points_ref"]
        transform_gt = data_batch["transform_gt"]
        pose_gt = data_batch["pose_gt"]
        # init endpoints
        all_src_cls_pair = []
        all_ref_cls_pair = []
        all_transform_pair = []
        all_pose_pair = []
        all_xyz_src_t = [xyz_src]
        # init params
        B, src_N, _ = xyz_src.shape
        _, ref_N, _ = xyz_ref.shape
        init_quat = F.tile(mge.tensor([1, 0, 0, 0], dtype="float32"), (B, 1))  # (B, 4)
        init_translate = F.tile(mge.tensor([0, 0, 0], dtype="float32"), (B, 1))  # (B, 3)
        pose_pred = F.concat((init_quat, init_translate), axis=1)  # (B, 7)
        # copy xyz_src so the iterative refinement leaves the input untouched
        xyz_src_iter = F.copy(xyz_src, device=xyz_src.device)
        for i in range(self.num_iter):
            # delay mask: use all-ones masks for the first two iterations
            if i < 2:
                src_pred_mask = F.ones((B, src_N), dtype=xyz_src.dtype)
                ref_pred_mask = F.ones((B, ref_N), dtype=xyz_ref.dtype)
            # encoder
            src_encoder_feats, src_glob_feat = self.encoder[i](xyz_src_iter.transpose(0, 2, 1).detach(),
                                                               F.expand_dims(src_pred_mask, axis=1))
            ref_encoder_feats, ref_glob_feat = self.encoder[i](xyz_ref.transpose(0, 2, 1).detach(), F.expand_dims(ref_pred_mask, axis=1))
            # fusion
            src_concat_feat = F.concat(
                (src_encoder_feats[0], F.repeat(src_glob_feat, src_N, axis=2), F.repeat(ref_glob_feat, src_N, axis=2)), axis=1)
            ref_concat_feat = F.concat(
                (ref_encoder_feats[0], F.repeat(ref_glob_feat, ref_N, axis=2), F.repeat(src_glob_feat, ref_N, axis=2)), axis=1)
            _, src_fused_feat = self.fusion[i](src_concat_feat, | F.expand_dims(src_pred_mask, axis=1) | megengine.functional.expand_dims |
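For reference, the bias initialisation in __init__ is the usual fan-in rule: uniform in [-1/sqrt(fan_in), 1/sqrt(fan_in)]. A small sketch on a hypothetical Conv1d(64 -> 128, kernel 1), where fan_in = 64 * 1 = 64 and the bound is 0.125:

import math
import megengine.module as nn

conv = nn.Conv1d(64, 128, 1, bias=True)
nn.init.msra_normal_(conv.weight, a=math.sqrt(5))
fan_in, _ = nn.init.calculate_fan_in_and_fan_out(conv.weight)  # 64 * 1 = 64
bound = 1 / math.sqrt(fan_in)
nn.init.uniform_(conv.bias, -bound, bound)
print(fan_in, bound)  # 64 0.125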
import megengine as mge
import megengine.module as nn
import megengine.functional as F
from model.module import Encoder, Fusion, Decoder, Regression
from common import se3, quaternion
import math


class OMNet(nn.Module):
    def __init__(self, params):
        super(OMNet, self).__init__()
        self.num_iter = params.titer
        self.encoder = [Encoder() for _ in range(self.num_iter)]
        self.fusion = [Fusion() for _ in range(self.num_iter)]
        self.decoder = [Decoder() for _ in range(self.num_iter)]
        self.regression = [Regression() for _ in range(self.num_iter)]
        self.overlap_dist = params.overlap_dist
        for m in self.modules():
            if isinstance(m, nn.Conv1d) or isinstance(m, nn.Linear):
                nn.init.msra_normal_(m.weight, a=math.sqrt(5))
                if m.bias is not None:
                    fan_in, _ = nn.init.calculate_fan_in_and_fan_out(m.weight)
                    bound = 1 / math.sqrt(fan_in)
                    nn.init.uniform_(m.bias, -bound, bound)
            # elif isinstance(m, nn.BatchNorm1d):
            #     nn.init.ones_(m.weight)
            #     nn.init.zeros_(m.bias)

    def generate_overlap_mask(self, points_src, points_ref, mask_src, mask_ref, transform_gt):
        points_src[F.logical_not(mask_src.astype("bool")), :] = 50.0
        points_ref[F.logical_not(mask_ref.astype("bool")), :] = 100.0
        points_src = se3.mge_transform(transform_gt, points_src)
        points_src = F.expand_dims(points_src, axis=2)
        points_ref = F.expand_dims(points_ref, axis=1)
        dist_matrix = F.sqrt(F.sum(F.square(points_src - points_ref), axis=-1))  # (B, N, N)
        dist_s2r = F.min(dist_matrix, axis=2)
        dist_r2s = F.min(dist_matrix, axis=1)
        overlap_src_mask = dist_s2r < self.overlap_dist  # (B, N)
        overlap_ref_mask = dist_r2s < self.overlap_dist  # (B, N)
        return overlap_src_mask, overlap_ref_mask

    def forward(self, data_batch):
        endpoints = {}
        xyz_src = data_batch["points_src"]
        xyz_ref = data_batch["points_ref"]
        transform_gt = data_batch["transform_gt"]
        pose_gt = data_batch["pose_gt"]
        # init endpoints
        all_src_cls_pair = []
        all_ref_cls_pair = []
        all_transform_pair = []
        all_pose_pair = []
        all_xyz_src_t = [xyz_src]
        # init params
        B, src_N, _ = xyz_src.shape
        _, ref_N, _ = xyz_ref.shape
        init_quat = F.tile(mge.tensor([1, 0, 0, 0], dtype="float32"), (B, 1))  # (B, 4)
        init_translate = F.tile(mge.tensor([0, 0, 0], dtype="float32"), (B, 1))  # (B, 3)
        pose_pred = F.concat((init_quat, init_translate), axis=1)  # (B, 7)
        # copy xyz_src so the iterative refinement leaves the input untouched
        xyz_src_iter = F.copy(xyz_src, device=xyz_src.device)
        for i in range(self.num_iter):
            # delay mask: use all-ones masks for the first two iterations
            if i < 2:
                src_pred_mask = F.ones((B, src_N), dtype=xyz_src.dtype)
                ref_pred_mask = F.ones((B, ref_N), dtype=xyz_ref.dtype)
            # encoder
            src_encoder_feats, src_glob_feat = self.encoder[i](xyz_src_iter.transpose(0, 2, 1).detach(),
                                                               F.expand_dims(src_pred_mask, axis=1))
            ref_encoder_feats, ref_glob_feat = self.encoder[i](xyz_ref.transpose(0, 2, 1).detach(), F.expand_dims(ref_pred_mask, axis=1))
            # fusion
            src_concat_feat = F.concat(
                (src_encoder_feats[0], F.repeat(src_glob_feat, src_N, axis=2), F.repeat(ref_glob_feat, src_N, axis=2)), axis=1)
            ref_concat_feat = F.concat(
                (ref_encoder_feats[0], F.repeat(ref_glob_feat, ref_N, axis=2), F.repeat(src_glob_feat, ref_N, axis=2)), axis=1)
            _, src_fused_feat = self.fusion[i](src_concat_feat, F.expand_dims(src_pred_mask, axis=1))
            _, ref_fused_feat = self.fusion[i](ref_concat_feat, | F.expand_dims(ref_pred_mask, axis=1) | megengine.functional.expand_dims |
import megengine as mge
import megengine.module as nn
import megengine.functional as F
from model.module import Encoder, Fusion, Decoder, Regression
from common import se3, quaternion
import math


class OMNet(nn.Module):
    def __init__(self, params):
        super(OMNet, self).__init__()
        self.num_iter = params.titer
        self.encoder = [Encoder() for _ in range(self.num_iter)]
        self.fusion = [Fusion() for _ in range(self.num_iter)]
        self.decoder = [Decoder() for _ in range(self.num_iter)]
        self.regression = [Regression() for _ in range(self.num_iter)]
        self.overlap_dist = params.overlap_dist
        for m in self.modules():
            if isinstance(m, nn.Conv1d) or isinstance(m, nn.Linear):
                nn.init.msra_normal_(m.weight, a=math.sqrt(5))
                if m.bias is not None:
                    fan_in, _ = nn.init.calculate_fan_in_and_fan_out(m.weight)
                    bound = 1 / math.sqrt(fan_in)
                    nn.init.uniform_(m.bias, -bound, bound)
            # elif isinstance(m, nn.BatchNorm1d):
            #     nn.init.ones_(m.weight)
            #     nn.init.zeros_(m.bias)

    def generate_overlap_mask(self, points_src, points_ref, mask_src, mask_ref, transform_gt):
        points_src[F.logical_not(mask_src.astype("bool")), :] = 50.0
        points_ref[F.logical_not(mask_ref.astype("bool")), :] = 100.0
        points_src = se3.mge_transform(transform_gt, points_src)
        points_src = F.expand_dims(points_src, axis=2)
        points_ref = F.expand_dims(points_ref, axis=1)
        dist_matrix = F.sqrt(F.sum(F.square(points_src - points_ref), axis=-1))  # (B, N, N)
        dist_s2r = F.min(dist_matrix, axis=2)
        dist_r2s = F.min(dist_matrix, axis=1)
        overlap_src_mask = dist_s2r < self.overlap_dist  # (B, N)
        overlap_ref_mask = dist_r2s < self.overlap_dist  # (B, N)
        return overlap_src_mask, overlap_ref_mask

    def forward(self, data_batch):
        endpoints = {}
        xyz_src = data_batch["points_src"]
        xyz_ref = data_batch["points_ref"]
        transform_gt = data_batch["transform_gt"]
        pose_gt = data_batch["pose_gt"]
        # init endpoints
        all_src_cls_pair = []
        all_ref_cls_pair = []
        all_transform_pair = []
        all_pose_pair = []
        all_xyz_src_t = [xyz_src]
        # init params
        B, src_N, _ = xyz_src.shape
        _, ref_N, _ = xyz_ref.shape
        init_quat = F.tile(mge.tensor([1, 0, 0, 0], dtype="float32"), (B, 1))  # (B, 4)
        init_translate = F.tile(mge.tensor([0, 0, 0], dtype="float32"), (B, 1))  # (B, 3)
        pose_pred = F.concat((init_quat, init_translate), axis=1)  # (B, 7)
        # copy xyz_src so the iterative refinement leaves the input untouched
        xyz_src_iter = F.copy(xyz_src, device=xyz_src.device)
        for i in range(self.num_iter):
            # delay mask: use all-ones masks for the first two iterations
            if i < 2:
                src_pred_mask = F.ones((B, src_N), dtype=xyz_src.dtype)
                ref_pred_mask = F.ones((B, ref_N), dtype=xyz_ref.dtype)
            # encoder
            src_encoder_feats, src_glob_feat = self.encoder[i](xyz_src_iter.transpose(0, 2, 1).detach(),
                                                               F.expand_dims(src_pred_mask, axis=1))
            ref_encoder_feats, ref_glob_feat = self.encoder[i](xyz_ref.transpose(0, 2, 1).detach(), F.expand_dims(ref_pred_mask, axis=1))
            # fusion
            src_concat_feat = F.concat(
                (src_encoder_feats[0], F.repeat(src_glob_feat, src_N, axis=2), F.repeat(ref_glob_feat, src_N, axis=2)), axis=1)
            ref_concat_feat = F.concat(
                (ref_encoder_feats[0], F.repeat(ref_glob_feat, ref_N, axis=2), F.repeat(src_glob_feat, ref_N, axis=2)), axis=1)
            _, src_fused_feat = self.fusion[i](src_concat_feat, F.expand_dims(src_pred_mask, axis=1))
            _, ref_fused_feat = self.fusion[i](ref_concat_feat, F.expand_dims(ref_pred_mask, axis=1))
            # decoder
            src_decoder_feats, src_cls_pred = self.decoder[i](src_fused_feat)
            ref_decoder_feats, ref_cls_pred = self.decoder[i](ref_fused_feat)
            # regression
            src_feat = | F.concat(src_decoder_feats, axis=1) | megengine.functional.concat |
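The fusion input above is assembled by repeating each cloud's global descriptor (B, C, 1) along the point axis with F.repeat and stacking it channel-wise onto the per-point features. A shape-only sketch with made-up sizes and a single global feature instead of the two used above:

import megengine.functional as F

B, C, N = 1, 4, 5
point_feat = F.ones((B, C, N))       # per-point features
glob_feat = F.ones((B, C, 1)) * 2.0  # one global descriptor per cloud
fused_in = F.concat((point_feat, F.repeat(glob_feat, N, axis=2)), axis=1)
print(fused_in.shape)                # (1, 8, 5)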
import megengine as mge
import megengine.module as nn
import megengine.functional as F
from model.module import Encoder, Fusion, Decoder, Regression
from common import se3, quaternion
import math


class OMNet(nn.Module):
    def __init__(self, params):
        super(OMNet, self).__init__()
        self.num_iter = params.titer
        self.encoder = [Encoder() for _ in range(self.num_iter)]
        self.fusion = [Fusion() for _ in range(self.num_iter)]
        self.decoder = [Decoder() for _ in range(self.num_iter)]
        self.regression = [Regression() for _ in range(self.num_iter)]
        self.overlap_dist = params.overlap_dist
        for m in self.modules():
            if isinstance(m, nn.Conv1d) or isinstance(m, nn.Linear):
                nn.init.msra_normal_(m.weight, a=math.sqrt(5))
                if m.bias is not None:
                    fan_in, _ = nn.init.calculate_fan_in_and_fan_out(m.weight)
                    bound = 1 / math.sqrt(fan_in)
                    nn.init.uniform_(m.bias, -bound, bound)
            # elif isinstance(m, nn.BatchNorm1d):
            #     nn.init.ones_(m.weight)
            #     nn.init.zeros_(m.bias)

    def generate_overlap_mask(self, points_src, points_ref, mask_src, mask_ref, transform_gt):
        points_src[F.logical_not(mask_src.astype("bool")), :] = 50.0
        points_ref[F.logical_not(mask_ref.astype("bool")), :] = 100.0
        points_src = se3.mge_transform(transform_gt, points_src)
        points_src = F.expand_dims(points_src, axis=2)
        points_ref = F.expand_dims(points_ref, axis=1)
        dist_matrix = F.sqrt(F.sum(F.square(points_src - points_ref), axis=-1))  # (B, N, N)
        dist_s2r = F.min(dist_matrix, axis=2)
        dist_r2s = F.min(dist_matrix, axis=1)
        overlap_src_mask = dist_s2r < self.overlap_dist  # (B, N)
        overlap_ref_mask = dist_r2s < self.overlap_dist  # (B, N)
        return overlap_src_mask, overlap_ref_mask

    def forward(self, data_batch):
        endpoints = {}
        xyz_src = data_batch["points_src"]
        xyz_ref = data_batch["points_ref"]
        transform_gt = data_batch["transform_gt"]
        pose_gt = data_batch["pose_gt"]
        # init endpoints
        all_src_cls_pair = []
        all_ref_cls_pair = []
        all_transform_pair = []
        all_pose_pair = []
        all_xyz_src_t = [xyz_src]
        # init params
        B, src_N, _ = xyz_src.shape
        _, ref_N, _ = xyz_ref.shape
        init_quat = F.tile(mge.tensor([1, 0, 0, 0], dtype="float32"), (B, 1))  # (B, 4)
        init_translate = F.tile(mge.tensor([0, 0, 0], dtype="float32"), (B, 1))  # (B, 3)
        pose_pred = F.concat((init_quat, init_translate), axis=1)  # (B, 7)
        # copy xyz_src so the iterative refinement leaves the input untouched
        xyz_src_iter = F.copy(xyz_src, device=xyz_src.device)
        for i in range(self.num_iter):
            # delay mask: use all-ones masks for the first two iterations
            if i < 2:
                src_pred_mask = F.ones((B, src_N), dtype=xyz_src.dtype)
                ref_pred_mask = F.ones((B, ref_N), dtype=xyz_ref.dtype)
            # encoder
            src_encoder_feats, src_glob_feat = self.encoder[i](xyz_src_iter.transpose(0, 2, 1).detach(),
                                                               F.expand_dims(src_pred_mask, axis=1))
            ref_encoder_feats, ref_glob_feat = self.encoder[i](xyz_ref.transpose(0, 2, 1).detach(), F.expand_dims(ref_pred_mask, axis=1))
            # fusion
            src_concat_feat = F.concat(
                (src_encoder_feats[0], F.repeat(src_glob_feat, src_N, axis=2), F.repeat(ref_glob_feat, src_N, axis=2)), axis=1)
            ref_concat_feat = F.concat(
                (ref_encoder_feats[0], F.repeat(ref_glob_feat, ref_N, axis=2), F.repeat(src_glob_feat, ref_N, axis=2)), axis=1)
            _, src_fused_feat = self.fusion[i](src_concat_feat, F.expand_dims(src_pred_mask, axis=1))
            _, ref_fused_feat = self.fusion[i](ref_concat_feat, F.expand_dims(ref_pred_mask, axis=1))
            # decoder
            src_decoder_feats, src_cls_pred = self.decoder[i](src_fused_feat)
            ref_decoder_feats, ref_cls_pred = self.decoder[i](ref_fused_feat)
            # regression
            src_feat = F.concat(src_decoder_feats, axis=1) * | F.expand_dims(src_pred_mask, axis=1) | megengine.functional.expand_dims |
import megengine as mge
import megengine.module as nn
import megengine.functional as F
from model.module import Encoder, Fusion, Decoder, Regression
from common import se3, quaternion
import math


class OMNet(nn.Module):
    def __init__(self, params):
        super(OMNet, self).__init__()
        self.num_iter = params.titer
        self.encoder = [Encoder() for _ in range(self.num_iter)]
        self.fusion = [Fusion() for _ in range(self.num_iter)]
        self.decoder = [Decoder() for _ in range(self.num_iter)]
        self.regression = [Regression() for _ in range(self.num_iter)]
        self.overlap_dist = params.overlap_dist
        for m in self.modules():
            if isinstance(m, nn.Conv1d) or isinstance(m, nn.Linear):
                nn.init.msra_normal_(m.weight, a=math.sqrt(5))
                if m.bias is not None:
                    fan_in, _ = nn.init.calculate_fan_in_and_fan_out(m.weight)
                    bound = 1 / math.sqrt(fan_in)
                    nn.init.uniform_(m.bias, -bound, bound)
            # elif isinstance(m, nn.BatchNorm1d):
            #     nn.init.ones_(m.weight)
            #     nn.init.zeros_(m.bias)

    def generate_overlap_mask(self, points_src, points_ref, mask_src, mask_ref, transform_gt):
        points_src[F.logical_not(mask_src.astype("bool")), :] = 50.0
        points_ref[F.logical_not(mask_ref.astype("bool")), :] = 100.0
        points_src = se3.mge_transform(transform_gt, points_src)
        points_src = F.expand_dims(points_src, axis=2)
        points_ref = F.expand_dims(points_ref, axis=1)
        dist_matrix = F.sqrt(F.sum(F.square(points_src - points_ref), axis=-1))  # (B, N, N)
        dist_s2r = F.min(dist_matrix, axis=2)
        dist_r2s = F.min(dist_matrix, axis=1)
        overlap_src_mask = dist_s2r < self.overlap_dist  # (B, N)
        overlap_ref_mask = dist_r2s < self.overlap_dist  # (B, N)
        return overlap_src_mask, overlap_ref_mask

    def forward(self, data_batch):
        endpoints = {}
        xyz_src = data_batch["points_src"]
        xyz_ref = data_batch["points_ref"]
        transform_gt = data_batch["transform_gt"]
        pose_gt = data_batch["pose_gt"]
        # init endpoints
        all_src_cls_pair = []
        all_ref_cls_pair = []
        all_transform_pair = []
        all_pose_pair = []
        all_xyz_src_t = [xyz_src]
        # init params
        B, src_N, _ = xyz_src.shape
        _, ref_N, _ = xyz_ref.shape
        init_quat = F.tile(mge.tensor([1, 0, 0, 0], dtype="float32"), (B, 1))  # (B, 4)
        init_translate = F.tile(mge.tensor([0, 0, 0], dtype="float32"), (B, 1))  # (B, 3)
        pose_pred = F.concat((init_quat, init_translate), axis=1)  # (B, 7)
        # copy xyz_src so the iterative refinement leaves the input untouched
        xyz_src_iter = F.copy(xyz_src, device=xyz_src.device)
        for i in range(self.num_iter):
            # delay mask: use all-ones masks for the first two iterations
            if i < 2:
                src_pred_mask = F.ones((B, src_N), dtype=xyz_src.dtype)
                ref_pred_mask = F.ones((B, ref_N), dtype=xyz_ref.dtype)
            # encoder
            src_encoder_feats, src_glob_feat = self.encoder[i](xyz_src_iter.transpose(0, 2, 1).detach(),
                                                               F.expand_dims(src_pred_mask, axis=1))
            ref_encoder_feats, ref_glob_feat = self.encoder[i](xyz_ref.transpose(0, 2, 1).detach(), F.expand_dims(ref_pred_mask, axis=1))
            # fusion
            src_concat_feat = F.concat(
                (src_encoder_feats[0], F.repeat(src_glob_feat, src_N, axis=2), F.repeat(ref_glob_feat, src_N, axis=2)), axis=1)
            ref_concat_feat = F.concat(
                (ref_encoder_feats[0], F.repeat(ref_glob_feat, ref_N, axis=2), F.repeat(src_glob_feat, ref_N, axis=2)), axis=1)
            _, src_fused_feat = self.fusion[i](src_concat_feat, F.expand_dims(src_pred_mask, axis=1))
            _, ref_fused_feat = self.fusion[i](ref_concat_feat, F.expand_dims(ref_pred_mask, axis=1))
            # decoder
            src_decoder_feats, src_cls_pred = self.decoder[i](src_fused_feat)
            ref_decoder_feats, ref_cls_pred = self.decoder[i](ref_fused_feat)
            # regression
            src_feat = F.concat(src_decoder_feats, axis=1) * F.expand_dims(src_pred_mask, axis=1)
            ref_feat = | F.concat(ref_decoder_feats, axis=1) | megengine.functional.concat |
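Multiplying the decoder features by the predicted mask before the global max pool silences points classified as non-overlapping, so they cannot dominate any channel. A tiny sketch of the effect; it assumes non-negative features (e.g. post-ReLU), since otherwise a zeroed entry could still win the max:

import megengine as mge
import megengine.functional as F

feat = mge.tensor([[[1.0, 3.0, 2.0],
                    [4.0, 0.5, 6.0]]])  # (B=1, C=2, N=3)
mask = mge.tensor([[1.0, 0.0, 1.0]])    # point 1 predicted non-overlapping
pooled = F.max(feat * F.expand_dims(mask, axis=1), axis=-1)
print(pooled.numpy())                   # [[2. 6.]]: the masked 3.0 is gone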
import megengine as mge
import megengine.module as nn
import megengine.functional as F
from model.module import Encoder, Fusion, Decoder, Regression
from common import se3, quaternion
import math


class OMNet(nn.Module):
    def __init__(self, params):
        super(OMNet, self).__init__()
        self.num_iter = params.titer
        self.encoder = [Encoder() for _ in range(self.num_iter)]
        self.fusion = [Fusion() for _ in range(self.num_iter)]
        self.decoder = [Decoder() for _ in range(self.num_iter)]
        self.regression = [Regression() for _ in range(self.num_iter)]
        self.overlap_dist = params.overlap_dist
        for m in self.modules():
            if isinstance(m, nn.Conv1d) or isinstance(m, nn.Linear):
                nn.init.msra_normal_(m.weight, a=math.sqrt(5))
                if m.bias is not None:
                    fan_in, _ = nn.init.calculate_fan_in_and_fan_out(m.weight)
                    bound = 1 / math.sqrt(fan_in)
                    nn.init.uniform_(m.bias, -bound, bound)
            # elif isinstance(m, nn.BatchNorm1d):
            #     nn.init.ones_(m.weight)
            #     nn.init.zeros_(m.bias)

    def generate_overlap_mask(self, points_src, points_ref, mask_src, mask_ref, transform_gt):
        points_src[F.logical_not(mask_src.astype("bool")), :] = 50.0
        points_ref[F.logical_not(mask_ref.astype("bool")), :] = 100.0
        points_src = se3.mge_transform(transform_gt, points_src)
        points_src = F.expand_dims(points_src, axis=2)
        points_ref = F.expand_dims(points_ref, axis=1)
        dist_matrix = F.sqrt(F.sum(F.square(points_src - points_ref), axis=-1))  # (B, N, N)
        dist_s2r = F.min(dist_matrix, axis=2)
        dist_r2s = F.min(dist_matrix, axis=1)
        overlap_src_mask = dist_s2r < self.overlap_dist  # (B, N)
        overlap_ref_mask = dist_r2s < self.overlap_dist  # (B, N)
        return overlap_src_mask, overlap_ref_mask

    def forward(self, data_batch):
        endpoints = {}
        xyz_src = data_batch["points_src"]
        xyz_ref = data_batch["points_ref"]
        transform_gt = data_batch["transform_gt"]
        pose_gt = data_batch["pose_gt"]
        # init endpoints
        all_src_cls_pair = []
        all_ref_cls_pair = []
        all_transform_pair = []
        all_pose_pair = []
        all_xyz_src_t = [xyz_src]
        # init params
        B, src_N, _ = xyz_src.shape
        _, ref_N, _ = xyz_ref.shape
        init_quat = F.tile(mge.tensor([1, 0, 0, 0], dtype="float32"), (B, 1))  # (B, 4)
        init_translate = F.tile(mge.tensor([0, 0, 0], dtype="float32"), (B, 1))  # (B, 3)
        pose_pred = F.concat((init_quat, init_translate), axis=1)  # (B, 7)
        # copy xyz_src so the iterative refinement leaves the input untouched
        xyz_src_iter = F.copy(xyz_src, device=xyz_src.device)
        for i in range(self.num_iter):
            # delay mask: use all-ones masks for the first two iterations
            if i < 2:
                src_pred_mask = F.ones((B, src_N), dtype=xyz_src.dtype)
                ref_pred_mask = F.ones((B, ref_N), dtype=xyz_ref.dtype)
            # encoder
            src_encoder_feats, src_glob_feat = self.encoder[i](xyz_src_iter.transpose(0, 2, 1).detach(),
                                                               F.expand_dims(src_pred_mask, axis=1))
            ref_encoder_feats, ref_glob_feat = self.encoder[i](xyz_ref.transpose(0, 2, 1).detach(), F.expand_dims(ref_pred_mask, axis=1))
            # fusion
            src_concat_feat = F.concat(
                (src_encoder_feats[0], F.repeat(src_glob_feat, src_N, axis=2), F.repeat(ref_glob_feat, src_N, axis=2)), axis=1)
            ref_concat_feat = F.concat(
                (ref_encoder_feats[0], F.repeat(ref_glob_feat, ref_N, axis=2), F.repeat(src_glob_feat, ref_N, axis=2)), axis=1)
            _, src_fused_feat = self.fusion[i](src_concat_feat, F.expand_dims(src_pred_mask, axis=1))
            _, ref_fused_feat = self.fusion[i](ref_concat_feat, F.expand_dims(ref_pred_mask, axis=1))
            # decoder
            src_decoder_feats, src_cls_pred = self.decoder[i](src_fused_feat)
            ref_decoder_feats, ref_cls_pred = self.decoder[i](ref_fused_feat)
            # regression
            src_feat = F.concat(src_decoder_feats, axis=1) * F.expand_dims(src_pred_mask, axis=1)
            ref_feat = F.concat(ref_decoder_feats, axis=1) * | F.expand_dims(ref_pred_mask, axis=1) | megengine.functional.expand_dims |
import megengine as mge
import megengine.module as nn
import megengine.functional as F
from model.module import Encoder, Fusion, Decoder, Regression
from common import se3, quaternion
import math


class OMNet(nn.Module):
    def __init__(self, params):
        super(OMNet, self).__init__()
        self.num_iter = params.titer
        self.encoder = [Encoder() for _ in range(self.num_iter)]
        self.fusion = [Fusion() for _ in range(self.num_iter)]
        self.decoder = [Decoder() for _ in range(self.num_iter)]
        self.regression = [Regression() for _ in range(self.num_iter)]
        self.overlap_dist = params.overlap_dist
        for m in self.modules():
            if isinstance(m, nn.Conv1d) or isinstance(m, nn.Linear):
                nn.init.msra_normal_(m.weight, a=math.sqrt(5))
                if m.bias is not None:
                    fan_in, _ = nn.init.calculate_fan_in_and_fan_out(m.weight)
                    bound = 1 / math.sqrt(fan_in)
                    nn.init.uniform_(m.bias, -bound, bound)
            # elif isinstance(m, nn.BatchNorm1d):
            #     nn.init.ones_(m.weight)
            #     nn.init.zeros_(m.bias)

    def generate_overlap_mask(self, points_src, points_ref, mask_src, mask_ref, transform_gt):
        points_src[F.logical_not(mask_src.astype("bool")), :] = 50.0
        points_ref[F.logical_not(mask_ref.astype("bool")), :] = 100.0
        points_src = se3.mge_transform(transform_gt, points_src)
        points_src = F.expand_dims(points_src, axis=2)
        points_ref = F.expand_dims(points_ref, axis=1)
        dist_matrix = F.sqrt(F.sum(F.square(points_src - points_ref), axis=-1))  # (B, N, N)
        dist_s2r = F.min(dist_matrix, axis=2)
        dist_r2s = F.min(dist_matrix, axis=1)
        overlap_src_mask = dist_s2r < self.overlap_dist  # (B, N)
        overlap_ref_mask = dist_r2s < self.overlap_dist  # (B, N)
        return overlap_src_mask, overlap_ref_mask

    def forward(self, data_batch):
        endpoints = {}
        xyz_src = data_batch["points_src"]
        xyz_ref = data_batch["points_ref"]
        transform_gt = data_batch["transform_gt"]
        pose_gt = data_batch["pose_gt"]
        # init endpoints
        all_src_cls_pair = []
        all_ref_cls_pair = []
        all_transform_pair = []
        all_pose_pair = []
        all_xyz_src_t = [xyz_src]
        # init params
        B, src_N, _ = xyz_src.shape
        _, ref_N, _ = xyz_ref.shape
        init_quat = F.tile(mge.tensor([1, 0, 0, 0], dtype="float32"), (B, 1))  # (B, 4)
        init_translate = F.tile(mge.tensor([0, 0, 0], dtype="float32"), (B, 1))  # (B, 3)
        pose_pred = F.concat((init_quat, init_translate), axis=1)  # (B, 7)
        # copy xyz_src so the iterative refinement leaves the input untouched
        xyz_src_iter = F.copy(xyz_src, device=xyz_src.device)
        for i in range(self.num_iter):
            # delay mask: use all-ones masks for the first two iterations
            if i < 2:
                src_pred_mask = F.ones((B, src_N), dtype=xyz_src.dtype)
                ref_pred_mask = F.ones((B, ref_N), dtype=xyz_ref.dtype)
            # encoder
            src_encoder_feats, src_glob_feat = self.encoder[i](xyz_src_iter.transpose(0, 2, 1).detach(),
                                                               F.expand_dims(src_pred_mask, axis=1))
            ref_encoder_feats, ref_glob_feat = self.encoder[i](xyz_ref.transpose(0, 2, 1).detach(), F.expand_dims(ref_pred_mask, axis=1))
            # fusion
            src_concat_feat = F.concat(
                (src_encoder_feats[0], F.repeat(src_glob_feat, src_N, axis=2), F.repeat(ref_glob_feat, src_N, axis=2)), axis=1)
            ref_concat_feat = F.concat(
                (ref_encoder_feats[0], F.repeat(ref_glob_feat, ref_N, axis=2), F.repeat(src_glob_feat, ref_N, axis=2)), axis=1)
            _, src_fused_feat = self.fusion[i](src_concat_feat, F.expand_dims(src_pred_mask, axis=1))
            _, ref_fused_feat = self.fusion[i](ref_concat_feat, F.expand_dims(ref_pred_mask, axis=1))
            # decoder
            src_decoder_feats, src_cls_pred = self.decoder[i](src_fused_feat)
            ref_decoder_feats, ref_cls_pred = self.decoder[i](ref_fused_feat)
            # regression
            src_feat = F.concat(src_decoder_feats, axis=1) * F.expand_dims(src_pred_mask, axis=1)
            ref_feat = F.concat(ref_decoder_feats, axis=1) * F.expand_dims(ref_pred_mask, axis=1)
            concat_feat = F.concat((src_fused_feat, src_feat, ref_fused_feat, ref_feat), axis=1)
            concat_feat = F.max(concat_feat, axis=-1)
            pose_pred_iter = self.regression[i](concat_feat)  # (B, 7)
            xyz_src_iter = quaternion.mge_quat_transform(pose_pred_iter, xyz_src_iter.detach())
            pose_pred = quaternion.mge_transform_pose(pose_pred.detach(), pose_pred_iter)
            transform_pred = quaternion.mge_quat2mat(pose_pred)
            # compute overlap and cls gt
            overlap_src_mask, overlap_ref_mask = self.generate_overlap_mask( | F.copy(xyz_src, device=xyz_src.device) | megengine.functional.copy |
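Each iteration composes its pose increment onto the running estimate via quaternion.mge_transform_pose. A hedged numpy sketch of one plausible composition convention (w-first unit quaternions, increment applied after the accumulated pose); the authoritative convention lives in common/quaternion.py:

import numpy as np

def quat_mul(q1, q2):
    # Hamilton product of w-first quaternions
    w1, x1, y1, z1 = q1
    w2, x2, y2, z2 = q2
    return np.array([w1 * w2 - x1 * x2 - y1 * y2 - z1 * z2,
                     w1 * x2 + x1 * w2 + y1 * z2 - z1 * y2,
                     w1 * y2 - x1 * z2 + y1 * w2 + z1 * x2,
                     w1 * z2 + x1 * y2 - y1 * x2 + z1 * w2])

def quat_rotate(q, p):
    # rotate point p by unit quaternion q via q * (0, p) * conj(q)
    q_conj = q * np.array([1.0, -1.0, -1.0, -1.0])
    return quat_mul(quat_mul(q, np.concatenate([[0.0], p])), q_conj)[1:]

def compose(step, base):
    # total(x) = R_step (R_base x + t_base) + t_step; poses are (quat, t) in R^7
    q_s, t_s = step[:4], step[4:]
    q_b, t_b = base[:4], base[4:]
    return np.concatenate([quat_mul(q_s, q_b), quat_rotate(q_s, t_b) + t_s])

identity = np.array([1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0])
step = np.array([np.cos(np.pi / 8), 0.0, 0.0, np.sin(np.pi / 8), 0.1, 0.0, 0.0])
print(compose(step, identity))  # one increment applied to the identity == step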
import megengine as mge
import megengine.module as nn
import megengine.functional as F
from model.module import Encoder, Fusion, Decoder, Regression
from common import se3, quaternion
import math


class OMNet(nn.Module):
    def __init__(self, params):
        super(OMNet, self).__init__()
        self.num_iter = params.titer
        self.encoder = [Encoder() for _ in range(self.num_iter)]
        self.fusion = [Fusion() for _ in range(self.num_iter)]
        self.decoder = [Decoder() for _ in range(self.num_iter)]
        self.regression = [Regression() for _ in range(self.num_iter)]
        self.overlap_dist = params.overlap_dist
        for m in self.modules():
            if isinstance(m, nn.Conv1d) or isinstance(m, nn.Linear):
                nn.init.msra_normal_(m.weight, a=math.sqrt(5))
                if m.bias is not None:
                    fan_in, _ = nn.init.calculate_fan_in_and_fan_out(m.weight)
                    bound = 1 / math.sqrt(fan_in)
                    nn.init.uniform_(m.bias, -bound, bound)
            # elif isinstance(m, nn.BatchNorm1d):
            #     nn.init.ones_(m.weight)
            #     nn.init.zeros_(m.bias)

    def generate_overlap_mask(self, points_src, points_ref, mask_src, mask_ref, transform_gt):
        points_src[F.logical_not(mask_src.astype("bool")), :] = 50.0
        points_ref[F.logical_not(mask_ref.astype("bool")), :] = 100.0
        points_src = se3.mge_transform(transform_gt, points_src)
        points_src = F.expand_dims(points_src, axis=2)
        points_ref = F.expand_dims(points_ref, axis=1)
        dist_matrix = F.sqrt(F.sum(F.square(points_src - points_ref), axis=-1))  # (B, N, N)
        dist_s2r = F.min(dist_matrix, axis=2)
        dist_r2s = F.min(dist_matrix, axis=1)
        overlap_src_mask = dist_s2r < self.overlap_dist  # (B, N)
        overlap_ref_mask = dist_r2s < self.overlap_dist  # (B, N)
        return overlap_src_mask, overlap_ref_mask

    def forward(self, data_batch):
        endpoints = {}
        xyz_src = data_batch["points_src"]
        xyz_ref = data_batch["points_ref"]
        transform_gt = data_batch["transform_gt"]
        pose_gt = data_batch["pose_gt"]
        # init endpoints
        all_src_cls_pair = []
        all_ref_cls_pair = []
        all_transform_pair = []
        all_pose_pair = []
        all_xyz_src_t = [xyz_src]
        # init params
        B, src_N, _ = xyz_src.shape
        _, ref_N, _ = xyz_ref.shape
        init_quat = F.tile(mge.tensor([1, 0, 0, 0], dtype="float32"), (B, 1))  # (B, 4)
        init_translate = F.tile(mge.tensor([0, 0, 0], dtype="float32"), (B, 1))  # (B, 3)
        pose_pred = F.concat((init_quat, init_translate), axis=1)  # (B, 7)
        # copy xyz_src so the iterative refinement leaves the input untouched
        xyz_src_iter = F.copy(xyz_src, device=xyz_src.device)
        for i in range(self.num_iter):
            # delay mask: use all-ones masks for the first two iterations
            if i < 2:
                src_pred_mask = F.ones((B, src_N), dtype=xyz_src.dtype)
                ref_pred_mask = F.ones((B, ref_N), dtype=xyz_ref.dtype)
            # encoder
            src_encoder_feats, src_glob_feat = self.encoder[i](xyz_src_iter.transpose(0, 2, 1).detach(),
                                                               F.expand_dims(src_pred_mask, axis=1))
            ref_encoder_feats, ref_glob_feat = self.encoder[i](xyz_ref.transpose(0, 2, 1).detach(), F.expand_dims(ref_pred_mask, axis=1))
            # fusion
            src_concat_feat = F.concat(
                (src_encoder_feats[0], F.repeat(src_glob_feat, src_N, axis=2), F.repeat(ref_glob_feat, src_N, axis=2)), axis=1)
            ref_concat_feat = F.concat(
                (ref_encoder_feats[0], F.repeat(ref_glob_feat, ref_N, axis=2), F.repeat(src_glob_feat, ref_N, axis=2)), axis=1)
            _, src_fused_feat = self.fusion[i](src_concat_feat, F.expand_dims(src_pred_mask, axis=1))
            _, ref_fused_feat = self.fusion[i](ref_concat_feat, F.expand_dims(ref_pred_mask, axis=1))
            # decoder
            src_decoder_feats, src_cls_pred = self.decoder[i](src_fused_feat)
            ref_decoder_feats, ref_cls_pred = self.decoder[i](ref_fused_feat)
            # regression
            src_feat = F.concat(src_decoder_feats, axis=1) * F.expand_dims(src_pred_mask, axis=1)
            ref_feat = F.concat(ref_decoder_feats, axis=1) * F.expand_dims(ref_pred_mask, axis=1)
            concat_feat = F.concat((src_fused_feat, src_feat, ref_fused_feat, ref_feat), axis=1)
            concat_feat = F.max(concat_feat, axis=-1)
            pose_pred_iter = self.regression[i](concat_feat)  # (B, 7)
            xyz_src_iter = quaternion.mge_quat_transform(pose_pred_iter, xyz_src_iter.detach())
            pose_pred = quaternion.mge_transform_pose(pose_pred.detach(), pose_pred_iter)
            transform_pred = quaternion.mge_quat2mat(pose_pred)
            # compute overlap and cls gt
            overlap_src_mask, overlap_ref_mask = self.generate_overlap_mask(F.copy(xyz_src, device=xyz_src.device),
| F.copy(xyz_ref, device=xyz_ref.device) | megengine.functional.copy |