repo | file | code | file_length | avg_line_length | max_line_length | extension_type
---|---|---|---|---|---|---|
DCASE2022-TASK3 | DCASE2022-TASK3-main/code/methods/utils/dense_block.py | import torch
import torch.nn as nn
import torch.nn.functional as F
from torch import Tensor
class _DenseLayer(nn.Module):
def __init__(self, num_input_features, growth_rate, bn_size, drop_rate, dilation):
super(_DenseLayer, self).__init__()
self.add_module('norm1', nn.BatchNorm2d(num_input_features)),
self.add_module('relu1', nn.ReLU(inplace=True)),
self.add_module('conv1', nn.Conv2d(num_input_features, bn_size *
growth_rate, kernel_size=1, stride=1,
bias=False)),
self.add_module('norm2', nn.BatchNorm2d(bn_size * growth_rate)),
self.add_module('relu2', nn.ReLU(inplace=True)),
self.add_module('conv2', nn.Conv2d(bn_size * growth_rate, growth_rate,
kernel_size=3, stride=1, padding=dilation,
dilation=dilation, bias=False)),
self.drop_rate = float(drop_rate)
def bn_function(self, inputs):
        # type: (List[Tensor]) -> Tensor
concated_features = torch.cat(inputs, 1)
bottleneck_output = self.conv1(self.relu1(self.norm1(concated_features))) # noqa: T484
return bottleneck_output
def forward(self, input):
if isinstance(input, Tensor):
prev_features = [input]
else:
prev_features = input
bottleneck_output = self.bn_function(prev_features)
new_features = self.conv2(self.relu2(self.norm2(bottleneck_output)))
if self.drop_rate > 0:
new_features = F.dropout(new_features, p=self.drop_rate,
training=self.training)
return new_features
class _DenseBlock(nn.ModuleDict):
def __init__(self, num_layers, num_input_features, bn_size, growth_rate, drop_rate):
super(_DenseBlock, self).__init__()
for i in range(num_layers):
layer = _DenseLayer(
num_input_features + i * growth_rate,
growth_rate=growth_rate,
bn_size=bn_size,
drop_rate=drop_rate,
dilation=2**i,
)
self.add_module('denselayer%d' % (i + 1), layer)
def forward(self, init_features):
features = [init_features]
for _, layer in self.items():
new_features = layer(features)
features.append(new_features)
return torch.cat(features, 1)
class _Transition(nn.Sequential):
def __init__(self, num_input_features, num_output_features):
super(_Transition, self).__init__()
self.add_module('norm', nn.BatchNorm2d(num_input_features))
self.add_module('relu', nn.ReLU(inplace=True))
self.add_module('conv', nn.Conv2d(num_input_features, num_output_features,
kernel_size=1, stride=1, bias=False))
self.add_module('pool', nn.AvgPool2d(kernel_size=2, stride=2))
class DenseFeature(nn.Module):
def __init__(self, growth_rate=32, block_config=(4, 4, 4), num_init_features=64,
bn_size=4, drop_rate=0):
super(DenseFeature, self).__init__()
# Denseblocks
self.features = nn.Sequential()
num_features = num_init_features
for i, num_layers in enumerate(block_config):
block = _DenseBlock(
num_layers=num_layers,
num_input_features=num_features,
bn_size=bn_size,
growth_rate=growth_rate,
drop_rate=drop_rate
)
self.features.add_module('denseblock%d' % (i + 1), block)
num_features = num_features + num_layers * growth_rate
# if i != len(block_config) - 1:
if i < 2:
trans = _Transition(num_input_features=num_features,
num_output_features=num_features // 2)
self.features.add_module('transition%d' % (i + 1), trans)
num_features = num_features // 2
# Final batch norm
self.features.add_module('norm5', nn.BatchNorm2d(num_features))
self.features.add_module('relu5', nn.ReLU(inplace=True))
# Official init from torch repo.
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight)
elif isinstance(m, nn.BatchNorm2d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.Linear):
nn.init.constant_(m.bias, 0)
def forward(self, x):
features = self.features(x)
return features
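# Illustrative usage note (assumptions for demonstration only, not from the original file):
# DenseFeature expects an input whose channel dimension equals num_init_features, e.g. a
# (batch, 64, time, freq) tensor for the default num_init_features=64. Each dense block adds
# num_layers * growth_rate channels and the two transition layers halve both the channel
# count and the time/frequency resolution, so the default configuration maps a
# (batch, 64, T, F) input to a (batch, 240, T // 4, F // 4) feature map.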
if __name__ == '__main__':
layer = _DenseBlock(num_layers=4, num_input_features=8,
bn_size=4, growth_rate=16, drop_rate=0.1)
print(layer)
print(layer(torch.rand(3,8,5,5)).shape) | 4,940 | 40.175 | 95 | py |
DCASE2022-TASK3 | DCASE2022-TASK3-main/code/methods/utils/SELD_metrics.py | # Implements the localization and detection metrics proposed in [1] with extensions to support multi-instance of the same class from [2].
#
# [1] Joint Measurement of Localization and Detection of Sound Events
# Annamaria Mesaros, Sharath Adavanne, Archontis Politis, Toni Heittola, Tuomas Virtanen
# WASPAA 2019
#
# [2] Overview and Evaluation of Sound Event Localization and Detection in DCASE 2019
# Politis, Archontis, Annamaria Mesaros, Sharath Adavanne, Toni Heittola, and Tuomas Virtanen.
# IEEE/ACM Transactions on Audio, Speech, and Language Processing (2020).
#
# This script has MIT license
#
import numpy as np
eps = np.finfo(float).eps
from scipy.optimize import linear_sum_assignment
from IPython import embed
class SELDMetrics(object):
def __init__(self, doa_threshold=20, nb_classes=13, average='macro'):
'''
This class implements both the class-sensitive localization and location-sensitive detection metrics.
Additionally, based on the user input, the corresponding averaging is performed within the segment.
:param nb_classes: Number of sound classes. In the paper, nb_classes = 11
:param doa_thresh: DOA threshold for location sensitive detection.
'''
self._nb_classes = nb_classes
        # Variables for Location-sensitive detection performance
self._TP = np.zeros(self._nb_classes)
self._FP = np.zeros(self._nb_classes)
self._FP_spatial = np.zeros(self._nb_classes)
self._FN = np.zeros(self._nb_classes)
self._Nref = np.zeros(self._nb_classes)
self._spatial_T = doa_threshold
self._S = 0
self._D = 0
self._I = 0
# Variables for Class-sensitive localization performance
self._total_DE = np.zeros(self._nb_classes)
self._DE_TP = np.zeros(self._nb_classes)
self._DE_FP = np.zeros(self._nb_classes)
self._DE_FN = np.zeros(self._nb_classes)
self._average = average
def early_stopping_metric(self, _er, _f, _le, _lr):
"""
Compute early stopping metric from sed and doa errors.
        :param _er: error rate (0 to 1 range)
        :param _f: F-score (0 to 1 range)
        :param _le: localization error (in degrees)
        :param _lr: localization recall (0 to 1 range)
:return: early stopping metric result
"""
seld_metric = np.mean([
_er,
1 - _f,
_le / 180,
1 - _lr
], 0)
return seld_metric
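        # Worked example (illustrative values): with _er=0.5, _f=0.6, _le=20 degrees and
        # _lr=0.7, the metric is mean(0.5, 1 - 0.6, 20 / 180, 1 - 0.7) ~= 0.33; lower is better.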
def compute_seld_scores(self):
'''
Collect the final SELD scores
:return: returns both location-sensitive detection scores and class-sensitive localization scores
'''
ER = (self._S + self._D + self._I) / (self._Nref.sum() + eps)
classwise_results = []
if self._average == 'micro':
# Location-sensitive detection performance
F = self._TP.sum() / (eps + self._TP.sum() + self._FP_spatial.sum() + 0.5 * (self._FP.sum() + self._FN.sum()))
# Class-sensitive localization performance
LE = self._total_DE.sum() / float(self._DE_TP.sum() + eps) if self._DE_TP.sum() else 180
LR = self._DE_TP.sum() / (eps + self._DE_TP.sum() + self._DE_FN.sum())
SELD_scr = self.early_stopping_metric(ER, F, LE, LR)
elif self._average == 'macro':
# Location-sensitive detection performance
F = self._TP / (eps + self._TP + self._FP_spatial + 0.5 * (self._FP + self._FN))
# Class-sensitive localization performance
LE = self._total_DE / (self._DE_TP + eps)
LE[self._DE_TP==0] = 180.0
LR = self._DE_TP / (eps + self._DE_TP + self._DE_FN)
SELD_scr = self.early_stopping_metric(np.repeat(ER, self._nb_classes), F, LE, LR)
classwise_results = np.array([np.repeat(ER, self._nb_classes), F, LE, LR, SELD_scr])
F, LE, LR, SELD_scr = F.mean(), LE.mean(), LR.mean(), SELD_scr.mean()
return ER, F, LE, LR, SELD_scr, classwise_results
def update_seld_scores(self, pred, gt):
'''
Implements the spatial error averaging according to equation 5 in the paper [1] (see papers in the title of the code).
Adds the multitrack extensions proposed in paper [2]
The input pred/gt can either both be Cartesian or Degrees
:param pred: dictionary containing class-wise prediction results for each N-seconds segment block
:param gt: dictionary containing class-wise groundtruth for each N-seconds segment block
'''
for block_cnt in range(len(gt.keys())):
loc_FN, loc_FP = 0, 0
for class_cnt in range(self._nb_classes):
                # Counting the number of reference tracks for each class in the segment
nb_gt_doas = max([len(val) for val in gt[block_cnt][class_cnt][0][1]]) if class_cnt in gt[block_cnt] else None
nb_pred_doas = max([len(val) for val in pred[block_cnt][class_cnt][0][1]]) if class_cnt in pred[block_cnt] else None
if nb_gt_doas is not None:
self._Nref[class_cnt] += nb_gt_doas
if class_cnt in gt[block_cnt] and class_cnt in pred[block_cnt]:
# True positives or False positive case
# NOTE: For multiple tracks per class, associate the predicted DOAs to corresponding reference
# DOA-tracks using hungarian algorithm and then compute the average spatial distance between
# the associated reference-predicted tracks.
# Reference and predicted track matching
matched_track_dist = {}
matched_track_cnt = {}
gt_ind_list = gt[block_cnt][class_cnt][0][0]
pred_ind_list = pred[block_cnt][class_cnt][0][0]
for gt_ind, gt_val in enumerate(gt_ind_list):
if gt_val in pred_ind_list:
gt_arr = np.array(gt[block_cnt][class_cnt][0][1][gt_ind])
gt_ids = np.arange(len(gt_arr[:, -1])) #TODO if the reference has track IDS use here - gt_arr[:, -1]
gt_doas = gt_arr[:, :]
pred_ind = pred_ind_list.index(gt_val)
pred_arr = np.array(pred[block_cnt][class_cnt][0][1][pred_ind])
pred_doas = pred_arr[:, :]
if gt_doas.shape[-1] == 2: # convert DOAs to radians, if the input is in degrees
gt_doas = gt_doas * np.pi / 180.
pred_doas = pred_doas * np.pi / 180.
dist_list, row_inds, col_inds = least_distance_between_gt_pred(gt_doas, pred_doas)
# Collect the frame-wise distance between matched ref-pred DOA pairs
for dist_cnt, dist_val in enumerate(dist_list):
matched_gt_track = gt_ids[row_inds[dist_cnt]]
if matched_gt_track not in matched_track_dist:
matched_track_dist[matched_gt_track], matched_track_cnt[matched_gt_track] = [], []
matched_track_dist[matched_gt_track].append(dist_val)
matched_track_cnt[matched_gt_track].append(pred_ind)
# Update evaluation metrics based on the distance between ref-pred tracks
if len(matched_track_dist) == 0:
# if no tracks are found. This occurs when the predicted DOAs are not aligned frame-wise to the reference DOAs
loc_FN += nb_pred_doas
self._FN[class_cnt] += nb_pred_doas
self._DE_FN[class_cnt] += nb_pred_doas
else:
# for the associated ref-pred tracks compute the metrics
for track_id in matched_track_dist:
total_spatial_dist = sum(matched_track_dist[track_id])
total_framewise_matching_doa = len(matched_track_cnt[track_id])
avg_spatial_dist = total_spatial_dist / total_framewise_matching_doa
# Class-sensitive localization performance
self._total_DE[class_cnt] += avg_spatial_dist
self._DE_TP[class_cnt] += 1
# Location-sensitive detection performance
if avg_spatial_dist <= self._spatial_T:
self._TP[class_cnt] += 1
else:
loc_FP += 1
self._FP_spatial[class_cnt] += 1
                    # in the multi-instance of same class scenario, if the number of predicted tracks is greater
                    # than the number of reference tracks, count the excess as FP; if it is less, count the deficit as FN
if nb_pred_doas > nb_gt_doas:
# False positive
loc_FP += (nb_pred_doas-nb_gt_doas)
self._FP[class_cnt] += (nb_pred_doas-nb_gt_doas)
self._DE_FP[class_cnt] += (nb_pred_doas-nb_gt_doas)
elif nb_pred_doas < nb_gt_doas:
# False negative
loc_FN += (nb_gt_doas-nb_pred_doas)
self._FN[class_cnt] += (nb_gt_doas-nb_pred_doas)
self._DE_FN[class_cnt] += (nb_gt_doas-nb_pred_doas)
elif class_cnt in gt[block_cnt] and class_cnt not in pred[block_cnt]:
# False negative
loc_FN += nb_gt_doas
self._FN[class_cnt] += nb_gt_doas
self._DE_FN[class_cnt] += nb_gt_doas
elif class_cnt not in gt[block_cnt] and class_cnt in pred[block_cnt]:
# False positive
loc_FP += nb_pred_doas
self._FP[class_cnt] += nb_pred_doas
self._DE_FP[class_cnt] += nb_pred_doas
self._S += np.minimum(loc_FP, loc_FN)
self._D += np.maximum(0, loc_FN - loc_FP)
self._I += np.maximum(0, loc_FP - loc_FN)
return
def distance_between_spherical_coordinates_rad(az1, ele1, az2, ele2):
"""
Angular distance between two spherical coordinates
MORE: https://en.wikipedia.org/wiki/Great-circle_distance
:return: angular distance in degrees
"""
dist = np.sin(ele1) * np.sin(ele2) + np.cos(ele1) * np.cos(ele2) * np.cos(np.abs(az1 - az2))
# Making sure the dist values are in -1 to 1 range, else np.arccos kills the job
dist = np.clip(dist, -1, 1)
dist = np.arccos(dist) * 180 / np.pi
return dist
def distance_between_cartesian_coordinates(x1, y1, z1, x2, y2, z2):
"""
Angular distance between two cartesian coordinates
MORE: https://en.wikipedia.org/wiki/Great-circle_distance
Check 'From chord length' section
:return: angular distance in degrees
"""
# Normalize the Cartesian vectors
N1 = np.sqrt(x1**2 + y1**2 + z1**2 + 1e-10)
N2 = np.sqrt(x2**2 + y2**2 + z2**2 + 1e-10)
x1, y1, z1, x2, y2, z2 = x1/N1, y1/N1, z1/N1, x2/N2, y2/N2, z2/N2
#Compute the distance
dist = x1*x2 + y1*y2 + z1*z2
dist = np.clip(dist, -1, 1)
dist = np.arccos(dist) * 180 / np.pi
return dist
def least_distance_between_gt_pred(gt_list, pred_list):
"""
Shortest distance between two sets of DOA coordinates. Given a set of groundtruth coordinates,
and its respective predicted coordinates, we calculate the distance between each of the
coordinate pairs resulting in a matrix of distances, where one axis represents the number of groundtruth
coordinates and the other the predicted coordinates. The number of estimated peaks need not be the same as in
groundtruth, thus the distance matrix is not always a square matrix. We use the hungarian algorithm to find the
least cost in this distance matrix.
:param gt_list_xyz: list of ground-truth Cartesian or Polar coordinates in Radians
    :param pred_list_xyz: list of predicted Cartesian or Polar coordinates in Radians
    :return: cost - angular distance of each matched ground-truth/prediction pair
    :return: row_ind - indices of the matched ground-truth DOAs
    :return: col_ind - indices of the matched predicted DOAs
"""
gt_len, pred_len = gt_list.shape[0], pred_list.shape[0]
ind_pairs = np.array([[x, y] for y in range(pred_len) for x in range(gt_len)])
cost_mat = np.zeros((gt_len, pred_len))
if gt_len and pred_len:
if len(gt_list[0]) == 3: #Cartesian
x1, y1, z1, x2, y2, z2 = gt_list[ind_pairs[:, 0], 0], gt_list[ind_pairs[:, 0], 1], gt_list[ind_pairs[:, 0], 2], pred_list[ind_pairs[:, 1], 0], pred_list[ind_pairs[:, 1], 1], pred_list[ind_pairs[:, 1], 2]
cost_mat[ind_pairs[:, 0], ind_pairs[:, 1]] = distance_between_cartesian_coordinates(x1, y1, z1, x2, y2, z2)
else:
az1, ele1, az2, ele2 = gt_list[ind_pairs[:, 0], 0], gt_list[ind_pairs[:, 0], 1], pred_list[ind_pairs[:, 1], 0], pred_list[ind_pairs[:, 1], 1]
cost_mat[ind_pairs[:, 0], ind_pairs[:, 1]] = distance_between_spherical_coordinates_rad(az1, ele1, az2, ele2)
row_ind, col_ind = linear_sum_assignment(cost_mat)
cost = cost_mat[row_ind, col_ind]
return cost, row_ind, col_ind
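# Illustrative sanity check (toy values chosen for demonstration only): match two
# ground-truth DOAs against two predicted DOAs with the Hungarian assignment and print
# the matched angular distances in degrees.
if __name__ == '__main__':
    _gt = np.array([[1.0, 0.0, 0.0], [0.0, 1.0, 0.0]])       # Cartesian ground-truth DOAs
    _pred = np.array([[0.0, 0.98, 0.2], [0.99, 0.1, 0.0]])   # Cartesian predicted DOAs
    _dist, _row_ind, _col_ind = least_distance_between_gt_pred(_gt, _pred)
    print(_dist)               # angular distance of each matched pair, in degrees
    print(_row_ind, _col_ind)  # matched ground-truth / prediction indices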
| 13,628 | 48.923077 | 215 | py |
DCASE2022-TASK3 | DCASE2022-TASK3-main/code/methods/utils/__init__.py | 0 | 0 | 0 | py |
|
DCASE2022-TASK3 | DCASE2022-TASK3-main/code/methods/utils/data_utilities.py | import numpy as np
import pandas as pd
import torch
def _segment_index(x, chunklen, hoplen, last_frame_always_paddding=False):
"""Segment input x with chunklen, hoplen parameters. Return
Args:
x: input, time domain or feature domain (channels, time)
        chunklen: length of each segment, in frames
        hoplen: hop length between consecutive segments, in frames
        last_frame_always_paddding: whether to always pad the last segment to chunklen
Return:
segmented_indexes: [(begin_index, end_index), (begin_index, end_index), ...]
segmented_pad_width: [(before, after), (before, after), ...]
"""
x_len = x.shape[1]
segmented_indexes = []
segmented_pad_width = []
if x_len < chunklen:
begin_index = 0
end_index = x_len
pad_width_before = 0
pad_width_after = chunklen - x_len
segmented_indexes.append((begin_index, end_index))
segmented_pad_width.append((pad_width_before, pad_width_after))
return segmented_indexes, segmented_pad_width
n_frames = 1 + (x_len - chunklen) // hoplen
for n in range(n_frames):
begin_index = n * hoplen
end_index = n * hoplen + chunklen
segmented_indexes.append((begin_index, end_index))
pad_width_before = 0
pad_width_after = 0
segmented_pad_width.append((pad_width_before, pad_width_after))
if (n_frames - 1) * hoplen + chunklen == x_len:
return segmented_indexes, segmented_pad_width
# the last frame
if last_frame_always_paddding:
begin_index = n_frames * hoplen
end_index = x_len
pad_width_before = 0
pad_width_after = chunklen - (x_len - n_frames * hoplen)
else:
if x_len - n_frames * hoplen >= chunklen // 2:
begin_index = n_frames * hoplen
end_index = x_len
pad_width_before = 0
pad_width_after = chunklen - (x_len - n_frames * hoplen)
else:
begin_index = x_len - chunklen
end_index = x_len
pad_width_before = 0
pad_width_after = 0
segmented_indexes.append((begin_index, end_index))
segmented_pad_width.append((pad_width_before, pad_width_after))
return segmented_indexes, segmented_pad_width
def load_output_format_file(_output_format_file):
"""
Loads DCASE output format csv file and returns it in dictionary format
:param _output_format_file: DCASE output format CSV
:return: _output_dict: dictionary
"""
_output_dict = {}
_fid = open(_output_format_file, 'r')
# next(_fid)
for _line in _fid:
_words = _line.strip().split(',')
_frame_ind = int(_words[0])
if _frame_ind not in _output_dict:
_output_dict[_frame_ind] = []
if len(_words) == 5: #polar coordinates
# _output_dict[_frame_ind].append([int(_words[1]), int(_words[2]), float(_words[3]), float(_words[4])])
_output_dict[_frame_ind].append([int(_words[1]), float(_words[3]), float(_words[4])])
elif len(_words) == 6: # cartesian coordinates
# _output_dict[_frame_ind].append([int(_words[1]), int(_words[2]), float(_words[3]), float(_words[4]), float(_words[5])])
_output_dict[_frame_ind].append([int(_words[1]), float(_words[3]), float(_words[4]), float(_words[5])])
elif len(_words) == 4:
_output_dict[_frame_ind].append([int(_words[1]), float(_words[2]), float(_words[3])])
_fid.close()
return _output_dict
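# Illustrative note (layout inferred from the parsing above, not an official specification):
# each CSV row is expected to look like
#   frame,class,track,azimuth,elevation   -> 5 columns, polar
#   frame,class,track,x,y,z               -> 6 columns, Cartesian
#   frame,class,azimuth,elevation         -> 4 columns, polar without a track index
# The track index, when present, is dropped while building the dictionary.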
def write_output_format_file(_output_format_file, _output_format_dict):
"""
Writes DCASE output format csv file, given output format dictionary
:param _output_format_file:
:param _output_format_dict:
:return:
"""
_fid = open(_output_format_file, 'w')
for _frame_ind in _output_format_dict.keys():
for _value in _output_format_dict[_frame_ind]:
# Write Cartesian format output. Since baseline does not estimate track count we use a fixed value.
_fid.write('{},{},{},{}\n'.format(int(_frame_ind), int(_value[0]), int(_value[1]), int(_value[2])))
_fid.close()
def to_metrics_format(label_dict, num_frames, label_resolution=0.1):
"""Collect class-wise sound event location information in segments of length 1s (according to DCASE2022) from reference dataset
Reference:
https://github.com/sharathadavanne/seld-dcase2022/blob/main/cls_feature_class.py
Args:
label_dict: Dictionary containing frame-wise sound event time and location information. Dcase format.
num_frames: Total number of frames in the recording.
label_resolution: Groundtruth label resolution.
Output:
output_dict: Dictionary containing class-wise sound event location information in each segment of audio
dictionary_name[segment-index][class-index] = list(frame-cnt-within-segment, azimuth in degree, elevation in degree)
"""
num_label_frames_1s = int(1 / label_resolution)
num_blocks = int(np.ceil(num_frames / float(num_label_frames_1s)))
output_dict = {x: {} for x in range(num_blocks)}
for n_frame in range(0, num_frames, num_label_frames_1s):
# Collect class-wise information for each block
# [class][frame] = <list of doa values>
        # Data structure supports multi-instance occurrence of same class
n_block = n_frame // num_label_frames_1s
loc_dict = {}
for audio_frame in range(n_frame, n_frame + num_label_frames_1s):
if audio_frame not in label_dict:
continue
for value in label_dict[audio_frame]:
if value[0] not in loc_dict:
loc_dict[value[0]] = {}
block_frame = audio_frame - n_frame
if block_frame not in loc_dict[value[0]]:
loc_dict[value[0]][block_frame] = []
loc_dict[value[0]][block_frame].append(value[1:])
# Update the block wise details collected above in a global structure
for n_class in loc_dict:
if n_class not in output_dict[n_block]:
output_dict[n_block][n_class] = []
keys = [k for k in loc_dict[n_class]]
values = [loc_dict[n_class][k] for k in loc_dict[n_class]]
output_dict[n_block][n_class].append([keys, values])
return output_dict
def track_to_dcase_format(sed_labels, doa_labels):
"""Convert sed and doa labels from track-wise output format to dcase output format
Args:
sed_labels: SED labels, (num_frames, num_tracks=3, logits_events=13 (number of classes))
doa_labels: DOA labels, (num_frames, num_tracks=3, logits_degrees=2 (azi in radiance, ele in radiance))
Output:
output_dict: return a dict containing dcase output format
output_dict[frame-containing-events] = [[class_index_1, azi_1 in degree, ele_1 in degree], [class_index_2, azi_2 in degree, ele_2 in degree]]
"""
frame_size, num_tracks, num_classes= sed_labels.shape
output_dict = {}
for n_idx in range(frame_size):
for n_track in range(num_tracks):
class_index = list(np.where(sed_labels[n_idx, n_track, :])[0])
assert len(class_index) <= 1, 'class_index should be smaller or equal to 1!!\n'
if class_index:
event_doa = [class_index[0], int(np.around(doa_labels[n_idx, n_track, 0] * 180 / np.pi)), \
int(np.around(doa_labels[n_idx, n_track, 1] * 180 / np.pi))] # NOTE: this is in degree
if n_idx not in output_dict:
output_dict[n_idx] = []
output_dict[n_idx].append(event_doa)
return output_dict
def convert_output_format_polar_to_cartesian(in_dict):
out_dict = {}
for frame_cnt in in_dict.keys():
if frame_cnt not in out_dict:
out_dict[frame_cnt] = []
for tmp_val in in_dict[frame_cnt]:
ele_rad = tmp_val[2]*np.pi/180.
azi_rad = tmp_val[1]*np.pi/180
tmp_label = np.cos(ele_rad)
x = np.cos(azi_rad) * tmp_label
y = np.sin(azi_rad) * tmp_label
z = np.sin(ele_rad)
out_dict[frame_cnt].append([tmp_val[0], x, y, z])
return out_dict
def convert_output_format_cartesian_to_polar(in_dict):
out_dict = {}
for frame_cnt in in_dict.keys():
if frame_cnt not in out_dict:
out_dict[frame_cnt] = []
for tmp_val in in_dict[frame_cnt]:
x, y, z = tmp_val[1], tmp_val[2], tmp_val[3]
# in degrees
azimuth = np.arctan2(y, x) * 180 / np.pi
elevation = np.arctan2(z, np.sqrt(x**2 + y**2)) * 180 / np.pi
r = np.sqrt(x**2 + y**2 + z**2)
out_dict[frame_cnt].append([tmp_val[0], azimuth, elevation])
return out_dict
def distance_between_cartesian_coordinates(x1, y1, z1, x2, y2, z2):
"""
Angular distance between two cartesian coordinates
MORE: https://en.wikipedia.org/wiki/Great-circle_distance
Check 'From chord length' section
:return: angular distance in degrees
"""
# Normalize the Cartesian vectors
N1 = np.sqrt(x1**2 + y1**2 + z1**2 + 1e-10)
N2 = np.sqrt(x2**2 + y2**2 + z2**2 + 1e-10)
x1, y1, z1, x2, y2, z2 = x1/N1, y1/N1, z1/N1, x2/N2, y2/N2, z2/N2
#Compute the distance
dist = x1*x2 + y1*y2 + z1*z2
dist = np.clip(dist, -1, 1)
dist = np.arccos(dist) * 180 / np.pi
return dist
########################################
########## multi-accdoa
########################################
def get_multi_accdoa_labels(accdoa_in, nb_classes=13):
"""
Args:
accdoa_in: [batch_size, frames, num_track*num_axis*num_class=3*3*13]
nb_classes: scalar
Return:
sed: [num_track, batch_size, frames, num_class=13]
doa: [num_track, batch_size, frames, num_axis*num_class=3*13]
"""
x0, y0, z0 = accdoa_in[:, :, :1*nb_classes], accdoa_in[:, :, 1*nb_classes:2*nb_classes], accdoa_in[:, :, 2*nb_classes:3*nb_classes]
sed0 = np.sqrt(x0**2 + y0**2 + z0**2) > 0.5
doa0 = accdoa_in[:, :, :3*nb_classes]
x1, y1, z1 = accdoa_in[:, :, 3*nb_classes:4*nb_classes], accdoa_in[:, :, 4*nb_classes:5*nb_classes], accdoa_in[:, :, 5*nb_classes:6*nb_classes]
sed1 = np.sqrt(x1**2 + y1**2 + z1**2) > 0.5
doa1 = accdoa_in[:, :, 3*nb_classes: 6*nb_classes]
x2, y2, z2 = accdoa_in[:, :, 6*nb_classes:7*nb_classes], accdoa_in[:, :, 7*nb_classes:8*nb_classes], accdoa_in[:, :, 8*nb_classes:]
sed2 = np.sqrt(x2**2 + y2**2 + z2**2) > 0.5
doa2 = accdoa_in[:, :, 6*nb_classes:]
sed = np.stack((sed0, sed1, sed2), axis=0)
doa = np.stack((doa0, doa1, doa2), axis=0)
return sed, doa
def determine_similar_location(sed_pred0, sed_pred1, doa_pred0, doa_pred1, class_cnt, thresh_unify, nb_classes):
if (sed_pred0 == 1) and (sed_pred1 == 1):
if distance_between_cartesian_coordinates(doa_pred0[class_cnt], doa_pred0[class_cnt+1*nb_classes], doa_pred0[class_cnt+2*nb_classes],
doa_pred1[class_cnt], doa_pred1[class_cnt+1*nb_classes], doa_pred1[class_cnt+2*nb_classes]) < thresh_unify:
return 1
else:
return 0
else:
return 0
def multi_accdoa_to_dcase_format(sed_pred, doa_pred, threshold_unify=15,nb_classes=13):
sed_pred0, sed_pred1, sed_pred2 = sed_pred
doa_pred0, doa_pred1, doa_pred2 = doa_pred
output_dict = {}
for frame_cnt in range(sed_pred0.shape[0]):
for class_cnt in range(sed_pred0.shape[1]):
# determine whether track0 is similar to track1
flag_0sim1 = determine_similar_location(sed_pred0[frame_cnt][class_cnt], sed_pred1[frame_cnt][class_cnt], \
doa_pred0[frame_cnt], doa_pred1[frame_cnt], class_cnt, threshold_unify, nb_classes)
flag_1sim2 = determine_similar_location(sed_pred1[frame_cnt][class_cnt], sed_pred2[frame_cnt][class_cnt], \
doa_pred1[frame_cnt], doa_pred2[frame_cnt], class_cnt, threshold_unify, nb_classes)
flag_2sim0 = determine_similar_location(sed_pred2[frame_cnt][class_cnt], sed_pred0[frame_cnt][class_cnt], \
doa_pred2[frame_cnt], doa_pred0[frame_cnt], class_cnt, threshold_unify, nb_classes)
# unify or not unify according to flag
if flag_0sim1 + flag_1sim2 + flag_2sim0 == 0:
if sed_pred0[frame_cnt][class_cnt]>0.5:
if frame_cnt not in output_dict:
output_dict[frame_cnt] = []
output_dict[frame_cnt].append([class_cnt, doa_pred0[frame_cnt][class_cnt], \
doa_pred0[frame_cnt][class_cnt+nb_classes], doa_pred0[frame_cnt][class_cnt+2*nb_classes]])
if sed_pred1[frame_cnt][class_cnt]>0.5:
if frame_cnt not in output_dict:
output_dict[frame_cnt] = []
output_dict[frame_cnt].append([class_cnt, doa_pred1[frame_cnt][class_cnt], \
doa_pred1[frame_cnt][class_cnt+nb_classes], doa_pred1[frame_cnt][class_cnt+2*nb_classes]])
if sed_pred2[frame_cnt][class_cnt]>0.5:
if frame_cnt not in output_dict:
output_dict[frame_cnt] = []
output_dict[frame_cnt].append([class_cnt, doa_pred2[frame_cnt][class_cnt], \
doa_pred2[frame_cnt][class_cnt+nb_classes], doa_pred2[frame_cnt][class_cnt+2*nb_classes]])
elif flag_0sim1 + flag_1sim2 + flag_2sim0 == 1:
if frame_cnt not in output_dict:
output_dict[frame_cnt] = []
if flag_0sim1:
if sed_pred2[frame_cnt][class_cnt]>0.5:
output_dict[frame_cnt].append([class_cnt, doa_pred2[frame_cnt][class_cnt], \
doa_pred2[frame_cnt][class_cnt+nb_classes], doa_pred2[frame_cnt][class_cnt+2*nb_classes]])
doa_pred_fc = (doa_pred0[frame_cnt] + doa_pred1[frame_cnt]) / 2
output_dict[frame_cnt].append([class_cnt, doa_pred_fc[class_cnt], \
doa_pred_fc[class_cnt+nb_classes], doa_pred_fc[class_cnt+2*nb_classes]])
elif flag_1sim2:
if sed_pred0[frame_cnt][class_cnt]>0.5:
output_dict[frame_cnt].append([class_cnt, doa_pred0[frame_cnt][class_cnt], \
doa_pred0[frame_cnt][class_cnt+nb_classes], doa_pred0[frame_cnt][class_cnt+2*nb_classes]])
doa_pred_fc = (doa_pred1[frame_cnt] + doa_pred2[frame_cnt]) / 2
output_dict[frame_cnt].append([class_cnt, doa_pred_fc[class_cnt], \
doa_pred_fc[class_cnt+nb_classes], doa_pred_fc[class_cnt+2*nb_classes]])
elif flag_2sim0:
if sed_pred1[frame_cnt][class_cnt]>0.5:
output_dict[frame_cnt].append([class_cnt, doa_pred1[frame_cnt][class_cnt], \
doa_pred1[frame_cnt][class_cnt+nb_classes], doa_pred1[frame_cnt][class_cnt+2*nb_classes]])
doa_pred_fc = (doa_pred2[frame_cnt] + doa_pred0[frame_cnt]) / 2
output_dict[frame_cnt].append([class_cnt, doa_pred_fc[class_cnt], \
doa_pred_fc[class_cnt+nb_classes], doa_pred_fc[class_cnt+2*nb_classes]])
elif flag_0sim1 + flag_1sim2 + flag_2sim0 >= 2:
if frame_cnt not in output_dict:
output_dict[frame_cnt] = []
doa_pred_fc = (doa_pred0[frame_cnt] + doa_pred1[frame_cnt] + doa_pred2[frame_cnt]) / 3
output_dict[frame_cnt].append([class_cnt, doa_pred_fc[class_cnt], \
doa_pred_fc[class_cnt+nb_classes], doa_pred_fc[class_cnt+2*nb_classes]])
return output_dict
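# Illustrative sanity check (shapes and random values are assumptions for demonstration only):
# segment a dummy feature array and decode a random multi-ACCDOA output.
if __name__ == '__main__':
    _x = np.zeros((7, 250))                              # (channels, frames)
    _indexes, _pad = _segment_index(_x, chunklen=100, hoplen=50)
    print(_indexes, _pad)                                # 4 segments, no padding required
    _accdoa = np.random.rand(1, 10, 3 * 3 * 13) * 2 - 1  # (batch, frames, tracks*axes*classes)
    _sed, _doa = get_multi_accdoa_labels(_accdoa, nb_classes=13)
    print(_sed.shape, _doa.shape)                        # (3, 1, 10, 13) and (3, 1, 10, 39)
    _dcase = multi_accdoa_to_dcase_format(_sed[:, 0], _doa[:, 0])
    print(len(_dcase))                                   # number of frames with detected events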
| 15,990 | 46.734328 | 157 | py |
DCASE2022-TASK3 | DCASE2022-TASK3-main/code/methods/utils/conformer/embedding.py | # Copyright (c) 2021, Soohwan Kim. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Reference: https://github.com/sooftware/conformer
import math
import torch
import torch.nn as nn
from torch import Tensor
class PositionalEncoding(nn.Module):
"""
Positional Encoding proposed in "Attention Is All You Need".
Since transformer contains no recurrence and no convolution, in order for the model to make
use of the order of the sequence, we must add some positional information.
"Attention Is All You Need" use sine and cosine functions of different frequencies:
PE_(pos, 2i) = sin(pos / power(10000, 2i / d_model))
PE_(pos, 2i+1) = cos(pos / power(10000, 2i / d_model))
"""
def __init__(self, d_model: int = 512, max_len: int = 10000) -> None:
super(PositionalEncoding, self).__init__()
pe = torch.zeros(max_len, d_model, requires_grad=False)
position = torch.arange(0, max_len, dtype=torch.float).unsqueeze(1)
div_term = torch.exp(torch.arange(0, d_model, 2).float() * -(math.log(10000.0) / d_model))
pe[:, 0::2] = torch.sin(position * div_term)
pe[:, 1::2] = torch.cos(position * div_term)
pe = pe.unsqueeze(0)
self.register_buffer('pe', pe)
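        # Illustrative usage note (values are assumptions for demonstration only): forward()
        # is indexed by sequence length and returns the cached table, e.g.
        #   PositionalEncoding(d_model=512)(length=100) -> shape (1, 100, 512),
        # which is typically added to the (batch, 100, 512) input embeddings.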
def forward(self, length: int) -> Tensor:
return self.pe[:, :length] | 1,861 | 41.318182 | 98 | py |
DCASE2022-TASK3 | DCASE2022-TASK3-main/code/methods/utils/conformer/activation.py | # Copyright (c) 2021, Soohwan Kim. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Reference: https://github.com/sooftware/conformer
import torch.nn as nn
from torch import Tensor
class Swish(nn.Module):
"""
Swish is a smooth, non-monotonic function that consistently matches or outperforms ReLU on deep networks applied
to a variety of challenging domains such as Image classification and Machine translation.
"""
def __init__(self):
super(Swish, self).__init__()
def forward(self, inputs: Tensor) -> Tensor:
return inputs * inputs.sigmoid()
class GLU(nn.Module):
"""
The gating mechanism is called Gated Linear Units (GLU), which was first introduced for natural language processing
in the paper “Language Modeling with Gated Convolutional Networks”
"""
def __init__(self, dim: int) -> None:
super(GLU, self).__init__()
self.dim = dim
def forward(self, inputs: Tensor) -> Tensor:
outputs, gate = inputs.chunk(2, dim=self.dim)
return outputs * gate.sigmoid()
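# Illustrative sanity check (shapes are assumptions for demonstration only): Swish is
# applied element-wise and keeps the input shape, while GLU halves the gated dimension.
if __name__ == '__main__':
    import torch
    _x = torch.rand(4, 10)
    print(Swish()(_x).shape)      # torch.Size([4, 10])
    print(GLU(dim=-1)(_x).shape)  # torch.Size([4, 5])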
| 1,588 | 35.113636 | 119 | py |
DCASE2022-TASK3 | DCASE2022-TASK3-main/code/methods/utils/conformer/modules.py | # Copyright (c) 2021, Soohwan Kim. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Reference: https://github.com/sooftware/conformer
import torch
import torch.nn as nn
import torch.nn.init as init
from torch import Tensor
class ResidualConnectionModule(nn.Module):
"""
Residual Connection Module.
outputs = (module(inputs) x module_factor + inputs x input_factor)
"""
def __init__(self, module: nn.Module, module_factor: float = 1.0, input_factor: float = 1.0):
super(ResidualConnectionModule, self).__init__()
self.module = module
self.module_factor = module_factor
self.input_factor = input_factor
def forward(self, inputs: Tensor) -> Tensor:
return (self.module(inputs) * self.module_factor) + (inputs * self.input_factor)
class Linear(nn.Module):
"""
Wrapper class of torch.nn.Linear
Weight initialize by xavier initialization and bias initialize to zeros.
"""
def __init__(self, in_features: int, out_features: int, bias: bool = True) -> None:
super(Linear, self).__init__()
self.linear = nn.Linear(in_features, out_features, bias=bias)
init.xavier_uniform_(self.linear.weight)
if bias:
init.zeros_(self.linear.bias)
def forward(self, x: Tensor) -> Tensor:
return self.linear(x)
class View(nn.Module):
""" Wrapper class of torch.view() for Sequential module. """
def __init__(self, shape: tuple, contiguous: bool = False):
super(View, self).__init__()
self.shape = shape
self.contiguous = contiguous
def forward(self, x: Tensor) -> Tensor:
if self.contiguous:
x = x.contiguous()
return x.view(*self.shape)
class Transpose(nn.Module):
""" Wrapper class of torch.transpose() for Sequential module. """
def __init__(self, shape: tuple):
super(Transpose, self).__init__()
self.shape = shape
def forward(self, x: Tensor) -> Tensor:
return x.transpose(*self.shape)
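# Illustrative sanity check (shapes are assumptions for demonstration only): Transpose and
# View make tensor reshaping composable inside nn.Sequential.
if __name__ == '__main__':
    _seq = nn.Sequential(Transpose(shape=(1, 2)), View(shape=(-1, 4), contiguous=True))
    print(_seq(torch.rand(2, 4, 8)).shape)  # (2, 4, 8) -> transpose -> (2, 8, 4) -> view -> (16, 4)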
| 2,540 | 32.434211 | 97 | py |
DCASE2022-TASK3 | DCASE2022-TASK3-main/code/methods/utils/conformer/model.py | # Copyright (c) 2021, Soohwan Kim. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Reference: https://github.com/sooftware/conformer
import torch
import torch.nn as nn
from torch import Tensor
from .decoder import DecoderRNNT
from .encoder import ConformerEncoder
from .modules import Linear
class Conformer(nn.Module):
"""
Conformer: Convolution-augmented Transformer for Speech Recognition
    The paper used a one-LSTM Transducer decoder; currently still only the conformer
    encoder shown in the paper is implemented.
Args:
num_classes (int): Number of classification classes
input_dim (int, optional): Dimension of input vector
encoder_dim (int, optional): Dimension of conformer encoder
decoder_dim (int, optional): Dimension of conformer decoder
num_encoder_layers (int, optional): Number of conformer blocks
num_decoder_layers (int, optional): Number of decoder layers
decoder_rnn_type (str, optional): type of RNN cell
num_attention_heads (int, optional): Number of attention heads
feed_forward_expansion_factor (int, optional): Expansion factor of feed forward module
conv_expansion_factor (int, optional): Expansion factor of conformer convolution module
feed_forward_dropout_p (float, optional): Probability of feed forward module dropout
attention_dropout_p (float, optional): Probability of attention module dropout
conv_dropout_p (float, optional): Probability of conformer convolution module dropout
decoder_dropout_p (float, optional): Probability of conformer decoder dropout
conv_kernel_size (int or tuple, optional): Size of the convolving kernel
        half_step_residual (bool): Flag indicating whether to use half step residual or not
Inputs: inputs
- **inputs** (batch, time, dim): Tensor containing input vector
- **input_lengths** (batch): list of sequence input lengths
Returns: outputs, output_lengths
        - **outputs** (batch, out_channels, time): Tensor produced by conformer.
- **output_lengths** (batch): list of sequence output lengths
"""
def __init__(
self,
num_classes: int,
input_dim: int = 80,
encoder_dim: int = 512,
decoder_dim: int = 640,
num_encoder_layers: int = 17,
num_decoder_layers: int = 1,
num_attention_heads: int = 8,
feed_forward_expansion_factor: int = 4,
conv_expansion_factor: int = 2,
input_dropout_p: float = 0.1,
feed_forward_dropout_p: float = 0.1,
attention_dropout_p: float = 0.1,
conv_dropout_p: float = 0.1,
decoder_dropout_p: float = 0.1,
conv_kernel_size: int = 31,
half_step_residual: bool = True,
decoder_rnn_type: str = "lstm",
) -> None:
super(Conformer, self).__init__()
self.encoder = ConformerEncoder(
input_dim=input_dim,
encoder_dim=encoder_dim,
num_layers=num_encoder_layers,
num_attention_heads=num_attention_heads,
feed_forward_expansion_factor=feed_forward_expansion_factor,
conv_expansion_factor=conv_expansion_factor,
input_dropout_p=input_dropout_p,
feed_forward_dropout_p=feed_forward_dropout_p,
attention_dropout_p=attention_dropout_p,
conv_dropout_p=conv_dropout_p,
conv_kernel_size=conv_kernel_size,
half_step_residual=half_step_residual,
)
self.decoder = DecoderRNNT(
num_classes=num_classes,
hidden_state_dim=decoder_dim,
output_dim=encoder_dim,
num_layers=num_decoder_layers,
rnn_type=decoder_rnn_type,
dropout_p=decoder_dropout_p,
)
self.fc = Linear(encoder_dim << 1, num_classes, bias=False)
def set_encoder(self, encoder):
""" Setter for encoder """
self.encoder = encoder
def set_decoder(self, decoder):
""" Setter for decoder """
self.decoder = decoder
def count_parameters(self) -> int:
""" Count parameters of encoder """
num_encoder_parameters = self.encoder.count_parameters()
num_decoder_parameters = self.decoder.count_parameters()
return num_encoder_parameters + num_decoder_parameters
def update_dropout(self, dropout_p) -> None:
""" Update dropout probability of model """
self.encoder.update_dropout(dropout_p)
self.decoder.update_dropout(dropout_p)
def joint(self, encoder_outputs: Tensor, decoder_outputs: Tensor) -> Tensor:
"""
Joint `encoder_outputs` and `decoder_outputs`.
Args:
encoder_outputs (torch.FloatTensor): A output sequence of encoder. `FloatTensor` of size
``(batch, seq_length, dimension)``
decoder_outputs (torch.FloatTensor): A output sequence of decoder. `FloatTensor` of size
``(batch, seq_length, dimension)``
Returns:
* outputs (torch.FloatTensor): outputs of joint `encoder_outputs` and `decoder_outputs`..
"""
if encoder_outputs.dim() == 3 and decoder_outputs.dim() == 3:
input_length = encoder_outputs.size(1)
target_length = decoder_outputs.size(1)
encoder_outputs = encoder_outputs.unsqueeze(2)
decoder_outputs = decoder_outputs.unsqueeze(1)
encoder_outputs = encoder_outputs.repeat([1, 1, target_length, 1])
decoder_outputs = decoder_outputs.repeat([1, input_length, 1, 1])
outputs = torch.cat((encoder_outputs, decoder_outputs), dim=-1)
outputs = self.fc(outputs)
return outputs
def forward(
self,
inputs: Tensor,
input_lengths: Tensor,
targets: Tensor,
target_lengths: Tensor
) -> Tensor:
"""
Forward propagate a `inputs` and `targets` pair for training.
Args:
inputs (torch.FloatTensor): A input sequence passed to encoder. Typically for inputs this will be a padded
`FloatTensor` of size ``(batch, seq_length, dimension)``.
input_lengths (torch.LongTensor): The length of input tensor. ``(batch)``
            targets (torch.LongTensor): A target sequence passed to decoder. `IntTensor` of size ``(batch, seq_length)``
target_lengths (torch.LongTensor): The length of target tensor. ``(batch)``
Returns:
* predictions (torch.FloatTensor): Result of model predictions.
"""
encoder_outputs, _ = self.encoder(inputs, input_lengths)
decoder_outputs, _ = self.decoder(targets, target_lengths)
outputs = self.joint(encoder_outputs, decoder_outputs)
return outputs
@torch.no_grad()
def decode(self, encoder_output: Tensor, max_length: int) -> Tensor:
"""
Decode `encoder_outputs`.
Args:
encoder_output (torch.FloatTensor): A output sequence of encoder. `FloatTensor` of size
``(seq_length, dimension)``
max_length (int): max decoding time step
Returns:
* predicted_log_probs (torch.FloatTensor): Log probability of model predictions.
"""
pred_tokens, hidden_state = list(), None
decoder_input = encoder_output.new_tensor([[self.decoder.sos_id]], dtype=torch.long)
for t in range(max_length):
decoder_output, hidden_state = self.decoder(decoder_input, hidden_states=hidden_state)
step_output = self.joint(encoder_output[t].view(-1), decoder_output.view(-1))
step_output = step_output.softmax(dim=0)
pred_token = step_output.argmax(dim=0)
pred_token = int(pred_token.item())
pred_tokens.append(pred_token)
decoder_input = step_output.new_tensor([[pred_token]], dtype=torch.long)
return torch.LongTensor(pred_tokens)
@torch.no_grad()
def recognize(self, inputs: Tensor, input_lengths: Tensor):
"""
Recognize input speech. This method consists of the forward of the encoder and the decode() of the decoder.
Args:
inputs (torch.FloatTensor): A input sequence passed to encoder. Typically for inputs this will be a padded
`FloatTensor` of size ``(batch, seq_length, dimension)``.
input_lengths (torch.LongTensor): The length of input tensor. ``(batch)``
Returns:
* predictions (torch.FloatTensor): Result of model predictions.
"""
outputs = list()
encoder_outputs, output_lengths = self.encoder(inputs, input_lengths)
max_length = encoder_outputs.size(1)
for encoder_output in encoder_outputs:
decoded_seq = self.decode(encoder_output, max_length)
outputs.append(decoded_seq)
outputs = torch.stack(outputs, dim=1).transpose(0, 1)
return outputs
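# Illustrative usage sketch (comment only; all values are assumptions for demonstration,
# and the relative imports above mean this module is used as part of the package):
#   model = Conformer(num_classes=10, input_dim=80, encoder_dim=144,
#                     num_encoder_layers=4, decoder_dim=320)
#   inputs = torch.rand(2, 1024, 80)                  # (batch, time, dim)
#   input_lengths = torch.LongTensor([1024, 900])
#   targets = torch.LongTensor([[1, 4, 5, 6, 2], [1, 7, 8, 2, 0]])
#   target_lengths = torch.LongTensor([5, 4])
#   outputs = model(inputs, input_lengths, targets, target_lengths)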
| 9,594 | 41.268722 | 119 | py |
DCASE2022-TASK3 | DCASE2022-TASK3-main/code/methods/utils/conformer/encoder.py | # Copyright (c) 2021, Soohwan Kim. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Reference: https://github.com/sooftware/conformer
import torch
import torch.nn as nn
from torch import Tensor
from typing import Tuple
from .feed_forward import FeedForwardModule
from .attention import MultiHeadedSelfAttentionModule
from .convolution import (
ConformerConvModule,
Conv2dSubampling,
)
from .modules import (
ResidualConnectionModule,
Linear,
)
class ConformerBlock(nn.Module):
"""
Conformer block contains two Feed Forward modules sandwiching the Multi-Headed Self-Attention module
and the Convolution module. This sandwich structure is inspired by Macaron-Net, which proposes replacing
the original feed-forward layer in the Transformer block into two half-step feed-forward layers,
one before the attention layer and one after.
Args:
encoder_dim (int, optional): Dimension of conformer encoder
num_attention_heads (int, optional): Number of attention heads
feed_forward_expansion_factor (int, optional): Expansion factor of feed forward module
conv_expansion_factor (int, optional): Expansion factor of conformer convolution module
feed_forward_dropout_p (float, optional): Probability of feed forward module dropout
attention_dropout_p (float, optional): Probability of attention module dropout
conv_dropout_p (float, optional): Probability of conformer convolution module dropout
conv_kernel_size (int or tuple, optional): Size of the convolving kernel
        half_step_residual (bool): Flag indicating whether to use half step residual or not
Inputs: inputs
- **inputs** (batch, time, dim): Tensor containing input vector
Returns: outputs
        - **outputs** (batch, time, dim): Tensor produced by conformer block.
"""
def __init__(
self,
encoder_dim: int = 512,
num_attention_heads: int = 8,
feed_forward_expansion_factor: int = 4,
conv_expansion_factor: int = 2,
feed_forward_dropout_p: float = 0.1,
attention_dropout_p: float = 0.1,
conv_dropout_p: float = 0.1,
conv_kernel_size: int = 31,
half_step_residual: bool = True,
):
super(ConformerBlock, self).__init__()
if half_step_residual:
self.feed_forward_residual_factor = 0.5
else:
self.feed_forward_residual_factor = 1
self.sequential = nn.Sequential(
ResidualConnectionModule(
module=FeedForwardModule(
encoder_dim=encoder_dim,
expansion_factor=feed_forward_expansion_factor,
dropout_p=feed_forward_dropout_p,
),
module_factor=self.feed_forward_residual_factor,
),
ResidualConnectionModule(
module=MultiHeadedSelfAttentionModule(
d_model=encoder_dim,
num_heads=num_attention_heads,
dropout_p=attention_dropout_p,
),
),
ResidualConnectionModule(
module=ConformerConvModule(
in_channels=encoder_dim,
kernel_size=conv_kernel_size,
expansion_factor=conv_expansion_factor,
dropout_p=conv_dropout_p,
),
),
ResidualConnectionModule(
module=FeedForwardModule(
encoder_dim=encoder_dim,
expansion_factor=feed_forward_expansion_factor,
dropout_p=feed_forward_dropout_p,
),
module_factor=self.feed_forward_residual_factor,
),
nn.LayerNorm(encoder_dim),
)
def forward(self, inputs: Tensor) -> Tensor:
return self.sequential(inputs)
class ConformerEncoder(nn.Module):
"""
Conformer encoder first processes the input with a convolution subsampling layer and then
with a number of conformer blocks.
Args:
input_dim (int, optional): Dimension of input vector
encoder_dim (int, optional): Dimension of conformer encoder
num_layers (int, optional): Number of conformer blocks
num_attention_heads (int, optional): Number of attention heads
feed_forward_expansion_factor (int, optional): Expansion factor of feed forward module
conv_expansion_factor (int, optional): Expansion factor of conformer convolution module
feed_forward_dropout_p (float, optional): Probability of feed forward module dropout
attention_dropout_p (float, optional): Probability of attention module dropout
conv_dropout_p (float, optional): Probability of conformer convolution module dropout
conv_kernel_size (int or tuple, optional): Size of the convolving kernel
        half_step_residual (bool): Flag indicating whether to use half step residual or not
Inputs: inputs, input_lengths
- **inputs** (batch, time, dim): Tensor containing input vector
- **input_lengths** (batch): list of sequence input lengths
Returns: outputs, output_lengths
        - **outputs** (batch, out_channels, time): Tensor produced by conformer encoder.
- **output_lengths** (batch): list of sequence output lengths
"""
def __init__(
self,
input_dim: int = 80,
encoder_dim: int = 512,
num_layers: int = 17,
num_attention_heads: int = 8,
feed_forward_expansion_factor: int = 4,
conv_expansion_factor: int = 2,
input_dropout_p: float = 0.1,
feed_forward_dropout_p: float = 0.1,
attention_dropout_p: float = 0.1,
conv_dropout_p: float = 0.1,
conv_kernel_size: int = 31,
half_step_residual: bool = True,
):
super(ConformerEncoder, self).__init__()
self.conv_subsample = Conv2dSubampling(in_channels=1, out_channels=encoder_dim)
self.input_projection = nn.Sequential(
Linear(encoder_dim * (((input_dim - 1) // 2 - 1) // 2), encoder_dim),
nn.Dropout(p=input_dropout_p),
)
self.layers = nn.ModuleList([ConformerBlock(
encoder_dim=encoder_dim,
num_attention_heads=num_attention_heads,
feed_forward_expansion_factor=feed_forward_expansion_factor,
conv_expansion_factor=conv_expansion_factor,
feed_forward_dropout_p=feed_forward_dropout_p,
attention_dropout_p=attention_dropout_p,
conv_dropout_p=conv_dropout_p,
conv_kernel_size=conv_kernel_size,
half_step_residual=half_step_residual,
) for _ in range(num_layers)])
def count_parameters(self) -> int:
""" Count parameters of encoder """
        return sum([p.numel() for p in self.parameters()])
def update_dropout(self, dropout_p: float) -> None:
""" Update dropout probability of encoder """
for name, child in self.named_children():
if isinstance(child, nn.Dropout):
child.p = dropout_p
def forward(self, inputs: Tensor, input_lengths: Tensor) -> Tuple[Tensor, Tensor]:
"""
Forward propagate a `inputs` for encoder training.
Args:
inputs (torch.FloatTensor): A input sequence passed to encoder. Typically for inputs this will be a padded
`FloatTensor` of size ``(batch, seq_length, dimension)``.
input_lengths (torch.LongTensor): The length of input tensor. ``(batch)``
Returns:
(Tensor, Tensor)
* outputs (torch.FloatTensor): A output sequence of encoder. `FloatTensor` of size
``(batch, seq_length, dimension)``
* output_lengths (torch.LongTensor): The length of output tensor. ``(batch)``
"""
outputs, output_lengths = self.conv_subsample(inputs, input_lengths)
outputs = self.input_projection(outputs)
for layer in self.layers:
outputs = layer(outputs)
return outputs, output_lengths
class ConformerBlocks(nn.Module):
def __init__(
self,
encoder_dim: int = 512,
num_attention_heads: int = 8,
feed_forward_expansion_factor: int = 4,
conv_expansion_factor: int = 2,
feed_forward_dropout_p: float = 0.1,
attention_dropout_p: float = 0.1,
conv_dropout_p: float = 0.1,
conv_kernel_size: int = 31,
half_step_residual: bool = True,
num_layers: int = 2
):
super(ConformerBlocks, self).__init__()
self.layers = nn.ModuleList([ConformerBlock(
encoder_dim=encoder_dim,
num_attention_heads=num_attention_heads,
feed_forward_expansion_factor=feed_forward_expansion_factor,
conv_expansion_factor=conv_expansion_factor,
feed_forward_dropout_p=feed_forward_dropout_p,
attention_dropout_p=attention_dropout_p,
conv_dropout_p=conv_dropout_p,
conv_kernel_size=conv_kernel_size,
half_step_residual=half_step_residual,
) for _ in range(num_layers)])
def forward(self, inputs: Tensor) -> Tensor:
for layer in self.layers:
inputs = layer(inputs)
return inputs
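# Illustrative usage note (shapes are assumptions for demonstration only): ConformerBlocks
# is shape-preserving, e.g. ConformerBlocks(encoder_dim=256, num_layers=2) maps a
# (batch, time, 256) tensor to a (batch, time, 256) tensor, so it can be dropped into a
# (batch, time, feature) pipeline without the Conv2dSubampling front-end of ConformerEncoder.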
| 10,007 | 40.7 | 118 | py |
DCASE2022-TASK3 | DCASE2022-TASK3-main/code/methods/utils/conformer/convolution.py | # Copyright (c) 2021, Soohwan Kim. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Reference: https://github.com/sooftware/conformer
import torch
import torch.nn as nn
from torch import Tensor
from typing import Tuple
from .activation import Swish, GLU
from .modules import Transpose
class DepthwiseConv1d(nn.Module):
"""
When groups == in_channels and out_channels == K * in_channels, where K is a positive integer,
this operation is termed in literature as depthwise convolution.
Args:
in_channels (int): Number of channels in the input
out_channels (int): Number of channels produced by the convolution
kernel_size (int or tuple): Size of the convolving kernel
stride (int, optional): Stride of the convolution. Default: 1
padding (int or tuple, optional): Zero-padding added to both sides of the input. Default: 0
bias (bool, optional): If True, adds a learnable bias to the output. Default: True
Inputs: inputs
- **inputs** (batch, in_channels, time): Tensor containing input vector
Returns: outputs
        - **outputs** (batch, out_channels, time): Tensor produced by depthwise 1-D convolution.
"""
def __init__(
self,
in_channels: int,
out_channels: int,
kernel_size: int,
stride: int = 1,
padding: int = 0,
bias: bool = False,
) -> None:
super(DepthwiseConv1d, self).__init__()
assert out_channels % in_channels == 0, "out_channels should be constant multiple of in_channels"
self.conv = nn.Conv1d(
in_channels=in_channels,
out_channels=out_channels,
kernel_size=kernel_size,
groups=in_channels,
stride=stride,
padding=padding,
bias=bias,
)
def forward(self, inputs: Tensor) -> Tensor:
return self.conv(inputs)
class PointwiseConv1d(nn.Module):
"""
When kernel size == 1 conv1d, this operation is termed in literature as pointwise convolution.
This operation often used to match dimensions.
Args:
in_channels (int): Number of channels in the input
out_channels (int): Number of channels produced by the convolution
stride (int, optional): Stride of the convolution. Default: 1
padding (int or tuple, optional): Zero-padding added to both sides of the input. Default: 0
bias (bool, optional): If True, adds a learnable bias to the output. Default: True
Inputs: inputs
- **inputs** (batch, in_channels, time): Tensor containing input vector
Returns: outputs
        - **outputs** (batch, out_channels, time): Tensor produced by pointwise 1-D convolution.
"""
def __init__(
self,
in_channels: int,
out_channels: int,
stride: int = 1,
padding: int = 0,
bias: bool = True,
) -> None:
super(PointwiseConv1d, self).__init__()
self.conv = nn.Conv1d(
in_channels=in_channels,
out_channels=out_channels,
kernel_size=1,
stride=stride,
padding=padding,
bias=bias,
)
def forward(self, inputs: Tensor) -> Tensor:
return self.conv(inputs)
class ConformerConvModule(nn.Module):
"""
Conformer convolution module starts with a pointwise convolution and a gated linear unit (GLU).
This is followed by a single 1-D depthwise convolution layer. Batchnorm is deployed just after the convolution
to aid training deep models.
Args:
in_channels (int): Number of channels in the input
kernel_size (int or tuple, optional): Size of the convolving kernel Default: 31
dropout_p (float, optional): probability of dropout
Inputs: inputs
inputs (batch, time, dim): Tensor contains input sequences
Outputs: outputs
        outputs (batch, time, dim): Tensor produced by conformer convolution module.
"""
def __init__(
self,
in_channels: int,
kernel_size: int = 31,
expansion_factor: int = 2,
dropout_p: float = 0.1,
) -> None:
super(ConformerConvModule, self).__init__()
        assert (kernel_size - 1) % 2 == 0, "kernel_size should be an odd number for 'SAME' padding"
        assert expansion_factor == 2, "Currently, only supports expansion_factor 2"
self.sequential = nn.Sequential(
nn.LayerNorm(in_channels),
Transpose(shape=(1, 2)),
PointwiseConv1d(in_channels, in_channels * expansion_factor, stride=1, padding=0, bias=True),
GLU(dim=1),
DepthwiseConv1d(in_channels, in_channels, kernel_size, stride=1, padding=(kernel_size - 1) // 2),
nn.BatchNorm1d(in_channels),
Swish(),
PointwiseConv1d(in_channels, in_channels, stride=1, padding=0, bias=True),
nn.Dropout(p=dropout_p),
)
def forward(self, inputs: Tensor) -> Tensor:
return self.sequential(inputs).transpose(1, 2)
class Conv2dSubampling(nn.Module):
"""
Convolutional 2D subsampling (to 1/4 length)
Args:
in_channels (int): Number of channels in the input image
out_channels (int): Number of channels produced by the convolution
Inputs: inputs
- **inputs** (batch, time, dim): Tensor containing sequence of inputs
Returns: outputs, output_lengths
- **outputs** (batch, time, dim): Tensor produced by the convolution
- **output_lengths** (batch): list of sequence output lengths
"""
def __init__(self, in_channels: int, out_channels: int) -> None:
super(Conv2dSubampling, self).__init__()
self.sequential = nn.Sequential(
nn.Conv2d(in_channels, out_channels, kernel_size=3, stride=2),
nn.ReLU(),
nn.Conv2d(out_channels, out_channels, kernel_size=3, stride=2),
nn.ReLU(),
)
def forward(self, inputs: Tensor, input_lengths: Tensor) -> Tuple[Tensor, Tensor]:
outputs = self.sequential(inputs.unsqueeze(1))
        batch_size, channels, subsampled_lengths, subsampled_dim = outputs.size()
        outputs = outputs.permute(0, 2, 1, 3)
        outputs = outputs.contiguous().view(batch_size, subsampled_lengths, channels * subsampled_dim)
output_lengths = input_lengths >> 2
output_lengths -= 1
return outputs, output_lengths
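# Illustrative shape note (input sizes are assumptions for demonstration only): the two
# stride-2 convolutions reduce both the time axis and the feature axis roughly by a factor
# of four, e.g. an (8, 1024, 80) input with input_lengths=1024 yields an output of shape
# (8, 255, out_channels * 19) and output_lengths of 255. This is why ConformerEncoder
# projects from encoder_dim * (((input_dim - 1) // 2 - 1) // 2) back to encoder_dim.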
| 7,029 | 36.195767 | 115 | py |
DCASE2022-TASK3 | DCASE2022-TASK3-main/code/methods/utils/conformer/feed_forward.py | # Copyright (c) 2021, Soohwan Kim. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Reference: https://github.com/sooftware/conformer
import torch
import torch.nn as nn
from torch import Tensor
from .activation import Swish
from .modules import Linear
class FeedForwardModule(nn.Module):
"""
    The Conformer feed forward module follows pre-norm residual units and applies layer normalization within the
    residual unit and on the input before the first linear layer. This module also applies Swish activation and
    dropout, which help regularize the network.
Args:
encoder_dim (int): Dimension of conformer encoder
expansion_factor (int): Expansion factor of feed forward module.
dropout_p (float): Ratio of dropout
Inputs: inputs
- **inputs** (batch, time, dim): Tensor contains input sequences
Outputs: outputs
        - **outputs** (batch, time, dim): Tensor produced by the feed forward module.
"""
def __init__(
self,
encoder_dim: int = 512,
expansion_factor: int = 4,
dropout_p: float = 0.1,
) -> None:
super(FeedForwardModule, self).__init__()
self.sequential = nn.Sequential(
nn.LayerNorm(encoder_dim),
Linear(encoder_dim, encoder_dim * expansion_factor, bias=True),
Swish(),
nn.Dropout(p=dropout_p),
Linear(encoder_dim * expansion_factor, encoder_dim, bias=True),
nn.Dropout(p=dropout_p),
)
def forward(self, inputs: Tensor) -> Tensor:
return self.sequential(inputs)
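# Minimal sketch (dimensions are assumptions): the feed forward module is shape
# preserving, so it is usually applied in half-step residual form, as in the
# Conformer paper.
#   ffn = FeedForwardModule(encoder_dim=256, expansion_factor=4)
#   x = torch.randn(8, 100, 256)
#   y = x + 0.5 * ffn(x)                  # (8, 100, 256)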
| 2,117 | 34.3 | 119 | py |
DCASE2022-TASK3 | DCASE2022-TASK3-main/code/methods/utils/conformer/decoder.py | # Copyright (c) 2021, Soohwan Kim. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Reference: https://github.com/sooftware/conformer
import torch.nn as nn
from torch import Tensor
from typing import Tuple
from .modules import Linear
class DecoderRNNT(nn.Module):
"""
Decoder of RNN-Transducer
Args:
num_classes (int): number of classification
hidden_state_dim (int, optional): hidden state dimension of decoder (default: 512)
output_dim (int, optional): output dimension of encoder and decoder (default: 512)
num_layers (int, optional): number of decoder layers (default: 1)
rnn_type (str, optional): type of rnn cell (default: lstm)
sos_id (int, optional): start of sentence identification
eos_id (int, optional): end of sentence identification
dropout_p (float, optional): dropout probability of decoder
Inputs: inputs, input_lengths
inputs (torch.LongTensor): A target sequence passed to decoder. `IntTensor` of size ``(batch, seq_length)``
input_lengths (torch.LongTensor): The length of input tensor. ``(batch)``
hidden_states (torch.FloatTensor): A previous hidden state of decoder. `FloatTensor` of size
``(batch, seq_length, dimension)``
Returns:
(Tensor, Tensor):
* decoder_outputs (torch.FloatTensor): A output sequence of decoder. `FloatTensor` of size
``(batch, seq_length, dimension)``
* hidden_states (torch.FloatTensor): A hidden state of decoder. `FloatTensor` of size
``(batch, seq_length, dimension)``
"""
supported_rnns = {
'lstm': nn.LSTM,
'gru': nn.GRU,
'rnn': nn.RNN,
}
def __init__(
self,
num_classes: int,
hidden_state_dim: int,
output_dim: int,
num_layers: int,
rnn_type: str = 'lstm',
sos_id: int = 1,
eos_id: int = 2,
dropout_p: float = 0.2,
):
super(DecoderRNNT, self).__init__()
self.hidden_state_dim = hidden_state_dim
self.sos_id = sos_id
self.eos_id = eos_id
self.embedding = nn.Embedding(num_classes, hidden_state_dim)
rnn_cell = self.supported_rnns[rnn_type.lower()]
self.rnn = rnn_cell(
input_size=hidden_state_dim,
hidden_size=hidden_state_dim,
num_layers=num_layers,
bias=True,
batch_first=True,
dropout=dropout_p,
bidirectional=False,
)
self.out_proj = Linear(hidden_state_dim, output_dim)
def count_parameters(self) -> int:
""" Count parameters of encoder """
return sum([p.numel for p in self.parameters()])
def update_dropout(self, dropout_p: float) -> None:
""" Update dropout probability of encoder """
for name, child in self.named_children():
if isinstance(child, nn.Dropout):
child.p = dropout_p
def forward(
self,
inputs: Tensor,
input_lengths: Tensor = None,
hidden_states: Tensor = None,
) -> Tuple[Tensor, Tensor]:
"""
        Forward propagate `inputs` (targets) for training.
Args:
inputs (torch.LongTensor): A target sequence passed to decoder. `IntTensor` of size ``(batch, seq_length)``
input_lengths (torch.LongTensor): The length of input tensor. ``(batch)``
hidden_states (torch.FloatTensor): A previous hidden state of decoder. `FloatTensor` of size
``(batch, seq_length, dimension)``
Returns:
(Tensor, Tensor):
* decoder_outputs (torch.FloatTensor): A output sequence of decoder. `FloatTensor` of size
``(batch, seq_length, dimension)``
* hidden_states (torch.FloatTensor): A hidden state of decoder. `FloatTensor` of size
``(batch, seq_length, dimension)``
"""
embedded = self.embedding(inputs)
if input_lengths is not None:
embedded = nn.utils.rnn.pack_padded_sequence(embedded.transpose(0, 1), input_lengths.cpu())
outputs, hidden_states = self.rnn(embedded, hidden_states)
outputs, _ = nn.utils.rnn.pad_packed_sequence(outputs)
outputs = self.out_proj(outputs.transpose(0, 1))
else:
outputs, hidden_states = self.rnn(embedded, hidden_states)
outputs = self.out_proj(outputs)
return outputs, hidden_states
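# Hedged usage sketch (vocabulary size and dimensions are assumptions, not repo
# defaults): without `input_lengths`, the decoder simply embeds the targets,
# runs the RNN and projects to `output_dim`.
#   decoder = DecoderRNNT(num_classes=30, hidden_state_dim=320, output_dim=320, num_layers=1)
#   targets = torch.randint(0, 30, (4, 12))             # (batch, seq_length)
#   outs, hidden = decoder(targets)                      # outs: (4, 12, 320)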
| 5,064 | 37.664122 | 119 | py |
DCASE2022-TASK3 | DCASE2022-TASK3-main/code/methods/utils/conformer/__init__.py | # Copyright (c) 2021, Soohwan Kim. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# from methods.ein_seld.models.conformer.model import Conformer
| 667 | 40.75 | 74 | py |
DCASE2022-TASK3 | DCASE2022-TASK3-main/code/methods/utils/conformer/attention.py | # Copyright (c) 2021, Soohwan Kim. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Reference: https://github.com/sooftware/conformer
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch import Tensor
from typing import Optional
from .embedding import PositionalEncoding
from .modules import Linear
class RelativeMultiHeadAttention(nn.Module):
"""
Multi-head attention with relative positional encoding.
This concept was proposed in the "Transformer-XL: Attentive Language Models Beyond a Fixed-Length Context"
Args:
d_model (int): The dimension of model
num_heads (int): The number of attention heads.
dropout_p (float): probability of dropout
Inputs: query, key, value, pos_embedding, mask
- **query** (batch, time, dim): Tensor containing query vector
- **key** (batch, time, dim): Tensor containing key vector
- **value** (batch, time, dim): Tensor containing value vector
- **pos_embedding** (batch, time, dim): Positional embedding tensor
- **mask** (batch, 1, time2) or (batch, time1, time2): Tensor containing indices to be masked
Returns:
        - **outputs**: Tensor produced by the relative multi-head attention module.
"""
def __init__(
self,
d_model: int = 512,
num_heads: int = 16,
dropout_p: float = 0.1,
):
super(RelativeMultiHeadAttention, self).__init__()
assert d_model % num_heads == 0, "d_model % num_heads should be zero."
self.d_model = d_model
self.d_head = int(d_model / num_heads)
self.num_heads = num_heads
self.sqrt_dim = math.sqrt(d_model)
self.query_proj = Linear(d_model, d_model)
self.key_proj = Linear(d_model, d_model)
self.value_proj = Linear(d_model, d_model)
self.pos_proj = Linear(d_model, d_model, bias=False)
self.dropout = nn.Dropout(p=dropout_p)
self.u_bias = nn.Parameter(torch.Tensor(self.num_heads, self.d_head))
self.v_bias = nn.Parameter(torch.Tensor(self.num_heads, self.d_head))
torch.nn.init.xavier_uniform_(self.u_bias)
torch.nn.init.xavier_uniform_(self.v_bias)
self.out_proj = Linear(d_model, d_model)
def forward(
self,
query: Tensor,
key: Tensor,
value: Tensor,
pos_embedding: Tensor,
mask: Optional[Tensor] = None,
) -> Tensor:
batch_size = value.size(0)
query = self.query_proj(query).view(batch_size, -1, self.num_heads, self.d_head)
key = self.key_proj(key).view(batch_size, -1, self.num_heads, self.d_head).permute(0, 2, 1, 3)
value = self.value_proj(value).view(batch_size, -1, self.num_heads, self.d_head).permute(0, 2, 1, 3)
pos_embedding = self.pos_proj(pos_embedding).view(batch_size, -1, self.num_heads, self.d_head)
content_score = torch.matmul((query + self.u_bias).transpose(1, 2), key.transpose(2, 3))
pos_score = torch.matmul((query + self.v_bias).transpose(1, 2), pos_embedding.permute(0, 2, 3, 1))
pos_score = self._relative_shift(pos_score)
score = (content_score + pos_score) / self.sqrt_dim
if mask is not None:
mask = mask.unsqueeze(1)
score.masked_fill_(mask, -1e9)
attn = F.softmax(score, -1)
attn = self.dropout(attn)
context = torch.matmul(attn, value).transpose(1, 2)
context = context.contiguous().view(batch_size, -1, self.d_model)
return self.out_proj(context)
def _relative_shift(self, pos_score: Tensor) -> Tensor:
batch_size, num_heads, seq_length1, seq_length2 = pos_score.size()
zeros = pos_score.new_zeros(batch_size, num_heads, seq_length1, 1)
padded_pos_score = torch.cat([zeros, pos_score], dim=-1)
padded_pos_score = padded_pos_score.view(batch_size, num_heads, seq_length2 + 1, seq_length1)
pos_score = padded_pos_score[:, :, 1:].view_as(pos_score)
return pos_score
class MultiHeadedSelfAttentionModule(nn.Module):
"""
    Conformer employs multi-headed self-attention (MHSA) while integrating an important technique from Transformer-XL:
    the relative sinusoidal positional encoding scheme. The relative positional encoding allows the self-attention
    module to generalize better to different input lengths, and the resulting encoder is more robust to variance in
    utterance length. Conformer uses pre-norm residual units with dropout, which helps with training
    and regularizing deeper models.
Args:
d_model (int): The dimension of model
num_heads (int): The number of attention heads.
dropout_p (float): probability of dropout
Inputs: inputs, mask
- **inputs** (batch, time, dim): Tensor containing input vector
- **mask** (batch, 1, time2) or (batch, time1, time2): Tensor containing indices to be masked
Returns:
        - **outputs** (batch, time, dim): Tensor produced by the relative multi-headed self-attention module.
"""
def __init__(self, d_model: int, num_heads: int, dropout_p: float = 0.1):
super(MultiHeadedSelfAttentionModule, self).__init__()
self.positional_encoding = PositionalEncoding(d_model)
self.layer_norm = nn.LayerNorm(d_model)
self.attention = RelativeMultiHeadAttention(d_model, num_heads, dropout_p)
self.dropout = nn.Dropout(p=dropout_p)
def forward(self, inputs: Tensor, mask: Optional[Tensor] = None):
batch_size, seq_length, _ = inputs.size()
pos_embedding = self.positional_encoding(seq_length)
pos_embedding = pos_embedding.repeat(batch_size, 1, 1)
inputs = self.layer_norm(inputs)
outputs = self.attention(inputs, inputs, inputs, pos_embedding=pos_embedding, mask=mask)
return self.dropout(outputs)
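# Minimal sketch (sizes are assumptions): the module keeps the (batch, time, dim)
# shape and accepts an optional boolean mask to ignore padded frames.
#   mhsa = MultiHeadedSelfAttentionModule(d_model=256, num_heads=4)
#   x = torch.randn(8, 100, 256)
#   y = x + mhsa(x)                       # pre-norm residual, (8, 100, 256)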
| 6,428 | 40.746753 | 117 | py |
DCASE2022-TASK3 | DCASE2022-TASK3-main/code/methods/ein_seld/inference.py | from pathlib import Path
import h5py
import numpy as np
import torch
from tqdm import tqdm
from methods.inference import BaseInferer
from methods.utils.data_utilities import *
class Inferer(BaseInferer):
def __init__(self, cfg, dataset, af_extractor, model, cuda, test_set=None):
super().__init__()
self.cfg = cfg
self.af_extractor = af_extractor
self.model = model
self.cuda = cuda
self.dataset = dataset
self.paths_dict = test_set.paths_dict
# Scalar
cfg_data = cfg['data']
dataset_name = '_'.join(sorted(str(cfg['dataset_synth']).split(',')))
scalar_h5_dir = Path(cfg['hdf5_dir']).joinpath(cfg['dataset']).joinpath('data').\
joinpath('{}fs'.format(cfg_data['sample_rate'])).joinpath('scalar')
fn_scalar = '{}_nfft{}_hop{}_mel{}_{}.h5'.format(cfg['data']['audio_feature'], \
cfg_data['nfft'], cfg_data['hoplen'], cfg_data['n_mels'], dataset_name)
self.scalar_path = scalar_h5_dir.joinpath(fn_scalar)
if self.scalar_path.is_file():
print('scalar path is used!', self.scalar_path)
with h5py.File(self.scalar_path, 'r') as hf:
self.mean = hf['mean'][:]
self.std = hf['std'][:]
if self.cuda:
self.mean = torch.tensor(self.mean, dtype=torch.float32).cuda(non_blocking=True)
self.std = torch.tensor(self.std, dtype=torch.float32).cuda(non_blocking=True)
self.label_resolution = dataset.label_resolution
def infer(self, generator):
pred_sed_list, pred_doa_list = [], []
iterator = tqdm(generator)
for batch_sample in iterator:
batch_x = batch_sample['data']
if self.cuda:
batch_x = batch_x.cuda(non_blocking=True)
with torch.no_grad():
if self.af_extractor:
self.af_extractor.eval()
batch_x = self.af_extractor(batch_x)
self.model.eval()
if self.scalar_path.is_file():
batch_x = (batch_x - self.mean) / self.std
pred = self.model(batch_x)
pred['sed'] = torch.sigmoid(pred['sed'])
pred_sed_list.append(pred['sed'].cpu().detach().numpy())
pred_doa_list.append(pred['doa'].cpu().detach().numpy())
iterator.close()
pred_sed = np.concatenate(pred_sed_list, axis=0)
pred_doa = np.concatenate(pred_doa_list, axis=0)
pred_sed = pred_sed.reshape((pred_sed.shape[0] * pred_sed.shape[1], 3, -1))
pred_doa = pred_doa.reshape((pred_doa.shape[0] * pred_doa.shape[1], 3, -1))
pred = {
'sed': pred_sed,
'doa': pred_doa
}
return pred
def fusion(self, submissions_dir, predictions_dir, preds):
""" Average ensamble predictions
"""
num_preds = len(preds)
pred_sed = []
pred_doa = []
for n in range(num_preds):
pred_sed.append(preds[n]['sed'])
pred_doa.append(preds[n]['doa'])
pred_sed = np.array(pred_sed).mean(axis=0) # Ensemble
pred_doa = np.array(pred_doa).mean(axis=0) # Ensemble
prediction_path = predictions_dir.joinpath('predictions.h5')
with h5py.File(prediction_path, 'w') as hf:
hf.create_dataset(name='sed', data=pred_sed, dtype=np.float32)
hf.create_dataset(name='doa', data=pred_doa, dtype=np.float32)
N = pred_sed.shape[0]
pred_sed_max = pred_sed.max(axis=-1)
pred_sed_max_idx = pred_sed.argmax(axis=-1)
pred_sed = np.zeros_like(pred_sed)
for b_idx in range(N):
for track_idx in range(3):
pred_sed[b_idx, track_idx, pred_sed_max_idx[b_idx, track_idx]] = \
pred_sed_max[b_idx, track_idx]
pred_sed = (pred_sed > self.cfg['inference']['threshold_sed']).astype(np.float32)
        # convert Cartesian to spherical
azi = np.arctan2(pred_doa[..., 1], pred_doa[..., 0])
elev = np.arctan2(pred_doa[..., 2], np.sqrt(pred_doa[..., 0]**2 + pred_doa[..., 1]**2))
pred_doa = np.stack((azi, elev), axis=-1) # (N, tracks, (azi, elev))
frame_ind = 0
for idx, path in enumerate(self.paths_dict):
loc_frames = self.paths_dict[path]
fn = path.split('/')[-1].replace('h5','csv')
num_frames = int(np.ceil(loc_frames / (self.cfg['data']['test_chunklen_sec'] / self.label_resolution)) *\
(self.cfg['data']['test_chunklen_sec'] / self.label_resolution))
pred_dcase_format = track_to_dcase_format(pred_sed[frame_ind:frame_ind+loc_frames], pred_doa[frame_ind:frame_ind+loc_frames])
csv_path = submissions_dir.joinpath(fn)
write_output_format_file(csv_path, pred_dcase_format)
frame_ind += num_frames
        print('Results are saved to {}\n'.format(str(submissions_dir)))
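        # Worked micro-example of the Cartesian -> spherical conversion above
        # (illustrative values, not repo output): a unit vector along +y gives
        # azimuth pi/2 and elevation 0.
        #   np.arctan2(1.0, 0.0)                          # azi ~= 1.5708
        #   np.arctan2(0.0, np.sqrt(0.0**2 + 1.0**2))     # elev = 0.0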
| 5,046 | 40.710744 | 137 | py |
DCASE2022-TASK3 | DCASE2022-TASK3-main/code/methods/ein_seld/losses.py | import numpy as np
import torch
import sys
from methods.utils.loss_utilities import BCEWithLogitsLoss, MSELoss
from torch import linalg as LA
from itertools import permutations
class Losses:
def __init__(self, cfg):
self.cfg = cfg
self.beta = cfg['training']['loss_beta']
self.losses = [BCEWithLogitsLoss(reduction='mean'), MSELoss(reduction='mean')]
self.losses_pit = [BCEWithLogitsLoss(reduction='PIT'), MSELoss(reduction='PIT')]
self.names = ['loss_all'] + [loss.name for loss in self.losses]
def calculate(self, pred, target, epoch_it=0):
if 'PIT' not in self.cfg['training']['PIT_type']:
loss_sed = self.losses[0].calculate_loss(pred['sed'], target['sed'])
loss_doa = self.losses[1].calculate_loss(pred['doa'], target['doa'])
elif self.cfg['training']['PIT_type'] == 'tPIT':
loss_sed, loss_doa = self.tPIT(pred, target)
loss_all = self.beta * loss_sed + (1 - self.beta) * loss_doa
losses_dict = {
'all': loss_all,
'sed': loss_sed,
'doa': loss_doa,
}
return losses_dict
#### modify tracks
def tPIT(self, pred, target):
"""Frame Permutation Invariant Training for 6 possible combinations
Args:
pred: {
'sed': [batch_size, T, num_tracks=3, num_classes],
'doa': [batch_size, T, num_tracks=3, doas=3]
}
target: {
'sed': [batch_size, T, num_tracks=3, num_classes],
'doa': [batch_size, T, num_tracks=3, doas=3]
}
Return:
            loss_sed: SED loss computed with the per-frame track permutation that gives the lowest combined SED + DOA loss.
            loss_doa: DOA loss computed with the same permutation.
"""
perm_list = list(permutations(range(pred['doa'].shape[2])))
loss_sed_list = []
loss_doa_list = []
loss_list = []
loss_sed = 0
loss_doa = 0
updated_target_doa = 0
updated_target_sed = 0
for idx, perm in enumerate(perm_list):
loss_sed_list.append(self.losses_pit[0].calculate_loss(pred['sed'], target['sed'][:,:,list(perm),:]))
loss_doa_list.append(self.losses_pit[1].calculate_loss(pred['doa'], target['doa'][:,:,list(perm),:]))
loss_list.append(loss_sed_list[idx]+loss_doa_list[idx])
loss_list = torch.stack(loss_list, dim=0)
loss_idx = torch.argmin(loss_list, dim=0)
for idx, perm in enumerate(perm_list):
loss_sed += loss_sed_list[idx] * (loss_idx == idx)
loss_doa += loss_doa_list[idx] * (loss_idx == idx)
updated_target_doa += target['doa'][:, :, list(perm), :] * ((loss_idx == idx)[:, :, None, None])
updated_target_sed += target['sed'][:, :, list(perm), :] * ((loss_idx == idx)[:, :, None, None])
loss_sed = loss_sed.mean()
loss_doa = loss_doa.mean()
updated_target = {
'doa': updated_target_doa,
'sed': updated_target_sed,
}
return loss_sed, loss_doa | 3,138 | 41.418919 | 114 | py |
DCASE2022-TASK3 | DCASE2022-TASK3-main/code/methods/ein_seld/training.py | from pathlib import Path
import random
import sys
from timeit import default_timer as timer
import h5py
import numpy as np
import torch
from methods.training import BaseTrainer
from utils.ddp_init import reduce_value, gather_value, get_rank, get_world_size
from methods.utils.data_utilities import track_to_dcase_format, to_metrics_format
class Trainer(BaseTrainer):
def __init__(self, args, cfg, dataset, af_extractor, valid_set, model, optimizer, losses, metrics):
super().__init__()
self.cfg = cfg
self.af_extractor = af_extractor
self.model = model
self.optimizer = optimizer
self.losses = losses
self.metrics = metrics
self.cuda = args.cuda
self.max_ov = dataset.max_ov
self.rank = get_rank()
self.world_size = get_world_size()
self.label_resolution = dataset.label_resolution
# Load ground truth for dcase metrics
self.valid_paths_dict = valid_set.paths_dict
self.gt_metrics_dict = valid_set.gt_metrics_dict
self.points_per_predictions = valid_set.points_per_predictions
# Scalar
cfg_data = cfg['data']
dataset_name = '_'.join(sorted(str(cfg['dataset_synth']).split(',')))
scalar_h5_dir = Path(cfg['hdf5_dir']).joinpath(cfg['dataset']).joinpath('data').\
joinpath('{}fs'.format(cfg_data['sample_rate'])).joinpath('scalar')
fn_scalar = '{}_nfft{}_hop{}_mel{}_{}.h5'.format(cfg['data']['audio_feature'], \
cfg_data['nfft'], cfg_data['hoplen'], cfg_data['n_mels'], dataset_name)
self.scalar_path = scalar_h5_dir.joinpath(fn_scalar)
if self.scalar_path.is_file():
print('scalar path is used!', self.scalar_path)
with h5py.File(self.scalar_path, 'r') as hf:
self.mean = hf['mean'][:]
self.std = hf['std'][:]
if args.cuda:
self.mean = torch.tensor(self.mean, dtype=torch.float32).to(self.rank)
self.std = torch.tensor(self.std, dtype=torch.float32).to(self.rank)
self.init_train_losses()
def init_train_losses(self):
""" Initialize train losses
"""
self.train_losses = {
'loss_all': 0.,
'loss_sed': 0.,
'loss_doa': 0.,
}
def train_step(self, batch_sample, epoch_it):
""" Perform a train step
"""
batch_x = batch_sample['data']
batch_target = {
'sed': batch_sample['sed_label'],
'doa': batch_sample['doa_label'],
'ov': batch_sample['ov'],
}
if self.cuda:
batch_x = batch_x.to(self.rank, non_blocking=True)
batch_target['sed'] = batch_target['sed'].to(self.rank, non_blocking=True)
batch_target['doa'] = batch_target['doa'].to(self.rank, non_blocking=True)
self.optimizer.zero_grad()
if self.af_extractor:
self.af_extractor.train()
batch_x = self.af_extractor(batch_x)
self.model.train()
if self.scalar_path.is_file():
batch_x = (batch_x - self.mean) / self.std
pred = self.model(batch_x)
loss_dict = self.losses.calculate(pred, batch_target)
loss_dict[self.cfg['training']['loss_type']].backward()
self.optimizer.step()
self.train_losses['loss_all'] += loss_dict['all'].detach()
self.train_losses['loss_sed'] += loss_dict['sed'].detach()
self.train_losses['loss_doa'] += loss_dict['doa'].detach()
def validate_step(self, generator=None, max_batch_num=None, valid_type='train', epoch_it=0):
""" Perform the validation on the train, valid set
Generate a batch of segmentations each time
"""
if valid_type == 'train':
train_losses = self.train_losses.copy()
self.init_train_losses()
return train_losses
elif valid_type == 'valid':
pred_sed_list, pred_doa_list = [], []
loss_all, loss_sed, loss_doa = 0., 0., 0.
for batch_idx, batch_sample in enumerate(generator):
if batch_idx == max_batch_num:
break
batch_x = batch_sample['data']
batch_target = {
'sed': batch_sample['sed_label'],
'doa': batch_sample['doa_label'],
}
if self.cuda:
batch_x = batch_x.to(self.rank, non_blocking=True)
batch_target['sed'] = batch_target['sed'].to(self.rank, non_blocking=True)
batch_target['doa'] = batch_target['doa'].to(self.rank, non_blocking=True)
with torch.no_grad():
if self.af_extractor:
self.af_extractor.eval()
batch_x = self.af_extractor(batch_x)
self.model.eval()
if self.scalar_path.is_file():
batch_x = (batch_x - self.mean) / self.std
pred = self.model(batch_x)
loss_dict = self.losses.calculate(pred, batch_target, epoch_it)
pred['sed'] = torch.sigmoid(pred['sed'])
loss_all += loss_dict['all'].detach()
loss_sed += loss_dict['sed'].detach()
loss_doa += loss_dict['doa'].detach()
pred_sed_list.append(pred['sed'].detach())
pred_doa_list.append(pred['doa'].detach())
pred_sed = torch.concat(pred_sed_list, axis=0)
pred_doa = torch.concat(pred_doa_list, axis=0)
# gather data
pred_sed = gather_value(pred_sed).cpu().numpy()
pred_doa = gather_value(pred_doa).cpu().numpy()
pred_sed_max = pred_sed.max(axis=-1)
pred_sed_max_idx = pred_sed.argmax(axis=-1)
pred_sed = np.zeros_like(pred_sed)
for b_idx in range(pred_sed.shape[0]):
for t_idx in range(pred_sed.shape[1]):
for track_idx in range(self.max_ov):
pred_sed[b_idx, t_idx, track_idx, pred_sed_max_idx[b_idx, t_idx, track_idx]] = \
pred_sed_max[b_idx, t_idx, track_idx]
pred_sed = (pred_sed > self.cfg['training']['threshold_sed']).astype(np.float32)
pred_sed = pred_sed.reshape(pred_sed.shape[0] * pred_sed.shape[1], self.max_ov, -1)
pred_doa = pred_doa.reshape(pred_doa.shape[0] * pred_doa.shape[1], self.max_ov, -1)
            # convert Cartesian to spherical
azi = np.arctan2(pred_doa[..., 1], pred_doa[..., 0])
elev = np.arctan2(pred_doa[..., 2], np.sqrt(pred_doa[..., 0]**2 + pred_doa[..., 1]**2))
pred_doa = np.stack((azi, elev), axis=-1) # (N, tracks, (azi, elev))
frame_ind = 0
for _, path in enumerate(self.valid_paths_dict):
loc_frames = self.valid_paths_dict[path]
num_frames = int(np.ceil(loc_frames / (self.cfg['data']['test_chunklen_sec'] / self.label_resolution)) *\
(self.cfg['data']['test_chunklen_sec'] / self.label_resolution))
pred_dcase_format = track_to_dcase_format(pred_sed[frame_ind:frame_ind+loc_frames], pred_doa[frame_ind:frame_ind+loc_frames])
pred_metrics_format = to_metrics_format(pred_dcase_format, num_frames=loc_frames)
frame_ind += num_frames
self.metrics.update(pred_metrics_format, self.gt_metrics_dict[path])
out_losses = {
'loss_all': loss_all / (batch_idx + 1),
'loss_sed': loss_sed / (batch_idx + 1),
'loss_doa': loss_doa / (batch_idx + 1),
}
for k, v in out_losses.items():
out_losses[k] = reduce_value(v).cpu().numpy()
metrics_scores = self.metrics.calculate()
return out_losses, metrics_scores
| 8,056 | 41.856383 | 141 | py |
DCASE2022-TASK3 | DCASE2022-TASK3-main/code/methods/ein_seld/data.py | from pathlib import Path
import pandas as pd
from timeit import default_timer as timer
import h5py
import numpy as np
import torch
from methods.utils.data_utilities import load_output_format_file, to_metrics_format
from torch.utils.data import Dataset, Sampler
from utils.common import int16_samples_to_float32
from utils.ddp_init import get_rank, get_world_size
class UserDataset(Dataset):
""" User defined datset
"""
def __init__(self, cfg, dataset, dataset_type='train'):
"""
Args:
cfg: configurations
dataset: dataset used
dataset_type: 'train' | 'dev' | 'fold4_test' | 'eval_test' .
'train' and 'dev' are only used while training.
'fold4_test' and 'eval_test' are only used while infering.
"""
super().__init__()
self.cfg = cfg
self.dataset_type = dataset_type
self.label_resolution = dataset.label_resolution
self.max_ov = dataset.max_ov
self.num_classes = dataset.num_classes
self.rank = get_rank()
self.num_replicas = get_world_size()
self.audio_feature = cfg['data']['audio_feature']
self.data_type = 'wav' if cfg['data']['audio_feature'] == 'logmelIV' else 'feature'
dataset_stage = 'eval' if 'eval' in dataset_type else 'dev'
# data dir
hdf5_dir = Path(cfg['hdf5_dir']).joinpath(cfg['dataset'])
main_data_dir = hdf5_dir.joinpath('data').joinpath('{}fs'.format(cfg['data']['sample_rate']))\
.joinpath(self.data_type)
dataset_list = str(cfg['dataset_synth']).split(',')
dataset_list.append('STARSS22')
if self.data_type == 'feature':
self.data_dir = main_data_dir.joinpath(dataset_stage).joinpath(cfg['data']['audio_feature'])
self.points_per_predictions = int(dataset.label_resolution / (cfg['data']['hoplen'] / cfg['data']['sample_rate']))
else:
self.data_dir = main_data_dir.joinpath(dataset_stage).joinpath(cfg['data']['type'])
self.points_per_predictions = cfg['data']['sample_rate'] * dataset.label_resolution
# mete dir
label_dir = hdf5_dir.joinpath('label')
self.frame_meta_dir = label_dir.joinpath('frame')
# self.track_meta_dir = label_dir.joinpath('track_pit_ov{}of5'.format(dataset.max_ov)).joinpath(dataset_stage)
self.track_meta_dir = label_dir.joinpath('track_pit_ov{}of5_discontinuous'.format(dataset.max_ov)).joinpath(dataset_stage)
# segments_list: data path and n_th segment
if self.dataset_type == 'train':
indexes_path = main_data_dir.joinpath('{}set_{}sChunklen_{}sHoplen_train.csv'\
.format(dataset_stage, cfg['data']['train_chunklen_sec'], cfg['data']['train_hoplen_sec']))
segments_indexes = pd.read_csv(indexes_path, header=None).values
train_fold = ['fold'+fold.strip() for fold in str(cfg['training']['train_fold']).split(',')]
segments_indexes = [segment for segment in segments_indexes for _dataset in dataset_list if _dataset in segment[0]]
self.segments_list = [clip_segment for clip_segment in segments_indexes \
for fold in train_fold if fold in clip_segment[0] ]
elif self.dataset_type == 'dev':
indexes_path = main_data_dir.joinpath('{}set_{}sChunklen_{}sHoplen_test.csv'\
.format(dataset_stage, cfg['data']['test_chunklen_sec'], cfg['data']['test_hoplen_sec']))
segments_indexes = pd.read_csv(indexes_path, header=None).values
valid_fold = ['fold'+fold.strip() for fold in str(cfg['training']['valid_fold']).split(',')]
segments_indexes = [segment for segment in segments_indexes for _dataset in dataset_list if _dataset in segment[0]]
self.segments_list = [clip_segment for clip_segment in segments_indexes \
for fold in valid_fold if fold in clip_segment[0] ]
# load metadata
self.paths_dict = {} # {path: num_frames}
for segment in self.segments_list:
self.paths_dict[segment[0]] = int(np.ceil(segment[2]/self.points_per_predictions))
# each gpu use different sampler (the same as DistributedSampler)
num_segments_per_gpu = int(np.ceil(len(self.segments_list) / self.num_replicas))
self.segments_list = self.segments_list[self.rank * num_segments_per_gpu : (self.rank+1) * num_segments_per_gpu]
self.gt_metrics_dict = {} # {path: metrics_dict}
for file in self.paths_dict:
path = self.frame_meta_dir.joinpath(str(file).replace('h5', 'csv'))
valid_gt_dcaseformat = load_output_format_file(path)
self.gt_metrics_dict[file] = to_metrics_format(label_dict=valid_gt_dcaseformat, \
num_frames=self.paths_dict[file], label_resolution=self.label_resolution)
elif 'test' in self.dataset_type:
indexes_path = main_data_dir.joinpath('{}set_{}sChunklen_{}sHoplen_train.csv'\
.format(dataset_stage, cfg['data']['test_chunklen_sec'], cfg['data']['test_hoplen_sec']))
segments_indexes = pd.read_csv(indexes_path, header=None).values
# test_fold = ['fold'+fold.strip() for fold in str(cfg['inference']['test_fold']).split(',')]
test_fold = ['fold'+fold.strip() for fold in str(cfg['inference']['test_fold']).split(',')] \
if 'eval' not in dataset_type else ['mix']
self.segments_list = [clip_segment for clip_segment in segments_indexes \
for fold in test_fold if fold in clip_segment[0] ]
self.paths_dict = {} # {path: num_frames}
for segment in self.segments_list:
self.paths_dict[segment[0]] = int(np.ceil(segment[2]/self.points_per_predictions))
def __len__(self):
"""Get length of the dataset
"""
return len(self.segments_list)
def __getitem__(self, idx):
"""
Read features from the dataset
"""
clip_indexes = self.segments_list[idx]
fn, segments = clip_indexes[0], clip_indexes[1:]
data_path = self.data_dir.joinpath(fn)
index_begin = segments[0]
index_end = segments[1]
pad_width_before = segments[2]
pad_width_after = segments[3]
if self.data_type == 'wav':
with h5py.File(data_path, 'r') as hf:
x = int16_samples_to_float32(hf['waveform'][:, index_begin: index_end])
pad_width = ((0, 0), (pad_width_before, pad_width_after))
else:
with h5py.File(data_path, 'r') as hf:
x = hf['feature'][:, index_begin: index_end]
pad_width = ((0, 0), (pad_width_before, pad_width_after), (0, 0))
x = np.pad(x, pad_width, mode='constant')
if 'test' not in self.dataset_type:
meta_path = self.track_meta_dir.joinpath(fn)
index_begin_label = int(index_begin / self.points_per_predictions)
index_end_label = int(index_end / self.points_per_predictions)
with h5py.File(meta_path, 'r') as hf:
sed_label = hf['sed_label'][index_begin_label: index_end_label, ...]
doa_label = hf['doa_label'][index_begin_label: index_end_label, ...]
pad_width_after_label = int(self.cfg['data']['train_chunklen_sec'] / self.label_resolution - sed_label.shape[0])
if pad_width_after_label != 0:
sed_label_new = np.zeros((pad_width_after_label, self.max_ov, self.num_classes))
sed_label = np.concatenate((sed_label, sed_label_new), axis=0)
doa_label_new = np.zeros((pad_width_after_label, self.max_ov, 3))
doa_label = np.concatenate((doa_label, doa_label_new), axis=0)
if 'test' not in self.dataset_type:
sample = {
'filename': fn,
'data': x,
'sed_label': sed_label,
'doa_label': doa_label,
'ov': str(max(np.sum(sed_label, axis=(1,2)).max(),1)),
}
else:
sample = {
'filename': fn,
'data': x
}
return sample
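# Minimal usage sketch for UserDataset (added note; `cfg` and `dataset` come from
# the experiment configuration and are assumptions here): during training each
# item carries the padded feature chunk plus per-track SED/DOA labels.
#   train_set = UserDataset(cfg, dataset, dataset_type='train')
#   sample = train_set[0]
#   sample['data'].shape, sample['sed_label'].shape, sample['doa_label'].shape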
class UserBatchSampler(Sampler):
"""User defined batch sampler. Only for train set.
"""
def __init__(self, clip_num, batch_size, seed=2022, drop_last=False):
self.clip_num = clip_num
self.batch_size = batch_size
self.random_state = None
self.indexes = np.arange(self.clip_num)
self.pointer = 0
self.epoch = 0
self.drop_last = drop_last
self.seed = seed
self.num_replicas = get_world_size()
self.rank = get_rank()
self.random_state = np.random.RandomState(self.seed+self.epoch)
self.random_state.shuffle(self.indexes)
if not self.drop_last:
if self.clip_num % (self.batch_size*self.num_replicas) != 0:
padding_size = self.batch_size*self.num_replicas - self.clip_num % (self.batch_size*self.num_replicas)
self.indexes = np.append(self.indexes, self.indexes[:padding_size])
self.clip_num = self.clip_num + padding_size
def get_state(self):
sampler_state = {
'random': self.random_state.get_state(),
'indexes': self.indexes,
'pointer': self.pointer
}
return sampler_state
def set_state(self, sampler_state):
self.random_state.set_state(sampler_state['random'])
self.indexes = sampler_state['indexes']
self.pointer = sampler_state['pointer']
def __iter__(self):
"""
Return:
            batch_indexes (np.ndarray): indexes of the current batch for this rank
"""
while True:
if self.pointer >= self.clip_num:
self.pointer = 0
self.random_state.shuffle(self.indexes)
batch_indexes = self.indexes[self.pointer: self.pointer + self.batch_size * self.num_replicas]
self.pointer += self.batch_size * self.num_replicas
batch_indexes = batch_indexes[self.rank:self.clip_num:self.num_replicas]
yield batch_indexes
def __len__(self):
return (self.clip_num + self.num_replicas * self.batch_size - 1) // (self.num_replicas * self.batch_size)
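# Hedged sketch of how UserBatchSampler is consumed (batch size is an assumption):
# each iteration yields only the indexes assigned to this rank, so the effective
# global batch is batch_size * world_size.
#   sampler = UserBatchSampler(clip_num=len(train_set), batch_size=32)
#   batch_indexes = next(iter(sampler))   # np.ndarray of local sample indexes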
class PinMemCustomBatch:
def __init__(self, batch_dict):
batch_fn = []
batch_x = []
batch_ov = []
batch_sed_label = []
batch_doa_label = []
for n in range(len(batch_dict)):
batch_fn.append(batch_dict[n]['filename'])
batch_x.append(batch_dict[n]['data'])
batch_ov.append(batch_dict[n]['ov'])
batch_sed_label.append(batch_dict[n]['sed_label'])
batch_doa_label.append(batch_dict[n]['doa_label'])
batch_x = np.stack(batch_x, axis=0)
batch_sed_label = np.stack(batch_sed_label, axis=0)
batch_doa_label = np.stack(batch_doa_label, axis=0)
self.batch_out_dict = {
'filename': batch_fn,
'ov': batch_ov,
'data': torch.tensor(batch_x, dtype=torch.float32),
'sed_label': torch.tensor(batch_sed_label, dtype=torch.float32),
'doa_label': torch.tensor(batch_doa_label, dtype=torch.float32),
}
def pin_memory(self):
self.batch_out_dict['data'] = self.batch_out_dict['data'].pin_memory()
self.batch_out_dict['sed_label'] = self.batch_out_dict['sed_label'].pin_memory()
self.batch_out_dict['doa_label'] = self.batch_out_dict['doa_label'].pin_memory()
return self.batch_out_dict
def collate_fn(batch_dict):
"""
Merges a list of samples to form a mini-batch
Pin memory for customized dataset
"""
return PinMemCustomBatch(batch_dict)
class PinMemCustomBatchTest:
def __init__(self, batch_dict):
batch_fn = []
batch_x = []
for n in range(len(batch_dict)):
batch_fn.append(batch_dict[n]['filename'])
batch_x.append(batch_dict[n]['data'])
batch_x = np.stack(batch_x, axis=0)
self.batch_out_dict = {
'filename': batch_fn,
'data': torch.tensor(batch_x, dtype=torch.float32)
}
def pin_memory(self):
self.batch_out_dict['data'] = self.batch_out_dict['data'].pin_memory()
return self.batch_out_dict
def collate_fn_test(batch_dict):
"""
Merges a list of samples to form a mini-batch
Pin memory for customized dataset
"""
return PinMemCustomBatchTest(batch_dict)
| 12,771 | 44.29078 | 130 | py |
DCASE2022-TASK3 | DCASE2022-TASK3-main/code/methods/ein_seld/__init__.py | from .. import metrics
from . import data, losses, models, training, inference | 78 | 38.5 | 55 | py |
DCASE2022-TASK3 | DCASE2022-TASK3-main/code/methods/ein_seld/models/__init__.py | from .ConvConformer import *
from .DenseConformer import *
from .ConvTransformer import *
| 91 | 17.4 | 30 | py |
DCASE2022-TASK3 | DCASE2022-TASK3-main/code/methods/ein_seld/models/ConvConformer.py | import torch
import torch.nn as nn
from methods.utils.model_utilities import (DoubleConv, init_layer)
from methods.utils.conformer.encoder import ConformerBlocks
class ConvConformer(nn.Module):
def __init__(self, cfg, dataset):
super().__init__()
self.cfg = cfg
self.num_classes = dataset.num_classes
if cfg['data']['audio_feature'] in ['logmelIV', 'salsa', 'salsalite']:
self.sed_in_channels = 4
self.doa_in_channels = 7
elif cfg['data']['audio_feature'] in ['logmelgcc']:
self.sed_in_channels = 4
self.doa_in_channels = 10
self.sed_conv_block1 = nn.Sequential(
DoubleConv(in_channels=self.sed_in_channels, out_channels=64),
nn.AvgPool2d(kernel_size=(2, 2)),
)
self.sed_conv_block2 = nn.Sequential(
DoubleConv(in_channels=64, out_channels=128),
nn.AvgPool2d(kernel_size=(2, 2)),
)
self.sed_conv_block3 = nn.Sequential(
DoubleConv(in_channels=128, out_channels=256),
nn.AvgPool2d(kernel_size=(2, 2)),
)
self.sed_conv_block4 = nn.Sequential(
DoubleConv(in_channels=256, out_channels=512),
nn.AvgPool2d(kernel_size=(1, 2)),
)
self.doa_conv_block1 = nn.Sequential(
DoubleConv(in_channels=self.doa_in_channels, out_channels=64),
nn.AvgPool2d(kernel_size=(2, 2)),
)
self.doa_conv_block2 = nn.Sequential(
DoubleConv(in_channels=64, out_channels=128),
nn.AvgPool2d(kernel_size=(2, 2)),
)
self.doa_conv_block3 = nn.Sequential(
DoubleConv(in_channels=128, out_channels=256),
nn.AvgPool2d(kernel_size=(2, 2)),
)
self.doa_conv_block4 = nn.Sequential(
DoubleConv(in_channels=256, out_channels=512),
nn.AvgPool2d(kernel_size=(1, 2)),
)
self.stitch = nn.ParameterList([
nn.Parameter(torch.FloatTensor(64, 2, 2).uniform_(0.1, 0.9)),
nn.Parameter(torch.FloatTensor(128, 2, 2).uniform_(0.1, 0.9)),
nn.Parameter(torch.FloatTensor(256, 2, 2).uniform_(0.1, 0.9)),
nn.Parameter(torch.FloatTensor(512, 2, 2).uniform_(0.1, 0.9)),
nn.Parameter(torch.FloatTensor(512, 2, 2).uniform_(0.1, 0.9)),
nn.Parameter(torch.FloatTensor(512, 2, 2).uniform_(0.1, 0.9))
])
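        # (Added note) These per-channel weights act as cross-stitch units between the
        # SED and DOA branches: after each block, every channel of a branch is replaced
        # by a learned weighted blend of the corresponding channels of both branches,
        # which is what the einsum calls in forward() compute.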
self.sed_conformer_track1 = ConformerBlocks(encoder_dim=512, num_layers=2)
self.sed_conformer_track2 = ConformerBlocks(encoder_dim=512, num_layers=2)
self.sed_conformer_track3 = ConformerBlocks(encoder_dim=512, num_layers=2)
self.doa_conformer_track1 = ConformerBlocks(encoder_dim=512, num_layers=2)
self.doa_conformer_track2 = ConformerBlocks(encoder_dim=512, num_layers=2)
self.doa_conformer_track3 = ConformerBlocks(encoder_dim=512, num_layers=2)
self.fc_sed_track1 = nn.Linear(512, self.num_classes, bias=True)
self.fc_sed_track2 = nn.Linear(512, self.num_classes, bias=True)
self.fc_sed_track3 = nn.Linear(512, self.num_classes, bias=True)
self.fc_doa_track1 = nn.Linear(512, 3, bias=True)
self.fc_doa_track2 = nn.Linear(512, 3, bias=True)
self.fc_doa_track3 = nn.Linear(512, 3, bias=True)
self.final_act_sed = nn.Sequential() # nn.Sigmoid()
self.final_act_doa = nn.Tanh()
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight)
elif isinstance(m, nn.BatchNorm2d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
self.init_weight()
def init_weight(self):
init_layer(self.fc_sed_track1)
init_layer(self.fc_sed_track2)
init_layer(self.fc_sed_track3)
init_layer(self.fc_doa_track1)
init_layer(self.fc_doa_track2)
init_layer(self.fc_doa_track3)
def forward(self, x: torch.Tensor):
"""
x: spectrogram, (batch_size, num_channels, num_frames, num_freqBins)
"""
x_sed = x[:, :self.sed_in_channels]
x_doa = x
# cnn
x_sed = self.sed_conv_block1(x_sed)
x_doa = self.doa_conv_block1(x_doa)
x_sed = torch.einsum('c, nctf -> nctf', self.stitch[0][:, 0, 0], x_sed) + \
torch.einsum('c, nctf -> nctf', self.stitch[0][:, 0, 1], x_doa)
x_doa = torch.einsum('c, nctf -> nctf', self.stitch[0][:, 1, 0], x_sed) + \
torch.einsum('c, nctf -> nctf', self.stitch[0][:, 1, 1], x_doa)
x_sed = self.sed_conv_block2(x_sed)
x_doa = self.doa_conv_block2(x_doa)
x_sed = torch.einsum('c, nctf -> nctf', self.stitch[1][:, 0, 0], x_sed) + \
torch.einsum('c, nctf -> nctf', self.stitch[1][:, 0, 1], x_doa)
x_doa = torch.einsum('c, nctf -> nctf', self.stitch[1][:, 1, 0], x_sed) + \
torch.einsum('c, nctf -> nctf', self.stitch[1][:, 1, 1], x_doa)
x_sed = self.sed_conv_block3(x_sed)
x_doa = self.doa_conv_block3(x_doa)
x_sed = torch.einsum('c, nctf -> nctf', self.stitch[2][:, 0, 0], x_sed) + \
torch.einsum('c, nctf -> nctf', self.stitch[2][:, 0, 1], x_doa)
x_doa = torch.einsum('c, nctf -> nctf', self.stitch[2][:, 1, 0], x_sed) + \
torch.einsum('c, nctf -> nctf', self.stitch[2][:, 1, 1], x_doa)
x_sed = self.sed_conv_block4(x_sed)
x_doa = self.doa_conv_block4(x_doa)
x_sed = x_sed.mean(dim=3) # (N, C, T)
x_doa = x_doa.mean(dim=3) # (N, C, T)
# Conformer
x_sed = x_sed.permute(0, 2, 1) # (N, T, C)
x_doa = x_doa.permute(0, 2, 1) # (N, T, C)
x_sed_1 = self.sed_conformer_track1(x_sed) # (N, T, C)
x_doa_1 = self.doa_conformer_track1(x_doa) # (N, T, C)
x_sed_1 = torch.einsum('c, ntc -> ntc', self.stitch[3][:, 0, 0], x_sed_1) + \
torch.einsum('c, ntc -> ntc', self.stitch[3][:, 0, 1], x_doa_1)
x_doa_1 = torch.einsum('c, ntc -> ntc', self.stitch[3][:, 1, 0], x_sed_1) + \
torch.einsum('c, ntc -> ntc', self.stitch[3][:, 1, 1], x_doa_1)
x_sed_2 = self.sed_conformer_track2(x_sed) # (N, T, C)
x_doa_2 = self.doa_conformer_track2(x_doa) # (N, T, C)
x_sed_2 = torch.einsum('c, ntc -> ntc', self.stitch[4][:, 0, 0], x_sed_2) + \
torch.einsum('c, ntc -> ntc', self.stitch[4][:, 0, 1], x_doa_2)
x_doa_2 = torch.einsum('c, ntc -> ntc', self.stitch[4][:, 1, 0], x_sed_2) + \
torch.einsum('c, ntc -> ntc', self.stitch[4][:, 1, 1], x_doa_2)
x_sed_3 = self.sed_conformer_track3(x_sed) # (N, T, C)
x_doa_3 = self.doa_conformer_track3(x_doa) # (N, T, C)
x_sed_3 = torch.einsum('c, ntc -> ntc', self.stitch[5][:, 0, 0], x_sed_3) + \
torch.einsum('c, ntc -> ntc', self.stitch[5][:, 0, 1], x_doa_3)
x_doa_3 = torch.einsum('c, ntc -> ntc', self.stitch[5][:, 1, 0], x_sed_3) + \
torch.einsum('c, ntc -> ntc', self.stitch[5][:, 1, 1], x_doa_3)
# fc
x_sed_1 = self.final_act_sed(self.fc_sed_track1(x_sed_1))
x_sed_2 = self.final_act_sed(self.fc_sed_track2(x_sed_2))
x_sed_3 = self.final_act_sed(self.fc_sed_track3(x_sed_3))
x_sed = torch.stack((x_sed_1, x_sed_2, x_sed_3), 2)
x_doa_1 = self.final_act_doa(self.fc_doa_track1(x_doa_1))
x_doa_2 = self.final_act_doa(self.fc_doa_track2(x_doa_2))
x_doa_3 = self.final_act_doa(self.fc_doa_track3(x_doa_3))
x_doa = torch.stack((x_doa_1, x_doa_2, x_doa_3), 2)
output = {
'sed': x_sed,
'doa': x_doa,
}
return output
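# Shape-check sketch (added note; input sizes are assumptions, `cfg` and `dataset`
# come from the experiment configuration): with a 7-channel logmelIV input the
# network returns per-track SED logits and Cartesian DOA vectors.
#   model = ConvConformer(cfg, dataset)
#   out = model(torch.randn(2, 7, 250, 64))   # (batch, channels, frames, mel bins)
#   out['sed'].shape                          # (2, T', 3, num_classes)
#   out['doa'].shape                          # (2, T', 3, 3)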
| 7,746 | 44.840237 | 85 | py |
DCASE2022-TASK3 | DCASE2022-TASK3-main/code/methods/ein_seld/models/ConvTransformer.py | import torch
import torch.nn as nn
from methods.utils.model_utilities import (DoubleConv, PositionalEncoding,
init_layer)
class ConvTransformer(nn.Module):
def __init__(self, cfg, dataset):
super().__init__()
        self.pe_enable = False # True | False
self.cfg = cfg
self.num_classes = dataset.num_classes
if cfg['data']['audio_feature'] in ['logmelIV', 'salsa', 'salsalite']:
self.sed_in_channels = 4
self.doa_in_channels = 7
elif cfg['data']['audio_feature'] in ['logmelgcc']:
self.sed_in_channels = 4
self.doa_in_channels = 10
self.sed_conv_block1 = nn.Sequential(
DoubleConv(in_channels=self.sed_in_channels, out_channels=64),
nn.AvgPool2d(kernel_size=(2, 2)),
)
self.sed_conv_block2 = nn.Sequential(
DoubleConv(in_channels=64, out_channels=128),
nn.AvgPool2d(kernel_size=(2, 2)),
)
self.sed_conv_block3 = nn.Sequential(
DoubleConv(in_channels=128, out_channels=256),
nn.AvgPool2d(kernel_size=(2, 2)),
)
self.sed_conv_block4 = nn.Sequential(
DoubleConv(in_channels=256, out_channels=512),
nn.AvgPool2d(kernel_size=(1, 2)),
)
self.doa_conv_block1 = nn.Sequential(
DoubleConv(in_channels=self.doa_in_channels, out_channels=64),
nn.AvgPool2d(kernel_size=(2, 2)),
)
self.doa_conv_block2 = nn.Sequential(
DoubleConv(in_channels=64, out_channels=128),
nn.AvgPool2d(kernel_size=(2, 2)),
)
self.doa_conv_block3 = nn.Sequential(
DoubleConv(in_channels=128, out_channels=256),
nn.AvgPool2d(kernel_size=(2, 2)),
)
self.doa_conv_block4 = nn.Sequential(
DoubleConv(in_channels=256, out_channels=512),
nn.AvgPool2d(kernel_size=(1, 2)),
)
self.stitch = nn.ParameterList([
nn.Parameter(torch.FloatTensor(64, 2, 2).uniform_(0.1, 0.9)),
nn.Parameter(torch.FloatTensor(128, 2, 2).uniform_(0.1, 0.9)),
nn.Parameter(torch.FloatTensor(256, 2, 2).uniform_(0.1, 0.9)),
nn.Parameter(torch.FloatTensor(512, 2, 2).uniform_(0.1, 0.9)),
nn.Parameter(torch.FloatTensor(512, 2, 2).uniform_(0.1, 0.9)),
nn.Parameter(torch.FloatTensor(512, 2, 2).uniform_(0.1, 0.9))
])
if self.pe_enable:
self.pe = PositionalEncoding(pos_len=100, d_model=512, pe_type='t', dropout=0.0)
self.sed_trans_track1 = nn.TransformerEncoder(
nn.TransformerEncoderLayer(d_model=512, nhead=8, dim_feedforward=1024, dropout=0.2), num_layers=2)
self.sed_trans_track2 = nn.TransformerEncoder(
nn.TransformerEncoderLayer(d_model=512, nhead=8, dim_feedforward=1024, dropout=0.2), num_layers=2)
self.sed_trans_track3 = nn.TransformerEncoder(
nn.TransformerEncoderLayer(d_model=512, nhead=8, dim_feedforward=1024, dropout=0.2), num_layers=2)
self.doa_trans_track1 = nn.TransformerEncoder(
nn.TransformerEncoderLayer(d_model=512, nhead=8, dim_feedforward=1024, dropout=0.2), num_layers=2)
self.doa_trans_track2 = nn.TransformerEncoder(
nn.TransformerEncoderLayer(d_model=512, nhead=8, dim_feedforward=1024, dropout=0.2), num_layers=2)
self.doa_trans_track3 = nn.TransformerEncoder(
nn.TransformerEncoderLayer(d_model=512, nhead=8, dim_feedforward=1024, dropout=0.2), num_layers=2)
self.fc_sed_track1 = nn.Linear(512, self.num_classes, bias=True)
self.fc_sed_track2 = nn.Linear(512, self.num_classes, bias=True)
self.fc_sed_track3 = nn.Linear(512, self.num_classes, bias=True)
self.fc_doa_track1 = nn.Linear(512, 3, bias=True)
self.fc_doa_track2 = nn.Linear(512, 3, bias=True)
self.fc_doa_track3 = nn.Linear(512, 3, bias=True)
self.final_act_sed = nn.Sequential() # nn.Sigmoid()
self.final_act_doa = nn.Tanh()
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight)
elif isinstance(m, nn.BatchNorm2d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
self.init_weight()
def init_weight(self):
init_layer(self.fc_sed_track1)
init_layer(self.fc_sed_track2)
init_layer(self.fc_sed_track3)
init_layer(self.fc_doa_track1)
init_layer(self.fc_doa_track2)
init_layer(self.fc_doa_track3)
def forward(self, x):
"""
        x: spectrogram, (batch_size, num_channels, num_frames, num_freqBins)
"""
x_sed = x[:, :self.sed_in_channels]
x_doa = x
# cnn
x_sed = self.sed_conv_block1(x_sed)
x_doa = self.doa_conv_block1(x_doa)
x_sed = torch.einsum('c, nctf -> nctf', self.stitch[0][:, 0, 0], x_sed) + \
torch.einsum('c, nctf -> nctf', self.stitch[0][:, 0, 1], x_doa)
x_doa = torch.einsum('c, nctf -> nctf', self.stitch[0][:, 1, 0], x_sed) + \
torch.einsum('c, nctf -> nctf', self.stitch[0][:, 1, 1], x_doa)
x_sed = self.sed_conv_block2(x_sed)
x_doa = self.doa_conv_block2(x_doa)
x_sed = torch.einsum('c, nctf -> nctf', self.stitch[1][:, 0, 0], x_sed) + \
torch.einsum('c, nctf -> nctf', self.stitch[1][:, 0, 1], x_doa)
x_doa = torch.einsum('c, nctf -> nctf', self.stitch[1][:, 1, 0], x_sed) + \
torch.einsum('c, nctf -> nctf', self.stitch[1][:, 1, 1], x_doa)
x_sed = self.sed_conv_block3(x_sed)
x_doa = self.doa_conv_block3(x_doa)
x_sed = torch.einsum('c, nctf -> nctf', self.stitch[2][:, 0, 0], x_sed) + \
torch.einsum('c, nctf -> nctf', self.stitch[2][:, 0, 1], x_doa)
x_doa = torch.einsum('c, nctf -> nctf', self.stitch[2][:, 1, 0], x_sed) + \
torch.einsum('c, nctf -> nctf', self.stitch[2][:, 1, 1], x_doa)
x_sed = self.sed_conv_block4(x_sed)
x_doa = self.doa_conv_block4(x_doa)
x_sed = x_sed.mean(dim=3) # (N, C, T)
x_doa = x_doa.mean(dim=3) # (N, C, T)
# transformer
if self.pe_enable:
x_sed = self.pe(x_sed)
if self.pe_enable:
x_doa = self.pe(x_doa)
x_sed = x_sed.permute(2, 0, 1) # (T, N, C)
x_doa = x_doa.permute(2, 0, 1) # (T, N, C)
x_sed_1 = self.sed_trans_track1(x_sed).transpose(0, 1) # (N, T, C)
x_doa_1 = self.doa_trans_track1(x_doa).transpose(0, 1) # (N, T, C)
x_sed_1 = torch.einsum('c, ntc -> ntc', self.stitch[3][:, 0, 0], x_sed_1) + \
torch.einsum('c, ntc -> ntc', self.stitch[3][:, 0, 1], x_doa_1)
x_doa_1 = torch.einsum('c, ntc -> ntc', self.stitch[3][:, 1, 0], x_sed_1) + \
torch.einsum('c, ntc -> ntc', self.stitch[3][:, 1, 1], x_doa_1)
x_sed_2 = self.sed_trans_track2(x_sed).transpose(0, 1) # (N, T, C)
x_doa_2 = self.doa_trans_track2(x_doa).transpose(0, 1) # (N, T, C)
x_sed_2 = torch.einsum('c, ntc -> ntc', self.stitch[4][:, 0, 0], x_sed_2) + \
torch.einsum('c, ntc -> ntc', self.stitch[4][:, 0, 1], x_doa_2)
x_doa_2 = torch.einsum('c, ntc -> ntc', self.stitch[4][:, 1, 0], x_sed_2) + \
torch.einsum('c, ntc -> ntc', self.stitch[4][:, 1, 1], x_doa_2)
x_sed_3 = self.sed_trans_track3(x_sed).transpose(0, 1) # (N, T, C)
x_doa_3 = self.doa_trans_track3(x_doa).transpose(0, 1) # (N, T, C)
x_sed_3 = torch.einsum('c, ntc -> ntc', self.stitch[5][:, 0, 0], x_sed_3) + \
torch.einsum('c, ntc -> ntc', self.stitch[5][:, 0, 1], x_doa_3)
x_doa_3 = torch.einsum('c, ntc -> ntc', self.stitch[5][:, 1, 0], x_sed_3) + \
torch.einsum('c, ntc -> ntc', self.stitch[5][:, 1, 1], x_doa_3)
# fc
x_sed_1 = self.final_act_sed(self.fc_sed_track1(x_sed_1))
x_sed_2 = self.final_act_sed(self.fc_sed_track2(x_sed_2))
x_sed_3 = self.final_act_sed(self.fc_sed_track3(x_sed_3))
x_sed = torch.stack((x_sed_1, x_sed_2, x_sed_3), 2)
x_doa_1 = self.final_act_doa(self.fc_doa_track1(x_doa_1))
x_doa_2 = self.final_act_doa(self.fc_doa_track2(x_doa_2))
x_doa_3 = self.final_act_doa(self.fc_doa_track3(x_doa_3))
x_doa = torch.stack((x_doa_1, x_doa_2, x_doa_3), 2)
output = {
'sed': x_sed,
'doa': x_doa,
}
return output
| 8,566 | 46.594444 | 110 | py |
DCASE2022-TASK3 | DCASE2022-TASK3-main/code/methods/ein_seld/models/DenseConformer.py | import torch
import torch.nn as nn
from methods.utils.model_utilities import init_layer
from methods.utils.conformer.encoder import ConformerBlocks
from methods.utils.dense_block import _DenseBlock, _Transition
class DenseConformer(nn.Module):
def __init__(self, cfg, dataset):
super().__init__()
        self.pe_enable = False # True | False
self.cfg = cfg
self.num_classes = dataset.num_classes
if cfg['data']['audio_feature'] in ['logmelIV', 'salsa', 'salsalite']:
self.sed_in_channels = 4
self.doa_in_channels = 7
elif cfg['data']['audio_feature'] in ['logmelgcc']:
self.sed_in_channels = 4
self.doa_in_channels = 10
growth_rate = (16, 24, 32, 40)
num_layers = 4
drop_rate = 0.
self.sed_dense_block1 = nn.Sequential(
_DenseBlock(num_layers=num_layers, num_input_features=self.sed_in_channels,
bn_size=4, growth_rate=growth_rate[0], drop_rate=drop_rate),
_Transition(num_input_features=growth_rate[0]*(num_layers)+self.sed_in_channels, num_output_features=growth_rate[0])
)
self.sed_dense_block2 = nn.Sequential(
_DenseBlock(num_layers=num_layers, num_input_features=growth_rate[0],
bn_size=4, growth_rate=growth_rate[1], drop_rate=drop_rate),
_Transition(num_input_features=growth_rate[1]*(num_layers)+growth_rate[0], num_output_features=growth_rate[1])
)
self.sed_dense_block3 =nn.Sequential(
_DenseBlock(num_layers=num_layers, num_input_features=growth_rate[1],
bn_size=4, growth_rate=growth_rate[2], drop_rate=drop_rate),
_Transition(num_input_features=growth_rate[2]*(num_layers)+growth_rate[1], num_output_features=growth_rate[2])
)
self.sed_dense_block4 = nn.Sequential(
_DenseBlock(num_layers=num_layers, num_input_features=growth_rate[2],
bn_size=4, growth_rate=growth_rate[3], drop_rate=drop_rate),
nn.BatchNorm2d(num_features=growth_rate[3]*(num_layers)+growth_rate[2]),
nn.ReLU(inplace=True),
nn.Conv2d(in_channels=growth_rate[3]*(num_layers)+growth_rate[2], out_channels=256,
kernel_size=(3,3), stride=(1,1), padding=(1,1), bias=False),
nn.BatchNorm2d(256),
nn.ReLU(inplace=True),
)
self.doa_dense_block1 = nn.Sequential(
_DenseBlock(num_layers=num_layers, num_input_features=self.doa_in_channels,
bn_size=4, growth_rate=growth_rate[0], drop_rate=drop_rate),
_Transition(num_input_features=growth_rate[0]*(num_layers)+self.doa_in_channels, num_output_features=growth_rate[0])
)
self.doa_dense_block2 = nn.Sequential(
_DenseBlock(num_layers=num_layers, num_input_features=growth_rate[0],
bn_size=4, growth_rate=growth_rate[1], drop_rate=drop_rate),
_Transition(num_input_features=growth_rate[1]*(num_layers)+growth_rate[0], num_output_features=growth_rate[1])
)
self.doa_dense_block3 =nn.Sequential(
_DenseBlock(num_layers=num_layers, num_input_features=growth_rate[1],
bn_size=4, growth_rate=growth_rate[2], drop_rate=drop_rate),
_Transition(num_input_features=growth_rate[2]*(num_layers)+growth_rate[1], num_output_features=growth_rate[2])
)
self.doa_dense_block4 = nn.Sequential(
_DenseBlock(num_layers=num_layers, num_input_features=growth_rate[2],
bn_size=4, growth_rate=growth_rate[3], drop_rate=drop_rate),
nn.BatchNorm2d(num_features=growth_rate[3]*(num_layers)+growth_rate[2]),
nn.ReLU(inplace=True),
nn.Conv2d(in_channels=growth_rate[3]*(num_layers)+growth_rate[2], out_channels=256,
kernel_size=(3,3), stride=(1,1), padding=(1,1), bias=False),
nn.BatchNorm2d(256),
nn.ReLU(inplace=True),
)
self.stitch = nn.ParameterList([
nn.Parameter(torch.FloatTensor(growth_rate[0], 2, 2).uniform_(0.1, 0.9)),
nn.Parameter(torch.FloatTensor(growth_rate[1], 2, 2).uniform_(0.1, 0.9)),
nn.Parameter(torch.FloatTensor(growth_rate[2], 2, 2).uniform_(0.1, 0.9)),
nn.Parameter(torch.FloatTensor(256, 2, 2).uniform_(0.1, 0.9)),
nn.Parameter(torch.FloatTensor(256, 2, 2).uniform_(0.1, 0.9)),
nn.Parameter(torch.FloatTensor(256, 2, 2).uniform_(0.1, 0.9))
])
self.sed_conformer_track1 = ConformerBlocks(encoder_dim=256, num_layers=2)
self.sed_conformer_track2 = ConformerBlocks(encoder_dim=256, num_layers=2)
self.sed_conformer_track3 = ConformerBlocks(encoder_dim=256, num_layers=2)
self.doa_conformer_track1 = ConformerBlocks(encoder_dim=256, num_layers=2)
self.doa_conformer_track2 = ConformerBlocks(encoder_dim=256, num_layers=2)
self.doa_conformer_track3 = ConformerBlocks(encoder_dim=256, num_layers=2)
self.fc_sed_track1 = nn.Linear(256, self.num_classes, bias=True)
self.fc_sed_track2 = nn.Linear(256, self.num_classes, bias=True)
self.fc_sed_track3 = nn.Linear(256, self.num_classes, bias=True)
self.fc_doa_track1 = nn.Linear(256, 3, bias=True)
self.fc_doa_track2 = nn.Linear(256, 3, bias=True)
self.fc_doa_track3 = nn.Linear(256, 3, bias=True)
self.final_act_sed = nn.Sequential() # nn.Sigmoid()
self.final_act_doa = nn.Tanh()
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight)
elif isinstance(m, nn.BatchNorm2d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
self.init_weight()
def init_weight(self):
init_layer(self.fc_sed_track1)
init_layer(self.fc_sed_track2)
init_layer(self.fc_sed_track3)
init_layer(self.fc_doa_track1)
init_layer(self.fc_doa_track2)
init_layer(self.fc_doa_track3)
def forward(self, x):
"""
x: spectrogram, (batch_size, num_channels, num_frames, num_freqBins)
"""
x_sed = x[:, :self.sed_in_channels]
x_doa = x
# cnn
x_sed = self.sed_dense_block1(x_sed)
x_doa = self.doa_dense_block1(x_doa)
x_sed = torch.einsum('c, nctf -> nctf', self.stitch[0][:, 0, 0], x_sed) + \
torch.einsum('c, nctf -> nctf', self.stitch[0][:, 0, 1], x_doa)
x_doa = torch.einsum('c, nctf -> nctf', self.stitch[0][:, 1, 0], x_sed) + \
torch.einsum('c, nctf -> nctf', self.stitch[0][:, 1, 1], x_doa)
x_sed = self.sed_dense_block2(x_sed)
x_doa = self.doa_dense_block2(x_doa)
x_sed = torch.einsum('c, nctf -> nctf', self.stitch[1][:, 0, 0], x_sed) + \
torch.einsum('c, nctf -> nctf', self.stitch[1][:, 0, 1], x_doa)
x_doa = torch.einsum('c, nctf -> nctf', self.stitch[1][:, 1, 0], x_sed) + \
torch.einsum('c, nctf -> nctf', self.stitch[1][:, 1, 1], x_doa)
x_sed = self.sed_dense_block3(x_sed)
x_doa = self.doa_dense_block3(x_doa)
x_sed = torch.einsum('c, nctf -> nctf', self.stitch[2][:, 0, 0], x_sed) + \
torch.einsum('c, nctf -> nctf', self.stitch[2][:, 0, 1], x_doa)
x_doa = torch.einsum('c, nctf -> nctf', self.stitch[2][:, 1, 0], x_sed) + \
torch.einsum('c, nctf -> nctf', self.stitch[2][:, 1, 1], x_doa)
x_sed = self.sed_dense_block4(x_sed)
x_doa = self.doa_dense_block4(x_doa)
x_sed = x_sed.mean(dim=3) # (N, C, T)
x_doa = x_doa.mean(dim=3) # (N, C, T)
# Conformer
x_sed = x_sed.permute(0, 2, 1) # (N, T, C)
x_doa = x_doa.permute(0, 2, 1) # (N, T, C)
x_sed_1 = self.sed_conformer_track1(x_sed) # (N, T, C)
x_doa_1 = self.doa_conformer_track1(x_doa) # (N, T, C)
x_sed_1 = torch.einsum('c, ntc -> ntc', self.stitch[3][:, 0, 0], x_sed_1) + \
torch.einsum('c, ntc -> ntc', self.stitch[3][:, 0, 1], x_doa_1)
x_doa_1 = torch.einsum('c, ntc -> ntc', self.stitch[3][:, 1, 0], x_sed_1) + \
torch.einsum('c, ntc -> ntc', self.stitch[3][:, 1, 1], x_doa_1)
x_sed_2 = self.sed_conformer_track2(x_sed) # (N, T, C)
x_doa_2 = self.doa_conformer_track2(x_doa) # (N, T, C)
x_sed_2 = torch.einsum('c, ntc -> ntc', self.stitch[4][:, 0, 0], x_sed_2) + \
torch.einsum('c, ntc -> ntc', self.stitch[4][:, 0, 1], x_doa_2)
x_doa_2 = torch.einsum('c, ntc -> ntc', self.stitch[4][:, 1, 0], x_sed_2) + \
torch.einsum('c, ntc -> ntc', self.stitch[4][:, 1, 1], x_doa_2)
x_sed_3 = self.sed_conformer_track3(x_sed) # (N, T, C)
x_doa_3 = self.doa_conformer_track3(x_doa) # (N, T, C)
x_sed_3 = torch.einsum('c, ntc -> ntc', self.stitch[5][:, 0, 0], x_sed_3) + \
torch.einsum('c, ntc -> ntc', self.stitch[5][:, 0, 1], x_doa_3)
x_doa_3 = torch.einsum('c, ntc -> ntc', self.stitch[5][:, 1, 0], x_sed_3) + \
torch.einsum('c, ntc -> ntc', self.stitch[5][:, 1, 1], x_doa_3)
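        # Three parallel SED/DOA output tracks, one per possible overlapping source
        # (the dataset assumes at most three simultaneous events); during training the
        # tracks are typically matched to the references with a permutation-invariant loss.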
# fc
x_sed_1 = self.final_act_sed(self.fc_sed_track1(x_sed_1))
x_sed_2 = self.final_act_sed(self.fc_sed_track2(x_sed_2))
x_sed_3 = self.final_act_sed(self.fc_sed_track3(x_sed_3))
x_sed = torch.stack((x_sed_1, x_sed_2, x_sed_3), 2)
x_doa_1 = self.final_act_doa(self.fc_doa_track1(x_doa_1))
x_doa_2 = self.final_act_doa(self.fc_doa_track2(x_doa_2))
x_doa_3 = self.final_act_doa(self.fc_doa_track3(x_doa_3))
x_doa = torch.stack((x_doa_1, x_doa_2, x_doa_3), 2)
output = {
'sed': x_sed,
'doa': x_doa,
}
return output
| 9,821 | 50.968254 | 128 | py |
DCASE2022-TASK3 | DCASE2022-TASK3-main/code/learning/checkpoint.py | import logging
import random
import numpy as np
import pandas as pd
import torch
from utils.ddp_init import get_rank, get_world_size
class CheckpointIO:
"""CheckpointIO class.
It handles saving and loading checkpoints.
"""
def __init__(self, checkpoints_dir, model, optimizer, batch_sampler, metrics_names, num_checkpoints=1, remark=None):
"""
Args:
            checkpoints_dir (Path obj): directory where checkpoints are saved
model: model
optimizer: optimizer
batch_sampler: batch_sampler
metrics_names: metrics names to be saved in a checkpoints csv file
            num_checkpoints: maximum number of checkpoints to keep. Once this number is exceeded, the worst
                checkpoint (oldest, lowest- or highest-valued, depending on rank_order) is deleted
remark (optional): to remark the name of the checkpoint
"""
self.checkpoints_dir = checkpoints_dir
self.checkpoints_dir.mkdir(parents=True, exist_ok=True)
self.model = model
self.optimizer = optimizer
self.batch_sampler = batch_sampler
self.num_checkpoints = num_checkpoints
self.remark = remark
self.value_list = []
self.epoch_list = []
self.checkpoints_csv_path = checkpoints_dir.joinpath('metrics_statistics.csv')
# save checkpoints_csv header
if get_rank() == 0:
metrics_keys_list = [name for name in metrics_names]
header = ['epoch'] + metrics_keys_list
df_header = pd.DataFrame(columns=header)
df_header.to_csv(self.checkpoints_csv_path, sep='\t', index=False, mode='a+')
def save(self, epoch, it, metrics, key_rank=None, rank_order='high', max_epoch=100):
"""Save model. It will save a latest model, a best model of rank_order for value, and
'self.num_checkpoints' best models of rank_order for value.
Args:
metrics: metrics to log
key_rank (str): the key of metrics to rank
rank_order: 'low' | 'high' | 'latest'
'low' to keep the models of lowest values
'high' to keep the models of highest values
'latest' to keep the models of latest epochs
"""
        ## save checkpoints_csv
metrics_values_list = [value for value in metrics.values()]
checkpoint_list = [[epoch] + metrics_values_list]
df_checkpoint = pd.DataFrame(checkpoint_list)
df_checkpoint.to_csv(self.checkpoints_csv_path, sep='\t', header=False, index=False, mode='a+')
## save checkpoints
current_value = None if rank_order == 'latest' else metrics[key_rank]
# latest model
latest_checkpoint_path = self.checkpoints_dir.joinpath('{}_epoch_latest.pth'.format(self.remark))
self.save_file(latest_checkpoint_path, epoch, it)
# save 5 latest models
# if epoch >= max_epoch - 5:
# checkpoint_path = self.checkpoints_dir.joinpath('{}_epoch_{}th.pth'.format(self.remark, epoch))
# self.save_file(checkpoint_path, epoch, it)
if len(self.value_list) < self.num_checkpoints:
self.value_list.append(current_value)
self.epoch_list.append(epoch)
checkpoint_path = self.checkpoints_dir.joinpath('{}_epoch_{}.pth'.format(self.remark, epoch))
self.save_file(checkpoint_path, epoch, it)
logging.info('Checkpoint saved to {}'.format(checkpoint_path))
elif len(self.value_list) >= self.num_checkpoints:
value_list = np.array(self.value_list)
if rank_order == 'high' and current_value >= value_list.min():
worst_index = value_list.argmin()
self.del_and_save(worst_index, current_value, epoch, it)
elif rank_order == 'low' and current_value <= value_list.max():
worst_index = value_list.argmax()
self.del_and_save(worst_index, current_value, epoch, it)
elif rank_order == 'latest':
worst_index = 0
self.del_and_save(worst_index, current_value, epoch, it)
# best model
value_list = np.array(self.value_list)
best_checkpoint_path = self.checkpoints_dir.joinpath('{}_epoch_best.pth'.format(self.remark))
if rank_order == 'high' and current_value >= value_list.max():
self.save_file(best_checkpoint_path, epoch, it)
elif rank_order == 'low' and current_value <= value_list.min():
self.save_file(best_checkpoint_path, epoch, it)
elif rank_order == 'latest':
self.save_file(best_checkpoint_path, epoch, it)
def del_and_save(self, worst_index, current_value, epoch, it):
"""Delete and save checkpoint
Args:
            worst_index: index of the worst checkpoint to delete
            current_value: metric value of the new checkpoint
            epoch: current epoch
            it: current iteration number
"""
worst_chpt_path = self.checkpoints_dir.joinpath('{}_epoch_{}.pth'.format(self.remark, self.epoch_list[worst_index]))
if worst_chpt_path.is_file():
worst_chpt_path.unlink()
self.value_list.pop(worst_index)
self.epoch_list.pop(worst_index)
self.value_list.append(current_value)
self.epoch_list.append(epoch)
checkpoint_path = self.checkpoints_dir.joinpath('{}_epoch_{}.pth'.format(self.remark, epoch))
self.save_file(checkpoint_path, epoch, it)
logging.info('Checkpoint saved to {}'.format(checkpoint_path))
def save_file(self, checkpoint_path, epoch, it):
"""Save a module to a file
Args:
checkpoint_path (Path obj): checkpoint path, including .pth file name
            epoch: current epoch
            it: current iteration number
"""
outdict = {
'epoch': epoch,
'it': it,
'model': self.model.module.state_dict(),
'optimizer': self.optimizer.state_dict(),
'sampler': self.batch_sampler.get_state(),
'rng': torch.get_rng_state(),
'cuda_rng': torch.cuda.get_rng_state(),
'random': random.getstate(),
'np_random': np.random.get_state(),
}
torch.save(outdict, checkpoint_path)
def load(self, checkpoint_path):
"""Load a module from a file
"""
state_dict = torch.load(checkpoint_path, map_location=torch.device('cpu'))
epoch = state_dict['epoch']
it = state_dict['it']
self.model.module.load_state_dict(state_dict['model'])
self.optimizer.load_state_dict(state_dict['optimizer'])
self.batch_sampler.set_state(state_dict['sampler'])
torch.set_rng_state(state_dict['rng'])
torch.cuda.set_rng_state(state_dict['cuda_rng'])
random.setstate(state_dict['random'])
np.random.set_state(state_dict['np_random'])
logging.info('Resuming complete from {}\n'.format(checkpoint_path))
return epoch, it
| 7,008 | 40.97006 | 124 | py |
DCASE2022-TASK3 | DCASE2022-TASK3-main/code/learning/initialize.py | import logging
import random
import shutil
import socket
from datetime import datetime
from pathlib import Path
import numpy as np
import torch
import torch.distributed as dist
import torch.optim as optim
from torch.backends import cudnn
from torch.nn.parallel import DistributedDataParallel as DDP
from torch.utils.tensorboard import SummaryWriter
from utils.common import create_logging
from utils.config import get_generator, store_config, get_losses, get_afextractor, get_models, get_optimizer, get_metrics, get_trainer
from utils.ddp_init import get_rank, get_world_size, rank_barrier
from learning.checkpoint import CheckpointIO
def init_train(args, cfg, dataset):
""" Training initialization.
    Initialize the data generators, model, optimizer, trainer, and checkpoint I/O.
"""
train_initializer = None
'''Cuda'''
args.cuda = not args.no_cuda and torch.cuda.is_available()
rank = get_rank()
world_size = get_world_size()
''' Reproducible seed set'''
torch.manual_seed(args.seed)
if args.cuda:
torch.cuda.manual_seed(args.seed)
np.random.seed(args.seed)
random.seed(args.seed)
    cudnn.deterministic = True  # make cuDNN deterministic under the fixed random seed
    cudnn.benchmark = True  # let cuDNN auto-tune the fastest algorithms; set to False to reduce randomness
'''Sharing directories'''
out_train_dir = Path(cfg['workspace_dir']).joinpath('results').joinpath('out_train') \
.joinpath(cfg['method']).joinpath(cfg['training']['train_id'])
ckpts_dir = out_train_dir.joinpath('checkpoints')
if rank == 0:
print('Train ID is {}\n'.format(cfg['training']['train_id']))
out_train_dir.mkdir(parents=True, exist_ok=True)
'''tensorboard and logging'''
if rank == 0:
current_time = datetime.now().strftime('%b%d_%H-%M-%S')
tb_dir = out_train_dir.joinpath('tb').joinpath(current_time + '_' + socket.gethostname())
tb_dir.mkdir(parents=True, exist_ok=True)
logs_dir = out_train_dir.joinpath('logs')
create_logging(logs_dir, filemode='w')
writer = SummaryWriter(log_dir=str(tb_dir))
param_file = out_train_dir.joinpath('config.yaml')
if param_file.is_file():
param_file.unlink()
store_config(param_file, cfg)
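    # Make non-zero ranks wait until rank 0 has created the output directories,
    # the logger, and the stored config before everyone builds the data pipeline.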
rank_barrier()
'''Data generator'''
train_set, train_generator, batch_sampler = get_generator(args, cfg, dataset, generator_type='train')
valid_set, valid_generator, _ = get_generator(args, cfg, dataset, generator_type='valid')
'''Loss'''
losses = get_losses(cfg)
'''Metrics'''
metrics = get_metrics(cfg, dataset)
'''Audio feature extractor'''
af_extractor = get_afextractor(cfg, args.cuda)
'''Model'''
model = get_models(cfg, dataset, args.cuda)
if dist.is_initialized():
        torch.cuda.set_device(rank)  # necessary, otherwise CUDA out-of-memory errors can occur on the default device
model = DDP(model, device_ids=[rank], output_device=rank)
'''Optimizer'''
optimizer = get_optimizer(cfg, af_extractor, model)
lr_scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=cfg['training']['lr_step_size'],
gamma=cfg['training']['lr_gamma'])
'''Trainer'''
trainer = get_trainer(args=args, cfg=cfg, dataset=dataset, valid_set=valid_set,
af_extractor=af_extractor, model=model, optimizer=optimizer, losses=losses, metrics=metrics)
'''CheckpointIO'''
if not cfg['training']['valid_fold']:
metrics_names = losses.names
else:
metrics_names = metrics.names
ckptIO = CheckpointIO(
checkpoints_dir=ckpts_dir,
model=model,
optimizer=optimizer,
batch_sampler=batch_sampler,
metrics_names=metrics_names,
num_checkpoints=1,
remark=cfg['training']['remark']
)
if cfg['training']['resume_model']:
resume_path = ckpts_dir.joinpath(cfg['training']['resume_model'])
logging.info('=====>> Resume from the checkpoint: {}......\n'.format(str(resume_path)))
epoch_it, it = ckptIO.load(resume_path)
for param_group in optimizer.param_groups:
param_group['lr'] = cfg['training']['lr']
else:
epoch_it, it = 0, 0
''' logging and return '''
logging.info('Train folds are: {}\n'.format(cfg['training']['train_fold']))
logging.info('Valid folds are: {}\n'.format(cfg['training']['valid_fold']))
logging.info('Training clip number is: {}\n'.format(len(train_set)))
logging.info('Number of batches per epoch is: {}\n'.format(len(batch_sampler)))
logging.info('Validation clip number is: {}\n'.format(len(valid_set) * world_size))
train_initializer = {
'writer': writer if rank == 0 else None,
'train_generator': train_generator,
'valid_generator': valid_generator,
'lr_scheduler': lr_scheduler,
'trainer': trainer,
        'ckptIO': ckptIO,
'epoch_it': epoch_it,
'it': it
}
return train_initializer
def init_infer(args, cfg, dataset):
""" Inference initialization.
    Set up output directories, the list of checkpoints to load, and the test data generator.
"""
''' Cuda '''
args.cuda = not args.no_cuda and torch.cuda.is_available()
''' Directories '''
print('Inference ID is {}\n'.format(cfg['inference']['infer_id']))
out_infer_dir = Path(cfg['workspace_dir']).joinpath('results').joinpath('out_infer')\
.joinpath(cfg['method']).joinpath(cfg['inference']['infer_id'])
if out_infer_dir.is_dir():
shutil.rmtree(str(out_infer_dir))
submissions_dir = out_infer_dir.joinpath('submissions')
predictions_dir = out_infer_dir.joinpath('predictions')
submissions_dir.mkdir(parents=True, exist_ok=True)
predictions_dir.mkdir(parents=True, exist_ok=True)
train_ids = [train_id.strip() for train_id in str(cfg['inference']['train_ids']).split(',')]
models = [model.strip() for model in str(cfg['inference']['models']).split(',')]
ckpts_paths_list = []
ckpts_models_list = []
for train_id, model_name in zip(train_ids, models):
ckpts_dir = Path(cfg['workspace_dir']).joinpath('results').joinpath('out_train').joinpath(cfg['method'])\
.joinpath(train_id).joinpath('checkpoints')
ckpt_path = [path for path in sorted(ckpts_dir.iterdir()) if cfg['inference']['model_mark'] in path.stem]
print('ckpt_name: ', ckpt_path, 'model_name: ', model_name)
# ckpt_path = [path for path in sorted(ckpts_dir.iterdir()) if path.stem.split('_')[-1].isnumeric()] #
for path in ckpt_path:
ckpts_paths_list.append(path)
ckpts_models_list.append(model_name)
''' Parameters '''
param_file = out_infer_dir.joinpath('config.yaml')
if param_file.is_file():
param_file.unlink()
store_config(param_file, cfg)
''' Data generator '''
test_set, test_generator, _ = get_generator(args, cfg, dataset, generator_type='test')
''' logging and return '''
logging.info('Test clip number is: {}\n'.format(len(test_set)))
infer_initializer = {
'submissions_dir': submissions_dir,
'predictions_dir': predictions_dir,
'ckpts_paths_list': ckpts_paths_list,
'ckpts_models_list': ckpts_models_list,
'test_generator': test_generator,
'cuda': args.cuda,
'test_set': test_set
}
return infer_initializer
| 7,430 | 37.304124 | 140 | py |
DCASE2022-TASK3 | DCASE2022-TASK3-main/code/learning/__init__.py | 0 | 0 | 0 | py |
|
DCASE2022-TASK3 | DCASE2022-TASK3-main/code/learning/infer.py | import torch
from utils.config import get_afextractor, get_inferer, get_models
def infer(cfg, dataset, **infer_initializer):
""" Infer, only save the testset predictions
"""
submissions_dir = infer_initializer['submissions_dir']
predictions_dir = infer_initializer['predictions_dir']
ckpts_paths_list = infer_initializer['ckpts_paths_list']
ckpts_models_list = infer_initializer['ckpts_models_list']
test_generator = infer_initializer['test_generator']
cuda = infer_initializer['cuda']
test_set = infer_initializer['test_set']
preds = []
for ckpt_path, model_name in zip(ckpts_paths_list, ckpts_models_list):
print('=====>> Resuming from the checkpoint: {}\n'.format(ckpt_path))
af_extractor = get_afextractor(cfg, cuda)
model = get_models(cfg, dataset, cuda, model_name=model_name)
state_dict = torch.load(ckpt_path)
model.load_state_dict(state_dict['model'])
print(' Resuming complete\n')
inferer = get_inferer(cfg, dataset, af_extractor, model, cuda, test_set)
pred = inferer.infer(test_generator)
preds.append(pred)
print('\n Inference finished for {}\n'.format(ckpt_path))
inferer.fusion(submissions_dir, predictions_dir, preds)
| 1,270 | 38.71875 | 80 | py |
DCASE2022-TASK3 | DCASE2022-TASK3-main/code/learning/train.py | import logging
from timeit import default_timer as timer
from tqdm import tqdm
from utils.common import print_metrics
from utils.ddp_init import reduce_value, get_rank, get_world_size, rank_barrier
def train(cfg, **initializer):
"""Train
"""
writer = initializer['writer']
train_generator = initializer['train_generator']
valid_generator = initializer['valid_generator']
lr_scheduler = initializer['lr_scheduler']
trainer = initializer['trainer']
ckptIO = initializer['ckptIO']
epoch_it = initializer['epoch_it']
it = initializer['it']
world_size = get_world_size()
rank = get_rank()
batchNum_per_epoch = len(train_generator)
max_epoch = cfg['training']['max_epoch']
if rank == 0:
logging.info('===> Training mode\n')
iterator = tqdm(train_generator, total=max_epoch*batchNum_per_epoch-it, unit='it')
train_begin_time = timer()
for batch_sample in iterator:
epoch_it, rem_batch = it // batchNum_per_epoch, it % batchNum_per_epoch
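        # e.g. with batchNum_per_epoch = 500 and it = 1250: epoch_it = 2, rem_batch = 250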
################
## Validation
################
        if it % int(1*batchNum_per_epoch) == 0:
valid_begin_time = timer()
train_time = valid_begin_time - train_begin_time
train_losses = trainer.validate_step(valid_type='train', epoch_it=epoch_it)
for k, v in train_losses.items():
train_losses[k] = reduce_value(v).cpu().numpy() / batchNum_per_epoch
if not cfg['training']['valid_fold'] == 'none':
valid_losses, valid_metrics = trainer.validate_step(
generator=valid_generator,
valid_type='valid',
epoch_it=epoch_it
)
valid_time = timer() - valid_begin_time
if rank == 0:
writer.add_scalar('train/lr', lr_scheduler.get_last_lr()[0], it)
logging.info('---------------------------------------------------------------------------------------------------'
+'------------------------------------------------------')
logging.info('Iter: {}, Epoch/Total Epoch: {}/{}, Batch/Total Batch: {}/{}'.format(
it, epoch_it, max_epoch, rem_batch, batchNum_per_epoch))
print_metrics(logging, writer, train_losses, it, set_type='train')
if not cfg['training']['valid_fold'] == 'none':
print_metrics(logging, writer, valid_losses, it, set_type='valid')
if not cfg['training']['valid_fold'] == 'none':
#### SELD Metrics ####
if cfg['method'] in ['ein_seld', 'multi_accdoa']:
print_metrics(logging, writer, valid_metrics['macro'], it, set_type='valid')
print_metrics(logging, writer, valid_metrics['micro'], it, set_type='valid')
logging.info('Train time: {:.3f}s, Valid time: {:.3f}s, Lr: {}'.format(
train_time, valid_time, lr_scheduler.get_last_lr()[0]))
if 'PIT_type' in cfg['training']:
logging.info('PIT type: {}'.format(cfg['training']['PIT_type']))
logging.info('---------------------------------------------------------------------------------------------------'
+'------------------------------------------------------')
rank_barrier()
train_begin_time = timer()
###############
## Save model
###############
# if rank == 0 :
# if rem_batch == 0 and it > 0:
# if not cfg['training']['valid_fold'] == 'none':
# if cfg['method'] in ['ein_seld', 'multi_accdoa']:
# ckptIO.save(epoch_it, it, metrics=valid_metrics['macro'], key_rank='seld_macro', rank_order='low', max_epoch=max_epoch)
# else:
# ckptIO.save(epoch_it, it, metrics=valid_losses, key_rank='loss_all', rank_order='low', max_epoch=max_epoch)
# else:
# ckptIO.save(epoch_it, it, metrics=train_losses, key_rank='loss_all', rank_order='low', max_epoch=max_epoch)
# rank_barrier()
###############
## Finish training
###############
if it == max_epoch * batchNum_per_epoch:
iterator.close()
break
###############
## Train
###############
trainer.train_step(batch_sample, epoch_it)
if rem_batch == 0 and it > 0:
lr_scheduler.step()
it += 1
iterator.close()
| 4,676 | 45.306931 | 145 | py |
DCASE2022-TASK3 | DCASE2022-TASK3-main/code/learning/preprocess.py | import shutil
import sys
from functools import reduce
from pathlib import Path
from timeit import default_timer as timer
import h5py
import librosa
import numpy as np
import pandas as pd
import torch
from sklearn import preprocessing
from torch.utils.data import DataLoader
from tqdm import tqdm
from methods.data import BaseDataset, collate_fn
from methods.feature import Features_Extractor_MIC
from methods.utils.data_utilities import (
_segment_index, convert_output_format_polar_to_cartesian,
load_output_format_file)
from utils.common import float_samples_to_int16, int16_samples_to_float32
from utils.config import get_afextractor
class Preprocess:
"""Preprocess the audio data.
1. Extract wav file and store to hdf5 file
2. Extract meta file and store to hdf5 file
"""
def __init__(self, args, cfg, dataset):
"""
Args:
args: parsed args
cfg: configurations
dataset: dataset class
"""
self.args = args
self.cfg = cfg
self.dataset = dataset
self.fs = cfg['data']['sample_rate']
self.n_fft = cfg['data']['nfft']
self.n_mels = cfg['data']['n_mels']
self.hoplen = cfg['data']['hoplen']
# Path for dataset
hdf5_dir = Path(cfg['hdf5_dir']).joinpath(cfg['dataset'])
# Path for extraction of wav
self.data_dir_list = [
dataset.dataset_dir[args.dataset_type]['foa'][args.dataset],
dataset.dataset_dir[args.dataset_type]['mic'][args.dataset],
]
data_h5_dir = hdf5_dir.joinpath('data').joinpath('{}fs'.format(self.fs))
wav_h5_dir = data_h5_dir.joinpath('wav')
self.wav_h5_dir_list = [
wav_h5_dir.joinpath(args.dataset_type).joinpath('foa').joinpath(args.dataset),
wav_h5_dir.joinpath(args.dataset_type).joinpath('mic').joinpath(args.dataset),
]
self.data_statistics_path_list = [
wav_h5_dir.joinpath(args.dataset_type).joinpath('foa').joinpath(args.dataset+'statistics_foa.txt'),
wav_h5_dir.joinpath(args.dataset_type).joinpath('mic').joinpath(args.dataset+'statistics_mic.txt')
]
# Path for extraction of label
label_dir = hdf5_dir.joinpath('label')
self.meta_dir_list = dataset.dataset_dir[args.dataset_type]['meta'][args.dataset]
self.meta_pit_dir = label_dir.joinpath('track_pit_ov'+str(dataset.max_ov)+'of5_discontinuous')\
.joinpath(args.dataset_type).joinpath(args.dataset)
self.meta_sed_dir = label_dir.joinpath('sed').joinpath(args.dataset)
self.meta_adpit_dir = label_dir.joinpath('adpit').joinpath(args.dataset_type).joinpath(args.dataset)
# Path for extraction of features
self.feature_h5_dir = data_h5_dir.joinpath('feature').joinpath(args.dataset_type).joinpath(cfg['data']['audio_feature'])
# Path for indexes of data
self.data_type = 'wav' if self.cfg['data']['audio_feature'] in ['logmelIV', 'logmel'] else 'feature'
self.channels_dict = {'logmel': 4, 'logmelIV': 7}
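        # number of feature channels per feature type: 'logmel' -> 4 log-mel maps
        # (one per FOA channel); 'logmelIV' -> 4 log-mel maps + 3 intensity-vector channels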
self.indexes_path_list = [
data_h5_dir.joinpath(self.data_type).joinpath('{}set_{}sChunklen_{}sHoplen_train.csv'\
.format(args.dataset_type, cfg['data']['train_chunklen_sec'], cfg['data']['train_hoplen_sec'])),
data_h5_dir.joinpath(self.data_type).joinpath('{}set_{}sChunklen_{}sHoplen_test.csv'\
.format(args.dataset_type, cfg['data']['test_chunklen_sec'], cfg['data']['test_hoplen_sec']))]
# Path for scalar
self.scalar_h5_dir = data_h5_dir.joinpath('scalar')
dataset_name = '_'.join(sorted(str(cfg['dataset_synth']).split(',')))
fn_scalar = '{}_nfft{}_hop{}_mel{}_{}.h5'.format(cfg['data']['audio_feature'], \
self.n_fft, self.hoplen, self.n_mels, dataset_name)
self.scalar_path = self.scalar_h5_dir.joinpath(fn_scalar)
def extract_data(self):
""" Extract wave and store to hdf5 file
"""
print('Converting wav file to hdf5 file starts......\n')
for h5_dir in self.wav_h5_dir_list:
if h5_dir.is_dir():
flag = input("HDF5 folder {} is already existed, delete it? (y/n)".format(h5_dir)).lower()
if flag == 'y':
shutil.rmtree(h5_dir)
elif flag == 'n':
print("User select not to remove the HDF5 folder {}. The process will quit.\n".format(h5_dir))
return
h5_dir.mkdir(parents=True)
for statistic_path in self.data_statistics_path_list:
if statistic_path.is_file():
statistic_path.unlink()
for idx, data_dir in enumerate(self.data_dir_list):
begin_time = timer()
h5_dir = self.wav_h5_dir_list[idx]
statistic_path = self.data_statistics_path_list[idx]
audio_count = 0
silent_audio_count = 0
data_list = [path for data_subdir in data_dir for path in sorted(data_subdir.glob('**/*.wav')) if not path.name.startswith('.')]
iterator = tqdm(data_list, total=len(data_list), unit='it')
for data_path in iterator:
# read data
data, _ = librosa.load(data_path, sr=self.fs, mono=False)
if len(data.shape) == 1:
data = data[None,:]
'''data: (channels, samples)'''
# silent data statistics
lst = np.sum(np.abs(data), axis=1) > data.shape[1]*1e-4
if not reduce(lambda x, y: x*y, lst):
with statistic_path.open(mode='a+') as f:
print(f"Silent file in feature extractor: {data_path.name}\n", file=f)
silent_audio_count += 1
tqdm.write("Silent file in feature extractor: {}".format(data_path.name))
tqdm.write("Total silent files are: {}\n".format(silent_audio_count))
# save to h5py
h5_path = h5_dir.joinpath(data_path.stem + '.h5')
with h5py.File(h5_path, 'w') as hf:
hf.create_dataset(name='waveform', data=float_samples_to_int16(data), dtype=np.int16)
audio_count += 1
tqdm.write('{}, {}, {}'.format(audio_count, h5_path, data.shape))
with statistic_path.open(mode='a+') as f:
print(f"Total number of audio clips extracted: {audio_count}", file=f)
print(f"Total number of silent audio clips is: {silent_audio_count}\n", file=f)
iterator.close()
print("Extacting feature finished! Time spent: {:.3f} s".format(timer() - begin_time))
def extract_ADPIT_label(self):
"""
Reads description file and returns classification based SED labels and regression based DOA labels
for multi-ACCDOA with Auxiliary Duplicating Permutation Invariant Training (ADPIT)
"""
def _get_adpit_labels_for_file(_desc_file):
"""
Reads description file and returns classification based SED labels and regression based DOA labels
for multi-ACCDOA with Auxiliary Duplicating Permutation Invariant Training (ADPIT)
:param _desc_file: dcase format of the meta file
:return: label_mat: of dimension [nb_frames, 6, 4(=act+XYZ), max_classes]
"""
_nb_label_frames = list(_desc_file.keys())[-1]
            _nb_classes = self.dataset.num_classes
            se_label = np.zeros((_nb_label_frames, 6, _nb_classes)) # [nb_frames, 6, max_classes]
            x_label = np.zeros((_nb_label_frames, 6, _nb_classes))
            y_label = np.zeros((_nb_label_frames, 6, _nb_classes))
            z_label = np.zeros((_nb_label_frames, 6, _nb_classes))
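            # Track-slot convention used below (6 slots per frame):
            #   slot 0     -> a0: class with a single active instance
            #   slots 1-2  -> b0, b1: class with exactly two overlapping instances
            #   slots 3-5  -> c0, c1, c2: class with three (or more) overlapping instances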
for frame_ind, active_event_list in _desc_file.items():
if frame_ind < _nb_label_frames:
active_event_list.sort(key=lambda x: x[0]) # sort for ov from the same class
active_event_list_per_class = []
for i, active_event in enumerate(active_event_list):
active_event_list_per_class.append(active_event)
if i == len(active_event_list) - 1: # if the last
if len(active_event_list_per_class) == 1: # if no ov from the same class
# a0----
active_event_a0 = active_event_list_per_class[0]
se_label[frame_ind, 0, active_event_a0[0]] = 1
x_label[frame_ind, 0, active_event_a0[0]] = active_event_a0[1]
y_label[frame_ind, 0, active_event_a0[0]] = active_event_a0[2]
z_label[frame_ind, 0, active_event_a0[0]] = active_event_a0[3]
elif len(active_event_list_per_class) == 2: # if ov with 2 sources from the same class
# --b0--
active_event_b0 = active_event_list_per_class[0]
se_label[frame_ind, 1, active_event_b0[0]] = 1
x_label[frame_ind, 1, active_event_b0[0]] = active_event_b0[1]
y_label[frame_ind, 1, active_event_b0[0]] = active_event_b0[2]
z_label[frame_ind, 1, active_event_b0[0]] = active_event_b0[3]
# --b1--
active_event_b1 = active_event_list_per_class[1]
se_label[frame_ind, 2, active_event_b1[0]] = 1
x_label[frame_ind, 2, active_event_b1[0]] = active_event_b1[1]
y_label[frame_ind, 2, active_event_b1[0]] = active_event_b1[2]
z_label[frame_ind, 2, active_event_b1[0]] = active_event_b1[3]
else: # if ov with more than 2 sources from the same class
# ----c0
active_event_c0 = active_event_list_per_class[0]
se_label[frame_ind, 3, active_event_c0[0]] = 1
x_label[frame_ind, 3, active_event_c0[0]] = active_event_c0[1]
y_label[frame_ind, 3, active_event_c0[0]] = active_event_c0[2]
z_label[frame_ind, 3, active_event_c0[0]] = active_event_c0[3]
# ----c1
active_event_c1 = active_event_list_per_class[1]
se_label[frame_ind, 4, active_event_c1[0]] = 1
x_label[frame_ind, 4, active_event_c1[0]] = active_event_c1[1]
y_label[frame_ind, 4, active_event_c1[0]] = active_event_c1[2]
z_label[frame_ind, 4, active_event_c1[0]] = active_event_c1[3]
# ----c2
active_event_c2 = active_event_list_per_class[2]
se_label[frame_ind, 5, active_event_c2[0]] = 1
x_label[frame_ind, 5, active_event_c2[0]] = active_event_c2[1]
y_label[frame_ind, 5, active_event_c2[0]] = active_event_c2[2]
z_label[frame_ind, 5, active_event_c2[0]] = active_event_c2[3]
elif active_event[0] != active_event_list[i + 1][0]: # if the next is not the same class
if len(active_event_list_per_class) == 1: # if no ov from the same class
# a0----
active_event_a0 = active_event_list_per_class[0]
se_label[frame_ind, 0, active_event_a0[0]] = 1
x_label[frame_ind, 0, active_event_a0[0]] = active_event_a0[1]
y_label[frame_ind, 0, active_event_a0[0]] = active_event_a0[2]
z_label[frame_ind, 0, active_event_a0[0]] = active_event_a0[3]
elif len(active_event_list_per_class) == 2: # if ov with 2 sources from the same class
# --b0--
active_event_b0 = active_event_list_per_class[0]
se_label[frame_ind, 1, active_event_b0[0]] = 1
x_label[frame_ind, 1, active_event_b0[0]] = active_event_b0[1]
y_label[frame_ind, 1, active_event_b0[0]] = active_event_b0[2]
z_label[frame_ind, 1, active_event_b0[0]] = active_event_b0[3]
# --b1--
active_event_b1 = active_event_list_per_class[1]
se_label[frame_ind, 2, active_event_b1[0]] = 1
x_label[frame_ind, 2, active_event_b1[0]] = active_event_b1[1]
y_label[frame_ind, 2, active_event_b1[0]] = active_event_b1[2]
z_label[frame_ind, 2, active_event_b1[0]] = active_event_b1[3]
else: # if ov with more than 2 sources from the same class
# ----c0
active_event_c0 = active_event_list_per_class[0]
se_label[frame_ind, 3, active_event_c0[0]] = 1
x_label[frame_ind, 3, active_event_c0[0]] = active_event_c0[1]
y_label[frame_ind, 3, active_event_c0[0]] = active_event_c0[2]
z_label[frame_ind, 3, active_event_c0[0]] = active_event_c0[3]
# ----c1
active_event_c1 = active_event_list_per_class[1]
se_label[frame_ind, 4, active_event_c1[0]] = 1
x_label[frame_ind, 4, active_event_c1[0]] = active_event_c1[1]
y_label[frame_ind, 4, active_event_c1[0]] = active_event_c1[2]
z_label[frame_ind, 4, active_event_c1[0]] = active_event_c1[3]
# ----c2
active_event_c2 = active_event_list_per_class[2]
se_label[frame_ind, 5, active_event_c2[0]] = 1
x_label[frame_ind, 5, active_event_c2[0]] = active_event_c2[1]
y_label[frame_ind, 5, active_event_c2[0]] = active_event_c2[2]
z_label[frame_ind, 5, active_event_c2[0]] = active_event_c2[3]
active_event_list_per_class = []
label_mat = np.stack((se_label, x_label, y_label, z_label), axis=2) # [nb_frames, 6, 4(=act+XYZ), max_classes]
return label_mat
meta_list = [path for subdir in self.meta_dir_list for path in sorted(subdir.glob('*.csv')) if not path.name.startswith('.')]
iterator = tqdm(enumerate(meta_list), total=len(meta_list), unit='it')
self.meta_adpit_dir.mkdir(parents=True, exist_ok=True)
for idx, meta_file in iterator:
fn = meta_file.stem
meta_dcase_format = load_output_format_file(meta_file)
meta_dcase_format = convert_output_format_polar_to_cartesian(meta_dcase_format)
meta_adpit = _get_adpit_labels_for_file(meta_dcase_format)
meta_h5_path = self.meta_adpit_dir.joinpath(fn + '.h5')
with h5py.File(meta_h5_path, 'w') as hf:
hf.create_dataset(name='adpit', data=meta_adpit, dtype=np.float32)
tqdm.write('{}, {}'.format(idx, meta_h5_path))
def extract_PIT_label(self):
""" Extract track label for permutation invariant training. Store to h5 file
"""
num_tracks = 5
num_classes = self.dataset.num_classes
meta_list = [path for subdir in self.meta_dir_list for path in sorted(subdir.glob('*.csv')) if not path.name.startswith('.')]
iterator = tqdm(enumerate(meta_list), total=len(meta_list), unit='it')
self.meta_pit_dir.mkdir(parents=True, exist_ok=True)
for idx, meta_file in iterator:
fn = meta_file.stem
df = pd.read_csv(meta_file, header=None, sep=',')
df = df.values
num_frames = df[-1, 0] + 1
sed_label = np.zeros((num_frames, num_tracks, num_classes))
doa_label = np.zeros((num_frames, num_tracks, 3))
event_indexes = np.array([[None] * num_tracks] * num_frames) # event indexes of all frames
track_numbers = np.array([[None] * num_tracks] * num_frames) # track number of all frames
for row in df:
frame_idx = row[0]
event_idx = row[1]
track_number = row[2]
azi = row[3]
elev = row[4]
##### track indexing #####
# default assign current_track_idx to the first available track
current_event_indexes = event_indexes[frame_idx]
current_track_indexes = np.where(current_event_indexes == None)[0].tolist()
# if current_track_indexes:
# continue
current_track_idx = current_track_indexes[0]
# tracking from the last frame if the last frame is not empty
# last_event_indexes = np.array([None] * num_tracks) if frame_idx == 0 else event_indexes[frame_idx-1]
# last_track_indexes = np.where(last_event_indexes != None)[0].tolist() # event index of the last frame
# last_events_tracks = list(zip(event_indexes[frame_idx-1], track_numbers[frame_idx-1]))
# if last_track_indexes != []:
# for track_idx in last_track_indexes:
# if last_events_tracks[track_idx] == (event_idx, track_number):
# if current_track_idx != track_idx: # swap tracks if track_idx is not consistant
# sed_label[frame_idx, [current_track_idx, track_idx]] = \
# sed_label[frame_idx, [track_idx, current_track_idx]]
# doa_label[frame_idx, [current_track_idx, track_idx]] = \
# doa_label[frame_idx, [track_idx, current_track_idx]]
# event_indexes[frame_idx, [current_track_idx, track_idx]] = \
# event_indexes[frame_idx, [track_idx, current_track_idx]]
# track_numbers[frame_idx, [current_track_idx, track_idx]] = \
# track_numbers[frame_idx, [track_idx, current_track_idx]]
# current_track_idx = track_idx
#########################
# label encode
azi_rad, elev_rad = azi * np.pi / 180, elev * np.pi / 180
sed_label[frame_idx, current_track_idx, event_idx] = 1.0
doa_label[frame_idx, current_track_idx, :] = np.cos(elev_rad) * np.cos(azi_rad), \
np.cos(elev_rad) * np.sin(azi_rad), np.sin(elev_rad)
event_indexes[frame_idx, current_track_idx] = event_idx
track_numbers[frame_idx, current_track_idx] = track_number
meta_h5_path = self.meta_pit_dir.joinpath(fn + '.h5')
with h5py.File(meta_h5_path, 'w') as hf:
hf.create_dataset(name='sed_label', data=sed_label[:, :self.dataset.max_ov, :], dtype=np.float32)
hf.create_dataset(name='doa_label', data=doa_label[:, :self.dataset.max_ov, :], dtype=np.float32)
tqdm.write('{}, {}'.format(idx, meta_h5_path))
def extract_mic_features(self):
""" Extract features from MIC format signals
"""
print('Extracting {} features starts......\n'.format(self.cfg['data']['audio_feature']))
if self.feature_h5_dir.is_dir():
flag = input("HDF5 folder {} is already existed, delete it? (y/n)".format(self.feature_h5_dir)).lower()
if flag == 'y':
shutil.rmtree(self.feature_h5_dir)
elif flag == 'n':
print("User select not to remove the HDF5 folder {}. The process will quit.\n".format(self.feature_h5_dir))
return
self.feature_h5_dir.mkdir(parents=True)
af_extractor_mic = Features_Extractor_MIC(self.cfg)
mic_path_list = sorted(self.wav_h5_dir_list[1].glob('*.h5'))
iterator = tqdm(enumerate(mic_path_list), total=len(mic_path_list), unit='it')
for count, file in iterator:
fn = file.stem
            feature_path = self.feature_h5_dir.joinpath(fn + '.h5')
with h5py.File(file, 'r') as hf:
waveform = int16_samples_to_float32(hf['waveform'][:]).T
            nb_feat_frames = int(len(waveform) / self.hoplen)
            spect = af_extractor_mic._spectrogram(waveform, nb_feat_frames)
# spect: [n_frames, n_freqs, n_chs]
if self.cfg['data']['audio_feature'] == 'logmelgcc':
logmel_spec = af_extractor_mic._get_logmel_spectrogram(spect)
# logmel_spec: [n_frames, n_mels, n_chs]
gcc = af_extractor_mic._get_gcc(spect)
# gcc: [n_frames, n_mels, n_chs]
feature = np.concatenate((logmel_spec, gcc), axis=-1).transpose((2,0,1))
# feature: [n_chs, n_frames, n_mels]
print('feature shape: ', feature.shape)
elif self.cfg['data']['audio_feature'] == 'salsalite':
feature = af_extractor_mic._get_salsalite(spect)
with h5py.File(feature_path, 'w') as hf:
hf.create_dataset('feature', data=feature, dtype=np.float32)
tqdm.write('{}, {}, features: {}'.format(count, fn, feature.shape))
iterator.close()
print('Extracting {} features finished!'.format(self.cfg['data']['audio_feature']))
def extract_index(self):
"""Extract index of clips for training and testing
"""
chunklen_sec = [self.cfg['data']['train_chunklen_sec'], self.cfg['data']['test_chunklen_sec']]
hoplen_sec = [self.cfg['data']['train_hoplen_sec'], self.cfg['data']['test_hoplen_sec']]
last_frame_always_padding = [False, True]
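        # Each row written below is: <relative h5 path>,<segment begin>,<segment end>,<pad before>,<pad after>,
        # where begin/end are sample indexes for raw wav data and frame indexes for extracted features.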
for idx, indexes_path in enumerate(self.indexes_path_list):
if indexes_path.is_file():
# indexes_path.unlink()
with open(indexes_path, 'r') as f:
indices = f.read()
if self.args.dataset in indices:
                    sys.exit(print('indices of dataset {} have already been extracted!'.format(self.args.dataset)))
audio_cnt = 0
f = open(indexes_path, 'a')
if self.data_type == 'feature':
frames_per_prediction = int(self.dataset.label_resolution / (self.cfg['data']['hoplen'] / self.cfg['data']['sample_rate']))
paths_list_absolute = [path for path in sorted(self.feature_h5_dir.glob('*.h5')) if not path.name.startswith('.')]
paths_list_relative = [path.relative_to(path.parent.parent) for path in paths_list_absolute]
chunklen = int(chunklen_sec[idx] / self.dataset.label_resolution * frames_per_prediction)
hoplen = int(hoplen_sec[idx] / self.dataset.label_resolution * frames_per_prediction)
iterator = tqdm(paths_list_absolute, total=len(paths_list_absolute), unit='it')
for path in iterator:
fn = paths_list_relative[audio_cnt]
with h5py.File(path, 'r') as hf:
num_frames = hf['feature'][:].shape[1]
data = np.zeros((1, num_frames))
segmented_indexes, segmented_pad_width = _segment_index(data, chunklen, hoplen, last_frame_always_paddding=last_frame_always_padding[idx])
for segmented_pairs in list(zip(segmented_indexes, segmented_pad_width)):
f.write('{},{},{},{},{}\n'.format(fn, segmented_pairs[0][0], segmented_pairs[0][1],\
segmented_pairs[1][0], segmented_pairs[1][1]))
audio_cnt += 1
tqdm.write('{},{}'.format(audio_cnt, fn))
else:
chunklen = int(chunklen_sec[idx] * self.cfg['data']['sample_rate'])
hoplen = int(hoplen_sec[idx] * self.cfg['data']['sample_rate'])
paths_list_absolute = [path for path in sorted(self.wav_h5_dir_list[0].glob('*.h5')) if not path.name.startswith('.')]
paths_list_relative = [path.relative_to(path.parent.parent) for path in paths_list_absolute]
iterator = tqdm(paths_list_absolute, total=len(paths_list_absolute), unit='it')
for path in iterator:
fn = paths_list_relative[audio_cnt]
with h5py.File(path, 'r') as hf:
data_length = hf['waveform'][:].shape[1]
data = np.zeros((1, data_length))
segmented_indexes, segmented_pad_width = _segment_index(data, chunklen, hoplen, last_frame_always_paddding=last_frame_always_padding[idx])
for segmented_pairs in list(zip(segmented_indexes, segmented_pad_width)):
f.write('{},{},{},{},{}\n'.format(fn, segmented_pairs[0][0], segmented_pairs[0][1],\
segmented_pairs[1][0], segmented_pairs[1][1]))
audio_cnt += 1
tqdm.write('{},{}'.format(audio_cnt, fn))
f.close()
def extract_scalar(self):
"""Extract scalar of features for normalization
"""
if self.scalar_path.is_file():
sys.exit('{} exists!'.format(self.scalar_path))
if self.data_type == 'wav':
self.extract_scalar_data()
def extract_scalar_data(self):
""" Extract scalar and store to hdf5 file
"""
print('Extracting scalar......\n')
self.scalar_h5_dir.mkdir(parents=True, exist_ok=True)
cuda_enabled = not self.args.no_cuda and torch.cuda.is_available()
train_set = BaseDataset(self.args, self.cfg, self.dataset)
data_generator = DataLoader(
dataset=train_set,
batch_size=32,
shuffle=False,
num_workers=self.args.num_workers,
collate_fn=collate_fn,
pin_memory=True
)
af_extractor = get_afextractor(self.cfg, cuda_enabled).eval()
iterator = tqdm(enumerate(data_generator), total=len(data_generator), unit='it')
scalar_list = [preprocessing.StandardScaler() for _ in range(self.channels_dict[self.cfg['data']['audio_feature']])]
begin_time = timer()
for it, batch_sample in iterator:
if it == len(data_generator):
break
batch_x = batch_sample['waveform'][:]
batch_x.require_grad = False
if cuda_enabled:
batch_x = batch_x.cuda(non_blocking=True)
batch_y = af_extractor(batch_x).transpose(0, 1) # (C,N,T,F)
C, _, _, F = batch_y.shape
batch_y = batch_y.reshape(C, -1, F).cpu().numpy()
for i_channel in range(len(scalar_list)):
scalar_list[i_channel].partial_fit(batch_y[i_channel])
iterator.close()
mean = []
std = []
for i_chan in range(len(scalar_list)):
mean.append(scalar_list[i_chan].mean_)
std.append(np.sqrt(scalar_list[i_chan].var_))
mean = np.stack(mean)[None, :, None, :]
std = np.stack(std)[None, :, None, :]
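        # mean/std are shaped (1, n_channels, 1, n_freq_bins) so that they broadcast over
        # (batch, channel, time, freq) feature tensors when normalizing.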
# save to h5py
with h5py.File(self.scalar_path, 'w') as hf:
hf.create_dataset(name='mean', data=mean, dtype=np.float32)
hf.create_dataset(name='std', data=std, dtype=np.float32)
print('Mean shape: ', mean.shape, ' Std shape: ', std.shape)
print("\nScalar saved to {}\n".format(str(self.scalar_path)))
print("Extacting scalar finished! Time spent: {:.3f} s\n".format(timer() - begin_time))
| 28,736 | 55.90495 | 158 | py |
DCASE2022-TASK3 | DCASE2022-TASK3-main/code/utils/ddp_init.py | import torch
import torch.distributed as dist
from torch.nn.parallel import DistributedDataParallel as DDP
import torch.multiprocessing as mp
import os
def get_world_size():
if dist.is_initialized():
return dist.get_world_size()
else:
return 1
def get_rank():
if dist.is_initialized():
return dist.get_rank()
else:
return 0
def spawn_nproc(demo_fn, args, cfg, dataset):
mp.spawn(demo_fn,
args=(args, cfg, dataset),
nprocs=torch.cuda.device_count(),
join=True)
def cleanup():
dist.destroy_process_group()
def setup(rank, world_size=torch.cuda.device_count(), args=None):
os.environ['MASTER_ADDR'] = 'localhost'
os.environ['MASTER_PORT'] = str(args.port)
# initialize the process group
dist.init_process_group("nccl", rank=rank, world_size=world_size)
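# Typical launch flow (sketch; `train_worker` is a placeholder name used only for illustration):
#   spawn_nproc(train_worker, args, cfg, dataset) starts one process per GPU via mp.spawn,
#   which calls train_worker(rank, args, cfg, dataset); inside the worker, call
#   setup(rank, args=args) before building DDP models, and cleanup() when finished.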
def reduce_value(value, average=True):
world_size = get_world_size()
    if not isinstance(value, torch.Tensor):
value = torch.as_tensor(value).to(get_rank())
    if world_size > 1:  # more than one process
dist.all_reduce(value)
if average:
value /= world_size
return value
def gather_value(value):
world_size = get_world_size()
    if not isinstance(value, torch.Tensor):
value = torch.as_tensor(value).to(get_rank())
if world_size > 1: # more than 1 GPU
value_list = [torch.zeros_like(value) for _ in range(world_size)]
dist.all_gather(value_list, value)
return torch.concat(value_list, dim=0)
else:
return value
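# Worked example (assuming 4 processes, each holding loss = torch.tensor([2.0]) on its device):
#   reduce_value(loss)        -> tensor([2.0]) on every rank (sum 8.0 averaged over 4 ranks)
#   reduce_value(loss, False) -> tensor([8.0]) on every rank
#   gather_value(loss)        -> tensor([2.0, 2.0, 2.0, 2.0]) (concatenated along dim 0)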
def rank_barrier():
rank = get_rank()
if dist.is_initialized():
dist.barrier(device_ids=[rank]) | 1,671 | 27.827586 | 73 | py |
DCASE2022-TASK3 | DCASE2022-TASK3-main/code/utils/config.py | from utils.datasets import dacase2022_dask3
from methods import ein_seld
from torch.utils.data import DataLoader
import torch.distributed as dist
from ruamel.yaml import YAML
import logging
from utils.common import convert_ordinal, count_parameters, move_model_to_gpu
import methods.feature as feature
import torch.optim as optim
from utils.ddp_init import get_rank, get_world_size
dataset_dict = {
'dcase2022task3': dacase2022_dask3
}
method_dict = {
'ein_seld': ein_seld,
}
# Datasets
def get_dataset(root_dir, cfg, args):
dataset = dataset_dict[cfg['dataset']](root_dir, cfg, args)
print('\nDataset {} is being developed......\n'.format(cfg['dataset']))
return dataset
def store_config(output_path, config):
""" Write the given config parameter values to a YAML file.
Args:
output_path (str): Output file path.
config: Parameter values to log.
"""
yaml = YAML()
with open(output_path, 'w') as f:
yaml.dump(config, f)
# Dataloaders
def get_generator(args, cfg, dataset, generator_type):
""" Get generator.
Args:
args: input args
cfg: configuration
dataset: dataset used
generator_type: 'train' | 'valid' | 'test'
'train' for training,
'valid' for validation of valid set,
'test' for infering.
Output:
subset: train_set, valid_set, or test_set
data_generator: 'train_generator', 'valid_generator', or 'test_generator'
"""
assert generator_type == 'train' or generator_type == 'valid' or generator_type == 'test', \
"Data generator type '{}' is not 'train', 'valid' or 'test'".format(generator_type)
batch_sampler = None
if generator_type == 'train':
subset = method_dict[cfg['method']].data.UserDataset(cfg, dataset, dataset_type='train')
batch_sampler = method_dict[cfg['method']].data.UserBatchSampler(
clip_num=len(subset),
batch_size=cfg['training']['batch_size'],
seed=args.seed
)
data_generator = DataLoader(
dataset=subset,
batch_sampler=batch_sampler,
num_workers=args.num_workers,
collate_fn=method_dict[cfg['method']].data.collate_fn,
pin_memory=True
)
elif generator_type == 'valid':
subset = method_dict[cfg['method']].data.UserDataset(cfg, dataset, dataset_type='dev')
data_generator = DataLoader(
dataset=subset,
batch_size=cfg['training']['batch_size'],
shuffle=False,
num_workers=args.num_workers,
collate_fn=method_dict[cfg['method']].data.collate_fn,
pin_memory=True
)
elif generator_type == 'test':
dataset_type = 'fold'+str(cfg['inference']['test_fold'])+'_test'
subset = method_dict[cfg['method']].data.UserDataset(cfg, dataset, dataset_type=dataset_type)
data_generator = DataLoader(
dataset=subset,
batch_size=cfg['inference']['batch_size'],
shuffle=False,
num_workers=args.num_workers,
collate_fn=method_dict[cfg['method']].data.collate_fn_test,
pin_memory=True
)
return subset, data_generator, batch_sampler
# Losses
def get_losses(cfg):
""" Get losses
"""
losses = method_dict[cfg['method']].losses.Losses(cfg)
for idx, loss_name in enumerate(losses.names):
logging.info('{} is used as the {} loss.'.format(loss_name, convert_ordinal(idx + 1)))
logging.info('')
return losses
# Audio feature extractor
def get_afextractor(cfg, cuda):
""" Get audio feature extractor
"""
if cfg['data']['audio_feature'] == 'logmelIV':
afextractor = feature.LogmelIntensity_Extractor(cfg)
afextractor = move_model_to_gpu(afextractor, cuda)
elif cfg['data']['audio_feature'] == 'logmel':
afextractor = feature.Logmel_Extractor(cfg)
afextractor = move_model_to_gpu(afextractor, cuda)
else:
afextractor = None
return afextractor
# Models
def get_models(cfg, dataset, cuda, model_name=None):
""" Get models
"""
logging.info('=====>> Building a model\n')
if not model_name:
model = vars(method_dict[cfg['method']].models)[cfg['training']['model']](cfg, dataset)
else:
model = vars(method_dict[cfg['method']].models)[model_name](cfg, dataset)
model = move_model_to_gpu(model, cuda)
logging.info('Model architectures:\n{}\n'.format(model))
count_parameters(model)
return model
# Optimizers
def get_optimizer(cfg, af_extractor, model):
""" Get optimizers
"""
opt_method = cfg['training']['optimizer']
lr = cfg['training']['lr']
if cfg['data']['audio_feature'] == 'logmelIV':
params = list(af_extractor.parameters()) + list(model.parameters())
else:
params = list(model.parameters())
if opt_method == 'adam':
optimizer = optim.Adam(params, lr=lr, betas=(0.9, 0.999), eps=1e-08, weight_decay=0, amsgrad=True)
elif opt_method == 'sgd':
optimizer = optim.SGD(params, lr=lr, momentum=0, weight_decay=0)
elif opt_method == 'adamw':
# optimizer = AdamW(params, lr=lr, betas=(0.9, 0.999), weight_decay=0, warmup=0)
optimizer = optim.AdamW(params, lr=lr, betas=(0.9, 0.999), eps=1e-08,
weight_decay=0.01, amsgrad=True)
logging.info('Optimizer is: {}\n'.format(opt_method))
return optimizer
# Metrics
def get_metrics(cfg, dataset):
""" Get metrics
"""
metrics = method_dict[cfg['method']].metrics.Metrics(dataset)
for idx, metric_name in enumerate(metrics.names):
logging.info('{} is used as the {} metric.'.format(metric_name, convert_ordinal(idx + 1)))
logging.info('')
return metrics
# Trainer
def get_trainer(args, cfg, dataset, valid_set, af_extractor, model, optimizer, losses, metrics):
""" Get trainer
"""
trainer = method_dict[cfg['method']].training.Trainer(
args=args, cfg=cfg, dataset=dataset, valid_set=valid_set, af_extractor=af_extractor,
model=model, optimizer=optimizer, losses=losses, metrics=metrics
)
return trainer
# Inferer
def get_inferer(cfg, dataset, af_extractor, model, cuda, test_set):
""" Get inferer
"""
inferer = method_dict[cfg['method']].inference.Inferer(
cfg=cfg, dataset=dataset, af_extractor=af_extractor, model=model, cuda=cuda, test_set=test_set
)
return inferer | 6,513 | 33.104712 | 106 | py |
DCASE2022-TASK3 | DCASE2022-TASK3-main/code/utils/datasets.py | from pathlib import Path
class dacase2022_dask3:
''' DCASE 2022 Task 3 dataset
'''
def __init__(self, root_dir, cfg, args):
self.label_dic = {'Female speech, woman speaking': 0,
'Male speech, man speaking': 1,
'Clapping': 2,
'Telephone': 3,
'Laughter': 4,
'Domestic sounds': 5,
'Walk, footsteps': 6,
'Door, open or close': 7,
'Music': 8,
'Musical instrument': 9,
'Water tap, faucet': 10,
'Bell': 11,
'Knock': 12 }
self.label_resolution = 0.1 # 0.1s is the label resolution
self.max_ov = 3 # max overlap
self.num_classes = len(self.label_dic)
self.root_dir = Path(root_dir)
self.starss22_dir = self.root_dir.joinpath('STARSS22')
self.synth_dir = self.root_dir.joinpath('synth_dataset')
self.dataset_dir = dict()
self.dataset_dir['dev'] = {'foa': dict(), 'mic': dict(), 'meta': dict()}
self.dataset_dir['dev']['foa']['STARSS22'] = \
[self.starss22_dir.joinpath('foa_dev').joinpath('dev-train-sony'),
self.starss22_dir.joinpath('foa_dev').joinpath('dev-train-tau'),
self.starss22_dir.joinpath('foa_dev').joinpath('dev-test-sony'),
self.starss22_dir.joinpath('foa_dev').joinpath('dev-test-tau')]
self.dataset_dir['dev']['mic']['STARSS22'] = \
[self.starss22_dir.joinpath('mic_dev').joinpath('dev-train-sony'),
self.starss22_dir.joinpath('mic_dev').joinpath('dev-train-tau'),
self.starss22_dir.joinpath('mic_dev').joinpath('dev-test-sony'),
self.starss22_dir.joinpath('mic_dev').joinpath('dev-test-tau')]
self.dataset_dir['dev']['meta']['STARSS22'] = \
[self.starss22_dir.joinpath('metadata_dev').joinpath('dev-train-sony'),
self.starss22_dir.joinpath('metadata_dev').joinpath('dev-train-tau'),
self.starss22_dir.joinpath('metadata_dev').joinpath('dev-test-sony'),
self.starss22_dir.joinpath('metadata_dev').joinpath('dev-test-tau')]
self.dataset_dir['eval'] = {
'foa': { 'STARSS22': [self.starss22_dir.joinpath('foa_eval')] },
'mic': { 'STARSS22': [self.starss22_dir.joinpath('mic_eval')] },
'meta': { 'STARSS22': [] },
}
if not args.dataset == 'STARSS22':
synth_dataset_list = args.dataset.split(',')
for _synth_dataset in synth_dataset_list:
self.dataset_dir['dev']['foa'][_synth_dataset] = [self.synth_dir.joinpath(_synth_dataset).joinpath('foa')]
self.dataset_dir['dev']['mic'][_synth_dataset] = [self.synth_dir.joinpath(_synth_dataset).joinpath('mic')]
self.dataset_dir['dev']['meta'][_synth_dataset] = [self.synth_dir.joinpath(_synth_dataset).joinpath('metadata')]
| 3,132 | 49.532258 | 128 | py |
DCASE2022-TASK3 | DCASE2022-TASK3-main/code/utils/common.py | import numpy as np
import torch
import logging
from datetime import datetime
from tqdm import tqdm
import math
import torch.distributed as dist
import shutil
from pathlib import Path
from .ddp_init import get_rank, get_world_size
def float_samples_to_int16(y):
"""Convert floating-point numpy array of audio samples to int16."""
if not issubclass(y.dtype.type, np.floating):
raise ValueError('input samples not floating-point')
return (y * np.iinfo(np.int16).max).astype(np.int16)
def int16_samples_to_float32(y):
"""Convert int16 numpy array of audio samples to float32."""
if y.dtype != np.int16:
raise ValueError('input samples not int16')
return y.astype(np.float32) / np.iinfo(np.int16).max
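# Note: the two converters above are inverses up to int16 quantization, i.e.
# int16_samples_to_float32(float_samples_to_int16(x)) ~= x for float x in [-1.0, 1.0].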
def prepare_train_id(args, cfg):
""" Delete out train directory if it exists"""
out_train_dir = Path(cfg['workspace_dir']).joinpath('results').joinpath('out_train') \
.joinpath(cfg['method']).joinpath(cfg['training']['train_id'])
if out_train_dir.is_dir():
flag = input("Train ID folder {} is existed, delete it? (y/n)". \
format(str(out_train_dir))).lower()
print('')
if flag == 'y':
shutil.rmtree(str(out_train_dir))
elif flag == 'n':
print("User select not to remove the training ID folder {}.\n". \
format(str(out_train_dir)))
def create_logging(logs_dir, filemode):
"""Create log objective.
Args:
logs_dir (Path obj): logs directory
        filemode: file open mode
"""
logs_dir.mkdir(parents=True, exist_ok=True)
i1 = 0
while logs_dir.joinpath('{:04d}.log'.format(i1)).is_file():
i1 += 1
logs_path = logs_dir.joinpath('{:04d}.log'.format(i1))
logging.basicConfig(
level=logging.INFO,
# format='%(filename)s[line:%(lineno)d] %(levelname)s %(message)s',
format='%(asctime)s %(filename)s[line:%(lineno)d] %(levelname)s %(message)s',
datefmt='%a, %d %b %Y %H:%M:%S',
filename=logs_path,
filemode=filemode)
# Print to console
console = logging.StreamHandler()
console.setLevel(logging.INFO)
formatter = logging.Formatter('%(name)-12s: %(levelname)-8s %(message)s')
console.setFormatter(formatter)
# logging.getLogger('').addHandler(console)
logging.getLogger('').addHandler(TqdmLoggingHandler())
dt_string = datetime.now().strftime('%a, %d %b %Y %H:%M:%S')
logging.info(dt_string)
logging.info('')
return logging
class TqdmLoggingHandler(logging.Handler):
def __init__(self, level=logging.NOTSET):
super().__init__(level)
def emit(self, record):
try:
msg = self.format(record)
tqdm.write(msg)
self.flush()
        except Exception:
self.handleError(record)
def convert_ordinal(n):
"""Convert a number to a ordinal number
"""
ordinal = lambda n: "%d%s" % (n,"tsnrhtdd"[(math.floor(n/10)%10!=1)*(n%10<4)*n%10::4])
return ordinal(n)
def move_model_to_gpu(model, cuda):
"""Move model to GPU
"""
if cuda:
logging.info('Utilize GPUs for computation')
logging.info('Number of GPU available: {}\n'.format(torch.cuda.device_count()))
model.to(get_rank(), non_blocking=True)
else:
logging.info('Utilize CPU for computation')
return model
def count_parameters(model):
"""Count model parameters
"""
params_num = sum(p.numel() for p in model.parameters() if p.requires_grad)
logging.info('Total number of parameters: {}\n'.format(params_num))
def print_metrics(logging, writer, values_dict, it, set_type='train'):
"""Print losses and metrics, and write it to tensorboard
Args:
logging: logging
writer: tensorboard writer
values_dict: losses or metrics
it: iter
set_type: 'train' | 'valid' | 'test'
"""
out_str = ''
if set_type == 'train':
out_str += 'Train: '
elif set_type == 'valid':
        out_str += 'Valid: '
for key, value in values_dict.items():
out_str += '{}: {:.3f}, '.format(key, value)
writer.add_scalar('{}/{}'.format(set_type, key), value, it)
logging.info(out_str)
| 4,202 | 29.904412 | 90 | py |
DCASE2022-TASK3 | DCASE2022-TASK3-main/code/utils/__init__.py | 0 | 0 | 0 | py |
|
DCASE2022-TASK3 | DCASE2022-TASK3-main/code/utils/cli_parser.py | import argparse
import sys
from pathlib import Path
from ruamel.yaml import YAML
from termcolor import cprint
def parse_cli_overides():
"""Parse the command-line arguments.
Parse args from CLI and override config dictionary entries
This function implements the command-line interface of the program.
The interface accepts general command-line arguments as well as
arguments that are specific to a sub-command. The sub-commands are
*preprocess*, *train*, *predict*, and *evaluate*. Specifying a
sub-command is required, as it specifies the task that the program
should carry out.
Returns:
args: The parsed arguments.
"""
# Parse the command-line arguments, but separate the `--config_file`
# option from the other arguments. This way, options can be parsed
# from the config file(s) first and then overidden by the other
# command-line arguments later.
parser = argparse.ArgumentParser(
description='Event Independent Network for DCASE2022.',
add_help=False
)
parser.add_argument('-c', '--config_file', default='./configs/ein_seld/seld.yaml', help='Specify config file', metavar='FILE')
parser.add_argument('--dataset', default='STARSS22', type=str)
subparsers = parser.add_subparsers(dest='mode')
parser_preproc = subparsers.add_parser('preprocess')
parser_train = subparsers.add_parser('train')
parser_infer = subparsers.add_parser('infer')
subparsers.add_parser('evaluate')
# Require the user to specify a sub-command
subparsers.required = True
parser_preproc.add_argument('--preproc_mode', choices=['extract_data', 'extract_indexes', 'extract_pit_label',
'extract_mic_features', 'extract_scalar', 'extract_adpit_label'],
required=True, help='select preprocessing mode')
parser_preproc.add_argument('--dataset_type', default='dev', choices=['dev', 'eval'],
help='select dataset to preprocess')
parser_preproc.add_argument('--num_workers', type=int, default=8, metavar='N')
parser_preproc.add_argument('--no_cuda', action='store_true', help='Do not use cuda.')
parser_train.add_argument('--seed', type=int, default=2022, metavar='N')
parser_train.add_argument('--num_workers', type=int, default=8, metavar='N')
parser_train.add_argument('--no_cuda', action='store_true', help='Do not use cuda.')
parser_train.add_argument('--port', type=int, default=12359, metavar='N')
parser_infer.add_argument('--num_workers', type=int, default=8, metavar='N')
parser_infer.add_argument('--no_cuda', action='store_true', help='Do not use cuda.')
args = parser.parse_args()
args_dict = vars(args)
cprint("Args:", "green")
for key, value in args_dict.items():
print(f" {key:25s} -> {value}")
yaml = YAML()
yaml.indent(mapping=4, sequence=6, offset=3)
yaml.default_flow_style = False
with open(args.config_file, 'r') as f:
cfg = yaml.load(f)
cprint("Cfg:", "red")
yaml.dump(cfg, sys.stdout, transform=replace_indent)
return args, cfg
def replace_indent(stream):
stream = " " + stream
return stream.replace("\n", "\n ")
| 3,260 | 42.48 | 130 | py |
OpenPSG | OpenPSG-main/setup.py | #!/usr/bin/env python
# Copyright (c) OpenMMLab. All rights reserved.
import os
import os.path as osp
import platform
import shutil
import sys
import warnings
from setuptools import find_packages, setup
import torch
from torch.utils.cpp_extension import (BuildExtension, CppExtension,
CUDAExtension)
def readme():
with open('README.md', encoding='utf-8') as f:
content = f.read()
return content
version_file = 'openpsg/version.py'
def get_version():
with open(version_file, 'r') as f:
exec(compile(f.read(), version_file, 'exec'))
return locals()['__version__']
def make_cuda_ext(name, module, sources, sources_cuda=[]):
define_macros = []
extra_compile_args = {'cxx': []}
if torch.cuda.is_available() or os.getenv('FORCE_CUDA', '0') == '1':
define_macros += [('WITH_CUDA', None)]
extension = CUDAExtension
extra_compile_args['nvcc'] = [
'-D__CUDA_NO_HALF_OPERATORS__',
'-D__CUDA_NO_HALF_CONVERSIONS__',
'-D__CUDA_NO_HALF2_OPERATORS__',
]
sources += sources_cuda
else:
print(f'Compiling {name} without CUDA')
extension = CppExtension
return extension(
name=f'{module}.{name}',
sources=[os.path.join(*module.split('.'), p) for p in sources],
define_macros=define_macros,
extra_compile_args=extra_compile_args)
def parse_requirements(fname='requirements.txt', with_version=True):
"""Parse the package dependencies listed in a requirements file but strips
specific versioning information.
Args:
fname (str): path to requirements file
with_version (bool, default=False): if True include version specs
Returns:
List[str]: list of requirements items
CommandLine:
python -c "import setup; print(setup.parse_requirements())"
"""
import re
import sys
from os.path import exists
require_fpath = fname
def parse_line(line):
"""Parse information from a line in a requirements text file."""
if line.startswith('-r '):
# Allow specifying requirements in other files
target = line.split(' ')[1]
for info in parse_require_file(target):
yield info
else:
info = {'line': line}
if line.startswith('-e '):
info['package'] = line.split('#egg=')[1]
elif '@git+' in line:
info['package'] = line
else:
# Remove versioning from the package
pat = '(' + '|'.join(['>=', '==', '>']) + ')'
parts = re.split(pat, line, maxsplit=1)
parts = [p.strip() for p in parts]
info['package'] = parts[0]
if len(parts) > 1:
op, rest = parts[1:]
if ';' in rest:
# Handle platform specific dependencies
# http://setuptools.readthedocs.io/en/latest/setuptools.html#declaring-platform-specific-dependencies
version, platform_deps = map(str.strip,
rest.split(';'))
info['platform_deps'] = platform_deps
else:
version = rest # NOQA
info['version'] = (op, version)
yield info
def parse_require_file(fpath):
with open(fpath, 'r') as f:
for line in f.readlines():
line = line.strip()
if line and not line.startswith('#'):
for info in parse_line(line):
yield info
def gen_packages_items():
if exists(require_fpath):
for info in parse_require_file(require_fpath):
parts = [info['package']]
if with_version and 'version' in info:
parts.extend(info['version'])
if not sys.version.startswith('3.4'):
# apparently package_deps are broken in 3.4
platform_deps = info.get('platform_deps')
if platform_deps is not None:
parts.append(';' + platform_deps)
item = ''.join(parts)
yield item
packages = list(gen_packages_items())
return packages
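# Hedged sketch of parse_requirements on a throwaway file; the package names
# below are illustrative only, not dependencies declared by this repository.
def _example_parse_requirements():
    import tempfile
    with tempfile.NamedTemporaryFile('w', suffix='.txt', delete=False) as f:
        f.write('numpy>=1.19\nmmcv-full==1.4.0\n# a comment\n')
        tmp_name = f.name
    names_only = parse_requirements(tmp_name, with_version=False)
    pinned = parse_requirements(tmp_name, with_version=True)
    # names_only == ['numpy', 'mmcv-full']
    # pinned == ['numpy>=1.19', 'mmcv-full==1.4.0']
    return names_only, pinned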
def add_mim_extension():
"""Add extra files that are required to support MIM into the package.
These files will be added by creating a symlink to the originals if the
package is installed in `editable` mode (e.g. pip install -e .), or by
copying from the originals otherwise.
"""
# parse installment mode
if 'develop' in sys.argv:
# installed by `pip install -e .`
if platform.system() == 'Windows':
# set `copy` mode here since symlink fails on Windows.
mode = 'copy'
else:
mode = 'symlink'
elif 'sdist' in sys.argv or 'bdist_wheel' in sys.argv:
# installed by `pip install .`
# or create source distribution by `python setup.py sdist`
mode = 'copy'
else:
return
filenames = ['tools', 'configs', 'demo', 'model-index.yml']
repo_path = osp.dirname(__file__)
mim_path = osp.join(repo_path, 'mmdet', '.mim')
os.makedirs(mim_path, exist_ok=True)
for filename in filenames:
if osp.exists(filename):
src_path = osp.join(repo_path, filename)
tar_path = osp.join(mim_path, filename)
if osp.isfile(tar_path) or osp.islink(tar_path):
os.remove(tar_path)
elif osp.isdir(tar_path):
shutil.rmtree(tar_path)
if mode == 'symlink':
src_relpath = osp.relpath(src_path, osp.dirname(tar_path))
os.symlink(src_relpath, tar_path)
elif mode == 'copy':
if osp.isfile(src_path):
shutil.copyfile(src_path, tar_path)
elif osp.isdir(src_path):
shutil.copytree(src_path, tar_path)
else:
warnings.warn(f'Cannot copy file {src_path}.')
else:
raise ValueError(f'Invalid mode {mode}')
if __name__ == '__main__':
add_mim_extension()
setup(
name='openpsg',
version=get_version(),
description='Benchmarking Panoptic Scene Graph Generation',
long_description=readme(),
long_description_content_type='text/markdown',
author='OpenPSG Contributors',
author_email='yangjingkang001@gmail',
keywords='computer vision, detection, segmentation, scene graph',
url='https://github.com/Jingkang50/OpenPSG',
packages=find_packages(exclude=('configs', 'tools', 'demo')),
include_package_data=True,
classifiers=[
'Development Status :: 5 - Production/Stable',
'License :: OSI Approved :: Apache Software License',
'Operating System :: OS Independent',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
],
license='Apache License 2.0',
install_requires=parse_requirements('requirements/runtime.txt'),
extras_require={
'all': parse_requirements('requirements.txt'),
'tests': parse_requirements('requirements/tests.txt'),
'build': parse_requirements('requirements/build.txt'),
'optional': parse_requirements('requirements/optional.txt'),
},
ext_modules=[],
cmdclass={'build_ext': BuildExtension},
zip_safe=False) | 7,887 | 35.518519 | 125 | py |
OpenPSG | OpenPSG-main/predict.py | import os
import tempfile
import shutil
from typing import List
from cog import BasePredictor, Path, Input, BaseModel
from openpsg.utils.utils import show_result
from mmdet.apis import init_detector, inference_detector
from mmcv import Config
import mmcv
class ModelOutput(BaseModel):
image: Path
class Predictor(BasePredictor):
def setup(self):
model_ckt = "epoch_60.pth"
cfg = Config.fromfile("configs/psgtr/psgtr_r50_psg_inference.py")
self.model = init_detector(cfg, model_ckt, device="cpu")
def predict(
self,
image: Path = Input(
description="Input image.",
),
num_rel: int = Input(
description="Number of Relations. Each relation will generate a scene graph",
default=5,
ge=1,
le=20,
),
) -> List[ModelOutput]:
input_image = mmcv.imread(str(image))
result = inference_detector(self.model, input_image)
out_path = Path(tempfile.mkdtemp()) / "output.png"
out_dir = "temp"
show_result(
str(image),
result,
is_one_stage=True,
num_rel=num_rel,
out_dir=out_dir,
out_file=str(out_path),
)
output = []
output.append(ModelOutput(image=out_path))
for i, img_path in enumerate(os.listdir(out_dir)):
img = mmcv.imread(os.path.join(out_dir, img_path))
out_path = Path(tempfile.mkdtemp()) / f"output_{i}.png"
mmcv.imwrite(img, str(out_path))
output.append(ModelOutput(image=out_path))
shutil.rmtree(out_dir)
return output
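# Hedged sketch of driving the Cog predictor outside the Cog runtime; the image
# path is illustrative and the checkpoint/config referenced in setup() are
# assumed to be present locally.
def _example_local_run():
    predictor = Predictor()
    predictor.setup()
    outputs = predictor.predict(image=Path('demo.jpg'), num_rel=5)
    return [o.image for o in outputs]  # paths to the rendered scene graph images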
| 1,670 | 28.315789 | 89 | py |
OpenPSG | OpenPSG-main/ce7454/main.py | import argparse
import os
import time
import torch
from dataset import PSGClsDataset
from evaluator import Evaluator
from torch.utils.data import DataLoader
from torchvision.models import resnet50
from trainer import BaseTrainer
parser = argparse.ArgumentParser()
parser.add_argument('--model_name', type=str, default='res50')
parser.add_argument('--epoch', type=int, default=36)
parser.add_argument('--lr', type=float, default=0.001)
parser.add_argument('--batch_size', type=int, default=16)
parser.add_argument('--momentum', type=float, default=0.9)
parser.add_argument('--weight_decay', type=float, default=0.0005)
args = parser.parse_args()
savename = f'{args.model_name}_e{args.epoch}_lr{args.lr}_bs{args.batch_size}_m{args.momentum}_wd{args.weight_decay}'
os.makedirs('./checkpoints', exist_ok=True)
os.makedirs('./results', exist_ok=True)
# loading dataset
train_dataset = PSGClsDataset(stage='train')
train_dataloader = DataLoader(train_dataset,
batch_size=args.batch_size,
shuffle=True,
num_workers=8)
val_dataset = PSGClsDataset(stage='val')
val_dataloader = DataLoader(val_dataset,
batch_size=32,
shuffle=False,
num_workers=8)
test_dataset = PSGClsDataset(stage='test')
test_dataloader = DataLoader(test_dataset,
batch_size=32,
shuffle=False,
num_workers=8)
print('Data Loaded...', flush=True)
# loading model
model = resnet50(pretrained=True)
model.fc = torch.nn.Linear(2048, 56)
model.cuda()
print('Model Loaded...', flush=True)
# loading trainer
trainer = BaseTrainer(model,
train_dataloader,
learning_rate=args.lr,
momentum=args.momentum,
weight_decay=args.weight_decay,
epochs=args.epoch)
evaluator = Evaluator(model, k=3)
# train!
print('Start Training...', flush=True)
begin_epoch = time.time()
best_val_recall = 0.0
for epoch in range(0, args.epoch):
train_metrics = trainer.train_epoch()
val_metrics = evaluator.eval_recall(val_dataloader)
# show log
print(
'{} | Epoch {:3d} | Time {:5d}s | Train Loss {:.4f} | Test Loss {:.3f} | mR {:.2f}'
.format(savename, (epoch + 1), int(time.time() - begin_epoch),
train_metrics['train_loss'], val_metrics['test_loss'],
100.0 * val_metrics['mean_recall']),
flush=True)
# save model
if val_metrics['mean_recall'] >= best_val_recall:
torch.save(model.state_dict(), f'./checkpoints/{savename}_best.ckpt')
best_val_recall = val_metrics['mean_recall']
print('Training Completed...', flush=True)
# saving result!
print('Loading Best Ckpt...', flush=True)
checkpoint = torch.load(f'checkpoints/{savename}_best.ckpt')
model.load_state_dict(checkpoint)
test_evaluator = Evaluator(model, k=3)
check_metrics = test_evaluator.eval_recall(val_dataloader)
if best_val_recall == check_metrics['mean_recall']:
print('Successfully load best checkpoint with acc {:.2f}'.format(
100 * best_val_recall),
flush=True)
else:
print('Fail to load best checkpoint')
result = test_evaluator.submit(test_dataloader)
# save into the file
with open(f'results/{savename}_{best_val_recall}.txt', 'w') as writer:
for label_list in result:
a = [str(x) for x in label_list]
save_str = ' '.join(a)
        writer.write(save_str + '\n')
print('Result Saved!', flush=True)
| 3,628 | 33.561905 | 116 | py |
OpenPSG | OpenPSG-main/ce7454/grade.py | import argparse
import os
import numpy as np
def compute_recall(gt_list, pred_list):
score_list = np.zeros([56, 2], dtype=int)
for gt, pred in zip(gt_list, pred_list):
for gt_id in gt:
# pos 0 for counting all existing relations
score_list[gt_id][0] += 1
if gt_id in pred:
                # pos 1 for counting relations that are recalled
score_list[gt_id][1] += 1
score_list = score_list[6:]
    # guard against nan; disabled because no relation class is expected to have a zero count in the test set ground truth
# score_list[:,0][score_list[:,0] == 0] = 1
meanrecall = np.mean(score_list[:, 1] / score_list[:, 0])
return meanrecall
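# Hedged, hand-sized illustration of the per-class counting in compute_recall:
# class 6 occurs in two images' ground truth and is recalled once -> 0.5, while
# class 7 occurs once and is never recalled -> 0.0. compute_recall itself
# averages this ratio over all 50 relation classes (indices 6..55), assuming
# each of them appears at least once in the test set ground truth.
def _example_per_class_recall():
    gt_list = [[6, 7], [6]]
    pred_list = [[6, 8, 9], [8, 9, 10]]
    hits, totals = {}, {}
    for gt, pred in zip(gt_list, pred_list):
        for gt_id in gt:
            totals[gt_id] = totals.get(gt_id, 0) + 1
            hits[gt_id] = hits.get(gt_id, 0) + int(gt_id in pred)
    return {c: hits[c] / totals[c] for c in totals}  # {6: 0.5, 7: 0.0}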
def parse_args():
parser = argparse.ArgumentParser(description='MMDet eval a model')
parser.add_argument('input_path', help='input file path')
parser.add_argument('output_path', help='output file path')
args = parser.parse_args()
return args
def main():
args = parse_args()
submit_dir = os.path.join(args.input_path, 'res')
groundtruth_dir = os.path.join(args.input_path, 'ref')
gt_list = []
with open(os.path.join(groundtruth_dir, 'psg_cls_gt.txt'), 'r') as reader:
for line in reader.readlines():
gt_list.append(
                [int(label) for label in line.strip('\n').split(' ')])
pred_list = []
with open(os.path.join(submit_dir, 'result.txt'), 'r') as reader:
for line in reader.readlines():
pred_list.append(
                [int(label) for label in line.strip('\n').split(' ')])
assert np.array(pred_list).shape == (
500, 3), 'make sure the submitted file is 500 x 3'
result = compute_recall(gt_list, pred_list)
output_filename = os.path.join(args.output_path, 'scores.txt')
with open(output_filename, 'w') as f3:
f3.write('score: {}\n'.format(result))
if __name__ == '__main__':
main()
| 1,891 | 30.016393 | 78 | py |
OpenPSG | OpenPSG-main/ce7454/dataset.py | import io
import json
import logging
import os
import torch
import torchvision.transforms as trn
from PIL import Image, ImageFile
from torch.utils.data import Dataset
# to fix "OSError: image file is truncated"
ImageFile.LOAD_TRUNCATED_IMAGES = True
class Convert:
def __init__(self, mode='RGB'):
self.mode = mode
def __call__(self, image):
return image.convert(self.mode)
def get_transforms(stage: str):
mean, std = [0.5, 0.5, 0.5], [0.5, 0.5, 0.5]
if stage == 'train':
return trn.Compose([
Convert('RGB'),
trn.Resize((1333, 800)),
trn.RandomHorizontalFlip(),
trn.RandomCrop((1333, 800), padding=4),
trn.ToTensor(),
trn.Normalize(mean, std),
])
elif stage in ['val', 'test']:
return trn.Compose([
Convert('RGB'),
trn.Resize((1333, 800)),
trn.ToTensor(),
trn.Normalize(mean, std),
])
class PSGClsDataset(Dataset):
def __init__(
self,
stage,
root='./data/coco/',
num_classes=56,
):
super(PSGClsDataset, self).__init__()
with open('./data/psg/psg_cls_basic.json') as f:
dataset = json.load(f)
self.imglist = [
d for d in dataset['data']
if d['image_id'] in dataset[f'{stage}_image_ids']
]
self.root = root
self.transform_image = get_transforms(stage)
self.num_classes = num_classes
def __len__(self):
return len(self.imglist)
def __getitem__(self, index):
sample = self.imglist[index]
path = os.path.join(self.root, sample['file_name'])
try:
with open(path, 'rb') as f:
content = f.read()
filebytes = content
buff = io.BytesIO(filebytes)
image = Image.open(buff).convert('RGB')
sample['data'] = self.transform_image(image)
except Exception as e:
logging.error('Error, cannot read [{}]'.format(path))
raise e
# Generate Soft Label
soft_label = torch.Tensor(self.num_classes)
soft_label.fill_(0)
soft_label[sample['relations']] = 1
sample['soft_label'] = soft_label
del sample['relations']
return sample
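# Hedged illustration of the multi-hot target built in __getitem__ above:
# relation ids [1, 7, 7] become a 56-dim vector with ones at positions 1 and 7.
def _example_soft_label():
    relations = [1, 7, 7]
    soft_label = torch.zeros(56)
    soft_label[relations] = 1
    return soft_label  # soft_label.sum() == 2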
| 2,352 | 26.682353 | 65 | py |
OpenPSG | OpenPSG-main/ce7454/evaluator.py | import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data import DataLoader
class Evaluator:
def __init__(
self,
net: nn.Module,
k: int,
):
self.net = net
self.k = k
def eval_recall(
self,
data_loader: DataLoader,
):
self.net.eval()
loss_avg = 0.0
pred_list, gt_list = [], []
with torch.no_grad():
for batch in data_loader:
data = batch['data'].cuda()
logits = self.net(data)
prob = torch.sigmoid(logits)
target = batch['soft_label'].cuda()
loss = F.binary_cross_entropy(prob, target, reduction='sum')
loss_avg += float(loss.data)
# gather prediction and gt
pred = torch.topk(prob.data, self.k)[1]
pred = pred.cpu().detach().tolist()
pred_list.extend(pred)
for soft_label in batch['soft_label']:
gt_label = (soft_label == 1).nonzero(as_tuple=True)[0]\
.cpu().detach().tolist()
gt_list.append(gt_label)
# compute mean recall
score_list = np.zeros([56, 2], dtype=int)
for gt, pred in zip(gt_list, pred_list):
for gt_id in gt:
# pos 0 for counting all existing relations
score_list[gt_id][0] += 1
if gt_id in pred:
                    # pos 1 for counting relations that are recalled
score_list[gt_id][1] += 1
score_list = score_list[6:]
# to avoid nan
score_list[:, 0][score_list[:, 0] == 0] = 1
meanrecall = np.mean(score_list[:, 1] / score_list[:, 0])
metrics = {}
metrics['test_loss'] = loss_avg / len(data_loader)
metrics['mean_recall'] = meanrecall
return metrics
def submit(
self,
data_loader: DataLoader,
):
self.net.eval()
pred_list = []
with torch.no_grad():
for batch in data_loader:
data = batch['data'].cuda()
logits = self.net(data)
prob = torch.sigmoid(logits)
pred = torch.topk(prob.data, self.k)[1]
pred = pred.cpu().detach().tolist()
pred_list.extend(pred)
return pred_list
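# Hedged illustration of the top-k step used in eval_recall/submit above: for
# k = 3, the indices of the three largest sigmoid scores are taken as the
# predicted relation labels (the scores below are made up).
def _example_topk_labels():
    prob = torch.tensor([[0.1, 0.9, 0.3, 0.8, 0.7]])
    topk_idx = torch.topk(prob, k=3)[1]
    return topk_idx.tolist()  # [[1, 3, 4]]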
| 2,442 | 30.727273 | 76 | py |
OpenPSG | OpenPSG-main/ce7454/trainer.py | import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data import DataLoader
from tqdm import tqdm
def cosine_annealing(step, total_steps, lr_max, lr_min):
return lr_min + (lr_max -
lr_min) * 0.5 * (1 + np.cos(step / total_steps * np.pi))
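# Hedged sanity check for the schedule above: the factor starts at lr_max,
# passes through the midpoint of (lr_max, lr_min) halfway in, and ends at
# lr_min. BaseTrainer uses lr_max=1 and lr_min=1e-6/learning_rate because
# LambdaLR multiplies the base learning rate by this factor.
def _example_cosine_annealing():
    total = 100
    start = cosine_annealing(0, total, lr_max=1.0, lr_min=0.0)    # 1.0
    middle = cosine_annealing(50, total, lr_max=1.0, lr_min=0.0)  # ~0.5
    end = cosine_annealing(100, total, lr_max=1.0, lr_min=0.0)    # ~0.0
    return start, middle, end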
class BaseTrainer:
def __init__(self,
net: nn.Module,
train_loader: DataLoader,
learning_rate: float = 0.1,
momentum: float = 0.9,
weight_decay: float = 0.0005,
epochs: int = 100) -> None:
self.net = net
self.train_loader = train_loader
self.optimizer = torch.optim.SGD(
net.parameters(),
learning_rate,
momentum=momentum,
weight_decay=weight_decay,
nesterov=True,
)
self.scheduler = torch.optim.lr_scheduler.LambdaLR(
self.optimizer,
lr_lambda=lambda step: cosine_annealing(
step,
epochs * len(train_loader),
1, # since lr_lambda computes multiplicative factor
1e-6 / learning_rate,
),
)
def train_epoch(self):
self.net.train() # enter train mode
loss_avg = 0.0
train_dataiter = iter(self.train_loader)
for train_step in tqdm(range(1, len(train_dataiter) + 1)):
# for train_step in tqdm(range(1, 5)):
batch = next(train_dataiter)
data = batch['data'].cuda()
target = batch['soft_label'].cuda()
# forward
logits = self.net(data)
loss = F.binary_cross_entropy_with_logits(logits,
target,
reduction='sum')
# backward
self.optimizer.zero_grad()
loss.backward()
self.optimizer.step()
self.scheduler.step()
# exponential moving average, show smooth values
with torch.no_grad():
loss_avg = loss_avg * 0.8 + float(loss) * 0.2
metrics = {}
metrics['train_loss'] = loss_avg
return metrics
| 2,273 | 30.150685 | 77 | py |
OpenPSG | OpenPSG-main/tools/vis_results.py | # Copyright (c) OpenMMLab. All rights reserved.
import argparse
import os.path as osp
import mmcv
from mmcv import Config, DictAction
from mmdet.datasets import build_dataset, replace_ImageToTensor
from openpsg.utils.utils import show_result
def parse_args():
parser = argparse.ArgumentParser(
description='MMDet eval image prediction result for each')
parser.add_argument('config', help='test config file path')
parser.add_argument('prediction_path',
help='prediction path where test pkl result')
parser.add_argument('show_dir',
help='directory where painted images will be saved')
parser.add_argument('--show', action='store_true', help='show results')
parser.add_argument('--img_idx',
default=[25, 73],
nargs='+',
type=int,
help='which image to show')
parser.add_argument('--wait-time',
type=float,
default=0,
help='the interval of show (s), 0 is block')
parser.add_argument('--topk',
default=20,
type=int,
help='saved Number of the highest topk '
'and lowest topk after index sorting')
parser.add_argument('--show-score-thr',
type=float,
default=0,
help='score threshold (default: 0.)')
parser.add_argument('--one_stage', default=False, action='store_true')
parser.add_argument(
'--cfg-options',
nargs='+',
action=DictAction,
help='override some settings in the used config, the key-value pair '
'in xxx=yyy format will be merged into config file. If the value to '
'be overwritten is a list, it should be like key="[a,b]" or key=a,b '
'It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]" '
'Note that the quotation marks are necessary and that no white space '
'is allowed.')
args = parser.parse_args()
return args
def main():
args = parse_args()
mmcv.check_file_exist(args.prediction_path)
cfg = Config.fromfile(args.config)
if args.cfg_options is not None:
cfg.merge_from_dict(args.cfg_options)
# in case the test dataset is concatenated
samples_per_gpu = 1
if isinstance(cfg.data.test, dict):
cfg.data.test.test_mode = True
samples_per_gpu = cfg.data.test.pop('samples_per_gpu', 1)
if samples_per_gpu > 1:
# Replace 'ImageToTensor' to 'DefaultFormatBundle'
cfg.data.test.pipeline = replace_ImageToTensor(
cfg.data.test.pipeline)
elif isinstance(cfg.data.test, list):
for ds_cfg in cfg.data.test:
ds_cfg.test_mode = True
samples_per_gpu = max(
[ds_cfg.pop('samples_per_gpu', 1) for ds_cfg in cfg.data.test])
if samples_per_gpu > 1:
for ds_cfg in cfg.data.test:
ds_cfg.pipeline = replace_ImageToTensor(ds_cfg.pipeline)
# build the dataloader
dataset = build_dataset(cfg.data.test)
outputs = mmcv.load(args.prediction_path)
for idx in args.img_idx:
print(idx, flush=True)
img = dataset[idx]['img_metas'][0].data['filename']
result = outputs[idx]
out_filepath = osp.join(args.show_dir, f'{idx}.png')
show_result(img,
result,
is_one_stage=args.one_stage,
num_rel=args.topk,
out_file=out_filepath)
if __name__ == '__main__':
main()
| 3,698 | 35.99 | 78 | py |
OpenPSG | OpenPSG-main/tools/test.py | # Copyright (c) OpenMMLab. All rights reserved.
import argparse
import os
import os.path as osp
import time
import warnings
import mmcv
import torch
from grade import save_results
from mmcv import Config, DictAction
from mmcv.cnn import fuse_conv_bn
from mmcv.parallel import MMDataParallel, MMDistributedDataParallel
from mmcv.runner import (get_dist_info, init_dist, load_checkpoint,
wrap_fp16_model)
from mmdet.apis import multi_gpu_test, single_gpu_test
from mmdet.datasets import build_dataloader, replace_ImageToTensor
from mmdet.models import build_detector
from openpsg.datasets import build_dataset
def parse_args():
parser = argparse.ArgumentParser(
description='MMDet test (and eval) a model')
parser.add_argument('config', help='test config file path')
parser.add_argument('checkpoint', help='checkpoint file')
parser.add_argument(
'--work-dir',
help='the directory to save the file containing evaluation metrics')
parser.add_argument('--out', help='output result file in pickle format')
parser.add_argument(
'--fuse-conv-bn',
action='store_true',
help='Whether to fuse conv and bn, this will slightly increase'
'the inference speed')
parser.add_argument(
'--format-only',
action='store_true',
help='Format the output results without perform evaluation. It is'
'useful when you want to format the result to a specific format and '
'submit it to the test server')
parser.add_argument(
'--eval',
type=str,
nargs='+',
help='evaluation metrics, which depends on the dataset, e.g., "bbox",'
' "segm", "proposal" for COCO, and "mAP", "recall" for PASCAL VOC')
parser.add_argument('--show', action='store_true', help='show results')
parser.add_argument('--show-dir',
help='directory where painted images will be saved')
parser.add_argument('--show-score-thr',
type=float,
default=0.3,
help='score threshold (default: 0.3)')
parser.add_argument('--gpu-collect',
action='store_true',
help='whether to use gpu to collect results.')
parser.add_argument(
'--tmpdir',
help='tmp directory used for collecting results from multiple '
'workers, available when gpu-collect is not specified')
parser.add_argument(
'--cfg-options',
nargs='+',
action=DictAction,
help='override some settings in the used config, the key-value pair '
'in xxx=yyy format will be merged into config file. If the value to '
'be overwritten is a list, it should be like key="[a,b]" or key=a,b '
'It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]" '
'Note that the quotation marks are necessary and that no white space '
'is allowed.')
parser.add_argument(
'--options',
nargs='+',
action=DictAction,
help='custom options for evaluation, the key-value pair in xxx=yyy '
'format will be kwargs for dataset.evaluate() function (deprecate), '
'change to --eval-options instead.')
parser.add_argument(
'--eval-options',
nargs='+',
action=DictAction,
help='custom options for evaluation, the key-value pair in xxx=yyy '
'format will be kwargs for dataset.evaluate() function')
parser.add_argument('--launcher',
choices=['none', 'pytorch', 'slurm', 'mpi'],
default='none',
help='job launcher')
parser.add_argument(
'--submit',
action='store_true',
help=
'save output to a json file and save the panoptic mask as a png image into a folder for grading purpose'
)
parser.add_argument('--local_rank', type=int, default=0)
args = parser.parse_args()
if 'LOCAL_RANK' not in os.environ:
os.environ['LOCAL_RANK'] = str(args.local_rank)
if args.options and args.eval_options:
raise ValueError(
'--options and --eval-options cannot be both '
'specified, --options is deprecated in favor of --eval-options')
if args.options:
warnings.warn('--options is deprecated in favor of --eval-options')
args.eval_options = args.options
return args
def main():
args = parse_args()
assert args.out or args.eval or args.format_only or args.show \
or args.show_dir or args.submit, \
('Please specify at least one operation (save/eval/format/show the '
'results / save the results) with the argument "--out", "--eval"'
', "--format-only", "--show" or "--show-dir"')
if args.eval and args.format_only:
raise ValueError('--eval and --format_only cannot be both specified')
if args.out is not None and not args.out.endswith(('.pkl', '.pickle')):
raise ValueError('The output file must be a pkl file.')
cfg = Config.fromfile(args.config)
if args.cfg_options is not None:
cfg.merge_from_dict(args.cfg_options)
# set cudnn_benchmark
if cfg.get('cudnn_benchmark', False):
torch.backends.cudnn.benchmark = True
cfg.model.pretrained = None
if cfg.model.get('neck'):
if isinstance(cfg.model.neck, list):
for neck_cfg in cfg.model.neck:
if neck_cfg.get('rfp_backbone'):
if neck_cfg.rfp_backbone.get('pretrained'):
neck_cfg.rfp_backbone.pretrained = None
elif cfg.model.neck.get('rfp_backbone'):
if cfg.model.neck.rfp_backbone.get('pretrained'):
cfg.model.neck.rfp_backbone.pretrained = None
# in case the test dataset is concatenated
samples_per_gpu = 1
if isinstance(cfg.data.test, dict):
cfg.data.test.test_mode = True
samples_per_gpu = cfg.data.test.pop('samples_per_gpu', 1)
if samples_per_gpu > 1:
# Replace 'ImageToTensor' to 'DefaultFormatBundle'
cfg.data.test.pipeline = replace_ImageToTensor(
cfg.data.test.pipeline)
elif isinstance(cfg.data.test, list):
for ds_cfg in cfg.data.test:
ds_cfg.test_mode = True
samples_per_gpu = max(
[ds_cfg.pop('samples_per_gpu', 1) for ds_cfg in cfg.data.test])
if samples_per_gpu > 1:
for ds_cfg in cfg.data.test:
ds_cfg.pipeline = replace_ImageToTensor(ds_cfg.pipeline)
# init distributed env first, since logger depends on the dist info.
if args.launcher == 'none':
distributed = False
else:
distributed = True
init_dist(args.launcher, **cfg.dist_params)
rank, _ = get_dist_info()
# allows not to create
if args.work_dir is not None and rank == 0:
mmcv.mkdir_or_exist(osp.abspath(args.work_dir))
timestamp = time.strftime('%Y%m%d_%H%M%S', time.localtime())
json_file = osp.join(args.work_dir, f'eval_{timestamp}.json')
# build the dataloader
dataset = build_dataset(cfg.data.test)
data_loader = build_dataloader(dataset,
samples_per_gpu=samples_per_gpu,
workers_per_gpu=cfg.data.workers_per_gpu,
dist=distributed,
shuffle=False)
# build the model and load checkpoint
cfg.model.train_cfg = None
model = build_detector(cfg.model, test_cfg=cfg.get('test_cfg'))
fp16_cfg = cfg.get('fp16', None)
if fp16_cfg is not None:
wrap_fp16_model(model)
checkpoint = load_checkpoint(model, args.checkpoint, map_location='cpu')
if args.fuse_conv_bn:
model = fuse_conv_bn(model)
    # old versions did not save class info in checkpoints, this workaround is
# for backward compatibility
if 'CLASSES' in checkpoint.get('meta', {}):
model.CLASSES = checkpoint['meta']['CLASSES']
else:
model.CLASSES = dataset.CLASSES
# NOTE:
if hasattr(dataset, 'PREDICATES'):
model.PREDICATES = dataset.PREDICATES
if not distributed:
model = MMDataParallel(model, device_ids=[0])
outputs = single_gpu_test(model, data_loader, args.show, args.show_dir,
args.show_score_thr)
else:
model = MMDistributedDataParallel(
model.cuda(),
device_ids=[torch.cuda.current_device()],
broadcast_buffers=False)
outputs = multi_gpu_test(model, data_loader, args.tmpdir,
args.gpu_collect)
if args.submit:
save_results(outputs)
rank, _ = get_dist_info()
if rank == 0:
if args.out:
print(f'\nwriting results to {args.out}')
mmcv.dump(outputs, args.out)
kwargs = {} if args.eval_options is None else args.eval_options
if args.format_only:
dataset.format_results(outputs, **kwargs)
if args.eval:
eval_kwargs = cfg.get('evaluation', {}).copy()
# hard-code way to remove EvalHook args
for key in [
'interval', 'tmpdir', 'start', 'gpu_collect', 'save_best',
'rule', 'dynamic_intervals'
]:
eval_kwargs.pop(key, None)
eval_kwargs.update(dict(metric=args.eval, **kwargs))
metric = dataset.evaluate(outputs, **eval_kwargs)
print(metric)
metric_dict = dict(config=args.config, metric=metric)
if args.work_dir is not None and rank == 0:
mmcv.dump(metric_dict, json_file)
if __name__ == '__main__':
main()
| 9,830 | 39.126531 | 112 | py |
OpenPSG | OpenPSG-main/tools/grade.py | # Copyright (c) OpenMMLab. All rights reserved.
import argparse
import json
import os
import random
import numpy as np
import PIL
from mmcv import Config
from mmdet.datasets.coco_panoptic import INSTANCE_OFFSET
from panopticapi.utils import rgb2id
from PIL import Image
from tqdm import tqdm
from openpsg.datasets import build_dataset
from openpsg.models.relation_heads.approaches import Result
def save_results(results):
all_img_dicts = []
if not os.path.isdir('submission/panseg/'):
os.makedirs('submission/panseg/')
for idx, result in enumerate(results):
if not isinstance(result, Result):
continue
labels = result.labels
rels = result.rels
masks = result.masks
segments_info = []
img = np.full(masks.shape[1:3], 0)
for label, mask in zip(labels, masks):
r, g, b = random.choices(range(0, 255), k=3)
coloring_mask = 1 * np.vstack([[mask]] * 3)
for j, color in enumerate([r, g, b]):
coloring_mask[j, :, :] = coloring_mask[j, :, :] * color
img = img + coloring_mask
segment = dict(category_id=int(label), id=rgb2id((r, g, b)))
segments_info.append(segment)
image_path = 'submission/panseg/%d.png' % idx
# image_array = np.uint8(img).transpose((2,1,0))
image_array = np.uint8(img).transpose((1, 2, 0))
PIL.Image.fromarray(image_array).save(image_path)
single_result_dict = dict(
relations=rels.astype(np.int32).tolist(),
segments_info=segments_info,
pan_seg_file_name='%d.png' % idx,
)
all_img_dicts.append(single_result_dict)
if not os.path.isdir('submission'):
os.mkdir('submission')
with open('submission/relation.json', 'w') as outfile:
json.dump(all_img_dicts, outfile, default=str)
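# Hedged illustration of the colour/id convention used above: panopticapi's
# rgb2id packs an (R, G, B) triple into one integer as R + 256*G + 256**2*B,
# so the random colour painted for each segment doubles as its id in
# segments_info (load_results inverts it with the same helper).
def _example_rgb2id():
    r, g, b = 10, 20, 30
    seg_id = rgb2id((r, g, b))
    return seg_id  # == 10 + 256 * 20 + 256 ** 2 * 30 == 1971210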
def load_results(loadpath):
with open(os.path.join(loadpath, 'relation.json')) as infile:
all_img_dicts = json.load(infile)
results = []
for single_result_dict in tqdm(all_img_dicts,
desc='Loading results from json...'):
pan_seg_filename = single_result_dict['pan_seg_file_name']
pan_seg_filename = os.path.join(loadpath, 'panseg', pan_seg_filename)
pan_seg_img = np.array(Image.open(pan_seg_filename))
pan_seg_img = pan_seg_img.copy() # (H, W, 3)
seg_map = rgb2id(pan_seg_img)
segments_info = single_result_dict['segments_info']
num_obj = len(segments_info)
# get separate masks
labels = []
masks = []
for _, s in enumerate(segments_info):
label = int(s['category_id'])
labels.append(label) # TODO:1-index for gt?
masks.append(seg_map == s['id'])
count = dict()
pan_result = seg_map.copy()
for _, s in enumerate(segments_info):
label = int(s['category_id'])
if label not in count.keys():
count[label] = 0
pan_result[seg_map == int(
s['id']
)] = label - 1 + count[label] * INSTANCE_OFFSET # change index?
count[label] += 1
rel_array = np.asarray(single_result_dict['relations'])
if len(rel_array) > 20:
rel_array = rel_array[:20]
rel_dists = np.zeros((len(rel_array), 57))
for idx_rel, rel in enumerate(rel_array):
rel_dists[idx_rel, rel[2]] += 1 # TODO:1-index for gt?
result = Result(
rels=rel_array,
rel_pair_idxes=rel_array[:, :2],
masks=masks,
labels=np.asarray(labels),
rel_dists=rel_dists,
refine_bboxes=np.ones((num_obj, 5)),
pan_results=pan_result,
)
results.append(result)
return results
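# Hedged sketch of the panoptic id layout built in load_results: each pixel of
# pan_result stores (0-indexed category) + (per-category instance counter) *
# INSTANCE_OFFSET, so category and instance index can be recovered with modulo
# and integer division. INSTANCE_OFFSET is taken from mmdet's coco_panoptic
# module; the values below are illustrative only.
def _example_pan_result_encoding():
    label, instance_counter = 17, 2
    pixel_value = label - 1 + instance_counter * INSTANCE_OFFSET
    category = pixel_value % INSTANCE_OFFSET    # 16, i.e. label - 1
    instance = pixel_value // INSTANCE_OFFSET   # 2
    return category, instance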
def parse_args():
parser = argparse.ArgumentParser(description='MMDet eval a model')
parser.add_argument('input_path', help='input file path')
parser.add_argument('output_path', help='output file path')
args = parser.parse_args()
return args
def main():
args = parse_args()
cfg = Config.fromfile('configs/_base_/datasets/psg_val.py')
dataset = build_dataset(cfg.data.test)
outputs = load_results(args.input_path)
metric1 = dataset.evaluate(outputs, **cfg.evaluation1)
metric2 = dataset.evaluate(outputs, **cfg.evaluation2)
output_filename = os.path.join(args.output_path, 'scores.txt')
with open(output_filename, 'w+') as f3:
f3.write('Recall R 20: {}\n'.format(metric1['sgdet_recall_R_20']))
f3.write('MeanRecall R 20: {}\n'.format(
metric1['sgdet_mean_recall_mR_20']))
f3.write('PQ: {}\n'.format(metric2['PQ']))
if __name__ == '__main__':
main()
| 4,814 | 31.979452 | 77 | py |
OpenPSG | OpenPSG-main/tools/train.py | # Copyright (c) OpenMMLab. All rights reserved.
import argparse
import copy
import os
import os.path as osp
import time
import warnings
import mmcv
import torch
from mmcv import Config, DictAction
from mmcv.runner import get_dist_info, init_dist
from mmcv.utils import get_git_hash
from mmdet import __version__
from mmdet.apis import init_random_seed, set_random_seed, train_detector
from mmdet.models import build_detector
from mmdet.utils import collect_env, get_root_logger
from openpsg.datasets import build_dataset
def parse_args():
parser = argparse.ArgumentParser(description='Train a detector')
parser.add_argument('config', help='train config file path')
parser.add_argument('--work-dir', help='the dir to save logs and models')
parser.add_argument('--resume-from',
help='the checkpoint file to resume from')
parser.add_argument(
'--no-validate',
action='store_true',
help='whether not to evaluate the checkpoint during training',
)
group_gpus = parser.add_mutually_exclusive_group()
group_gpus.add_argument(
'--gpus',
type=int,
help='number of gpus to use '
'(only applicable to non-distributed training)',
)
group_gpus.add_argument(
'--gpu-ids',
type=int,
nargs='+',
help='ids of gpus to use '
'(only applicable to non-distributed training)',
)
parser.add_argument('--seed', type=int, default=None, help='random seed')
parser.add_argument(
'--deterministic',
action='store_true',
help='whether to set deterministic options for CUDNN backend.',
)
parser.add_argument(
'--options',
nargs='+',
action=DictAction,
help='override some settings in the used config, the key-value pair '
'in xxx=yyy format will be merged into config file (deprecate), '
'change to --cfg-options instead.',
)
parser.add_argument(
'--cfg-options',
nargs='+',
action=DictAction,
help='override some settings in the used config, the key-value pair '
'in xxx=yyy format will be merged into config file. If the value to '
'be overwritten is a list, it should be like key="[a,b]" or key=a,b '
'It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]" '
'Note that the quotation marks are necessary and that no white space '
'is allowed.',
)
parser.add_argument(
'--launcher',
choices=['none', 'pytorch', 'slurm', 'mpi'],
default='none',
help='job launcher',
)
parser.add_argument('--local_rank', type=int, default=0)
args = parser.parse_args()
if 'LOCAL_RANK' not in os.environ:
os.environ['LOCAL_RANK'] = str(args.local_rank)
if args.options and args.cfg_options:
raise ValueError(
'--options and --cfg-options cannot be both '
'specified, --options is deprecated in favor of --cfg-options')
if args.options:
warnings.warn('--options is deprecated in favor of --cfg-options')
args.cfg_options = args.options
return args
def main():
args = parse_args()
cfg = Config.fromfile(args.config)
if args.cfg_options is not None:
cfg.merge_from_dict(args.cfg_options)
# set cudnn_benchmark
if cfg.get('cudnn_benchmark', False):
torch.backends.cudnn.benchmark = True
# work_dir is determined in this priority: CLI > segment in file > filename
if args.work_dir is not None:
# update configs according to CLI args if args.work_dir is not None
cfg.work_dir = args.work_dir
elif cfg.get('work_dir', None) is None:
# use config filename as default work_dir if cfg.work_dir is None
cfg.work_dir = osp.join('./work_dirs',
osp.splitext(osp.basename(args.config))[0])
if args.resume_from is not None:
cfg.resume_from = args.resume_from
if args.gpu_ids is not None:
cfg.gpu_ids = args.gpu_ids
else:
cfg.gpu_ids = range(1) if args.gpus is None else range(args.gpus)
# init distributed env first, since logger depends on the dist info.
if args.launcher == 'none':
distributed = False
else:
distributed = True
init_dist(args.launcher, **cfg.dist_params)
# re-set gpu_ids with distributed training mode
_, world_size = get_dist_info()
cfg.gpu_ids = range(world_size)
# create work_dir
mmcv.mkdir_or_exist(osp.abspath(cfg.work_dir))
# dump config
cfg.dump(osp.join(cfg.work_dir, osp.basename(args.config)))
# init the logger before other steps
timestamp = time.strftime('%Y%m%d_%H%M%S', time.localtime())
log_file = osp.join(cfg.work_dir, f'{timestamp}.log')
logger = get_root_logger(log_file=log_file, log_level=cfg.log_level)
# init the meta dict to record some important information such as
# environment info and seed, which will be logged
meta = dict()
# log env info
env_info_dict = collect_env()
env_info = '\n'.join([(f'{k}: {v}') for k, v in env_info_dict.items()])
dash_line = '-' * 60 + '\n'
logger.info('Environment info:\n' + dash_line + env_info + '\n' +
dash_line)
meta['env_info'] = env_info
meta['config'] = cfg.pretty_text
# log some basic info
logger.info(f'Distributed training: {distributed}')
logger.info(f'Config:\n{cfg.pretty_text}')
# set random seeds
seed = init_random_seed(args.seed)
logger.info(f'Set random seed to {seed}, '
f'deterministic: {args.deterministic}')
set_random_seed(seed, deterministic=args.deterministic)
cfg.seed = seed
meta['seed'] = seed
meta['exp_name'] = osp.basename(args.config)
dataset = build_dataset(cfg.data.train)
if hasattr(cfg, 'dataset_config'):
cache_dir = cfg.dataset_config['cache']
print('Loading Statistics...')
if cache_dir is None:
raise FileNotFoundError(
'The cache_dir for caching the statistics is not provided.')
if not os.path.exists(cache_dir):
result = dataset.get_statistics()
statistics = {
'freq_matrix': result['freq_matrix'],
'pred_dist': result['pred_dist'],
}
torch.save(statistics, cache_dir)
print('\n Statistics created!')
model = build_detector(cfg.model,
train_cfg=cfg.get('train_cfg'),
test_cfg=cfg.get('test_cfg'))
model.init_weights()
# NOTE: Freeze weights here
if hasattr(cfg, 'freeze_modules'):
if cfg.freeze_modules is not None:
for module_name in cfg.freeze_modules:
for name, p in model.named_parameters():
if name.startswith(module_name):
p.requires_grad = False
# Unfreeze weights here
if hasattr(cfg, 'required_grad_modules'):
if cfg.required_grad_modules is not None:
for module_name in cfg.required_grad_modules:
for name, p in model.named_parameters():
if name.startswith(module_name):
p.requires_grad = True
datasets = [dataset]
if len(cfg.workflow) == 2:
val_dataset = copy.deepcopy(cfg.data.val)
val_dataset.pipeline = cfg.data.train.pipeline
datasets.append(build_dataset(val_dataset))
if cfg.checkpoint_config is not None:
# save mmdet version, config file content and class names in
# checkpoints as meta data
cfg.checkpoint_config.meta = dict(mmdet_version=__version__ +
get_git_hash()[:7],
CLASSES=datasets[0].CLASSES)
# add an attribute for visualization convenience
model.CLASSES = datasets[0].CLASSES
train_detector(
model,
datasets,
cfg,
distributed=distributed,
validate=(not args.no_validate),
timestamp=timestamp,
meta=meta,
)
if __name__ == '__main__':
main()
| 8,188 | 35.234513 | 79 | py |
OpenPSG | OpenPSG-main/openpsg/version.py | # Copyright (c) OpenMMLab. All rights reserved.
__version__ = '0.5.0'
short_version = __version__
def parse_version_info(version_str):
version_info = []
for x in version_str.split('.'):
if x.isdigit():
version_info.append(int(x))
elif x.find('rc') != -1:
patch_version = x.split('rc')
version_info.append(int(patch_version[0]))
version_info.append(f'rc{patch_version[1]}')
return tuple(version_info)
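# Hedged examples of the parsing above: plain releases give integer tuples and
# release candidates keep the 'rcN' suffix as a trailing string element.
def _example_parse_version_info():
    assert parse_version_info('0.5.0') == (0, 5, 0)
    assert parse_version_info('2.0.0rc1') == (2, 0, 0, 'rc1')
    return True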
version_info = parse_version_info(__version__) | 527 | 26.789474 | 56 | py |
OpenPSG | OpenPSG-main/openpsg/__init__.py | 0 | 0 | 0 | py |
|
OpenPSG | OpenPSG-main/openpsg/evaluation/sgg_eval.py | # ---------------------------------------------------------------
# vg_eval.py
# Set-up time: 2020/5/18 9:48 AM
# Copyright (c) 2020 ICT
# Licensed under The MIT License [see LICENSE for details]
# Written by Kenneth-Wong (Wenbin-Wang) @ VIPL.ICT
# Contact: wenbin.wang@vipl.ict.ac.cn [OR] nkwangwenbin@gmail.com
# ---------------------------------------------------------------
import json
import os
import mmcv
import numpy as np
import torch
from mmcv.utils import print_log
from .sgg_metrics import (SGAccumulateRecall, SGMeanRecall,
SGNoGraphConstraintRecall, SGPairAccuracy, SGRecall,
SGZeroShotRecall)
def sgg_evaluation(
mode,
groundtruths,
predictions,
iou_thrs,
logger,
ind_to_predicates,
multiple_preds=False,
predicate_freq=None,
nogc_thres_num=None,
detection_method='bbox',
):
modes = mode if isinstance(mode, list) else [mode]
result_container = dict()
for m in modes:
msg = 'Evaluating {}...'.format(m)
if logger is None:
msg = '\n' + msg
print_log(msg, logger=logger)
single_result_dict = vg_evaluation_single(
m,
groundtruths,
predictions,
iou_thrs,
logger,
ind_to_predicates,
multiple_preds,
predicate_freq,
nogc_thres_num,
detection_method,
)
result_container.update(single_result_dict)
return result_container
def vg_evaluation_single(
mode,
groundtruths,
predictions,
iou_thrs,
logger,
ind_to_predicates,
multiple_preds=False,
predicate_freq=None,
nogc_thres_num=None,
detection_method='bbox',
):
# # get zeroshot triplet
num_predicates = len(ind_to_predicates)
assert isinstance(nogc_thres_num,
(list, tuple, int)) or nogc_thres_num is None
if nogc_thres_num is None:
nogc_thres_num = [num_predicates - 1] # default: all
elif isinstance(nogc_thres_num, int):
nogc_thres_num = [nogc_thres_num]
else:
pass
result_str = '\n' + '=' * 100 + '\n'
result_dict = {}
nogc_result_dict = {}
evaluator = {}
    # traditional Recall@K
eval_recall = SGRecall(result_dict,
nogc_result_dict,
nogc_thres_num,
detection_method=detection_method)
eval_recall.register_container(mode)
evaluator['eval_recall'] = eval_recall
# used by https://github.com/NVIDIA/ContrastiveLosses4VRD for sgcls and predcls
eval_pair_accuracy = SGPairAccuracy(result_dict, nogc_result_dict,
nogc_thres_num, detection_method)
eval_pair_accuracy.register_container(mode)
evaluator['eval_pair_accuracy'] = eval_pair_accuracy
# used for meanRecall@K
eval_mean_recall = SGMeanRecall(
result_dict,
nogc_result_dict,
nogc_thres_num,
num_predicates,
ind_to_predicates,
detection_method=detection_method,
print_detail=True,
)
eval_mean_recall.register_container(mode)
evaluator['eval_mean_recall'] = eval_mean_recall
# prepare all inputs
global_container = {}
# global_container["zeroshot_triplet"] = zeroshot_triplet
global_container['result_dict'] = result_dict
global_container['mode'] = mode
global_container['multiple_preds'] = multiple_preds
global_container['num_predicates'] = num_predicates
global_container['iou_thrs'] = iou_thrs
# global_container['attribute_on'] = attribute_on
# global_container['num_attributes'] = num_attributes
pbar = mmcv.ProgressBar(len(groundtruths))
for groundtruth, prediction in zip(groundtruths, predictions):
# Skip empty predictions
if prediction.refine_bboxes is None:
continue
evaluate_relation_of_one_image(groundtruth, prediction,
global_container, evaluator)
pbar.update()
# calculate mean recall
eval_mean_recall.calculate_mean_recall(mode)
# print result
result_str += eval_recall.generate_print_string(mode)
result_str += eval_mean_recall.generate_print_string(mode, predicate_freq)
if mode != 'sgdet':
result_str += eval_pair_accuracy.generate_print_string(mode)
result_str += '=' * 100 + '\n'
if logger is None:
result_str = '\n' + result_str
print_log(result_str, logger=logger)
return format_result_dict(result_dict, result_str, mode)
def format_result_dict(result_dict, result_str, mode):
"""
Function:
This is used for getting the results in both float data form and text
form so that they can be logged into tensorboard (scalar and text).
Here we only log the graph constraint results excluding phrdet.
"""
formatted = dict()
copy_stat_str = ''
# Traditional Recall
for k, v in result_dict[mode + '_recall'].items():
formatted[mode + '_recall_' + 'R_%d' % k] = np.mean(v)
copy_stat_str += (mode + '_recall_' + 'R_%d: ' % k +
'{:0.3f}'.format(np.mean(v)) + '\n')
# mean recall
for k, v in result_dict[mode + '_mean_recall'].items():
formatted[mode + '_mean_recall_' + 'mR_%d' % k] = float(v)
copy_stat_str += (mode + '_mean_recall_' + 'mR_%d: ' % k +
'{:0.3f}'.format(float(v)) + '\n')
if mode != 'sgdet':
# Accuracy
for k, v in result_dict[mode + '_accuracy_hit'].items():
a_hit = np.mean(v)
a_count = np.mean(result_dict[mode + '_accuracy_count'][k])
formatted[mode + '_accuracy_hit_' + 'A_%d' % k] = a_hit / a_count
copy_stat_str += (mode + '_accuracy_hit_' + 'A_%d: ' % k +
'{:0.3f}'.format(a_hit / a_count) + '\n')
formatted[mode + '_copystat'] = copy_stat_str
formatted[mode + '_runtime_eval_str'] = result_str
return formatted
def evaluate_relation_of_one_image(groundtruth, prediction, global_container,
evaluator):
"""
Returns:
pred_to_gt: Matching from predicate to GT
pred_5ples: the predicted (id0, id1, cls0, cls1, rel)
pred_triplet_scores: [cls_0score, relscore, cls1_score]
"""
# unpack all inputs
mode = global_container['mode']
local_container = {}
local_container['gt_rels'] = groundtruth.rels
# if there is no gt relations for current image, then skip it
if len(local_container['gt_rels']) == 0:
return
local_container['gt_boxes'] = groundtruth.bboxes # (#gt_objs, 4)
local_container['gt_classes'] = groundtruth.labels # (#gt_objs, )
# about relations
local_container[
'pred_rel_inds'] = prediction.rel_pair_idxes # (#pred_rels, 2)
local_container[
'rel_scores'] = prediction.rel_dists # (#pred_rels, num_pred_class)
# about objects
local_container[
'pred_boxes'] = prediction.refine_bboxes[:, :4] # (#pred_objs, 4)
local_container['pred_classes'] = prediction.labels # (#pred_objs, )
local_container[
'obj_scores'] = prediction.refine_bboxes[:, -1] # (#pred_objs, )
# about pan_seg masks
local_container['gt_masks'] = groundtruth.masks
local_container['pred_masks'] = prediction.masks
# to calculate accuracy, only consider those gt pairs
# This metric is used by "Graphical Contrastive Losses for Scene Graph
# Parsing"
# for sgcls and predcls
if mode != 'sgdet':
evaluator['eval_pair_accuracy'].prepare_gtpair(local_container)
# to calculate the prior label based on statistics
# evaluator["eval_zeroshot_recall"].prepare_zeroshot(
# global_container, local_container
# )
if mode == 'predcls':
local_container['pred_boxes'] = local_container['gt_boxes']
local_container['pred_classes'] = local_container['gt_classes']
local_container['obj_scores'] = np.ones(
local_container['gt_classes'].shape[0])
elif mode == 'sgcls':
if (local_container['gt_boxes'].shape[0] !=
local_container['pred_boxes'].shape[0]):
print(
'Num of GT boxes is not matching with num of pred boxes in SGCLS'
)
elif mode == 'sgdet' or mode == 'phrdet':
pass
else:
raise ValueError('invalid mode')
"""
elif mode == 'preddet':
# Only extract the indices that appear in GT
prc = intersect_2d(pred_rel_inds, gt_rels[:, :2])
if prc.size == 0:
for k in result_dict[mode + '_recall']:
result_dict[mode + '_recall'][k].append(0.0)
return None, None, None
pred_inds_per_gt = prc.argmax(0)
pred_rel_inds = pred_rel_inds[pred_inds_per_gt]
rel_scores = rel_scores[pred_inds_per_gt]
# Now sort the matching ones
rel_scores_sorted = argsort_desc(rel_scores[:,1:])
rel_scores_sorted[:,1] += 1
rel_scores_sorted = np.column_stack(
(pred_rel_inds[rel_scores_sorted[:,0]], rel_scores_sorted[:,1]))
matches = intersect_2d(rel_scores_sorted, gt_rels)
for k in result_dict[mode + '_recall']:
rec_i = float(matches[:k].any(0).sum()) / float(gt_rels.shape[0])
result_dict[mode + '_recall'][k].append(rec_i)
return None, None, None
"""
if local_container['pred_rel_inds'].shape[0] == 0:
return
# Traditional Metric with Graph Constraint
# NOTE: this is the MAIN evaluation function, it must be run first
# (several important variables need to be update)
local_container = evaluator['eval_recall'].calculate_recall(
global_container, local_container, mode)
# No Graph Constraint
# evaluator['eval_nog_recall'].calculate_recall(global_container,
# local_container, mode)
# Zero shot Recall
# evaluator["eval_zeroshot_recall"].calculate_recall(
# global_container, local_container, mode
# )
# GT Pair Accuracy
evaluator['eval_pair_accuracy'].calculate_recall(global_container,
local_container, mode)
# Mean Recall
evaluator['eval_mean_recall'].collect_mean_recall_items(
global_container, local_container, mode)
return
def convert_relation_matrix_to_triplets(relation):
triplets = []
for i in range(len(relation)):
for j in range(len(relation)):
if relation[i, j] > 0:
triplets.append((i, j, relation[i, j]))
return torch.LongTensor(triplets) # (num_rel, 3)
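# Hedged, hand-sized illustration of the mapping implemented above: a nonzero
# entry relation[i, j] = p reads as "object i relates to object j by predicate
# p", so the matrix below yields the triplets (0, 1, 2) and (2, 0, 5).
def _example_relation_matrix_to_triplets():
    relation = np.array([[0, 2, 0],
                         [0, 0, 0],
                         [5, 0, 0]])
    return [(i, j, int(relation[i, j]))
            for i in range(relation.shape[0])
            for j in range(relation.shape[1])
            if relation[i, j] > 0]  # [(0, 1, 2), (2, 0, 5)]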
def generate_attributes_target(attributes, num_attributes):
"""from list of attribute indexes to [1,0,1,0,...,0,1] form."""
max_att = attributes.shape[1]
num_obj = attributes.shape[0]
with_attri_idx = (attributes.sum(-1) > 0).long()
without_attri_idx = 1 - with_attri_idx
num_pos = int(with_attri_idx.sum())
num_neg = int(without_attri_idx.sum())
assert num_pos + num_neg == num_obj
attribute_targets = torch.zeros((num_obj, num_attributes),
device=attributes.device).float()
for idx in torch.nonzero(with_attri_idx).squeeze(1).tolist():
for k in range(max_att):
att_id = int(attributes[idx, k])
if att_id == 0:
break
else:
attribute_targets[idx, att_id] = 1
return attribute_targets
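# Hedged example of the padding convention handled above: attribute id 0 acts
# as the terminator, so the row [3, 5, 0] means "attributes 3 and 5" and an
# all-zero row means "no attributes"; num_attributes=10 is illustrative.
def _example_generate_attributes_target():
    attributes = torch.tensor([[3, 5, 0],
                               [0, 0, 0]])
    targets = generate_attributes_target(attributes, num_attributes=10)
    return targets  # row 0 has ones at columns 3 and 5, row 1 is all zeros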
| 11,583 | 33.47619 | 83 | py |
OpenPSG | OpenPSG-main/openpsg/evaluation/sgg_metrics.py | # ---------------------------------------------------------------
# sgg_eval.py
# Set-up time: 2020/5/18 9:49 AM
# Copyright (c) 2020 ICT
# Licensed under The MIT License [see LICENSE for details]
# Written by Kenneth-Wong (Wenbin-Wang) @ VIPL.ICT
# Contact: wenbin.wang@vipl.ict.ac.cn [OR] nkwangwenbin@gmail.com
# ---------------------------------------------------------------
import itertools
from abc import ABC, abstractmethod
from functools import reduce
import numpy as np
import torch
from mmdet.core import bbox_overlaps
from terminaltables import AsciiTable
from .sgg_eval_util import argsort_desc, intersect_2d
basic_dict = {20: [], 50: [], 100: []}
class SceneGraphEvaluation(ABC):
def __init__(self,
result_dict,
nogc_result_dict,
nogc_thres_num,
detection_method='bbox'):
super().__init__()
self.result_dict = result_dict
self.nogc_result_dict = nogc_result_dict
self.nogc_thres_num = nogc_thres_num
self.detection_method = detection_method
if detection_method not in ('bbox', 'pan_seg'):
print('invalid detection method. using bbox instead.')
self.detection_method = detection_method = 'bbox'
if detection_method == 'bbox':
self.generate_triplet = _triplet_bbox
self.compute_pred_matches = _compute_pred_matches_bbox
elif detection_method == 'pan_seg':
self.generate_triplet = _triplet_panseg
self.compute_pred_matches = _compute_pred_matches_panseg
@abstractmethod
def register_container(self, mode):
print('Register Result Container')
pass
@abstractmethod
def generate_print_string(self, mode):
print('Generate Print String')
pass
"""
Traditional Recall, implemented based on:
https://github.com/rowanz/neural-motifs
"""
class SGRecall(SceneGraphEvaluation):
def __init__(self, *args, **kwargs):
super(SGRecall, self).__init__(*args, **kwargs)
def register_container(self, mode):
self.result_dict[mode + '_recall'] = {20: [], 50: [], 100: []}
self.nogc_result_dict[mode + '_recall'] = {
ngc: {
20: [],
50: [],
100: []
}
for ngc in self.nogc_thres_num
}
if mode == 'sgdet':
self.result_dict['phrdet_recall'] = {20: [], 50: [], 100: []}
self.nogc_result_dict['phrdet_recall'] = {
ngc: {
20: [],
50: [],
100: []
}
for ngc in self.nogc_thres_num
}
def _calculate_single(self,
target_dict,
prediction_to_gt,
gt_rels,
mode,
nogc_num=None):
target = target_dict[mode +
'_recall'] if nogc_num is None else target_dict[
mode + '_recall'][nogc_num]
for k in target:
# the following code are copied from Neural-MOTIFS
match = reduce(np.union1d, prediction_to_gt[:k])
rec_i = float(len(match)) / float(gt_rels.shape[0])
target[k].append(rec_i)
def _print_single(self, target_dict, mode, nogc_num=None):
target = target_dict[mode +
'_recall'] if nogc_num is None else target_dict[
mode + '_recall'][nogc_num]
result_str = 'SGG eval: '
for k, v in target.items():
result_str += ' R @ %d: %.4f; ' % (k, np.mean(v))
suffix_type = 'Recall.' if nogc_num is None else 'NoGraphConstraint @ %d Recall.' % nogc_num
result_str += ' for mode=%s, type=%s' % (mode, suffix_type)
result_str += '\n'
return result_str
def generate_print_string(self, mode):
result_str = self._print_single(self.result_dict, mode)
if mode == 'sgdet':
result_str += self._print_single(self.result_dict, 'phrdet')
# nogc
for nogc_num in self.nogc_thres_num:
result_str += self._print_single(self.nogc_result_dict, mode,
nogc_num)
if mode == 'sgdet':
result_str += self._print_single(self.nogc_result_dict,
'phrdet', nogc_num)
return result_str
def calculate_recall(self, global_container, local_container, mode):
pred_rel_inds = local_container['pred_rel_inds']
rel_scores = local_container['rel_scores']
gt_rels = local_container['gt_rels']
gt_classes = local_container['gt_classes']
gt_boxes = local_container['gt_boxes']
pred_classes = local_container['pred_classes']
pred_boxes = local_container['pred_boxes']
obj_scores = local_container['obj_scores']
pred_masks = local_container['pred_masks']
gt_masks = local_container['gt_masks']
if mode == 'predcls':
pred_masks = gt_masks
iou_thrs = global_container['iou_thrs']
nogc_thres_num = self.nogc_thres_num
if self.detection_method == 'bbox':
gt_det_results = gt_boxes
if self.detection_method == 'pan_seg':
gt_det_results = gt_masks
gt_triplets, gt_triplet_det_results, _ = self.generate_triplet(
gt_rels, gt_classes, gt_det_results)
local_container['gt_triplets'] = gt_triplets
local_container['gt_triplet_det_results'] = gt_triplet_det_results
# if self.detection_method == 'bbox':
# local_container['gt_triplet_boxes'] = gt_triplet_det_results
# if self.detection_method == 'pan_seg':
# local_container['gt_triplet_masks'] = gt_triplet_det_results
# compute the graph constraint setting pred_rels
pred_rels = np.column_stack(
(pred_rel_inds, 1 + rel_scores[:, 1:].argmax(1)))
pred_scores = rel_scores[:, 1:].max(1)
if self.detection_method == 'bbox':
pred_det_results = pred_boxes
if self.detection_method == 'pan_seg':
pred_det_results = pred_masks
pred_triplets, pred_triplet_det_results, _ = \
self.generate_triplet(
pred_rels, pred_classes, pred_det_results, pred_scores, obj_scores)
# Compute recall. It's most efficient to match once and then do recall after
# if mode is sgdet, report both sgdet and phrdet
pred_to_gt = self.compute_pred_matches(
gt_triplets,
pred_triplets,
gt_triplet_det_results,
pred_triplet_det_results,
iou_thrs,
phrdet=False,
)
local_container['pred_to_gt'] = pred_to_gt
self._calculate_single(self.result_dict, pred_to_gt, gt_rels, mode)
if mode == 'sgdet':
pred_to_gt = self.compute_pred_matches(gt_triplets,
pred_triplets,
gt_triplet_det_results,
pred_triplet_det_results,
iou_thrs,
phrdet=True)
local_container['phrdet_pred_to_gt'] = pred_to_gt
self._calculate_single(self.result_dict,
pred_to_gt,
gt_rels,
mode='phrdet')
if self.detection_method != 'pan_seg':
# compute the no graph constraint setting pred_rels
obj_scores_per_rel = obj_scores[pred_rel_inds].prod(1)
nogc_overall_scores = obj_scores_per_rel[:, None] * rel_scores[:,
1:]
sorted_inds = np.argsort(nogc_overall_scores, axis=-1)[:, ::-1]
sorted_nogc_overall_scores = np.sort(nogc_overall_scores,
axis=-1)[:, ::-1]
gt_pair_idx = gt_rels[:, 0] * 10000 + gt_rels[:, 1]
for nogc_num in nogc_thres_num:
nogc_score_inds_ = argsort_desc(
sorted_nogc_overall_scores[:, :nogc_num])
nogc_pred_rels = np.column_stack(
(pred_rel_inds[nogc_score_inds_[:, 0]],
sorted_inds[nogc_score_inds_[:, 0],
nogc_score_inds_[:, 1]] + 1))
nogc_pred_scores = rel_scores[
nogc_score_inds_[:, 0],
sorted_inds[nogc_score_inds_[:, 0],
nogc_score_inds_[:, 1]] + 1]
pred_triplets, pred_triplet_det_results, pred_triplet_scores =\
self.generate_triplet(
nogc_pred_rels, pred_classes, pred_det_results,
nogc_pred_scores, obj_scores)
# prepare the gt rel signal to be used in PairAccuracy:
pred_pair_idx = nogc_pred_rels[:,
0] * 10000 + nogc_pred_rels[:,
1]
local_container['nogc@%d_pred_pair_in_gt' % nogc_num] = \
(pred_pair_idx[:, None] == gt_pair_idx[None, :]).sum(-1) > 0
# Compute recall. It's most efficient to match once and then do recall after
pred_to_gt = self.compute_pred_matches(
gt_triplets,
pred_triplets,
gt_triplet_det_results,
pred_triplet_det_results,
iou_thrs,
phrdet=False,
)
# NOTE: For NGC recall, zs recall, mean recall, only need to crop the top 100 triplets.
# While for computing the Pair Accuracy, all of the pairs are needed here.
local_container['nogc@%d_pred_to_gt' %
nogc_num] = pred_to_gt[:100] # for zR, mR, R
local_container['nogc@%d_all_pred_to_gt' %
nogc_num] = pred_to_gt # for Pair accuracy
self._calculate_single(self.nogc_result_dict, pred_to_gt[:100],
gt_rels, mode, nogc_num)
if mode == 'sgdet':
pred_to_gt = self.compute_pred_matches(
gt_triplets,
pred_triplets,
gt_triplet_det_results,
pred_triplet_det_results,
iou_thrs,
phrdet=True,
)
local_container['phrdet_nogc@%d_pred_to_gt' %
nogc_num] = pred_to_gt[:100]
local_container['phrdet_nogc@%d_all_pred_to_gt' %
nogc_num] = pred_to_gt # for Pair accuracy
self._calculate_single(self.nogc_result_dict,
pred_to_gt[:100],
gt_rels,
mode='phrdet',
nogc_num=nogc_num)
return local_container
"""
No Graph Constraint Recall, implemented based on:
https://github.com/rowanz/neural-motifs
"""
class SGNoGraphConstraintRecall(SceneGraphEvaluation):
def __init__(self, result_dict):
super(SGNoGraphConstraintRecall, self).__init__(result_dict)
def register_container(self, mode):
self.result_dict[mode + '_recall_nogc'] = {20: [], 50: [], 100: []}
def generate_print_string(self, mode):
result_str = 'SGG eval: '
for k, v in self.result_dict[mode + '_recall_nogc'].items():
result_str += 'ngR @ %d: %.4f; ' % (k, np.mean(v))
result_str += ' for mode=%s, type=No Graph Constraint Recall(Main).' % mode
result_str += '\n'
return result_str
def calculate_recall(self, global_container, local_container, mode):
obj_scores = local_container['obj_scores']
pred_rel_inds = local_container['pred_rel_inds']
rel_scores = local_container['rel_scores']
pred_boxes = local_container['pred_boxes']
pred_masks = local_container['pred_masks']
if mode == 'predcls':
pred_masks = local_container['gt_masks']
pred_classes = local_container['pred_classes']
gt_rels = local_container['gt_rels']
obj_scores_per_rel = obj_scores[pred_rel_inds].prod(1)
nogc_overall_scores = obj_scores_per_rel[:, None] * rel_scores[:, 1:]
nogc_score_inds = argsort_desc(nogc_overall_scores)[:100]
nogc_pred_rels = np.column_stack(
(pred_rel_inds[nogc_score_inds[:, 0]], nogc_score_inds[:, 1] + 1))
nogc_pred_scores = rel_scores[nogc_score_inds[:, 0],
nogc_score_inds[:, 1] + 1]
if self.detection_method == 'bbox':
pred_det_results = pred_boxes
if self.detection_method == 'pan_seg':
pred_det_results = pred_masks
nogc_pred_triplets, nogc_pred_triplet_det_results, _ = self.generate_triplet(
nogc_pred_rels, pred_classes, pred_det_results, nogc_pred_scores,
obj_scores)
# No Graph Constraint
gt_triplets = local_container['gt_triplets']
gt_triplet_det_results = local_container['gt_triplet_det_results']
iou_thrs = global_container['iou_thrs']
nogc_pred_to_gt = self.compute_pred_matches(
gt_triplets,
nogc_pred_triplets,
gt_triplet_det_results,
nogc_pred_triplet_det_results,
iou_thrs,
phrdet=mode == 'phrdet',
)
for k in self.result_dict[mode + '_recall_nogc']:
match = reduce(np.union1d, nogc_pred_to_gt[:k])
rec_i = float(len(match)) / float(gt_rels.shape[0])
self.result_dict[mode + '_recall_nogc'][k].append(rec_i)
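# Illustrative sketch (added for clarity, not part of the original OpenPSG
# module): the difference between graph-constraint and no-graph-constraint
# scoring used above. With the constraint, each object pair keeps only its
# argmax predicate; without it, every (pair, predicate) combination competes
# and the globally top-scoring triplets are kept. The scores and the helper
# name `_demo_no_graph_constraint` are made up for illustration.
def _demo_no_graph_constraint():
    rel_scores = np.array([[0.1, 0.7, 0.2],   # pair 0: scores of 3 predicates
                           [0.6, 0.3, 0.1]])  # pair 1
    gc_preds = rel_scores.argmax(1)           # one predicate per pair: [1, 0]
    ngc_inds = argsort_desc(rel_scores)[:3]   # top (pair, predicate) combos
    return gc_preds, ngc_inds                 # ngc_inds: [[0, 1], [1, 0], [1, 1]]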
"""
Zero Shot Scene Graph
Only calculates triplets that did not occur in the training set
"""
class SGZeroShotRecall(SceneGraphEvaluation):
def __init__(self, *args, **kwargs):
super(SGZeroShotRecall, self).__init__(*args, **kwargs)
def register_container(self, mode):
self.result_dict[mode + '_zeroshot_recall'] = {20: [], 50: [], 100: []}
self.nogc_result_dict[mode + '_zeroshot_recall'] = {
ngc: {
20: [],
50: [],
100: []
}
for ngc in self.nogc_thres_num
}
if mode == 'sgdet':
self.result_dict['phrdet_zeroshot_recall'] = {
20: [],
50: [],
100: []
}
self.nogc_result_dict['phrdet_zeroshot_recall'] = {
ngc: {
20: [],
50: [],
100: []
}
for ngc in self.nogc_thres_num
}
def _calculate_single(self,
target_dict,
prediction_to_gt,
mode,
nogc_num=None):
target = target_dict[mode + '_zeroshot_recall'] if nogc_num is None else \
target_dict[mode + '_zeroshot_recall'][nogc_num]
for k in target:
# Zero Shot Recall
match = reduce(np.union1d, prediction_to_gt[:k])
if len(self.zeroshot_idx) > 0:
if not isinstance(match, (list, tuple)):
match_list = match.tolist()
else:
match_list = match
zeroshot_match = len(
self.zeroshot_idx) + len(match_list) - len(
set(self.zeroshot_idx + match_list))
zero_rec_i = float(zeroshot_match) / float(
len(self.zeroshot_idx))
target[k].append(zero_rec_i)
def _print_single(self, target_dict, mode, nogc_num=None):
target = target_dict[mode + '_zeroshot_recall'] if nogc_num is None else \
target_dict[mode + '_zeroshot_recall'][nogc_num]
result_str = 'SGG eval: '
for k, v in target.items():
value = -1 if len(v) == 0 else np.mean(v)
result_str += ' zR @ %d: %.4f; ' % (k, value)
suffix_type = 'Zero Shot Recall.' if nogc_num is None else \
'NoGraphConstraint @ %d Zero Shot Recall.' % (
nogc_num)
result_str += ' for mode=%s, type=%s' % (mode, suffix_type)
result_str += '\n'
return result_str
def generate_print_string(self, mode):
result_str = self._print_single(self.result_dict, mode)
if mode == 'sgdet':
result_str += self._print_single(self.result_dict, 'phrdet')
# nogc
for nogc_num in self.nogc_thres_num:
result_str += self._print_single(self.nogc_result_dict, mode,
nogc_num)
if mode == 'sgdet':
result_str += self._print_single(self.nogc_result_dict,
'phrdet', nogc_num)
return result_str
def prepare_zeroshot(self, global_container, local_container):
gt_rels = local_container['gt_rels']
gt_classes = local_container['gt_classes']
zeroshot_triplets = global_container['zeroshot_triplet']
sub_id, ob_id, pred_label = gt_rels[:, 0], gt_rels[:, 1], gt_rels[:, 2]
gt_triplets = np.column_stack(
(gt_classes[sub_id], gt_classes[ob_id], pred_label)) # num_rel, 3
self.zeroshot_idx = np.where(
intersect_2d(gt_triplets, zeroshot_triplets).sum(-1) > 0
)[0].tolist()
def calculate_recall(self, global_container, local_container, mode):
pred_to_gt = local_container['pred_to_gt']
self._calculate_single(self.result_dict, pred_to_gt, mode)
if mode == 'sgdet':
phrdet_pred_to_gt = local_container['phrdet_pred_to_gt']
self._calculate_single(self.result_dict, phrdet_pred_to_gt,
'phrdet')
# nogc
for nogc_num in self.nogc_thres_num:
nogc_pred_to_gt = local_container['nogc@%d_pred_to_gt' % nogc_num]
self._calculate_single(self.nogc_result_dict, nogc_pred_to_gt,
mode, nogc_num)
if mode == 'sgdet':
nogc_pred_to_gt = local_container['phrdet_nogc@%d_pred_to_gt' %
nogc_num]
self._calculate_single(self.nogc_result_dict, nogc_pred_to_gt,
'phrdet', nogc_num)
"""
Given Ground Truth Object-Subject Pairs,
Calculate Recall for SG-Cls and Pred-Cls
Only used in https://github.com/NVIDIA/ContrastiveLosses4VRD for sgcls and predcls
"""
class SGPairAccuracy(SceneGraphEvaluation):
def __init__(self, *args, **kwargs):
super(SGPairAccuracy, self).__init__(*args, **kwargs)
def register_container(self, mode):
self.result_dict[mode + '_accuracy_hit'] = {20: [], 50: [], 100: []}
self.nogc_result_dict[mode + '_accuracy_hit'] = {
ngc: {
20: [],
50: [],
100: []
}
for ngc in self.nogc_thres_num
}
self.result_dict[mode + '_accuracy_count'] = {20: [], 50: [], 100: []}
self.nogc_result_dict[mode + '_accuracy_count'] = {
ngc: {
20: [],
50: [],
100: []
}
for ngc in self.nogc_thres_num
}
def _calculate_single(self,
target_dict,
prediction_to_gt,
gt_rels,
mode,
pred_pair_in_gt,
nogc_num=None):
target_hit = target_dict[mode + '_accuracy_hit'] if nogc_num is None else \
target_dict[mode + '_accuracy_hit'][nogc_num]
target_count = target_dict[mode + '_accuracy_count'] if nogc_num is None else \
target_dict[mode + '_accuracy_count'][nogc_num]
if mode != 'sgdet':
gt_pair_pred_to_gt = []
for p, flag in zip(prediction_to_gt, pred_pair_in_gt):
if flag:
gt_pair_pred_to_gt.append(p)
for k in target_hit:
# to calculate accuracy, only consider those gt pairs
# This metric is used by "Graphical Contrastive Losses for Scene Graph Parsing"
if len(gt_pair_pred_to_gt) > 0:
gt_pair_match = reduce(np.union1d, gt_pair_pred_to_gt[:k])
else:
gt_pair_match = []
target_hit[k].append(float(len(gt_pair_match)))
target_count[k].append(float(gt_rels.shape[0]))
def _print_single(self, target_dict, mode, nogc_num=None):
target_hit = target_dict[mode + '_accuracy_hit'] if nogc_num is None else \
target_dict[mode + '_accuracy_hit'][nogc_num]
target_count = target_dict[mode + '_accuracy_count'] if nogc_num is None else \
target_dict[mode + '_accuracy_count'][nogc_num]
result_str = 'SGG eval: '
for k, v in target_hit.items():
a_hit = np.mean(v)
a_count = np.mean(target_count[k])
result_str += ' A @ %d: %.4f; ' % (k, a_hit / a_count)
suffix_type = 'TopK Accuracy.' if nogc_num is None else 'NoGraphConstraint @ %d TopK Accuracy.' % (
nogc_num)
result_str += ' for mode=%s, type=%s' % (mode, suffix_type)
result_str += '\n'
return result_str
def generate_print_string(self, mode):
result_str = self._print_single(self.result_dict, mode)
if mode == 'sgdet':
result_str += self._print_single(self.result_dict, 'phrdet')
# nogc
for nogc_num in self.nogc_thres_num:
result_str += self._print_single(self.nogc_result_dict, mode,
nogc_num)
if mode == 'sgdet':
result_str += self._print_single(self.nogc_result_dict,
'phrdet', nogc_num)
return result_str
def prepare_gtpair(self, local_container):
        pred_pair_idx = local_container['pred_rel_inds'][:, 0] * 10000 + \
            local_container['pred_rel_inds'][:, 1]
        gt_pair_idx = local_container['gt_rels'][:, 0] * 10000 + \
            local_container['gt_rels'][:, 1]
self.pred_pair_in_gt = (pred_pair_idx[:, None]
== gt_pair_idx[None, :]).sum(-1) > 0
def calculate_recall(self, global_container, local_container, mode):
if mode != 'sgdet':
pred_to_gt = local_container['pred_to_gt']
gt_rels = local_container['gt_rels']
self._calculate_single(self.result_dict, pred_to_gt, gt_rels, mode,
self.pred_pair_in_gt)
if self.detection_method != 'pan_seg':
# nogc
for nogc_num in self.nogc_thres_num:
nogc_pred_to_gt = local_container['nogc@%d_all_pred_to_gt'
% nogc_num]
self._calculate_single(
self.nogc_result_dict, nogc_pred_to_gt, gt_rels, mode,
local_container['nogc@%d_pred_pair_in_gt' % nogc_num],
nogc_num)
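# Illustrative sketch (added for clarity, not part of the original OpenPSG
# module): the pair-hashing trick used in `prepare_gtpair` above. Encoding a
# (subject, object) index pair as sub * 10000 + obj lets predicted pairs be
# matched against GT pairs with one broadcasted comparison. The toy indices
# and the helper name `_demo_pair_hashing` are made up.
def _demo_pair_hashing():
    pred_pairs = np.array([[0, 1], [2, 3], [1, 0]])
    gt_pairs = np.array([[0, 1], [1, 2]])
    pred_idx = pred_pairs[:, 0] * 10000 + pred_pairs[:, 1]
    gt_idx = gt_pairs[:, 0] * 10000 + gt_pairs[:, 1]
    # -> array([ True, False, False])
    return (pred_idx[:, None] == gt_idx[None, :]).sum(-1) > 0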
"""
Mean Recall: Proposed in:
https://arxiv.org/pdf/1812.01880.pdf CVPR, 2019
"""
class SGMeanRecall(SceneGraphEvaluation):
def __init__(self,
result_dict,
nogc_result_dict,
nogc_thres_num,
num_rel,
ind_to_predicates,
detection_method='pan_seg',
print_detail=False):
super(SGMeanRecall, self).__init__(result_dict, nogc_result_dict,
nogc_thres_num, detection_method)
self.num_rel = num_rel
self.print_detail = print_detail
self.rel_name_list = ind_to_predicates[1:] # remove __background__
def register_container(self, mode):
# self.result_dict[mode + '_recall_hit'] = {20: [0]*self.num_rel, 50: [0]*self.num_rel, 100: [0]*self.num_rel}
# self.result_dict[mode + '_recall_count'] = {20: [0]*self.num_rel, 50: [0]*self.num_rel, 100: [0]*self.num_rel}
self.result_dict[mode + '_mean_recall'] = {20: 0.0, 50: 0.0, 100: 0.0}
self.result_dict[mode + '_mean_recall_collect'] = {
20: [[] for _ in range(self.num_rel)],
50: [[] for _ in range(self.num_rel)],
100: [[] for _ in range(self.num_rel)]
}
self.result_dict[mode + '_mean_recall_list'] = {
20: [],
50: [],
100: []
}
self.nogc_result_dict[mode + '_mean_recall'] = {
ngc: {
20: 0.0,
50: 0.0,
100: 0.0
}
for ngc in self.nogc_thres_num
}
self.nogc_result_dict[mode + '_mean_recall_collect'] = {
ngc: {
20: [[] for _ in range(self.num_rel)],
50: [[] for _ in range(self.num_rel)],
100: [[] for _ in range(self.num_rel)]
}
for ngc in self.nogc_thres_num
}
self.nogc_result_dict[mode + '_mean_recall_list'] = {
ngc: {
20: [],
50: [],
100: []
}
for ngc in self.nogc_thres_num
}
if mode == 'sgdet':
self.result_dict['phrdet_mean_recall'] = {
20: 0.0,
50: 0.0,
100: 0.0
}
self.result_dict['phrdet_mean_recall_collect'] = {
20: [[] for _ in range(self.num_rel)],
50: [[] for _ in range(self.num_rel)],
100: [[] for _ in range(self.num_rel)]
}
self.result_dict['phrdet_mean_recall_list'] = {
20: [],
50: [],
100: []
}
self.nogc_result_dict['phrdet_mean_recall'] = {
ngc: {
20: 0.0,
50: 0.0,
100: 0.0
}
for ngc in self.nogc_thres_num
}
self.nogc_result_dict['phrdet_mean_recall_collect'] = {
ngc: {
20: [[] for _ in range(self.num_rel)],
50: [[] for _ in range(self.num_rel)],
100: [[] for _ in range(self.num_rel)]
}
for ngc in self.nogc_thres_num
}
self.nogc_result_dict['phrdet_mean_recall_list'] = {
ngc: {
20: [],
50: [],
100: []
}
for ngc in self.nogc_thres_num
}
def _collect_single(self,
target_dict,
prediction_to_gt,
gt_rels,
mode,
nogc_num=None):
target_collect = target_dict[mode + '_mean_recall_collect'] if nogc_num is None else \
target_dict[mode + '_mean_recall_collect'][nogc_num]
for k in target_collect:
            # the following code is copied from Neural-MOTIFS
match = reduce(np.union1d, prediction_to_gt[:k])
# NOTE: by kaihua, calculate Mean Recall for each category independently
# this metric is proposed by: CVPR 2019 oral paper "Learning to Compose Dynamic Tree Structures for Visual Contexts"
recall_hit = [0] * self.num_rel
recall_count = [0] * self.num_rel
for idx in range(gt_rels.shape[0]):
local_label = gt_rels[idx, 2]
recall_count[int(local_label)] += 1
recall_count[0] += 1
for idx in range(len(match)):
local_label = gt_rels[int(match[idx]), 2]
recall_hit[int(local_label)] += 1
recall_hit[0] += 1
for n in range(self.num_rel):
if recall_count[n] > 0:
target_collect[k][n].append(
float(recall_hit[n] / recall_count[n]))
def _calculate_single(self, target_dict, mode, nogc_num=None):
target_collect = target_dict[mode + '_mean_recall_collect'] if nogc_num is None else \
target_dict[mode + '_mean_recall_collect'][nogc_num]
target_recall = target_dict[mode + '_mean_recall'] if nogc_num is None else \
target_dict[mode + '_mean_recall'][nogc_num]
target_recall_list = target_dict[mode + '_mean_recall_list'] if nogc_num is None else \
target_dict[mode + '_mean_recall_list'][nogc_num]
for k, v in target_recall.items():
sum_recall = 0
num_rel_no_bg = self.num_rel - 1
for idx in range(num_rel_no_bg):
if len(target_collect[k][idx + 1]) == 0:
tmp_recall = 0.0
else:
tmp_recall = np.mean(target_collect[k][idx + 1])
target_recall_list[k].append(tmp_recall)
sum_recall += tmp_recall
target_recall[k] = sum_recall / float(num_rel_no_bg)
def _print_single(self,
target_dict,
mode,
nogc_num=None,
predicate_freq=None):
target = target_dict[mode + '_mean_recall'] if nogc_num is None else \
target_dict[mode + '_mean_recall'][nogc_num]
target_recall_list = target_dict[mode + '_mean_recall_list'] if nogc_num is None else \
target_dict[mode + '_mean_recall_list'][nogc_num]
result_str = 'SGG eval: '
for k, v in target.items():
result_str += ' mR @ %d: %.4f; ' % (k, float(v))
suffix_type = 'Mean Recall.' if nogc_num is None else 'NoGraphConstraint @ %d Mean Recall.' % (
nogc_num)
result_str += ' for mode=%s, type=%s' % (mode, suffix_type)
result_str += '\n'
        # result_str is flattened for copying the data into a report form, while the table is for visualization.
        # Printed only for the graph-constraint setting and a single mode, for brevity.
if self.print_detail and mode != 'phrdet' and nogc_num is None:
rel_name_list, res = self.rel_name_list, target_recall_list[100]
if predicate_freq is not None:
rel_name_list = [
self.rel_name_list[sid] for sid in predicate_freq
]
res = [target_recall_list[100][sid] for sid in predicate_freq]
result_per_predicate = []
for n, r in zip(rel_name_list, res):
result_per_predicate.append(
('{}'.format(str(n)), '{:.4f}'.format(r)))
result_str += '\t'.join(list(map(str, rel_name_list)))
result_str += '\n'
def map_float(num):
return '{:.4f}'.format(num)
result_str += '\t'.join(list(map(map_float, res)))
result_str += '\n'
num_columns = min(6, len(result_per_predicate) * 2)
results_flatten = list(itertools.chain(*result_per_predicate))
headers = ['predicate', 'Rec100'] * (num_columns // 2)
results_2d = itertools.zip_longest(
*[results_flatten[i::num_columns] for i in range(num_columns)])
table_data = [headers]
table_data += [result for result in results_2d]
table = AsciiTable(table_data)
result_str += table.table + '\n'
return result_str
def generate_print_string(self, mode, predicate_freq=None):
result_str = self._print_single(self.result_dict,
mode,
predicate_freq=predicate_freq)
if mode == 'sgdet':
result_str += self._print_single(self.result_dict,
'phrdet',
predicate_freq=predicate_freq)
# nogc
for nogc_num in self.nogc_thres_num:
result_str += self._print_single(self.nogc_result_dict,
mode,
nogc_num,
predicate_freq=predicate_freq)
if mode == 'sgdet':
result_str += self._print_single(self.nogc_result_dict,
'phrdet',
nogc_num,
predicate_freq=predicate_freq)
return result_str
def collect_mean_recall_items(self, global_container, local_container,
mode):
pred_to_gt = local_container['pred_to_gt']
gt_rels = local_container['gt_rels']
self._collect_single(self.result_dict, pred_to_gt, gt_rels, mode)
if mode == 'sgdet':
phrdet_pred_to_gt = local_container['phrdet_pred_to_gt']
self._collect_single(self.result_dict, phrdet_pred_to_gt, gt_rels,
'phrdet')
if self.detection_method != 'pan_seg':
for nogc_num in self.nogc_thres_num:
nogc_pred_to_gt = local_container['nogc@%d_pred_to_gt' %
nogc_num]
self._collect_single(self.nogc_result_dict, nogc_pred_to_gt,
gt_rels, mode, nogc_num)
if mode == 'sgdet':
nogc_pred_to_gt = local_container[
'phrdet_nogc@%d_pred_to_gt' % nogc_num]
self._collect_single(self.nogc_result_dict,
nogc_pred_to_gt, gt_rels, 'phrdet',
nogc_num)
def calculate_mean_recall(self, mode):
self._calculate_single(self.result_dict, mode)
if mode == 'sgdet':
self._calculate_single(self.result_dict, 'phrdet')
for nogc_num in self.nogc_thres_num:
self._calculate_single(self.nogc_result_dict, mode, nogc_num)
if mode == 'sgdet':
self._calculate_single(self.nogc_result_dict, 'phrdet',
nogc_num)
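# Illustrative sketch (added for clarity, not part of the original OpenPSG
# module): mean recall averages the per-predicate recalls collected above, so
# rare predicates count as much as frequent ones. The per-predicate recall
# lists and the helper name `_demo_mean_recall` are made up.
def _demo_mean_recall():
    per_predicate_recalls = [[1.0, 0.5], [0.0], []]  # 3 predicate categories
    recalls = [np.mean(v) if len(v) > 0 else 0.0 for v in per_predicate_recalls]
    return sum(recalls) / len(recalls)  # (0.75 + 0.0 + 0.0) / 3 = 0.25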
"""
Accumulate Recall:
calculate recall on the whole dataset instead of each image
"""
class SGAccumulateRecall(SceneGraphEvaluation):
def __init__(self, result_dict):
super(SGAccumulateRecall, self).__init__(result_dict)
def register_container(self, mode):
self.result_dict[mode + '_accumulate_recall'] = {
20: 0.0,
50: 0.0,
100: 0.0
}
def generate_print_string(self, mode):
result_str = 'SGG eval: '
for k, v in self.result_dict[mode + '_accumulate_recall'].items():
result_str += ' aR @ %d: %.4f; ' % (k, float(v))
result_str += ' for mode=%s, type=Accumulate Recall.' % mode
result_str += '\n'
return result_str
def calculate_accumulate(self, mode):
for k, v in self.result_dict[mode + '_accumulate_recall'].items():
self.result_dict[mode + '_accumulate_recall'][k] = float(
self.result_dict[mode + '_recall_hit'][k][0]) / float(
self.result_dict[mode + '_recall_count'][k][0] + 1e-10)
return
def _triplet_bbox(relations,
classes,
boxes,
predicate_scores=None,
class_scores=None):
"""
format relations of (sub_id, ob_id, pred_label) into triplets of (sub_label, pred_label, ob_label)
Parameters:
relations (#rel, 3) : (sub_id, ob_id, pred_label)
classes (#objs, ) : class labels of objects
boxes (#objs, 4)
predicate_scores (#rel, ) : scores for each predicate
class_scores (#objs, ) : scores for each object
Returns:
triplets (#rel, 3) : (sub_label, pred_label, ob_label)
triplets_boxes (#rel, 8) array of boxes for the parts
triplets_scores (#rel, 3) : (sub_score, pred_score, ob_score)
"""
    sub_id, ob_id, pred_label = relations[:, 0], relations[:, 1], relations[:, 2]
triplets = np.column_stack((classes[sub_id], pred_label, classes[ob_id]))
triplet_boxes = np.column_stack((boxes[sub_id], boxes[ob_id]))
triplet_scores = None
if predicate_scores is not None and class_scores is not None:
triplet_scores = np.column_stack((
class_scores[sub_id],
predicate_scores,
class_scores[ob_id],
))
return triplets, triplet_boxes, triplet_scores
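# Illustrative sketch (added for clarity, not part of the original OpenPSG
# module): how `_triplet_bbox` turns index-based relations into labelled
# triplets with paired boxes. The toy inputs and the helper name
# `_demo_triplet_bbox` are made up.
def _demo_triplet_bbox():
    relations = np.array([[0, 1, 2]])  # object 0 --(predicate 2)--> object 1
    classes = np.array([5, 7])         # class labels of the two objects
    boxes = np.array([[0., 0., 10., 10.], [5., 5., 20., 20.]])
    triplets, triplet_boxes, _ = _triplet_bbox(relations, classes, boxes)
    # triplets -> [[5, 2, 7]]; triplet_boxes -> [[0, 0, 10, 10, 5, 5, 20, 20]]
    return triplets, triplet_boxes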
def _compute_pred_matches_bbox(gt_triplets,
pred_triplets,
gt_boxes,
pred_boxes,
iou_thrs,
phrdet=False):
"""
Given a set of predicted triplets, return the list of matching GT's for each of the
given predictions
Return:
pred_to_gt [List of List]
"""
# This performs a matrix multiplication-esque thing between the two arrays
# Instead of summing, we want the equality, so we reduce in that way
# The rows correspond to GT triplets, columns to pred triplets
keeps = intersect_2d(gt_triplets, pred_triplets)
gt_has_match = keeps.any(1)
pred_to_gt = [[] for x in range(pred_boxes.shape[0])]
for gt_ind, gt_box, keep_inds in zip(
np.where(gt_has_match)[0],
gt_boxes[gt_has_match],
keeps[gt_has_match],
):
boxes = pred_boxes[keep_inds]
if phrdet:
# Evaluate where the union box > 0.5
gt_box_union = gt_box.reshape((2, 4))
gt_box_union = np.concatenate(
(gt_box_union.min(0)[:2], gt_box_union.max(0)[2:]), 0)
box_union = boxes.reshape((-1, 2, 4))
box_union = np.concatenate(
(box_union.min(1)[:, :2], box_union.max(1)[:, 2:]), 1)
inds = bbox_overlaps(
torch.Tensor(gt_box_union[None]),
torch.Tensor(box_union)).numpy()[0] >= iou_thrs
else:
sub_iou = bbox_overlaps(torch.Tensor(gt_box[None, :4]),
torch.Tensor(boxes[:, :4])).numpy()[0]
obj_iou = bbox_overlaps(torch.Tensor(gt_box[None, 4:]),
torch.Tensor(boxes[:, 4:])).numpy()[0]
inds = (sub_iou >= iou_thrs) & (obj_iou >= iou_thrs)
for i in np.where(keep_inds)[0][inds]:
pred_to_gt[i].append(int(gt_ind))
return pred_to_gt
def _triplet_panseg(relations,
classes,
masks,
predicate_scores=None,
class_scores=None):
"""
format relations of (sub_id, ob_id, pred_label) into triplets of (sub_label, pred_label, ob_label)
Parameters:
relations (#rel, 3) : (sub_id, ob_id, pred_label)
classes (#objs, ) : class labels of objects
masks (#objs, )
predicate_scores (#rel, ) : scores for each predicate
class_scores (#objs, ) : scores for each object
Returns:
triplets (#rel, 3) : (sub_label, pred_label, ob_label)
        triplet_masks (#rel, 2, H, W)
triplets_scores (#rel, 3) : (sub_score, pred_score, ob_score)
"""
    sub_id, ob_id, pred_label = relations[:, 0], relations[:, 1], relations[:, 2]
triplets = np.column_stack((classes[sub_id], pred_label, classes[ob_id]))
masks = np.array(masks)
triplet_masks = np.stack((masks[sub_id], masks[ob_id]), axis=1)
triplet_scores = None
if predicate_scores is not None and class_scores is not None:
triplet_scores = np.column_stack((
class_scores[sub_id],
predicate_scores,
class_scores[ob_id],
))
return triplets, triplet_masks, triplet_scores
def _compute_pred_matches_panseg(gt_triplets,
pred_triplets,
gt_masks,
pred_masks,
iou_thrs,
phrdet=False):
"""
Given a set of predicted triplets, return the list of matching GT's for each of the
given predictions
Return:
pred_to_gt [List of List]
"""
# This performs a matrix multiplication-esque thing between the two arrays
# Instead of summing, we want the equality, so we reduce in that way
# The rows correspond to GT triplets, columns to pred triplets
keeps = intersect_2d(gt_triplets, pred_triplets)
gt_has_match = keeps.any(1)
pred_to_gt = [[] for x in range(pred_masks.shape[0])]
for gt_ind, gt_mask, keep_inds in zip(
np.where(gt_has_match)[0],
gt_masks[gt_has_match],
keeps[gt_has_match],
):
pred_mask = pred_masks[keep_inds]
sub_gt_mask = gt_mask[0]
ob_gt_mask = gt_mask[1]
sub_pred_mask = pred_mask[:, 0]
ob_pred_mask = pred_mask[:, 1]
if phrdet:
# Evaluate where the union mask > 0.5
inds = []
gt_mask_union = np.logical_or(sub_gt_mask, ob_gt_mask)
pred_mask_union = np.logical_or(sub_pred_mask, ob_pred_mask)
for pred_mask in pred_mask_union:
iou = mask_iou(gt_mask_union, pred_mask)
inds.append(iou >= iou_thrs)
else:
sub_inds = []
for pred_mask in sub_pred_mask:
sub_iou = mask_iou(sub_gt_mask, pred_mask)
sub_inds.append(sub_iou >= iou_thrs)
ob_inds = []
for pred_mask in ob_pred_mask:
ob_iou = mask_iou(ob_gt_mask, pred_mask)
ob_inds.append(ob_iou >= iou_thrs)
inds = np.logical_and(sub_inds, ob_inds)
for i in np.where(keep_inds)[0][inds]:
pred_to_gt[i].append(int(gt_ind))
return pred_to_gt
def mask_iou(mask1, mask2):
assert mask1.shape == mask2.shape
mask1_area = np.count_nonzero(mask1)
mask2_area = np.count_nonzero(mask2)
intersection = np.count_nonzero(np.logical_and(mask1, mask2))
iou = intersection / (mask1_area + mask2_area - intersection)
return iou
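# Illustrative sketch (added for clarity, not part of the original OpenPSG
# module): `mask_iou` on two tiny boolean masks. The masks and the helper name
# `_demo_mask_iou` are made up.
def _demo_mask_iou():
    mask1 = np.zeros((3, 3), dtype=bool)
    mask1[:2, :2] = True  # 4 pixels
    mask2 = np.zeros((3, 3), dtype=bool)
    mask2[1:, 1:] = True  # 4 pixels, overlapping mask1 in exactly 1 pixel
    return mask_iou(mask1, mask2)  # 1 / (4 + 4 - 1) = 0.1429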
| 43,520 | 39.826454 | 128 | py |
OpenPSG | OpenPSG-main/openpsg/evaluation/__init__.py | from .sgg_eval import sgg_evaluation
| 37 | 18 | 36 | py |
OpenPSG | OpenPSG-main/openpsg/evaluation/sgg_eval_util.py | # ---------------------------------------------------------------
# sgg_eval_util.py
# Set-up time: 2020/5/18 下午9:37
# Copyright (c) 2020 ICT
# Licensed under The MIT License [see LICENSE for details]
# Written by Kenneth-Wong (Wenbin-Wang) @ VIPL.ICT
# Contact: wenbin.wang@vipl.ict.ac.cn [OR] nkwangwenbin@gmail.com
# ---------------------------------------------------------------
import numpy as np
def intersect_2d(x1, x2):
"""Given two arrays [m1, n], [m2,n], returns a [m1, m2] array where each
entry is True if those rows match.
:param x1: [m1, n] numpy array
:param x2: [m2, n] numpy array
:return: [m1, m2] bool array of the intersections
"""
if x1.shape[1] != x2.shape[1]:
raise ValueError('Input arrays must have same #columns')
# This performs a matrix multiplication-esque thing between the two arrays
# Instead of summing, we want the equality, so we reduce in that way
res = (x1[..., None] == x2.T[None, ...]).all(1)
return res
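# Illustrative sketch (added for clarity, not part of the original module):
# `intersect_2d` marks which rows of x1 exactly equal which rows of x2. The toy
# arrays and the helper name `_demo_intersect_2d` are made up.
def _demo_intersect_2d():
    x1 = np.array([[1, 2], [3, 4]])
    x2 = np.array([[3, 4], [1, 2], [5, 6]])
    # -> [[False,  True, False],
    #     [ True, False, False]]
    return intersect_2d(x1, x2)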
def argsort_desc(scores):
"""Returns the indices that sort scores descending in a smart way.
:param scores: Numpy array of arbitrary size
:return: an array of size [numel(scores), dim(scores)] where each row is the index you'd
need to get the score.
"""
return np.column_stack(
np.unravel_index(np.argsort(-scores.ravel()), scores.shape))
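# Illustrative sketch (added for clarity, not part of the original module):
# `argsort_desc` returns (row, col) indices ordered by descending score. The
# scores and the helper name `_demo_argsort_desc` are made up.
def _demo_argsort_desc():
    scores = np.array([[0.1, 0.9],
                       [0.5, 0.3]])
    return argsort_desc(scores)  # [[0, 1], [1, 0], [1, 1], [0, 0]]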
| 1,383 | 35.421053 | 92 | py |
OpenPSG | OpenPSG-main/openpsg/models/registry.py | from mmdet.utils import Registry
FRAMEWORK = Registry('framework')
| 68 | 16.25 | 33 | py |
OpenPSG | OpenPSG-main/openpsg/models/__init__.py | from .frameworks import * # noqa: F401,F403
from .losses import * # noqa: F401,F403
from .relation_heads import * # noqa: F401,F403
from .roi_extractors import * # noqa: F401,F403
from .roi_heads import * # noqa: F401,F403
| 228 | 37.166667 | 48 | py |
OpenPSG | OpenPSG-main/openpsg/models/roi_extractors/visual_spatial.py | # ---------------------------------------------------------------
# visual_spatial.py
# Set-up time: 2020/4/28 下午8:46
# Copyright (c) 2020 ICT
# Licensed under The MIT License [see LICENSE for details]
# Written by Kenneth-Wong (Wenbin-Wang) @ VIPL.ICT
# Contact: wenbin.wang@vipl.ict.ac.cn [OR] nkwangwenbin@gmail.com
# ---------------------------------------------------------------
from __future__ import division
import numpy as np
import torch
import torch.nn as nn
from mmcv import ops
from mmcv.cnn import ConvModule, kaiming_init, normal_init
from mmcv.runner import BaseModule, force_fp32
from mmdet.models import ROI_EXTRACTORS
from torch.nn.modules.utils import _pair
from openpsg.models.relation_heads.approaches import PointNetFeat
from openpsg.utils.utils import enumerate_by_image
@ROI_EXTRACTORS.register_module()
class VisualSpatialExtractor(BaseModule):
"""Extract RoI features from a single level feature map.
If there are multiple input feature levels, each RoI is mapped to a level
according to its scale.
Args:
roi_layer (dict): Specify RoI layer type and arguments.
out_channels (int): Output channels of RoI layers.
        featmap_strides (List[int]): Strides of input feature maps.
finest_scale (int): Scale threshold of mapping to level 0.
"""
def __init__(
self,
bbox_roi_layer,
in_channels,
featmap_strides,
roi_out_channels=256,
fc_out_channels=1024,
finest_scale=56,
mask_roi_layer=None,
with_avg_pool=False,
with_visual_bbox=True,
with_visual_mask=False,
with_visual_point=False,
with_spatial=False,
separate_spatial=False,
gather_visual='sum',
conv_cfg=None,
norm_cfg=dict(type='BN', requires_grad=True),
init_cfg=None,
):
super(VisualSpatialExtractor, self).__init__(init_cfg)
self.roi_feat_size = _pair(bbox_roi_layer.get('output_size', 7))
self.roi_feat_area = self.roi_feat_size[0] * self.roi_feat_size[1]
self.in_channels = in_channels
self.roi_out_channels = roi_out_channels
self.fc_out_channels = fc_out_channels
self.featmap_strides = featmap_strides
self.finest_scale = finest_scale
self.fp16_enabled = False
self.with_avg_pool = with_avg_pool
self.with_visual_bbox = with_visual_bbox
self.with_visual_mask = with_visual_mask
self.with_visual_point = with_visual_point
self.with_spatial = with_spatial
self.separate_spatial = separate_spatial
self.gather_visual = gather_visual
# NOTE: do not include the visual_point_head
self.num_visual_head = int(self.with_visual_bbox) + int(
self.with_visual_mask)
if self.num_visual_head == 0:
raise ValueError('There must be at least one visual head. ')
in_channels = self.in_channels
if self.with_avg_pool:
self.avg_pool = nn.AvgPool2d(self.roi_feat_size)
else:
in_channels *= self.roi_feat_area
# set some caches
self._union_rois = None
self._pair_rois = None
# build visual head: extract visual features.
if self.with_visual_bbox:
assert bbox_roi_layer is not None
self.bbox_roi_layers = self.build_roi_layers(
bbox_roi_layer, featmap_strides)
self.visual_bbox_head = nn.Sequential(*[
nn.Linear(in_channels, self.fc_out_channels),
nn.ReLU(inplace=True),
nn.Linear(self.fc_out_channels, self.fc_out_channels),
nn.ReLU(inplace=True),
])
if self.with_visual_mask:
assert mask_roi_layer is not None
self.mask_roi_layers = self.build_roi_layers(
mask_roi_layer, featmap_strides)
self.visual_mask_head = nn.Sequential(*[
nn.Linear(in_channels, self.fc_out_channels),
nn.ReLU(inplace=True),
nn.Linear(self.fc_out_channels, self.fc_out_channels),
nn.ReLU(inplace=True),
])
if self.with_visual_point:
# TODO: build the point feats extraction head.
self.pointFeatExtractor = PointNetFeat()
if self.num_visual_head > 1:
gather_in_channels = (self.fc_out_channels *
2 if self.gather_visual == 'cat' else
self.fc_out_channels)
self.gather_visual_head = nn.Sequential(*[
nn.Linear(gather_in_channels, self.fc_out_channels),
nn.ReLU(inplace=True),
])
# build spatial_head
if self.with_spatial:
self.spatial_size = self.roi_feat_size[0] * 4 - 1
self.spatial_conv = nn.Sequential(*[
ConvModule(
2,
self.in_channels // 2,
kernel_size=7,
stride=2,
padding=3,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
order=('conv', 'act', 'norm'),
),
nn.MaxPool2d(kernel_size=3, stride=2, padding=1),
ConvModule(
self.in_channels // 2,
self.roi_out_channels,
kernel_size=3,
stride=1,
padding=1,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
order=('conv', 'act', 'norm'),
),
])
if self.separate_spatial:
self.spatial_head = nn.Sequential(*[
nn.Linear(in_channels, self.fc_out_channels),
nn.ReLU(inplace=True),
nn.Linear(self.fc_out_channels, self.fc_out_channels),
nn.ReLU(inplace=True),
])
@property
def num_inputs(self):
"""int: Input feature map levels."""
return len(self.featmap_strides)
@property
def union_rois(self):
return self._union_rois
@property
def pair_rois(self):
return self._pair_rois
def init_weights(self):
if self.with_visual_bbox:
for m in self.visual_bbox_head:
if isinstance(m, nn.Linear):
kaiming_init(m, distribution='uniform', a=1)
if self.with_visual_mask:
for m in self.visual_mask_head:
if isinstance(m, nn.Linear):
kaiming_init(m, distribution='uniform', a=1)
if self.with_visual_point:
pass
            # the PointNet head keeps its default initialization; do not re-initialize it here
if self.num_visual_head > 1:
for m in self.gather_visual_head:
if isinstance(m, nn.Linear):
kaiming_init(m, distribution='uniform', a=1)
if self.with_spatial:
for m in self.spatial_conv:
if isinstance(m, ConvModule):
normal_init(m.conv, std=0.01)
if self.separate_spatial:
for m in self.spatial_head:
if isinstance(m, nn.Linear):
kaiming_init(m, distribution='uniform', a=1)
def build_roi_layers(self, layer_cfg, featmap_strides):
cfg = layer_cfg.copy()
layer_type = cfg.pop('type')
assert hasattr(ops, layer_type)
layer_cls = getattr(ops, layer_type)
roi_layers = nn.ModuleList(
[layer_cls(spatial_scale=1 / s, **cfg) for s in featmap_strides])
return roi_layers
def map_roi_levels(self, rois, num_levels):
"""Map rois to corresponding feature levels by scales.
- scale < finest_scale * 2: level 0
- finest_scale * 2 <= scale < finest_scale * 4: level 1
- finest_scale * 4 <= scale < finest_scale * 8: level 2
- scale >= finest_scale * 8: level 3
Args:
rois (Tensor): Input RoIs, shape (k, 5).
num_levels (int): Total level number.
Returns:
Tensor: Level index (0-based) of each RoI, shape (k, )
"""
scale = torch.sqrt(
(rois[:, 3] - rois[:, 1] + 1) * (rois[:, 4] - rois[:, 2] + 1))
target_lvls = torch.floor(torch.log2(scale / self.finest_scale + 1e-6))
target_lvls = target_lvls.clamp(min=0, max=num_levels - 1).long()
return target_lvls
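    # Worked example (added for clarity, not in the original code): with
    # finest_scale=56, a RoI whose scale (sqrt of its area) is 56 gives
    # log2(56/56) = 0 -> level 0; scale 224 gives log2(224/56) = 2 -> level 2;
    # larger RoIs are clamped to num_levels - 1.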
def roi_rescale(self, rois, scale_factor):
cx = (rois[:, 1] + rois[:, 3]) * 0.5
cy = (rois[:, 2] + rois[:, 4]) * 0.5
w = rois[:, 3] - rois[:, 1] + 1
h = rois[:, 4] - rois[:, 2] + 1
new_w = w * scale_factor
new_h = h * scale_factor
x1 = cx - new_w * 0.5 + 0.5
x2 = cx + new_w * 0.5 - 0.5
y1 = cy - new_h * 0.5 + 0.5
y2 = cy + new_h * 0.5 - 0.5
new_rois = torch.stack((rois[:, 0], x1, y1, x2, y2), dim=-1)
return new_rois
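    # Worked example (added for clarity, not in the original code): with
    # scale_factor=2, the RoI (0, 10, 10, 19, 19), i.e. a 10x10 box centred at
    # (14.5, 14.5), becomes (0, 5, 5, 24, 24): a 20x20 box around the same
    # centre.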
def roi_forward(self,
roi_layers,
feats,
rois,
masks=None,
roi_scale_factor=None):
if len(feats) == 1:
if roi_layers[0].__class__.__name__ == 'ShapeAwareRoIAlign':
assert masks is not None
roi_feats = roi_layers[0](feats[0], rois, masks)
else:
roi_feats = roi_layers[0](feats[0], rois)
else:
out_size = roi_layers[0].output_size
num_levels = self.num_inputs
target_lvls = self.map_roi_levels(rois, num_levels)
roi_feats = feats[0].new_zeros(rois.size(0), self.roi_out_channels,
*out_size)
if roi_scale_factor is not None:
                assert masks is None  # not applicable to shape-aware roi align
rois = self.roi_rescale(rois, roi_scale_factor)
for i in range(num_levels):
inds = target_lvls == i
if inds.any():
rois_ = rois[inds, :]
if roi_layers[
i].__class__.__name__ == 'ShapeAwareRoIAlign':
masks_ = [
masks[idx] for idx in torch.nonzero(inds).view(-1)
]
roi_feats_t = roi_layers[i](feats[i], rois_, masks_)
else:
roi_feats_t = roi_layers[i](feats[i], rois_)
roi_feats[inds] = roi_feats_t
return roi_feats
def single_roi_forward(self,
feats,
rois,
masks=None,
points=None,
roi_scale_factor=None):
roi_feats_bbox, roi_feats_mask, roi_feats_point = None, None, None
# 1. Use the visual and spatial head to extract roi features.
if self.with_visual_bbox:
roi_feats_bbox = self.roi_forward(self.bbox_roi_layers, feats,
rois, masks, roi_scale_factor)
if self.with_visual_mask:
roi_feats_mask = self.roi_forward(self.mask_roi_layers, feats,
rois, masks, roi_scale_factor)
if self.with_visual_point:
# input: (N_entity, Ndim(2), N_point)
# output: (N_entity, feat_dim(1024))
roi_feats_point, trans_matrix, _ = self.pointFeatExtractor(
torch.stack(points).transpose(2, 1))
roi_feats_result = []
# gather the visual features, do not include the features from points
for roi_feats, head in (
(roi_feats_bbox, getattr(self, 'visual_bbox_head', None)),
(roi_feats_mask, getattr(self, 'visual_mask_head', None)),
):
if head is not None:
roi_feats_result.append(
head(roi_feats.view(roi_feats.size(0), -1)))
if self.num_visual_head > 1:
if self.gather_visual == 'cat':
roi_feats_result = torch.cat(roi_feats_result, dim=-1)
elif self.gather_visual == 'sum':
roi_feats_result = torch.stack(roi_feats_result).sum(0)
elif self.gather_visual == 'prod':
roi_feats_result = torch.stack(roi_feats_result).prod(0)
else:
raise NotImplementedError(
'The gathering operation {} is not implemented yet.'.
format(self.gather_visual))
roi_feats = self.gather_visual_head(roi_feats_result)
else:
roi_feats = roi_feats_result[0]
if self.with_visual_point:
return (roi_feats, roi_feats_point, trans_matrix)
else:
return (roi_feats, )
def union_roi_forward(
self,
feats,
img_metas,
rois,
rel_pair_idx,
masks=None,
points=None,
roi_scale_factor=None,
):
assert self.with_spatial
num_images = feats[0].size(0)
assert num_images == len(rel_pair_idx)
rel_pair_index = []
im_inds = rois[:, 0]
acc_obj = 0
for i, s, e in enumerate_by_image(im_inds):
num_obj_i = e - s
rel_pair_idx_i = rel_pair_idx[i].clone()
rel_pair_idx_i[:, 0] += acc_obj
rel_pair_idx_i[:, 1] += acc_obj
acc_obj += num_obj_i
rel_pair_index.append(rel_pair_idx_i)
rel_pair_index = torch.cat(rel_pair_index, 0)
# prepare the union rois
head_rois = rois[rel_pair_index[:, 0], :]
tail_rois = rois[rel_pair_index[:, 1], :]
head_rois_int = head_rois.cpu().numpy().astype(np.int32)
tail_rois_int = tail_rois.cpu().numpy().astype(np.int32)
union_rois = torch.stack(
[
head_rois[:, 0],
torch.min(head_rois[:, 1], tail_rois[:, 1]),
torch.min(head_rois[:, 2], tail_rois[:, 2]),
torch.max(head_rois[:, 3], tail_rois[:, 3]),
torch.max(head_rois[:, 4], tail_rois[:, 4]),
],
-1,
)
self._union_rois = union_rois[:, 1:]
self._pair_rois = torch.cat((head_rois[:, 1:], tail_rois[:, 1:]),
dim=-1)
# OPTIONAL: prepare the union masks
union_masks = None
if masks is not None and self.with_visual_mask:
union_rois_int = union_rois.cpu().numpy().astype(np.int32)
union_heights = union_rois_int[:, 4] - union_rois_int[:, 2] + 1
union_widths = union_rois_int[:, 3] - union_rois_int[:, 1] + 1
union_masks = []
for i, pair_idx in enumerate(rel_pair_index.cpu().numpy()):
head_mask, tail_mask = masks[pair_idx[0]], masks[pair_idx[1]]
union_mask = torch.zeros(union_heights[i],
union_widths[i]).to(head_mask)
base_x, base_y = union_rois_int[i, 1], union_rois_int[i, 2]
union_mask[(head_rois_int[i, 2] -
base_y):(head_rois_int[i, 4] - base_y + 1),
(head_rois_int[i, 1] -
base_x):(head_rois_int[i, 3] - base_x +
1), ] = head_mask
union_mask[(tail_rois_int[i, 2] -
base_y):(tail_rois_int[i, 4] - base_y + 1),
(tail_rois_int[i, 1] -
base_x):(tail_rois_int[i, 3] - base_x +
1), ] = tail_mask
union_masks.append(union_mask)
# OPTIONAL: prepare the union points
union_points = None
if points is not None and self.with_visual_point:
union_points = []
for i, pair_idx in enumerate(rel_pair_index.cpu().numpy()):
head_points, tail_points = points[pair_idx[0]], points[
pair_idx[1]]
pts = torch.cat((head_points, tail_points), dim=0)
union_points.append(pts)
roi_feats_bbox, roi_feats_mask, roi_feats_point, rect_feats = (
None,
None,
None,
None,
)
# 1. Use the visual and spatial head to extract roi features.
if self.with_visual_bbox:
roi_feats_bbox = self.roi_forward(self.bbox_roi_layers, feats,
union_rois, union_masks,
roi_scale_factor)
if self.with_visual_mask:
roi_feats_mask = self.roi_forward(self.mask_roi_layers, feats,
union_rois, union_masks,
roi_scale_factor)
if self.with_visual_point:
roi_feats_point, trans_matrix, _ = self.pointFeatExtractor(
torch.stack(union_points, dim=0).transpose(2, 1))
        # rect_feats: use coordinate ranges to construct binary rectangle maps of size (spatial_size, spatial_size)
num_rel = len(rel_pair_index)
dummy_x_range = (torch.arange(self.spatial_size).to(
rel_pair_index.device).view(1, 1,
-1).expand(num_rel, self.spatial_size,
self.spatial_size))
dummy_y_range = (torch.arange(self.spatial_size).to(
rel_pair_index.device).view(1, -1,
1).expand(num_rel, self.spatial_size,
self.spatial_size))
size_list = [
np.array(img_meta['img_shape'][:2]).reshape(1, -1)
for img_meta in img_metas
]
img_input_sizes = np.empty((0, 2), dtype=np.float32)
for img_id in range(len(rel_pair_idx)):
num_rel = len(rel_pair_idx[img_id])
img_input_sizes = np.vstack(
(img_input_sizes, np.tile(size_list[img_id], (num_rel, 1))))
img_input_sizes = torch.from_numpy(img_input_sizes).to(rois)
        # rescale the bboxes to the spatial_size coordinate frame
head_proposals = head_rois.clone()
head_proposals[:, 1::2] *= self.spatial_size / img_input_sizes[:, 1:2]
head_proposals[:, 2::2] *= self.spatial_size / img_input_sizes[:, 0:1]
tail_proposals = tail_rois.clone()
tail_proposals[:, 1::2] *= self.spatial_size / img_input_sizes[:, 1:2]
tail_proposals[:, 2::2] *= self.spatial_size / img_input_sizes[:, 0:1]
head_rect = ((dummy_x_range >= head_proposals[:, 1].floor().view(
-1, 1, 1).long())
& (dummy_x_range <= head_proposals[:, 3].ceil().view(
-1, 1, 1).long())
& (dummy_y_range >= head_proposals[:, 2].floor().view(
-1, 1, 1).long())
& (dummy_y_range <= head_proposals[:, 4].ceil().view(
-1, 1, 1).long())).float()
        tail_rect = ((dummy_x_range >= tail_proposals[:, 1].floor().view(
            -1, 1, 1).long())
                     & (dummy_x_range <= tail_proposals[:, 3].ceil().view(
                         -1, 1, 1).long())
                     & (dummy_y_range >= tail_proposals[:, 2].floor().view(
                         -1, 1, 1).long())
                     & (dummy_y_range <= tail_proposals[:, 4].ceil().view(
                         -1, 1, 1).long())).float()
rect_input = torch.stack((head_rect, tail_rect),
dim=1) # (num_rel, 2, rect_size, rect_size)
rect_feats = self.spatial_conv(rect_input)
# gather the different visual features and spatial features
if self.separate_spatial: # generally, it is False
roi_feats_result = []
for roi_feats, head in (
(roi_feats_bbox, getattr(self, 'visual_bbox_head', None)),
(roi_feats_mask, getattr(self, 'visual_mask_head', None)),
):
if head is not None:
roi_feats_result.append(
head(roi_feats.view(roi_feats.size(0), -1)))
if self.num_visual_head > 1:
if self.gather_visual == 'cat':
roi_feats_result = torch.cat(roi_feats_result, dim=-1)
elif self.gather_visual == 'sum':
roi_feats_result = torch.stack(roi_feats_result).sum(0)
elif self.gather_visual == 'prod':
roi_feats_result = torch.stack(roi_feats_result).prod(0)
else:
raise NotImplementedError(
'The gathering operation {} is not implemented yet.'.
format(self.gather_visual))
roi_feats = self.gather_visual_head(roi_feats_result)
else:
roi_feats = roi_feats_result[0]
roi_feats_spatial = self.spatial_head(rect_feats)
if self.with_visual_point:
return (roi_feats, roi_feats_spatial, roi_feats_point,
trans_matrix)
else:
return (roi_feats, roi_feats_spatial)
else:
roi_feats_result = []
for roi_feats, head in (
(roi_feats_bbox, getattr(self, 'visual_bbox_head', None)),
(roi_feats_mask, getattr(self, 'visual_mask_head', None)),
):
if head is not None:
roi_feats_result.append(
head((roi_feats + rect_feats).view(
roi_feats.size(0), -1)))
if self.num_visual_head > 1:
if self.gather_visual == 'cat':
roi_feats_result = torch.cat(roi_feats_result, dim=-1)
elif self.gather_visual == 'sum':
roi_feats_result = torch.stack(roi_feats_result).sum(0)
elif self.gather_visual == 'prod':
roi_feats_result = torch.stack(roi_feats_result).prod(0)
else:
raise NotImplementedError(
'The gathering operation {} is not implemented yet.'.
format(self.gather_visual))
roi_feats = self.gather_visual_head(roi_feats_result)
else:
roi_feats = roi_feats_result[0]
if self.with_visual_point:
return (roi_feats, roi_feats_point, trans_matrix)
else:
return (roi_feats, )
@force_fp32(apply_to=('feats', ), out_fp16=True)
def forward(
self,
feats,
img_metas,
rois,
rel_pair_idx=None,
masks=None,
points=None,
roi_scale_factor=None,
):
if rois.shape[0] == 0:
return torch.from_numpy(np.empty(
(0, self.fc_out_channels))).to(feats[0])
if self.with_spatial:
assert rel_pair_idx is not None
return self.union_roi_forward(feats, img_metas, rois, rel_pair_idx,
masks, points, roi_scale_factor)
else:
return self.single_roi_forward(feats, rois, masks, points,
roi_scale_factor)
| 23,631 | 41.275492 | 84 | py |
OpenPSG | OpenPSG-main/openpsg/models/roi_extractors/__init__.py | from .visual_spatial import VisualSpatialExtractor
| 51 | 25 | 50 | py |
OpenPSG | OpenPSG-main/openpsg/models/relation_heads/motif_head.py | # ---------------------------------------------------------------
# motif_head.py
# Set-up time: 2020/4/27 下午8:08
# Copyright (c) 2020 ICT
# Licensed under The MIT License [see LICENSE for details]
# Written by Kenneth-Wong (Wenbin-Wang) @ VIPL.ICT
# Contact: wenbin.wang@vipl.ict.ac.cn [OR] nkwangwenbin@gmail.com
# ---------------------------------------------------------------
import torch
import torch.nn as nn
from mmcv.cnn import normal_init, xavier_init
from mmdet.models import HEADS
from .approaches import LSTMContext
from .relation_head import RelationHead
@HEADS.register_module()
class MotifHead(RelationHead):
def __init__(self, **kwargs):
super(MotifHead, self).__init__(**kwargs)
self.context_layer = LSTMContext(self.head_config, self.obj_classes,
self.rel_classes)
# post decoding
self.use_vision = self.head_config.use_vision
self.hidden_dim = self.head_config.hidden_dim
self.context_pooling_dim = self.head_config.context_pooling_dim
self.post_emb = nn.Linear(self.hidden_dim, self.hidden_dim * 2)
self.post_cat = nn.Linear(self.hidden_dim * 2,
self.context_pooling_dim)
self.rel_compress = nn.Linear(self.context_pooling_dim,
self.num_predicates,
bias=True)
if self.context_pooling_dim != self.head_config.roi_dim:
self.union_single_not_match = True
self.up_dim = nn.Linear(self.head_config.roi_dim,
self.context_pooling_dim)
else:
self.union_single_not_match = False
def init_weights(self):
self.bbox_roi_extractor.init_weights()
self.relation_roi_extractor.init_weights()
self.context_layer.init_weights()
normal_init(self.post_emb,
mean=0,
std=10.0 * (1.0 / self.hidden_dim)**0.5)
xavier_init(self.post_cat)
xavier_init(self.rel_compress)
if self.union_single_not_match:
xavier_init(self.up_dim)
def forward(
self,
img,
img_meta,
det_result,
gt_result=None,
is_testing=False,
ignore_classes=None,
):
"""
Obtain the relation prediction results based on detection results.
Args:
img (Tensor): of shape (N, C, H, W) encoding input images.
Typically these should be mean centered and std scaled.
img_meta (list[dict]): list of image info dict where each dict has:
'img_shape', 'scale_factor', 'flip', and may also contain
'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'.
For details on the values of these keys see
`mmdet/datasets/pipelines/formatting.py:Collect`.
            det_result (Result): Result containing bbox, label, mask, point,
                rels, etc. Depending on the mode, the relevant contents have
                already been filled in and can be used directly.
            gt_result (Result): The ground truth information.
            is_testing (bool): Whether in testing mode.
Returns:
det_result with the following newly added keys:
refine_scores (list[Tensor]): logits of object
rel_scores (list[Tensor]): logits of relation
rel_pair_idxes (list[Tensor]): (num_rel, 2) index of subject and object
relmaps (list[Tensor]): (num_obj, num_obj):
target_rel_labels (list[Tensor]): the target relation label.
"""
roi_feats, union_feats, det_result = self.frontend_features(
img, img_meta, det_result, gt_result)
if roi_feats.shape[0] == 0:
return det_result
# (N_b, N_c + 1), (N_b),
refine_obj_scores, obj_preds, edge_ctx, _ = self.context_layer(
roi_feats, det_result)
if is_testing and ignore_classes is not None:
refine_obj_scores = self.process_ignore_objects(
refine_obj_scores, ignore_classes)
obj_preds = refine_obj_scores[:, 1:].max(1)[1] + 1
# post decode
edge_rep = self.post_emb(edge_ctx)
edge_rep = edge_rep.view(edge_rep.size(0), 2, self.hidden_dim)
head_rep = edge_rep[:, 0].contiguous().view(-1, self.hidden_dim)
tail_rep = edge_rep[:, 1].contiguous().view(-1, self.hidden_dim)
num_rels = [r.shape[0] for r in det_result.rel_pair_idxes]
num_objs = [len(b) for b in det_result.bboxes]
assert len(num_rels) == len(num_objs)
head_reps = head_rep.split(num_objs, dim=0)
tail_reps = tail_rep.split(num_objs, dim=0)
obj_preds = obj_preds.split(num_objs, dim=0)
prod_reps = []
pair_preds = []
for pair_idx, head_rep, tail_rep, obj_pred in zip(
det_result.rel_pair_idxes, head_reps, tail_reps, obj_preds):
prod_reps.append(
torch.cat((head_rep[pair_idx[:, 0]], tail_rep[pair_idx[:, 1]]),
dim=-1))
pair_preds.append(
torch.stack(
(obj_pred[pair_idx[:, 0]], obj_pred[pair_idx[:, 1]]),
dim=1))
prod_rep = torch.cat(prod_reps, dim=0)
pair_pred = torch.cat(pair_preds, dim=0)
prod_rep = self.post_cat(prod_rep)
if self.use_vision:
if self.union_single_not_match:
prod_rep = prod_rep * self.up_dim(union_feats)
else:
prod_rep = prod_rep * union_feats
rel_scores = self.rel_compress(prod_rep)
if self.use_bias:
rel_scores = rel_scores + self.freq_bias.index_with_labels(
pair_pred.long())
# make some changes: list to tensor or tensor to tuple
if self.training:
det_result.target_labels = torch.cat(det_result.target_labels,
dim=-1)
det_result.target_rel_labels = (torch.cat(
det_result.target_rel_labels,
dim=-1) if det_result.target_rel_labels is not None else None)
else:
refine_obj_scores = refine_obj_scores.split(num_objs, dim=0)
rel_scores = rel_scores.split(num_rels, dim=0)
# we use obj_preds instead of pred from obj_dists
# because in decoder_rnn, preds has been through a nms stage
det_result.refine_scores = refine_obj_scores
det_result.rel_scores = rel_scores
# ranking prediction:
if self.with_relation_ranker:
det_result = self.relation_ranking_forward(prod_rep, det_result,
gt_result, num_rels,
is_testing)
return det_result
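# Illustrative sketch (added for clarity, not part of the original OpenPSG
# module): how per-pair representations are gathered from per-object
# representations with `rel_pair_idxes`, as done in `MotifHead.forward` above.
# All sizes and the helper name `_demo_pair_gather` are made up.
def _demo_pair_gather():
    num_obj, hidden_dim = 5, 8
    head_rep = torch.randn(num_obj, hidden_dim)
    tail_rep = torch.randn(num_obj, hidden_dim)
    pair_idx = torch.tensor([[0, 1], [2, 4]])  # (num_rel, 2) subject/object ids
    prod_rep = torch.cat(
        (head_rep[pair_idx[:, 0]], tail_rep[pair_idx[:, 1]]), dim=-1)
    return prod_rep.shape  # torch.Size([2, 16])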
| 6,943 | 38.908046 | 87 | py |
OpenPSG | OpenPSG-main/openpsg/models/relation_heads/gps_head.py | # ---------------------------------------------------------------
# gps_head.py
# Set-up time: 2021/3/31 17:13
# Copyright (c) 2020 ICT
# Licensed under The MIT License [see LICENSE for details]
# Written by Kenneth-Wong (Wenbin-Wang) @ VIPL.ICT
# Contact: wenbin.wang@vipl.ict.ac.cn [OR] nkwangwenbin@gmail.com
# ---------------------------------------------------------------
import torch
import torch.nn as nn
import torch.nn.functional as F
from mmdet.models import HEADS
from .approaches import DirectionAwareMessagePassing
from .relation_head import RelationHead
@HEADS.register_module()
class GPSHead(RelationHead):
def __init__(self, **kwargs):
super(GPSHead, self).__init__(**kwargs)
# 1. Initialize the interaction pattern templates
self.context_layer = DirectionAwareMessagePassing(
self.head_config, self.obj_classes)
if self.use_bias:
self.wp = nn.Linear(self.head_config.roi_dim, self.num_predicates)
self.w_proj1 = nn.Linear(self.head_config.roi_dim,
self.head_config.roi_dim)
self.w_proj2 = nn.Linear(self.head_config.roi_dim,
self.head_config.roi_dim)
self.w_proj3 = nn.Linear(self.head_config.roi_dim,
self.head_config.roi_dim)
self.out_rel = nn.Linear(self.head_config.roi_dim,
self.num_predicates,
bias=True)
def init_weights(self):
self.bbox_roi_extractor.init_weights()
self.relation_roi_extractor.init_weights()
def relation_infer(self,
pair_reps,
union_reps,
proj1,
proj2,
proj3,
out_rel,
wp=None,
log_freq=None):
dim = pair_reps.shape[-1]
t1, t2, t3 = proj1(pair_reps[:, :dim // 2]), \
proj2(pair_reps[:, dim // 2:]), proj3(union_reps)
t4 = (F.relu(t1 + t2) - (t1 - t2) * (t1 - t2))
rel_scores = out_rel(F.relu(t4 + t3) - (t4 - t3) * (t4 - t3))
if wp is not None and log_freq is not None:
tensor_d = F.sigmoid(wp(union_reps))
rel_scores += tensor_d * log_freq
return rel_scores
def forward(self,
img,
img_meta,
det_result,
gt_result=None,
is_testing=False,
ignore_classes=None):
"""Obtain the relation prediction results based on detection results.
Args:
img (Tensor): of shape (N, C, H, W) encoding input images.
Typically these should be mean centered and std scaled.
img_meta (list[dict]): list of image info dict where each dict has:
'img_shape', 'scale_factor', 'flip', and may also contain
'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'.
For details on the values of these keys see
`mmdet/datasets/pipelines/formatting.py:Collect`.
            det_result (Result): Result containing bbox, label, mask, point,
                rels, etc. Depending on the mode, the relevant contents have
                already been filled in and can be used directly.
            gt_result (Result): The ground truth information.
            is_testing (bool): Whether in testing mode.
Returns:
det_result with the following newly added keys:
refine_scores (list[Tensor]): logits of object
rel_scores (list[Tensor]): logits of relation
rel_pair_idxes (list[Tensor]): (num_rel, 2) index of
subject and object
relmaps (list[Tensor]): (num_obj, num_obj):
target_rel_labels (list[Tensor]): the target relation label.
"""
roi_feats, union_feats, det_result = self.frontend_features(
img, img_meta, det_result, gt_result)
if roi_feats.shape[0] == 0:
return det_result
num_rels = [r.shape[0] for r in det_result.rel_pair_idxes]
num_objs = [len(b) for b in det_result.bboxes]
assert len(num_rels) == len(num_objs)
# 1. Message Passing with visual texture features
refine_obj_scores, obj_preds, roi_context_feats = self.context_layer(
roi_feats, union_feats, det_result)
obj_preds = obj_preds.split(num_objs, 0)
split_roi_context_feats = roi_context_feats.split(num_objs)
pair_reps = []
pair_preds = []
for pair_idx, obj_rep, obj_pred in zip(det_result.rel_pair_idxes,
split_roi_context_feats,
obj_preds):
pair_preds.append(
torch.stack(
(obj_pred[pair_idx[:, 0]], obj_pred[pair_idx[:, 1]]),
dim=1))
pair_reps.append(
torch.cat((obj_rep[pair_idx[:, 0]], obj_rep[pair_idx[:, 1]]),
dim=-1))
pair_reps = torch.cat(pair_reps, dim=0)
pair_preds = torch.cat(pair_preds, dim=0)
# 3. build different relation head
log_freq = None
if self.use_bias:
log_freq = F.log_softmax(
self.freq_bias.index_with_labels(
pair_preds.long() -
1)) # USE 0-index when getting frequency vec!
if log_freq.isnan().any(): # TODO:why?
log_freq = None
rel_scores = self.relation_infer(pair_reps, union_feats, self.w_proj1,
self.w_proj2, self.w_proj3,
self.out_rel,
self.wp if self.use_bias else None,
log_freq)
# make some changes: list to tensor or tensor to tuple
if not is_testing:
det_result.target_labels = torch.cat(det_result.target_labels,
dim=-1)
det_result.target_rel_labels = torch.cat(
det_result.target_rel_labels,
dim=-1) if det_result.target_rel_labels is not None else None
else:
refine_obj_scores = refine_obj_scores.split(num_objs, dim=0)
rel_scores = rel_scores.split(num_rels, dim=0)
det_result.refine_scores = refine_obj_scores
det_result.rel_scores = rel_scores
# ranking prediction:
if self.with_relation_ranker:
det_result = self.relation_ranking_forward(pair_reps, det_result,
gt_result, num_rels,
is_testing)
return det_result
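# Illustrative sketch (added for clarity, not part of the original OpenPSG
# module): the fusion used in `GPSHead.relation_infer` above, written out on
# toy tensors after the linear projections. Sizes and the helper name
# `_demo_gps_fusion` are made up.
def _demo_gps_fusion():
    t1 = torch.randn(4, 8)  # projected subject half of the pair feature
    t2 = torch.randn(4, 8)  # projected object half of the pair feature
    t3 = torch.randn(4, 8)  # projected union feature
    t4 = F.relu(t1 + t2) - (t1 - t2) * (t1 - t2)
    fused = F.relu(t4 + t3) - (t4 - t3) * (t4 - t3)
    return fused.shape  # torch.Size([4, 8]); `out_rel` maps this to predicates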
| 6,890 | 41.801242 | 79 | py |
OpenPSG | OpenPSG-main/openpsg/models/relation_heads/imp_head.py | # ---------------------------------------------------------------
# imp_head.py
# Set-up time: 2020/5/21 下午11:22
# Copyright (c) 2020 ICT
# Licensed under The MIT License [see LICENSE for details]
# Written by Kenneth-Wong (Wenbin-Wang) @ VIPL.ICT
# Contact: wenbin.wang@vipl.ict.ac.cn [OR] nkwangwenbin@gmail.com
# ---------------------------------------------------------------
import torch
from mmdet.models import HEADS
from .approaches import IMPContext
from .relation_head import RelationHead
@HEADS.register_module()
class IMPHead(RelationHead):
def __init__(self, **kwargs):
super(IMPHead, self).__init__(**kwargs)
self.context_layer = IMPContext(self.head_config, self.obj_classes,
self.rel_classes)
def forward(self,
img,
img_meta,
det_result,
gt_result=None,
is_testing=False,
ignore_classes=None):
"""Obtain the relation prediction results based on detection results.
Args:
img (Tensor): of shape (N, C, H, W) encoding input images.
Typically these should be mean centered and std scaled.
img_meta (list[dict]): list of image info dict where each dict has:
'img_shape', 'scale_factor', 'flip', and may also contain
'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'.
For details on the values of these keys see
`mmdet/datasets/pipelines/formatting.py:Collect`.
            det_result (Result): Result containing bbox, label, mask, point,
                rels, etc. Depending on the mode, the relevant contents have
                already been filled in and can be used directly.
            gt_result (Result): The ground truth information.
            is_testing (bool): Whether in testing mode.
Returns:
det_result with the following newly added keys:
refine_scores (list[Tensor]): logits of object
rel_scores (list[Tensor]): logits of relation
rel_pair_idxes (list[Tensor]): (num_rel, 2) index of subject and object
relmaps (list[Tensor]): (num_obj, num_obj):
target_rel_labels (list[Tensor]): the target relation label.
"""
roi_feats, union_feats, det_result = self.frontend_features(
img, img_meta, det_result, gt_result)
if roi_feats.shape[0] == 0:
return det_result
refine_obj_scores, rel_scores = self.context_layer(
roi_feats, union_feats, det_result)
num_rels = [r.shape[0] for r in det_result.rel_pair_idxes]
num_objs = [len(b) for b in det_result.bboxes]
assert len(num_rels) == len(num_objs)
if self.use_bias:
obj_preds = refine_obj_scores.max(-1)[1]
obj_preds = obj_preds.split(num_objs, dim=0)
pair_preds = []
for pair_idx, obj_pred in zip(det_result.rel_pair_idxes,
obj_preds):
pair_preds.append(
torch.stack(
(obj_pred[pair_idx[:, 0]], obj_pred[pair_idx[:, 1]]),
dim=1))
pair_pred = torch.cat(pair_preds, dim=0)
rel_scores = rel_scores + self.freq_bias.index_with_labels(
pair_pred.long())
# make some changes: list to tensor or tensor to tuple
if self.training:
det_result.target_labels = torch.cat(det_result.target_labels,
dim=-1)
det_result.target_rel_labels = torch.cat(
det_result.target_rel_labels, dim=-1)
else:
refine_obj_scores = refine_obj_scores.split(num_objs, dim=0)
rel_scores = rel_scores.split(num_rels, dim=0)
det_result.refine_scores = refine_obj_scores
det_result.rel_scores = rel_scores
return det_result
| 3,996 | 40.635417 | 87 | py |
OpenPSG | OpenPSG-main/openpsg/models/relation_heads/__init__.py | from .gps_head import GPSHead
from .imp_head import IMPHead
from .motif_head import MotifHead
from .vctree_head import VCTreeHead
| 130 | 25.2 | 35 | py |
OpenPSG | OpenPSG-main/openpsg/models/relation_heads/psgformer_head.py | # Copyright (c) OpenMMLab. All rights reserved.
from collections import defaultdict
import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision
from mmcv.cnn import Conv2d, Linear, build_activation_layer
from mmcv.cnn.bricks.transformer import build_positional_encoding
from mmcv.runner import force_fp32
from mmdet.core import (bbox_cxcywh_to_xyxy, bbox_xyxy_to_cxcywh,
build_assigner, build_sampler, multi_apply,
reduce_mean)
from mmdet.datasets.coco_panoptic import INSTANCE_OFFSET
from mmdet.models.builder import HEADS, build_loss
from mmdet.models.dense_heads import AnchorFreeHead
from mmdet.models.utils import build_transformer
#####imports for tools
from packaging import version
if version.parse(torchvision.__version__) < version.parse('0.7'):
from torchvision.ops import _new_empty_tensor
from torchvision.ops.misc import _output_size
@HEADS.register_module()
class PSGFormerHead(AnchorFreeHead):
_version = 2
def __init__(self,
num_classes,
in_channels,
num_relations,
object_classes,
predicate_classes,
num_obj_query=100,
num_rel_query=100,
num_reg_fcs=2,
use_mask=True,
temp=0.1,
transformer=None,
n_heads=8,
sync_cls_avg_factor=False,
bg_cls_weight=0.02,
positional_encoding=dict(type='SinePositionalEncoding',
num_feats=128,
normalize=True),
rel_loss_cls=dict(type='CrossEntropyLoss',
use_sigmoid=False,
loss_weight=2.0,
class_weight=1.0),
sub_id_loss=dict(type='CrossEntropyLoss',
use_sigmoid=False,
loss_weight=2.0,
class_weight=1.0),
obj_id_loss=dict(type='CrossEntropyLoss',
use_sigmoid=False,
loss_weight=2.0,
class_weight=1.0),
loss_cls=dict(type='CrossEntropyLoss',
use_sigmoid=False,
loss_weight=1.0,
class_weight=1.0),
loss_bbox=dict(type='L1Loss', loss_weight=5.0),
loss_iou=dict(type='GIoULoss', loss_weight=2.0),
focal_loss=dict(type='BCEFocalLoss', loss_weight=1.0),
dice_loss=dict(type='DiceLoss', loss_weight=1.0),
train_cfg=dict(id_assigner=dict(
type='IdMatcher',
sub_id_cost=dict(type='ClassificationCost', weight=1.),
obj_id_cost=dict(type='ClassificationCost', weight=1.),
r_cls_cost=dict(type='ClassificationCost', weight=1.)),
bbox_assigner=dict(
type='HungarianAssigner',
cls_cost=dict(type='ClassificationCost',
weight=1.),
reg_cost=dict(type='BBoxL1Cost',
weight=5.0),
iou_cost=dict(type='IoUCost',
iou_mode='giou',
weight=2.0))),
test_cfg=dict(max_per_img=100),
init_cfg=None,
**kwargs):
super(AnchorFreeHead, self).__init__(init_cfg)
self.sync_cls_avg_factor = sync_cls_avg_factor
        # NOTE following the official DETR repo, bg_cls_weight means the
        # relative classification weight of the no-object class.
assert isinstance(bg_cls_weight, float), 'Expected ' \
'bg_cls_weight to have type float. Found ' \
f'{type(bg_cls_weight)}.'
self.bg_cls_weight = bg_cls_weight
class_weight = loss_cls.get('class_weight', None)
assert isinstance(class_weight, float), 'Expected ' \
'class_weight to have type float. Found ' \
f'{type(class_weight)}.'
class_weight = torch.ones(num_classes + 1) * class_weight
        # set background class as the last index
class_weight[num_classes] = bg_cls_weight
loss_cls.update({'class_weight': class_weight})
r_class_weight = rel_loss_cls.get('class_weight', None)
assert isinstance(r_class_weight, float), 'Expected ' \
'class_weight to have type float. Found ' \
f'{type(r_class_weight)}.'
r_class_weight = torch.ones(num_relations + 1) * r_class_weight
        # NOTE: set background class as the first index for relations since they are 1-based
r_class_weight[0] = bg_cls_weight
rel_loss_cls.update({'class_weight': r_class_weight})
if 'bg_cls_weight' in rel_loss_cls:
rel_loss_cls.pop('bg_cls_weight')
if train_cfg:
assert 'id_assigner' in train_cfg, 'id_assigner should be provided '\
'when train_cfg is set.'
assert 'bbox_assigner' in train_cfg, 'bbox_assigner should be provided '\
'when train_cfg is set.'
id_assigner = train_cfg['id_assigner']
bbox_assigner = train_cfg['bbox_assigner']
assert loss_cls['loss_weight'] == bbox_assigner['cls_cost']['weight'], \
'The classification weight for loss and matcher should be' \
'exactly the same.'
assert loss_bbox['loss_weight'] == bbox_assigner['reg_cost'][
'weight'], 'The regression L1 weight for loss and matcher ' \
'should be exactly the same.'
assert loss_iou['loss_weight'] == bbox_assigner['iou_cost']['weight'], \
'The regression iou weight for loss and matcher should be' \
'exactly the same.'
self.id_assigner = build_assigner(id_assigner)
self.bbox_assigner = build_assigner(bbox_assigner)
# DETR sampling=False, so use PseudoSampler
sampler_cfg = dict(type='PseudoSampler')
self.sampler = build_sampler(sampler_cfg, context=self)
assert num_obj_query == num_rel_query
self.num_obj_query = num_obj_query
self.num_rel_query = num_rel_query
self.use_mask = use_mask
self.temp = temp
self.num_classes = num_classes
self.num_relations = num_relations
self.object_classes = object_classes
self.predicate_classes = predicate_classes
self.in_channels = in_channels
self.num_reg_fcs = num_reg_fcs
self.train_cfg = train_cfg
self.test_cfg = test_cfg
self.fp16_enabled = False
self.loss_cls = build_loss(loss_cls)
self.loss_bbox = build_loss(loss_bbox)
self.loss_iou = build_loss(loss_iou)
self.focal_loss = build_loss(focal_loss)
self.dice_loss = build_loss(dice_loss)
self.rel_loss_cls = build_loss(rel_loss_cls)
### id losses
self.sub_id_loss = build_loss(sub_id_loss)
self.obj_id_loss = build_loss(obj_id_loss)
if self.loss_cls.use_sigmoid:
self.cls_out_channels = num_classes
else:
self.cls_out_channels = num_classes + 1
if rel_loss_cls['use_sigmoid']:
self.rel_cls_out_channels = num_relations
else:
self.rel_cls_out_channels = num_relations + 1
self.act_cfg = transformer.get('act_cfg',
dict(type='ReLU', inplace=True))
self.activate = build_activation_layer(self.act_cfg)
self.positional_encoding = build_positional_encoding(
positional_encoding)
self.transformer = build_transformer(transformer)
self.n_heads = n_heads
self.embed_dims = self.transformer.embed_dims
assert 'num_feats' in positional_encoding
num_feats = positional_encoding['num_feats']
assert num_feats * 2 == self.embed_dims, 'embed_dims should' \
f' be exactly 2 times of num_feats. Found {self.embed_dims}' \
f' and {num_feats}.'
self._init_layers()
def _init_layers(self):
"""Initialize layers of the transformer head."""
self.input_proj = Conv2d(self.in_channels,
self.embed_dims,
kernel_size=1)
self.obj_query_embed = nn.Embedding(self.num_obj_query,
self.embed_dims)
self.rel_query_embed = nn.Embedding(self.num_rel_query,
self.embed_dims)
self.class_embed = Linear(self.embed_dims, self.cls_out_channels)
self.box_embed = MLP(self.embed_dims, self.embed_dims, 4, 3)
self.sub_query_update = nn.Sequential(
Linear(self.embed_dims, self.embed_dims), nn.ReLU(inplace=True),
Linear(self.embed_dims, self.embed_dims))
self.obj_query_update = nn.Sequential(
Linear(self.embed_dims, self.embed_dims), nn.ReLU(inplace=True),
Linear(self.embed_dims, self.embed_dims))
self.sop_query_update = nn.Sequential(
Linear(2 * self.embed_dims, self.embed_dims),
nn.ReLU(inplace=True), Linear(self.embed_dims, self.embed_dims))
self.rel_query_update = nn.Identity()
self.rel_cls_embed = Linear(self.embed_dims, self.rel_cls_out_channels)
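        # Segmentation branch (in the spirit of DETR's panoptic head): per-query
        # attention maps over the encoder memory feed a small FPN-style conv head
        # that predicts one mask logit map per object query.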
self.bbox_attention = MHAttentionMap(self.embed_dims,
self.embed_dims,
self.n_heads,
dropout=0.0)
self.mask_head = MaskHeadSmallConv(self.embed_dims + self.n_heads,
[1024, 512, 256], self.embed_dims)
def init_weights(self):
"""Initialize weights of the transformer head."""
# The initialization for transformer is important
self.transformer.init_weights()
def _load_from_state_dict(self, state_dict, prefix, local_metadata, strict,
missing_keys, unexpected_keys, error_msgs):
"""load checkpoints."""
version = local_metadata.get('version', None)
if (version is None or version < 2):
convert_dict = {
'.self_attn.': '.attentions.0.',
'.ffn.': '.ffns.0.',
'.multihead_attn.': '.attentions.1.',
'.decoder1.norm.': '.decoder1.post_norm.',
'.decoder2.norm.': '.decoder2.post_norm.',
'.query_embedding.': '.query_embed.'
}
state_dict_keys = list(state_dict.keys())
for k in state_dict_keys:
for ori_key, convert_key in convert_dict.items():
if ori_key in k:
convert_key = k.replace(ori_key, convert_key)
state_dict[convert_key] = state_dict[k]
del state_dict[k]
super(AnchorFreeHead,
self)._load_from_state_dict(state_dict, prefix, local_metadata,
strict, missing_keys,
unexpected_keys, error_msgs)
def forward(self, feats, img_metas, train_mode=False):
        # construct binary masks which are used by the transformer.
        # NOTE following the official DETR repo, non-zero values represent
        # ignored positions, while zero values mean valid positions.
last_features = feats[-1]
batch_size = last_features.size(0)
input_img_h, input_img_w = img_metas[0]['batch_input_shape']
masks = last_features.new_ones((batch_size, input_img_h, input_img_w))
for img_id in range(batch_size):
img_h, img_w, _ = img_metas[img_id]['img_shape']
masks[img_id, :img_h, :img_w] = 0
last_features = self.input_proj(last_features)
# interpolate masks to have the same spatial shape with feats
masks = F.interpolate(masks.unsqueeze(1),
size=last_features.shape[-2:]).to(
torch.bool).squeeze(1)
# position encoding
pos_embed = self.positional_encoding(masks) # [bs, embed_dim, h, w]
# outs_dec: [nb_dec, bs, num_query, embed_dim]
outs_obj_dec, outs_rel_dec, memory \
= self.transformer(last_features, masks,
self.obj_query_embed.weight,
self.rel_query_embed.weight,
pos_embed)
outputs_class = self.class_embed(outs_obj_dec)
outputs_coord = self.box_embed(outs_obj_dec).sigmoid()
bbox_mask = self.bbox_attention(outs_obj_dec[-1], memory, mask=masks)
seg_masks = self.mask_head(last_features, bbox_mask,
[feats[2], feats[1], feats[0]])
seg_masks = seg_masks.view(batch_size, self.num_obj_query,
seg_masks.shape[-2], seg_masks.shape[-1])
### interaction
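        # Project object-query features into separate subject/object embedding
        # spaces and L2-normalize them, so that relation queries can be matched
        # to object queries by a temperature-scaled dot product below.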
updated_sub_embed = self.sub_query_update(outs_obj_dec)
updated_obj_embed = self.obj_query_update(outs_obj_dec)
sub_q_normalized = F.normalize(updated_sub_embed[-1],
p=2,
dim=-1,
eps=1e-12)
obj_q_normalized = F.normalize(updated_obj_embed[-1],
p=2,
dim=-1,
eps=1e-12)
updated_rel_embed = self.rel_query_update(outs_rel_dec)
rel_q_normalized = F.normalize(updated_rel_embed[-1],
p=2,
dim=-1,
eps=1e-12)
#### relation-oriented search
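        # For every relation query, score all object queries as candidate
        # subjects/objects ([bs, num_rel_query, num_obj_query]) and take the
        # argmax indices as the matched subject/object query ids.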
subject_scores = torch.matmul(
rel_q_normalized, sub_q_normalized.transpose(1, 2)) / self.temp
object_scores = torch.matmul(
rel_q_normalized, obj_q_normalized.transpose(1, 2)) / self.temp
_, subject_ids = subject_scores.max(-1)
_, object_ids = object_scores.max(-1)
# prediction
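        # For each image, gather the class/box predictions (all decoder layers)
        # and the masks of the object queries selected as subject and object of
        # every relation query.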
sub_outputs_class = torch.empty_like(outputs_class)
sub_outputs_coord = torch.empty_like(outputs_coord)
obj_outputs_class = torch.empty_like(outputs_class)
obj_outputs_coord = torch.empty_like(outputs_coord)
outputs_sub_seg_masks = torch.empty_like(seg_masks)
outputs_obj_seg_masks = torch.empty_like(seg_masks)
triplet_sub_ids = []
triplet_obj_ids = []
for i in range(len(subject_ids)):
triplet_sub_id = subject_ids[i]
triplet_obj_id = object_ids[i]
sub_outputs_class[:, i] = outputs_class[:, i, triplet_sub_id, :]
sub_outputs_coord[:, i] = outputs_coord[:, i, triplet_sub_id, :]
obj_outputs_class[:, i] = outputs_class[:, i, triplet_obj_id, :]
obj_outputs_coord[:, i] = outputs_coord[:, i, triplet_obj_id, :]
outputs_sub_seg_masks[i] = seg_masks[i, triplet_sub_id, :, :]
outputs_obj_seg_masks[i] = seg_masks[i, triplet_obj_id, :, :]
triplet_sub_ids.append(triplet_sub_id)
triplet_obj_ids.append(triplet_obj_id)
all_cls_scores = dict(cls=outputs_class,
sub=sub_outputs_class,
obj=obj_outputs_class)
rel_outputs_class = self.rel_cls_embed(outs_rel_dec)
all_cls_scores['rel'] = rel_outputs_class
all_cls_scores['sub_ids'] = triplet_sub_ids
all_cls_scores['obj_ids'] = triplet_obj_ids
all_cls_scores['subject_scores'] = subject_scores
all_cls_scores['object_scores'] = object_scores
all_bbox_preds = dict(bbox=outputs_coord,
sub=sub_outputs_coord,
obj=obj_outputs_coord,
mask=seg_masks,
sub_seg=outputs_sub_seg_masks,
obj_seg=outputs_obj_seg_masks)
return all_cls_scores, all_bbox_preds
@force_fp32(apply_to=('all_cls_scores_list', 'all_bbox_preds_list'))
def loss(self,
all_cls_scores_list,
all_bbox_preds_list,
gt_rels_list,
gt_bboxes_list,
gt_labels_list,
gt_masks_list,
img_metas,
gt_bboxes_ignore=None):
        # NOTE by default only the outputs from the last feature scale are used.
all_cls_scores = all_cls_scores_list
all_bbox_preds = all_bbox_preds_list
assert gt_bboxes_ignore is None, \
'Only supports for gt_bboxes_ignore setting to None.'
### object detection and panoptic segmentation
all_od_cls_scores = all_cls_scores['cls']
all_od_bbox_preds = all_bbox_preds['bbox']
all_mask_preds = all_bbox_preds['mask']
num_dec_layers = len(all_od_cls_scores)
all_mask_preds = [all_mask_preds for _ in range(num_dec_layers)]
all_s_bbox_preds = all_bbox_preds['sub']
all_o_bbox_preds = all_bbox_preds['obj']
all_gt_bboxes_list = [gt_bboxes_list for _ in range(num_dec_layers)]
all_gt_labels_list = [gt_labels_list for _ in range(num_dec_layers)]
all_gt_rels_list = [gt_rels_list for _ in range(num_dec_layers)]
all_gt_bboxes_ignore_list = [
gt_bboxes_ignore for _ in range(num_dec_layers)
]
all_gt_masks_list = [gt_masks_list for _ in range(num_dec_layers)]
img_metas_list = [img_metas for _ in range(num_dec_layers)]
all_r_cls_scores = all_cls_scores['rel']
subject_scores = all_cls_scores['subject_scores']
object_scores = all_cls_scores['object_scores']
subject_scores = [subject_scores for _ in range(num_dec_layers)]
object_scores = [object_scores for _ in range(num_dec_layers)]
losses_cls, losses_bbox, losses_iou, dice_losses, focal_losses, \
            r_losses_cls, loss_subject_match, loss_object_match = multi_apply(
self.loss_single, subject_scores, object_scores,
all_od_cls_scores, all_od_bbox_preds, all_mask_preds,
all_r_cls_scores, all_s_bbox_preds, all_o_bbox_preds,
all_gt_rels_list, all_gt_bboxes_list, all_gt_labels_list,
all_gt_masks_list, img_metas_list, all_gt_bboxes_ignore_list)
loss_dict = dict()
## loss of relation-oriented matching
loss_dict['loss_subject_match'] = loss_subject_match[-1]
loss_dict['loss_object_match'] = loss_object_match[-1]
## loss of object detection and segmentation
# loss from the last decoder layer
loss_dict['loss_cls'] = losses_cls[-1]
loss_dict['loss_bbox'] = losses_bbox[-1]
loss_dict['loss_iou'] = losses_iou[-1]
loss_dict['focal_losses'] = focal_losses[-1]
loss_dict['dice_losses'] = dice_losses[-1]
# loss from other decoder layers
num_dec_layer = 0
for loss_cls_i, loss_bbox_i, loss_iou_i in zip(losses_cls[:-1],
losses_bbox[:-1],
losses_iou[:-1]):
loss_dict[f'd{num_dec_layer}.loss_cls'] = loss_cls_i
loss_dict[f'd{num_dec_layer}.loss_bbox'] = loss_bbox_i
loss_dict[f'd{num_dec_layer}.loss_iou'] = loss_iou_i
num_dec_layer += 1
## loss of scene graph
# loss from the last decoder layer
loss_dict['r_loss_cls'] = r_losses_cls[-1]
# loss from other decoder layers
num_dec_layer = 0
for r_loss_cls_i in r_losses_cls[:-1]:
loss_dict[f'd{num_dec_layer}.r_loss_cls'] = r_loss_cls_i
num_dec_layer += 1
return loss_dict
def loss_single(self,
subject_scores,
object_scores,
od_cls_scores,
od_bbox_preds,
mask_preds,
r_cls_scores,
s_bbox_preds,
o_bbox_preds,
gt_rels_list,
gt_bboxes_list,
gt_labels_list,
gt_masks_list,
img_metas,
gt_bboxes_ignore_list=None):
## before get targets
num_imgs = r_cls_scores.size(0)
# obj det&seg
cls_scores_list = [od_cls_scores[i] for i in range(num_imgs)]
bbox_preds_list = [od_bbox_preds[i] for i in range(num_imgs)]
mask_preds_list = [mask_preds[i] for i in range(num_imgs)]
# scene graph
r_cls_scores_list = [r_cls_scores[i] for i in range(num_imgs)]
s_bbox_preds_list = [s_bbox_preds[i] for i in range(num_imgs)]
o_bbox_preds_list = [o_bbox_preds[i] for i in range(num_imgs)]
        # match scores
subject_scores_list = [subject_scores[i] for i in range(num_imgs)]
object_scores_list = [object_scores[i] for i in range(num_imgs)]
cls_reg_targets = self.get_targets(
subject_scores_list, object_scores_list, cls_scores_list,
bbox_preds_list, mask_preds_list, r_cls_scores_list,
s_bbox_preds_list, o_bbox_preds_list, gt_rels_list, gt_bboxes_list,
gt_labels_list, gt_masks_list, img_metas, gt_bboxes_ignore_list)
(labels_list, label_weights_list, bbox_targets_list, bbox_weights_list,
mask_targets_list, num_total_od_pos, num_total_od_neg,
mask_preds_list, r_labels_list, r_label_weights_list, num_total_pos,
num_total_neg, filtered_subject_scores, filtered_object_scores,
gt_subject_id_list, gt_object_id_list) = cls_reg_targets
# obj det&seg
labels = torch.cat(labels_list, 0)
label_weights = torch.cat(label_weights_list, 0)
bbox_targets = torch.cat(bbox_targets_list, 0)
bbox_weights = torch.cat(bbox_weights_list, 0)
mask_targets = torch.cat(mask_targets_list, 0).float().flatten(1)
mask_preds = torch.cat(mask_preds_list, 0).flatten(1)
num_od_matches = mask_preds.shape[0]
# id loss
filtered_subject_scores = torch.cat(
filtered_subject_scores,
0).reshape(len(filtered_subject_scores[0]), -1)
filtered_object_scores = torch.cat(filtered_object_scores, 0).reshape(
len(filtered_object_scores[0]), -1)
gt_subject_id = torch.cat(gt_subject_id_list, 0)
gt_subject_id = F.one_hot(
gt_subject_id, num_classes=filtered_subject_scores.shape[-1])
gt_object_id = torch.cat(gt_object_id_list, 0)
gt_object_id = F.one_hot(gt_object_id,
num_classes=filtered_object_scores.shape[-1])
loss_subject_match = self.sub_id_loss(filtered_subject_scores,
gt_subject_id)
loss_object_match = self.obj_id_loss(filtered_object_scores,
gt_object_id)
# mask loss
focal_loss = self.focal_loss(mask_preds, mask_targets, num_od_matches)
dice_loss = self.dice_loss(mask_preds, mask_targets, num_od_matches)
# classification loss
od_cls_scores = od_cls_scores.reshape(-1, self.cls_out_channels)
# construct weighted avg_factor to match with the official DETR repo
cls_avg_factor = num_total_od_pos * 1.0 + \
num_total_od_neg * self.bg_cls_weight
if self.sync_cls_avg_factor:
cls_avg_factor = reduce_mean(
od_cls_scores.new_tensor([cls_avg_factor]))
cls_avg_factor = max(cls_avg_factor, 1)
loss_cls = self.loss_cls(od_cls_scores,
labels,
label_weights,
avg_factor=cls_avg_factor)
# Compute the average number of gt boxes across all gpus, for
# normalization purposes
num_total_od_pos = loss_cls.new_tensor([num_total_od_pos])
num_total_od_pos = torch.clamp(reduce_mean(num_total_od_pos),
min=1).item()
# construct factors used for rescale bboxes
factors = []
for img_meta, bbox_pred in zip(img_metas, od_bbox_preds):
img_h, img_w, _ = img_meta['img_shape']
factor = bbox_pred.new_tensor([img_w, img_h, img_w,
img_h]).unsqueeze(0).repeat(
bbox_pred.size(0), 1)
factors.append(factor)
factors = torch.cat(factors, 0)
        # DETR regresses the relative position of boxes (cxcywh) in the image,
        # thus the learning target is normalized by the image size. So here
        # we need to re-scale them for calculating the IoU loss.
od_bbox_preds = od_bbox_preds.reshape(-1, 4)
bboxes = bbox_cxcywh_to_xyxy(od_bbox_preds) * factors
bboxes_gt = bbox_cxcywh_to_xyxy(bbox_targets) * factors
        # regression IoU loss, GIoU loss by default
loss_iou = self.loss_iou(bboxes,
bboxes_gt,
bbox_weights,
avg_factor=num_total_od_pos)
# regression L1 loss
loss_bbox = self.loss_bbox(od_bbox_preds,
bbox_targets,
bbox_weights,
avg_factor=num_total_od_pos)
# scene graph
r_labels = torch.cat(r_labels_list, 0)
r_label_weights = torch.cat(r_label_weights_list, 0)
# classification loss
r_cls_scores = r_cls_scores.reshape(-1, self.rel_cls_out_channels)
# construct weighted avg_factor to match with the official DETR repo
cls_avg_factor = num_total_pos * 1.0 + \
num_total_neg * self.bg_cls_weight
if self.sync_cls_avg_factor:
cls_avg_factor = reduce_mean(
r_cls_scores.new_tensor([cls_avg_factor]))
cls_avg_factor = max(cls_avg_factor, 1)
r_loss_cls = self.rel_loss_cls(r_cls_scores,
r_labels,
r_label_weights,
avg_factor=cls_avg_factor)
return loss_cls, loss_bbox, loss_iou, dice_loss, focal_loss, r_loss_cls, loss_subject_match, loss_object_match
def get_targets(self,
subject_scores_list,
object_scores_list,
cls_scores_list,
bbox_preds_list,
mask_preds_list,
r_cls_scores_list,
s_bbox_preds_list,
o_bbox_preds_list,
gt_rels_list,
gt_bboxes_list,
gt_labels_list,
gt_masks_list,
img_metas,
gt_bboxes_ignore_list=None):
assert gt_bboxes_ignore_list is None, \
'Only supports for gt_bboxes_ignore setting to None.'
num_imgs = len(r_cls_scores_list)
gt_bboxes_ignore_list = [
gt_bboxes_ignore_list for _ in range(num_imgs)
]
(labels_list, label_weights_list, bbox_targets_list, bbox_weights_list,
mask_targets_list, od_pos_inds_list, od_neg_inds_list,
mask_preds_list, r_labels_list, r_label_weights_list, pos_inds_list,
neg_inds_list, filtered_subject_scores, filtered_object_scores,
gt_subject_id_list, gt_object_id_list) = multi_apply(
self._get_target_single, subject_scores_list, object_scores_list,
cls_scores_list, bbox_preds_list, mask_preds_list,
r_cls_scores_list, s_bbox_preds_list, o_bbox_preds_list,
gt_rels_list, gt_bboxes_list, gt_labels_list, gt_masks_list,
img_metas, gt_bboxes_ignore_list)
num_total_od_pos = sum((inds.numel() for inds in od_pos_inds_list))
num_total_od_neg = sum((inds.numel() for inds in od_neg_inds_list))
num_total_pos = sum((inds.numel() for inds in pos_inds_list))
num_total_neg = sum((inds.numel() for inds in neg_inds_list))
return (labels_list, label_weights_list, bbox_targets_list,
bbox_weights_list, mask_targets_list, num_total_od_pos,
num_total_od_neg, mask_preds_list, r_labels_list,
r_label_weights_list, num_total_pos, num_total_neg,
filtered_subject_scores, filtered_object_scores,
gt_subject_id_list, gt_object_id_list)
def _get_target_single(self,
subject_scores,
object_scores,
cls_score,
bbox_pred,
mask_preds,
r_cls_score,
s_bbox_pred,
o_bbox_pred,
gt_rels,
gt_bboxes,
gt_labels,
gt_masks,
img_meta,
gt_bboxes_ignore=None):
assert len(gt_masks) == len(gt_bboxes)
###### obj det&seg
num_bboxes = bbox_pred.size(0)
assert len(gt_masks) == len(gt_bboxes)
        # assigner and sampler: assign predicted boxes to the GT objects (detection branch)
od_assign_result = self.bbox_assigner.assign(bbox_pred, cls_score,
gt_bboxes, gt_labels,
img_meta,
gt_bboxes_ignore)
sampling_result = self.sampler.sample(od_assign_result, bbox_pred,
gt_bboxes)
od_pos_inds = sampling_result.pos_inds
        od_neg_inds = sampling_result.neg_inds  #### no-object (background) query indices in prediction
# label targets
labels = gt_bboxes.new_full((num_bboxes, ),
self.num_classes,
dtype=torch.long) ### 0-based
labels[od_pos_inds] = gt_labels[sampling_result.pos_assigned_gt_inds]
label_weights = gt_bboxes.new_ones(num_bboxes)
# mask targets for subjects and objects
mask_targets = gt_masks[sampling_result.pos_assigned_gt_inds,
...] ###FIXME some transform might be needed
mask_preds = mask_preds[od_pos_inds]
mask_preds = interpolate(mask_preds[:, None],
size=gt_masks.shape[-2:],
mode='bilinear',
align_corners=False).squeeze(1)
# bbox targets for subjects and objects
bbox_targets = torch.zeros_like(bbox_pred)
bbox_weights = torch.zeros_like(bbox_pred)
bbox_weights[od_pos_inds] = 1.0
img_h, img_w, _ = img_meta['img_shape']
        # DETR regresses the relative position of boxes (cxcywh) in the image.
        # Thus the learning target should be normalized by the image size, and
        # the box format should be converted from the default x1y1x2y2 to cxcywh.
factor = bbox_pred.new_tensor([img_w, img_h, img_w,
img_h]).unsqueeze(0)
pos_gt_bboxes_normalized = sampling_result.pos_gt_bboxes / factor
pos_gt_bboxes_targets = bbox_xyxy_to_cxcywh(pos_gt_bboxes_normalized)
bbox_targets[od_pos_inds] = pos_gt_bboxes_targets
gt_label_assigned_query = torch.ones_like(gt_labels)
gt_label_assigned_query[
sampling_result.pos_assigned_gt_inds] = od_pos_inds
###### scene graph
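        # Build per-relation ground truth: index GT boxes/labels with the
        # (subject, object) indices stored in gt_rels[:, :2], and record which
        # object query each GT box was assigned to (gt_label_assigned_query) so
        # relation queries can later be matched by query id.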
num_rels = s_bbox_pred.size(0)
        # separate subject boxes and object boxes from gt_bboxes and generate labels
gt_sub_bboxes = []
gt_obj_bboxes = []
gt_sub_labels = []
gt_obj_labels = []
gt_rel_labels = []
gt_sub_ids = []
gt_obj_ids = []
for rel_id in range(gt_rels.size(0)):
gt_sub_bboxes.append(gt_bboxes[int(gt_rels[rel_id, 0])])
gt_obj_bboxes.append(gt_bboxes[int(gt_rels[rel_id, 1])])
gt_sub_labels.append(gt_labels[int(gt_rels[rel_id, 0])])
gt_obj_labels.append(gt_labels[int(gt_rels[rel_id, 1])])
gt_rel_labels.append(gt_rels[rel_id, 2])
gt_sub_ids.append(gt_label_assigned_query[int(gt_rels[rel_id, 0])])
gt_obj_ids.append(gt_label_assigned_query[int(gt_rels[rel_id, 1])])
gt_sub_bboxes = torch.vstack(gt_sub_bboxes).type_as(gt_bboxes).reshape(
-1, 4)
gt_obj_bboxes = torch.vstack(gt_obj_bboxes).type_as(gt_bboxes).reshape(
-1, 4)
gt_sub_labels = torch.vstack(gt_sub_labels).type_as(gt_labels).reshape(
-1)
gt_obj_labels = torch.vstack(gt_obj_labels).type_as(gt_labels).reshape(
-1)
gt_rel_labels = torch.vstack(gt_rel_labels).type_as(gt_labels).reshape(
-1)
gt_sub_ids = torch.vstack(gt_sub_ids).type_as(gt_labels).reshape(-1)
gt_obj_ids = torch.vstack(gt_obj_ids).type_as(gt_labels).reshape(-1)
########################################
#### overwrite relation labels above####
########################################
# assigner and sampler for relation-oriented id match
s_assign_result, o_assign_result = self.id_assigner.assign(
subject_scores, object_scores, r_cls_score, gt_sub_ids, gt_obj_ids,
gt_rel_labels, img_meta, gt_bboxes_ignore)
s_sampling_result = self.sampler.sample(s_assign_result, s_bbox_pred,
gt_sub_bboxes)
o_sampling_result = self.sampler.sample(o_assign_result, o_bbox_pred,
gt_obj_bboxes)
pos_inds = o_sampling_result.pos_inds
neg_inds = o_sampling_result.neg_inds #### no-rel class indices in prediction
        # match id targets
gt_subject_ids = gt_sub_bboxes.new_full((num_rels, ),
-1,
dtype=torch.long)
gt_subject_ids[pos_inds] = gt_sub_ids[
s_sampling_result.pos_assigned_gt_inds]
gt_object_ids = gt_obj_bboxes.new_full((num_rels, ),
-1,
dtype=torch.long)
gt_object_ids[pos_inds] = gt_obj_ids[
o_sampling_result.pos_assigned_gt_inds]
# filtering unmatched subject/object id predictions
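        # Remap the matched subject/object ids from absolute query indices to
        # positions within the positive object queries (od_pos_inds), so they
        # can index the filtered score matrices used by the id-matching losses.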
gt_subject_ids = gt_subject_ids[pos_inds]
gt_subject_ids_res = torch.zeros_like(gt_subject_ids)
for idx, gt_subject_id in enumerate(gt_subject_ids):
gt_subject_ids_res[idx] = ((od_pos_inds == gt_subject_id).nonzero(
as_tuple=True)[0])
gt_subject_ids = gt_subject_ids_res
gt_object_ids = gt_object_ids[pos_inds]
gt_object_ids_res = torch.zeros_like(gt_object_ids)
for idx, gt_object_id in enumerate(gt_object_ids):
gt_object_ids_res[idx] = ((od_pos_inds == gt_object_id).nonzero(
as_tuple=True)[0])
gt_object_ids = gt_object_ids_res
filtered_subject_scores = subject_scores[pos_inds]
filtered_subject_scores = filtered_subject_scores[:, od_pos_inds]
filtered_object_scores = object_scores[pos_inds]
filtered_object_scores = filtered_object_scores[:, od_pos_inds]
r_labels = gt_obj_bboxes.new_full((num_rels, ), 0,
dtype=torch.long) ### 1-based
r_labels[pos_inds] = gt_rel_labels[
o_sampling_result.pos_assigned_gt_inds]
r_label_weights = gt_obj_bboxes.new_ones(num_rels)
return (labels, label_weights, bbox_targets, bbox_weights,
mask_targets, od_pos_inds, od_neg_inds, mask_preds, r_labels,
r_label_weights, pos_inds, neg_inds, filtered_subject_scores,
filtered_object_scores, gt_subject_ids, gt_object_ids
) ###return the interpolated predicted masks
# over-write because img_metas are needed as inputs for bbox_head.
def forward_train(self,
x,
img_metas,
gt_rels,
gt_bboxes,
gt_labels=None,
gt_masks=None,
gt_bboxes_ignore=None,
proposal_cfg=None,
**kwargs):
"""Forward function for training mode.
Args:
x (list[Tensor]): Features from backbone.
img_metas (list[dict]): Meta information of each image, e.g.,
image size, scaling factor, etc.
gt_bboxes (Tensor): Ground truth bboxes of the image,
shape (num_gts, 4).
gt_labels (Tensor): Ground truth labels of each box,
shape (num_gts,).
gt_bboxes_ignore (Tensor): Ground truth bboxes to be
ignored, shape (num_ignored_gts, 4).
proposal_cfg (mmcv.Config): Test / postprocessing configuration,
if None, test_cfg would be used.
Returns:
dict[str, Tensor]: A dictionary of loss components.
"""
assert proposal_cfg is None, '"proposal_cfg" must be None'
outs = self(x, img_metas)
if gt_labels is None:
loss_inputs = outs + (gt_rels, gt_bboxes, gt_masks, img_metas)
else:
loss_inputs = outs + (gt_rels, gt_bboxes, gt_labels, gt_masks,
img_metas)
losses = self.loss(*loss_inputs, gt_bboxes_ignore=gt_bboxes_ignore)
return losses
@force_fp32(apply_to=('all_cls_scores_list', 'all_bbox_preds_list'))
def get_bboxes(self, cls_scores, bbox_preds, img_metas, rescale=False):
        # NOTE by default only outputs from the last feature level are used,
        # and only the outputs from the last decoder layer are used.
result_list = []
for img_id in range(len(img_metas)):
# od_cls_score = cls_scores['cls'][-1, img_id, ...]
# bbox_pred = bbox_preds['bbox'][-1, img_id, ...]
# mask_pred = bbox_preds['mask'][img_id, ...]
all_cls_score = cls_scores['cls'][-1, img_id, ...]
all_masks = bbox_preds['mask'][img_id, ...]
s_cls_score = cls_scores['sub'][-1, img_id, ...]
o_cls_score = cls_scores['obj'][-1, img_id, ...]
r_cls_score = cls_scores['rel'][-1, img_id, ...]
s_bbox_pred = bbox_preds['sub'][-1, img_id, ...]
o_bbox_pred = bbox_preds['obj'][-1, img_id, ...]
img_shape = img_metas[img_id]['img_shape']
scale_factor = img_metas[img_id]['scale_factor']
s_mask_pred = bbox_preds['sub_seg'][img_id, ...]
o_mask_pred = bbox_preds['obj_seg'][img_id, ...]
triplet_sub_ids = cls_scores['sub_ids'][img_id]
triplet_obj_ids = cls_scores['obj_ids'][img_id]
triplets = self._get_bboxes_single(all_masks, all_cls_score,
s_cls_score, o_cls_score,
r_cls_score, s_bbox_pred,
o_bbox_pred, s_mask_pred,
o_mask_pred, img_shape,
triplet_sub_ids,
triplet_obj_ids,
scale_factor, rescale)
result_list.append(triplets)
return result_list
def _get_bboxes_single(self,
all_masks,
all_cls_score,
s_cls_score,
o_cls_score,
r_cls_score,
s_bbox_pred,
o_bbox_pred,
s_mask_pred,
o_mask_pred,
img_shape,
triplet_sub_ids,
triplet_obj_ids,
scale_factor,
rescale=False):
assert len(s_cls_score) == len(o_cls_score)
assert len(s_cls_score) == len(s_bbox_pred)
assert len(s_cls_score) == len(o_bbox_pred)
mask_size = (round(img_shape[0] / scale_factor[1]),
round(img_shape[1] / scale_factor[0]))
max_per_img = self.test_cfg.get('max_per_img', self.num_obj_query)
assert self.rel_loss_cls.use_sigmoid == False
assert len(s_cls_score) == len(r_cls_score)
# 0-based label input for objects and self.num_classes as default background cls
s_logits = F.softmax(s_cls_score, dim=-1)[..., :-1]
o_logits = F.softmax(o_cls_score, dim=-1)[..., :-1]
s_scores, s_labels = s_logits.max(-1)
o_scores, o_labels = o_logits.max(-1)
r_lgs = F.softmax(r_cls_score, dim=-1)
r_logits = r_lgs[..., 1:]
r_scores, r_indexes = r_logits.reshape(-1).topk(max_per_img)
r_labels = r_indexes % self.num_relations + 1
triplet_index = r_indexes // self.num_relations
s_scores = s_scores[triplet_index]
s_labels = s_labels[triplet_index] + 1
s_bbox_pred = s_bbox_pred[triplet_index]
o_scores = o_scores[triplet_index]
o_labels = o_labels[triplet_index] + 1
o_bbox_pred = o_bbox_pred[triplet_index]
r_dists = r_lgs.reshape(
-1, self.num_relations +
1)[triplet_index] #### NOTE: to match the evaluation in vg
labels = torch.cat((s_labels, o_labels), 0)
complete_labels = labels
complete_r_labels = r_labels
complete_r_dists = r_dists
if self.use_mask:
s_mask_pred = s_mask_pred[triplet_index]
o_mask_pred = o_mask_pred[triplet_index]
s_mask_pred = F.interpolate(s_mask_pred.unsqueeze(1),
size=mask_size).squeeze(1)
s_mask_pred = torch.sigmoid(s_mask_pred) > 0.85
o_mask_pred = F.interpolate(o_mask_pred.unsqueeze(1),
size=mask_size).squeeze(1)
o_mask_pred = torch.sigmoid(o_mask_pred) > 0.85
output_masks = torch.cat((s_mask_pred, o_mask_pred), 0)
all_logits = F.softmax(all_cls_score, dim=-1)[..., :-1]
all_scores, all_labels = all_logits.max(-1)
all_masks = F.interpolate(all_masks.unsqueeze(1),
size=mask_size).squeeze(1)
#### for panoptic postprocessing ####
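            # Drop object queries whose predicted label equals the last class
            # index or that are not referenced by any predicted triplet, then
            # renumber the triplet pair indices into the kept set.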
triplet_sub_ids = triplet_sub_ids[triplet_index].view(-1,1)
triplet_obj_ids = triplet_obj_ids[triplet_index].view(-1,1)
pan_rel_pairs = torch.cat((triplet_sub_ids,triplet_obj_ids), -1).to(torch.int).to(all_masks.device)
tri_obj_unique = pan_rel_pairs.unique()
keep = all_labels != (s_logits.shape[-1] - 1)
tmp = torch.zeros_like(keep, dtype=torch.bool)
for id in tri_obj_unique:
tmp[id] = True
keep = keep & tmp
all_labels = all_labels[keep]
all_masks = all_masks[keep]
all_scores = all_scores[keep]
h, w = all_masks.shape[-2:]
no_obj_filter = torch.zeros(pan_rel_pairs.shape[0],dtype=torch.bool)
for triplet_id in range(pan_rel_pairs.shape[0]):
if keep[pan_rel_pairs[triplet_id,0]] and keep[pan_rel_pairs[triplet_id,1]]:
no_obj_filter[triplet_id]=True
pan_rel_pairs = pan_rel_pairs[no_obj_filter]
if keep.sum() != len(keep):
for new_id, past_id in enumerate(keep.nonzero().view(-1)):
pan_rel_pairs.masked_fill_(pan_rel_pairs.eq(past_id), new_id)
r_labels, r_dists = r_labels[no_obj_filter], r_dists[no_obj_filter]
if all_labels.numel() == 0:
pan_img = torch.ones(mask_size).cpu().to(torch.long)
pan_masks = pan_img.unsqueeze(0).cpu().to(torch.long)
                pan_rel_pairs = torch.arange(len(labels), dtype=torch.int).to(all_masks.device).reshape(2, -1).T
rels = torch.tensor([0,0,0]).view(-1,3)
pan_labels = torch.tensor([0])
else:
all_masks = all_masks.flatten(1)
stuff_equiv_classes = defaultdict(lambda: [])
thing_classes = defaultdict(lambda: [])
thing_dedup = defaultdict(lambda: [])
for k, label in enumerate(all_labels):
if label.item() >= 80:
stuff_equiv_classes[label.item()].append(k)
else:
thing_classes[label.item()].append(k)
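                # dedup_things greedily groups duplicate "thing" masks of the
                # same class: any mask whose IoU with the current base mask
                # exceeds 0.5 is merged into that instance (via thing_dedup).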
def dedup_things(pred_ids, binary_masks):
while len(pred_ids) > 1:
base_mask = binary_masks[pred_ids[0]].unsqueeze(0)
other_masks = binary_masks[pred_ids[1:]]
# calculate ious
ious = base_mask.mm(other_masks.transpose(0,1))/((base_mask+other_masks)>0).sum(-1)
ids_left = []
thing_dedup[pred_ids[0]].append(pred_ids[0])
for iou, other_id in zip(ious[0],pred_ids[1:]):
if iou>0.5:
thing_dedup[pred_ids[0]].append(other_id)
else:
ids_left.append(other_id)
pred_ids = ids_left
if len(pred_ids) == 1:
thing_dedup[pred_ids[0]].append(pred_ids[0])
all_binary_masks = (torch.sigmoid(all_masks) > 0.85).to(torch.float)
# create dict that groups duplicate masks
for thing_pred_ids in thing_classes.values():
if len(thing_pred_ids) > 1:
dedup_things(thing_pred_ids, all_binary_masks)
else:
thing_dedup[thing_pred_ids[0]].append(thing_pred_ids[0])
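                # get_ids_area assigns every pixel to its highest-scoring query
                # mask, merges duplicate stuff classes and deduplicated thing
                # instances, drops triplets whose endpoints disappeared, and
                # encodes the result as a panoptic id map via INSTANCE_OFFSET.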
def get_ids_area(all_masks, pan_rel_pairs, r_labels, r_dists, dedup=False):
# This helper function creates the final panoptic segmentation image
                    # It also returns the area of the masks that appear on the image
m_id = all_masks.transpose(0, 1).softmax(-1)
if m_id.shape[-1] == 0:
# We didn't detect any mask :(
m_id = torch.zeros((h, w),
dtype=torch.long,
device=m_id.device)
else:
m_id = m_id.argmax(-1).view(h, w)
if dedup:
# Merge the masks corresponding to the same stuff class
for equiv in stuff_equiv_classes.values():
if len(equiv) > 1:
for eq_id in equiv:
m_id.masked_fill_(m_id.eq(eq_id), equiv[0])
pan_rel_pairs.masked_fill_(pan_rel_pairs.eq(eq_id), equiv[0])
# Merge the masks corresponding to the same thing instance
for equiv in thing_dedup.values():
if len(equiv) > 1:
for eq_id in equiv:
m_id.masked_fill_(m_id.eq(eq_id), equiv[0])
pan_rel_pairs.masked_fill_(pan_rel_pairs.eq(eq_id), equiv[0])
m_ids_remain,_ = m_id.unique().sort()
no_obj_filter2 = torch.zeros(pan_rel_pairs.shape[0],dtype=torch.bool)
for triplet_id in range(pan_rel_pairs.shape[0]):
if pan_rel_pairs[triplet_id,0] in m_ids_remain and pan_rel_pairs[triplet_id,1] in m_ids_remain:
no_obj_filter2[triplet_id]=True
pan_rel_pairs = pan_rel_pairs[no_obj_filter2]
r_labels, r_dists = r_labels[no_obj_filter2], r_dists[no_obj_filter2]
pan_labels = []
pan_masks = []
for i, m_id_remain in enumerate(m_ids_remain):
pan_masks.append(m_id.eq(m_id_remain).unsqueeze(0))
pan_labels.append(all_labels[m_id_remain].unsqueeze(0))
m_id.masked_fill_(m_id.eq(m_id_remain), i)
pan_rel_pairs.masked_fill_(pan_rel_pairs.eq(m_id_remain), i)
pan_masks = torch.cat(pan_masks, 0)
pan_labels = torch.cat(pan_labels, 0)
seg_img = m_id * INSTANCE_OFFSET + pan_labels[m_id]
seg_img = seg_img.view(h, w).cpu().to(torch.long)
m_id = m_id.view(h, w).cpu()
area = []
for i in range(len(all_masks)):
area.append(m_id.eq(i).sum().item())
return area, seg_img, pan_rel_pairs, pan_masks, r_labels, r_dists, pan_labels
area, pan_img, pan_rel_pairs, pan_masks, r_labels, r_dists, pan_labels = \
get_ids_area(all_masks, pan_rel_pairs, r_labels, r_dists, dedup=True)
if r_labels.numel() == 0:
rels = torch.tensor([0,0,0]).view(-1,3)
else:
rels = torch.cat((pan_rel_pairs,r_labels.unsqueeze(-1)),-1)
# if all_labels.numel() > 0:
# # We know filter empty masks as long as we find some
# while True:
# filtered_small = torch.as_tensor(
# [area[i] <= 4 for i, c in enumerate(all_labels)],
# dtype=torch.bool,
# device=keep.device)
# if filtered_small.any().item():
# all_scores = all_scores[~filtered_small]
# all_labels = all_labels[~filtered_small]
# all_masks = all_masks[~filtered_small]
# area, pan_img = get_ids_area(all_masks, all_scores)
# else:
# break
s_det_bboxes = bbox_cxcywh_to_xyxy(s_bbox_pred)
s_det_bboxes[:, 0::2] = s_det_bboxes[:, 0::2] * img_shape[1]
s_det_bboxes[:, 1::2] = s_det_bboxes[:, 1::2] * img_shape[0]
s_det_bboxes[:, 0::2].clamp_(min=0, max=img_shape[1])
s_det_bboxes[:, 1::2].clamp_(min=0, max=img_shape[0])
if rescale:
s_det_bboxes /= s_det_bboxes.new_tensor(scale_factor)
s_det_bboxes = torch.cat((s_det_bboxes, s_scores.unsqueeze(1)), -1)
o_det_bboxes = bbox_cxcywh_to_xyxy(o_bbox_pred)
o_det_bboxes[:, 0::2] = o_det_bboxes[:, 0::2] * img_shape[1]
o_det_bboxes[:, 1::2] = o_det_bboxes[:, 1::2] * img_shape[0]
o_det_bboxes[:, 0::2].clamp_(min=0, max=img_shape[1])
o_det_bboxes[:, 1::2].clamp_(min=0, max=img_shape[0])
if rescale:
o_det_bboxes /= o_det_bboxes.new_tensor(scale_factor)
o_det_bboxes = torch.cat((o_det_bboxes, o_scores.unsqueeze(1)), -1)
det_bboxes = torch.cat((s_det_bboxes, o_det_bboxes), 0)
rel_pairs = torch.arange(len(det_bboxes),
dtype=torch.int).reshape(2, -1).T
if self.use_mask:
return det_bboxes, complete_labels, rel_pairs, output_masks, pan_rel_pairs, \
pan_img, complete_r_labels, complete_r_dists, r_labels, r_dists, pan_masks, rels, pan_labels
else:
return det_bboxes, labels, rel_pairs, r_scores, r_labels, r_dists
def simple_test_bboxes(self, feats, img_metas, rescale=False):
# forward of this head requires img_metas
outs = self.forward(feats, img_metas)
results_list = self.get_bboxes(*outs, img_metas, rescale=rescale)
return results_list
class MLP(nn.Module):
"""Very simple multi-layer perceptron (also called FFN) Copied from
hoitr."""
def __init__(self, input_dim, hidden_dim, output_dim, num_layers):
super().__init__()
self.num_layers = num_layers
h = [hidden_dim] * (num_layers - 1)
self.layers = nn.ModuleList(
nn.Linear(n, k) for n, k in zip([input_dim] + h, h + [output_dim]))
def forward(self, x):
for i, layer in enumerate(self.layers):
x = F.relu(layer(x)) if i < self.num_layers - 1 else layer(x)
return x
def _expand(tensor, length: int):
return tensor.unsqueeze(1).repeat(1, int(length), 1, 1, 1).flatten(0, 1)
class MaskHeadSmallConv(nn.Module):
"""Simple convolutional head, using group norm.
Upsampling is done using a FPN approach
"""
def __init__(self, dim, fpn_dims, context_dim):
super().__init__()
inter_dims = [
dim, context_dim // 2, context_dim // 4, context_dim // 8,
context_dim // 16, context_dim // 64
]
self.lay1 = torch.nn.Conv2d(dim, dim, 3, padding=1)
self.gn1 = torch.nn.GroupNorm(8, dim)
self.lay2 = torch.nn.Conv2d(dim, inter_dims[1], 3, padding=1)
self.gn2 = torch.nn.GroupNorm(8, inter_dims[1])
self.lay3 = torch.nn.Conv2d(inter_dims[1], inter_dims[2], 3, padding=1)
self.gn3 = torch.nn.GroupNorm(8, inter_dims[2])
self.lay4 = torch.nn.Conv2d(inter_dims[2], inter_dims[3], 3, padding=1)
self.gn4 = torch.nn.GroupNorm(8, inter_dims[3])
self.lay5 = torch.nn.Conv2d(inter_dims[3], inter_dims[4], 3, padding=1)
self.gn5 = torch.nn.GroupNorm(8, inter_dims[4])
self.out_lay = torch.nn.Conv2d(inter_dims[4], 1, 3, padding=1)
self.dim = dim
self.adapter1 = torch.nn.Conv2d(fpn_dims[0], inter_dims[1], 1)
self.adapter2 = torch.nn.Conv2d(fpn_dims[1], inter_dims[2], 1)
self.adapter3 = torch.nn.Conv2d(fpn_dims[2], inter_dims[3], 1)
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_uniform_(m.weight, a=1)
nn.init.constant_(m.bias, 0)
def forward(self, x, bbox_mask, fpns):
x = torch.cat(
[_expand(x, bbox_mask.shape[1]),
bbox_mask.flatten(0, 1)], 1)
x = self.lay1(x)
x = self.gn1(x)
x = F.relu(x)
x = self.lay2(x)
x = self.gn2(x)
x = F.relu(x)
cur_fpn = self.adapter1(fpns[0])
if cur_fpn.size(0) != x.size(0):
cur_fpn = _expand(cur_fpn, x.size(0) // cur_fpn.size(0))
x = cur_fpn + F.interpolate(x, size=cur_fpn.shape[-2:], mode='nearest')
x = self.lay3(x)
x = self.gn3(x)
x = F.relu(x)
cur_fpn = self.adapter2(fpns[1])
if cur_fpn.size(0) != x.size(0):
cur_fpn = _expand(cur_fpn, x.size(0) // cur_fpn.size(0))
x = cur_fpn + F.interpolate(x, size=cur_fpn.shape[-2:], mode='nearest')
x = self.lay4(x)
x = self.gn4(x)
x = F.relu(x)
cur_fpn = self.adapter3(fpns[2])
if cur_fpn.size(0) != x.size(0):
cur_fpn = _expand(cur_fpn, x.size(0) // cur_fpn.size(0))
x = cur_fpn + F.interpolate(x, size=cur_fpn.shape[-2:], mode='nearest')
x = self.lay5(x)
x = self.gn5(x)
x = F.relu(x)
x = self.out_lay(x)
return x
class MHAttentionMap(nn.Module):
"""This is a 2D attention module, which only returns the attention softmax
(no multiplication by value)"""
def __init__(self,
query_dim,
hidden_dim,
num_heads,
dropout=0.0,
bias=True):
super().__init__()
self.num_heads = num_heads
self.hidden_dim = hidden_dim
self.dropout = nn.Dropout(dropout)
self.q_linear = nn.Linear(query_dim, hidden_dim, bias=bias)
self.k_linear = nn.Linear(query_dim, hidden_dim, bias=bias)
nn.init.zeros_(self.k_linear.bias)
nn.init.zeros_(self.q_linear.bias)
nn.init.xavier_uniform_(self.k_linear.weight)
nn.init.xavier_uniform_(self.q_linear.weight)
self.normalize_fact = float(hidden_dim / self.num_heads)**-0.5
def forward(self, q, k, mask=None):
q = self.q_linear(q)
k = F.conv2d(k,
self.k_linear.weight.unsqueeze(-1).unsqueeze(-1),
self.k_linear.bias)
qh = q.view(q.shape[0], q.shape[1], self.num_heads,
self.hidden_dim // self.num_heads)
kh = k.view(k.shape[0], self.num_heads,
self.hidden_dim // self.num_heads, k.shape[-2],
k.shape[-1])
weights = torch.einsum('bqnc,bnchw->bqnhw', qh * self.normalize_fact,
kh)
if mask is not None:
weights.masked_fill_(mask.unsqueeze(1).unsqueeze(1), float('-inf'))
weights = F.softmax(weights.flatten(2), dim=-1).view(weights.size())
weights = self.dropout(weights)
return weights
def interpolate(input,
size=None,
scale_factor=None,
mode='nearest',
align_corners=None):
"""Equivalent to nn.functional.interpolate, but with support for empty
batch sizes.
This will eventually be supported natively by PyTorch, and this class can
go away.
"""
if version.parse(torchvision.__version__) < version.parse('0.7'):
if input.numel() > 0:
return torch.nn.functional.interpolate(input, size, scale_factor,
mode, align_corners)
output_shape = _output_size(2, input, size, scale_factor)
output_shape = list(input.shape[:-2]) + list(output_shape)
return _new_empty_tensor(input, output_shape)
else:
return torchvision.ops.misc.interpolate(input, size, scale_factor,
mode, align_corners)
| 59,405 | 44.979876 | 119 | py |
OpenPSG | OpenPSG-main/openpsg/models/relation_heads/relation_head.py | import copy
import itertools
import mmcv
import numpy as np
import torch
import torch.nn.functional as F
from mmcv.runner import BaseModule
from mmdet.core import bbox2roi
from mmdet.models import HEADS, builder
from mmdet.models.losses import accuracy
from .approaches import (FrequencyBias, PostProcessor, RelationSampler,
get_weak_key_rel_labels)
@HEADS.register_module()
class RelationHead(BaseModule):
"""The basic class of all the relation head."""
def __init__(
self,
object_classes,
predicate_classes,
head_config,
bbox_roi_extractor=None,
relation_roi_extractor=None,
relation_sampler=None,
relation_ranker=None,
dataset_config=None,
use_bias=False,
use_statistics=False,
num_classes=151,
num_predicates=51,
loss_object=dict(type='CrossEntropyLoss',
use_sigmoid=False,
loss_weight=1.0),
loss_relation=None,
init_cfg=None,
):
"""The public parameters that shared by various relation heads are
initialized here."""
super(RelationHead, self).__init__(init_cfg)
self.use_bias = use_bias
self.num_classes = num_classes
self.num_predicates = num_predicates
# upgrade some submodule attribute to this head
self.head_config = head_config
self.use_gt_box = self.head_config.use_gt_box
self.use_gt_label = self.head_config.use_gt_label
self.with_visual_bbox = (bbox_roi_extractor is not None
and bbox_roi_extractor.with_visual_bbox) or (
relation_roi_extractor is not None and
relation_roi_extractor.with_visual_bbox)
self.with_visual_mask = (bbox_roi_extractor is not None
and bbox_roi_extractor.with_visual_mask) or (
relation_roi_extractor is not None and
relation_roi_extractor.with_visual_mask)
self.with_visual_point = (bbox_roi_extractor is not None and
bbox_roi_extractor.with_visual_point) or (
relation_roi_extractor is not None and
relation_roi_extractor.with_visual_point)
self.dataset_config = dataset_config
if self.use_gt_box:
if self.use_gt_label:
self.mode = 'predcls'
else:
self.mode = 'sgcls'
else:
self.mode = 'sgdet'
if bbox_roi_extractor is not None:
self.bbox_roi_extractor = builder.build_roi_extractor(
bbox_roi_extractor)
if relation_roi_extractor is not None:
self.relation_roi_extractor = builder.build_roi_extractor(
relation_roi_extractor)
if relation_sampler is not None:
relation_sampler.update(dict(use_gt_box=self.use_gt_box))
self.relation_sampler = RelationSampler(**relation_sampler)
self.post_processor = PostProcessor()
# relation ranker: a standard component
if relation_ranker is not None:
ranker = relation_ranker.pop('type')
# self.supervised_form = relation_ranker.pop('supervised_form')
self.comb_factor = relation_ranker.pop('comb_factor', 0.5)
self.area_form = relation_ranker.pop('area_form', 'rect')
loss_ranking_relation = relation_ranker.pop('loss')
self.loss_ranking_relation = builder.build_loss(
loss_ranking_relation)
if loss_ranking_relation.type != 'CrossEntropyLoss':
num_out = 1
else:
num_out = 2
relation_ranker.update(dict(num_out=num_out))
self.relation_ranker = eval(ranker)(**relation_ranker)
if loss_object is not None:
self.loss_object = builder.build_loss(loss_object)
if loss_relation is not None:
self.loss_relation = builder.build_loss(loss_relation)
if use_statistics:
cache_dir = dataset_config['cache']
self.statistics = torch.load(cache_dir,
map_location=torch.device('cpu'))
print('\n Statistics loaded!')
self.obj_classes, self.rel_classes = (
object_classes,
predicate_classes,
)
self.obj_classes.insert(0, '__background__')
self.rel_classes.insert(0, '__background__')
assert self.num_classes == len(self.obj_classes)
assert self.num_predicates == len(self.rel_classes)
if self.use_bias:
assert self.with_statistics
# convey statistics into FrequencyBias to avoid loading again
self.freq_bias = FrequencyBias(self.head_config, self.statistics)
@property
def with_bbox_roi_extractor(self):
return (hasattr(self, 'bbox_roi_extractor')
and self.bbox_roi_extractor is not None)
@property
def with_relation_roi_extractor(self):
return (hasattr(self, 'relation_roi_extractor')
and self.relation_roi_extractor is not None)
@property
def with_statistics(self):
return hasattr(self, 'statistics') and self.statistics is not None
@property
def with_bias(self):
return hasattr(self, 'freq_bias') and self.freq_bias is not None
@property
def with_loss_object(self):
return hasattr(self, 'loss_object') and self.loss_object is not None
@property
def with_loss_relation(self):
return hasattr(self,
'loss_relation') and self.loss_relation is not None
@property
def with_relation_ranker(self):
return hasattr(self,
'relation_ranker') and self.relation_ranker is not None
def init_weights(self):
if self.with_bbox_roi_extractor:
self.bbox_roi_extractor.init_weights()
if self.with_relation_roi_extractor:
self.relation_roi_extractor.init_weights()
self.context_layer.init_weights()
def frontend_features(self, img, img_meta, det_result, gt_result):
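        """Sample relation pairs and extract per-box and union-region features.
        During training/validation (when GT relations are available) the pairs
        are sampled against the ground truth; at test time all candidate pairs
        are enumerated via ``prepare_test_pairs``.
        """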
bboxes, masks, points = (
det_result.bboxes,
det_result.masks,
copy.deepcopy(det_result.points),
)
        # train/val, or finetuning on a dataset without
        # relationship annotations
if gt_result is not None and gt_result.rels is not None:
if self.mode in ['predcls', 'sgcls']:
sample_function = self.relation_sampler.gtbox_relsample
else:
sample_function = self.relation_sampler.detect_relsample
sample_res = sample_function(det_result, gt_result)
if len(sample_res) == 4:
rel_labels, rel_pair_idxes, rel_matrix, \
key_rel_labels = sample_res
else:
rel_labels, rel_pair_idxes, rel_matrix = sample_res
key_rel_labels = None
else:
rel_labels, rel_matrix, key_rel_labels = None, None, None
rel_pair_idxes = self.relation_sampler.prepare_test_pairs(
det_result)
det_result.rel_pair_idxes = rel_pair_idxes
det_result.relmaps = rel_matrix
det_result.target_rel_labels = rel_labels
det_result.target_key_rel_labels = key_rel_labels
rois = bbox2roi(bboxes)
# merge image-wise masks or points
if masks is not None:
masks = list(itertools.chain(*masks))
if points is not None:
aug_points = []
for pts_list in points:
for pts in pts_list:
pts = pts.view(-1, 2) # (:, [x, y])
pts += torch.from_numpy(
np.random.normal(0, 0.02, size=pts.shape)).to(pts)
# pts -= torch.mean(pts, dim=0, keepdim=True)
pts /= torch.max(torch.sqrt(torch.sum(pts**2, dim=1)))
aug_points.append(pts)
points = aug_points
# extract the unary roi features and union roi features.
roi_feats = self.bbox_roi_extractor(img,
img_meta,
rois,
masks=masks,
points=points)
union_feats = self.relation_roi_extractor(img,
img_meta,
rois,
rel_pair_idx=rel_pair_idxes,
masks=masks,
points=points)
return roi_feats + union_feats + (det_result, )
# return roi_feats, union_feats, (det_result,)
def forward(self, **kwargs):
raise NotImplementedError
def relation_ranking_forward(self, input, det_result, gt_result, num_rels,
is_testing):
        # predict the ranking scores (a single tensor over all sampled relations)
ranking_scores = self.relation_ranker(
input.detach(), det_result, self.relation_roi_extractor.union_rois)
# (1) weak supervision, KLDiv:
if self.loss_ranking_relation.__class__.__name__ == 'KLDivLoss':
if not is_testing: # include training and validation
# list form
det_result.target_key_rel_labels = get_weak_key_rel_labels(
det_result, gt_result, self.comb_factor, self.area_form)
ranking_scores = ranking_scores.view(-1)
ranking_scores = ranking_scores.split(num_rels, 0)
else:
ranking_scores = ranking_scores.view(-1)
ranking_scores = torch.sigmoid(ranking_scores).split(num_rels,
dim=0)
        # (2) CE loss: the prediction is a binary classification with 2 columns
if self.loss_ranking_relation.__class__.__name__ == 'CrossEntropyLoss':
if not is_testing:
det_result.target_key_rel_labels = torch.cat(
det_result.target_key_rel_labels, dim=-1)
else:
ranking_scores = (F.softmax(ranking_scores,
dim=-1)[:, 1].view(-1).split(
num_rels, 0))
# Margin loss, DR loss
elif self.loss_ranking_relation.__class__.__name__ == 'SigmoidDRLoss':
if not is_testing:
ranking_scores = ranking_scores.view(-1)
ranking_scores = ranking_scores.split(num_rels, 0)
else:
ranking_scores = ranking_scores.view(-1)
ranking_scores = torch.sigmoid(ranking_scores).split(num_rels,
dim=0)
det_result.ranking_scores = ranking_scores
return det_result
def loss(self, det_result):
(
obj_scores,
rel_scores,
target_labels,
target_rel_labels,
add_for_losses,
head_spec_losses,
) = (
det_result.refine_scores,
det_result.rel_scores,
det_result.target_labels,
det_result.target_rel_labels,
det_result.add_losses,
det_result.head_spec_losses,
)
losses = dict()
if self.with_loss_object and obj_scores is not None:
            # during validation the targets may still be per-image lists; concatenate them
if isinstance(target_labels, (tuple, list)):
target_labels = torch.cat(target_labels, dim=-1)
if isinstance(obj_scores, (tuple, list)):
obj_scores = torch.cat(obj_scores, dim=0)
losses['loss_object'] = self.loss_object(obj_scores, target_labels)
losses['acc_object'] = accuracy(obj_scores, target_labels)
if self.with_loss_relation and rel_scores is not None:
if isinstance(target_rel_labels, (tuple, list)):
target_rel_labels = torch.cat(target_rel_labels, dim=-1)
if isinstance(rel_scores, (tuple, list)):
rel_scores = torch.cat(rel_scores, dim=0)
losses['loss_relation'] = self.loss_relation(
rel_scores, target_rel_labels)
losses['acc_relation'] = accuracy(rel_scores, target_rel_labels)
if self.with_relation_ranker:
target_key_rel_labels = det_result.target_key_rel_labels
ranking_scores = det_result.ranking_scores
avg_factor = (torch.nonzero(
target_key_rel_labels != -1).view(-1).size(0) if isinstance(
target_key_rel_labels, torch.Tensor) else None)
losses['loss_ranking_relation'] = self.loss_ranking_relation(
ranking_scores, target_key_rel_labels, avg_factor=avg_factor)
# if self.supervised_form == 'weak':
# # use the KLdiv loss: the label is the soft distribution
# bs = 0
# losses['loss_ranking_relation'] = 0
# for ranking_score, target_key_rel_label in zip(ranking_scores, target_key_rel_labels):
# bs += ranking_score.size(0)
# losses['loss_ranking_relation'] += torch.nn.KLDivLoss(reduction='none')(F.log_softmax(ranking_score, dim=-1),
# target_key_rel_label).sum(-1)
# losses['loss_ranking_relation'] = losses['loss_ranking_relation'] / bs
# else:
# #TODO: firstly try the CE loss function, or you may try the margin loss
# #TODO: Check the margin loss
# #loss_func = builder.build_loss(self.loss_ranking_relation)
# losses['loss_ranking_relation'] = self.loss_ranking_relation(ranking_scores, target_key_rel_labels)
if add_for_losses is not None:
for loss_key, loss_item in add_for_losses.items():
if isinstance(loss_item, list): # loss_vctree_binary
loss_ = [
F.binary_cross_entropy_with_logits(l[0], l[1])
for l in loss_item
]
loss_ = sum(loss_) / len(loss_)
losses[loss_key] = loss_
elif isinstance(loss_item, tuple):
if isinstance(loss_item[1], (list, tuple)):
target = torch.cat(loss_item[1], -1)
else:
target = loss_item[1]
losses[loss_key] = F.cross_entropy(loss_item[0], target)
else:
raise NotImplementedError
if head_spec_losses is not None:
# this losses have been calculated in the specific relation head
losses.update(head_spec_losses)
return losses
def get_result(self, det_result, scale_factor, rescale, key_first=False):
"""for test forward.
:param det_result:
:return:
"""
result = self.post_processor(det_result, key_first=key_first)
for k, v in result.__dict__.items():
if (k != 'add_losses' and k != 'head_spec_losses' and v is not None
and len(v) > 0):
_v = v[0] # remove the outer list
if isinstance(_v, torch.Tensor):
result.__setattr__(k, _v.cpu().numpy())
elif isinstance(_v, list): # for mask
result.__setattr__(k, [__v.cpu().numpy() for __v in _v])
else:
result.__setattr__(k, _v) # e.g., img_shape, is a tuple
if rescale:
if result.bboxes is not None:
result.bboxes[:, :4] = result.bboxes[:, :4] / scale_factor
if result.refine_bboxes is not None:
result.refine_bboxes[:, :
4] = result.refine_bboxes[:, :
4] / scale_factor
if result.masks is not None:
resize_masks = []
for bbox, mask in zip(result.refine_bboxes, result.masks):
_bbox = bbox.astype(np.int32)
w = max(_bbox[2] - _bbox[0] + 1, 1)
h = max(_bbox[3] - _bbox[1] + 1, 1)
resize_masks.append(
mmcv.imresize(mask.astype(np.uint8), (w, h)))
result.masks = resize_masks
if result.points is not None:
resize_points = []
for points in result.points:
resize_points.append(points / scale_factor)
result.points = resize_points
# if needed, adjust the form for object detection evaluation
result.formatted_bboxes, result.formatted_masks = [], []
if result.refine_bboxes is None:
result.formatted_bboxes = [
np.zeros((0, 5), dtype=np.float32)
for i in range(self.num_classes - 1)
]
else:
result.formatted_bboxes = [
result.refine_bboxes[result.refine_labels == i + 1, :]
for i in range(self.num_classes - 1)
]
if result.masks is None:
result.formatted_masks = [[] for i in range(self.num_classes - 1)]
else:
result.formatted_masks = [[] for i in range(self.num_classes - 1)]
for i in range(len(result.masks)):
result.formatted_masks[result.refine_labels[i] - 1].append(
result.masks[i])
        # to save space, drop the saliency maps if they exist
if result.saliency_maps is not None:
result.saliency_maps = None
return result
def process_ignore_objects(self, input, ignore_classes):
"""An API used in inference stage for processing the data when some
object classes should be ignored."""
ignored_input = input.clone()
ignored_input[:, ignore_classes] = 0.0
return ignored_input
| 18,539 | 41.136364 | 131 | py |
OpenPSG | OpenPSG-main/openpsg/models/relation_heads/vctree_head.py | # ---------------------------------------------------------------
# vctree_head.py
# Set-up time: 2020/6/4 9:35 AM
# Copyright (c) 2020 ICT
# Licensed under The MIT License [see LICENSE for details]
# Written by Kenneth-Wong (Wenbin-Wang) @ VIPL.ICT
# Contact: wenbin.wang@vipl.ict.ac.cn [OR] nkwangwenbin@gmail.com
# ---------------------------------------------------------------
import torch
import torch.nn as nn
import torch.nn.functional as F
from mmcv.cnn import normal_init, xavier_init
from mmdet.models import HEADS
from .approaches import VCTreeLSTMContext
from .relation_head import RelationHead
@HEADS.register_module()
class VCTreeHead(RelationHead):
def __init__(self, **kwargs):
super(VCTreeHead, self).__init__(**kwargs)
self.context_layer = VCTreeLSTMContext(self.head_config,
self.obj_classes,
self.rel_classes)
# post decoding
self.use_vision = self.head_config.use_vision
self.hidden_dim = self.head_config.hidden_dim
self.context_pooling_dim = self.head_config.context_pooling_dim
self.post_emb = nn.Linear(self.hidden_dim, self.hidden_dim * 2)
self.post_cat = nn.Linear(self.hidden_dim * 2,
self.context_pooling_dim)
self.rel_compress = nn.Linear(self.context_pooling_dim,
self.num_predicates,
bias=True)
if self.context_pooling_dim != self.head_config.roi_dim:
self.union_single_not_match = True
self.up_dim = nn.Linear(self.head_config.roi_dim,
self.context_pooling_dim)
else:
self.union_single_not_match = False
def init_weights(self):
self.bbox_roi_extractor.init_weights()
self.relation_roi_extractor.init_weights()
self.context_layer.init_weights()
normal_init(self.post_emb,
mean=0,
std=10.0 * (1.0 / self.hidden_dim)**0.5)
xavier_init(self.post_cat)
xavier_init(self.rel_compress)
if self.union_single_not_match:
xavier_init(self.up_dim)
def forward(self,
img,
img_meta,
det_result,
gt_result=None,
is_testing=False,
ignore_classes=None):
"""
Obtain the relation prediction results based on detection results.
Args:
img (Tensor): of shape (N, C, H, W) encoding input images.
Typically these should be mean centered and std scaled.
img_meta (list[dict]): list of image info dict where each dict has:
'img_shape', 'scale_factor', 'flip', and may also contain
'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'.
For details on the values of these keys see
`mmdet/datasets/pipelines/formatting.py:Collect`.
            det_result (Result): Result containing bboxes, labels, masks, points,
                rels, etc. Depending on the running mode, the relevant fields
                have already been filled in and can be used directly.
            gt_result (Result): The ground truth information.
            is_testing (bool): Whether the forward pass runs in testing mode.
Returns:
det_result with the following newly added keys:
refine_scores (list[Tensor]): logits of object
rel_scores (list[Tensor]): logits of relation
rel_pair_idxes (list[Tensor]): (num_rel, 2) index of subject and object
                relmaps (list[Tensor]): (num_obj, num_obj) relation maps.
target_rel_labels (list[Tensor]): the target relation label.
"""
roi_feats, union_feats, det_result = self.frontend_features(
img, img_meta, det_result, gt_result)
if roi_feats.shape[0] == 0:
return det_result
refine_obj_scores, obj_preds, edge_ctx, binary_preds = self.context_layer(
roi_feats, det_result)
# post decode
edge_rep = F.relu(self.post_emb(edge_ctx))
edge_rep = edge_rep.view(edge_rep.size(0), 2, self.hidden_dim)
head_rep = edge_rep[:, 0].contiguous().view(-1, self.hidden_dim)
tail_rep = edge_rep[:, 1].contiguous().view(-1, self.hidden_dim)
num_rels = [r.shape[0] for r in det_result.rel_pair_idxes]
num_objs = [len(b) for b in det_result.bboxes]
assert len(num_rels) == len(num_objs)
head_reps = head_rep.split(num_objs, dim=0)
tail_reps = tail_rep.split(num_objs, dim=0)
obj_preds = obj_preds.split(num_objs, dim=0)
prod_reps = []
pair_preds = []
for pair_idx, head_rep, tail_rep, obj_pred in zip(
det_result.rel_pair_idxes, head_reps, tail_reps, obj_preds):
prod_reps.append(
torch.cat((head_rep[pair_idx[:, 0]], tail_rep[pair_idx[:, 1]]),
dim=-1))
pair_preds.append(
torch.stack(
(obj_pred[pair_idx[:, 0]], obj_pred[pair_idx[:, 1]]),
dim=1))
prod_rep = torch.cat(prod_reps, dim=0)
pair_pred = torch.cat(pair_preds, dim=0)
prod_rep = self.post_cat(prod_rep)
if self.use_vision:
if self.union_single_not_match:
prod_rep = prod_rep * self.up_dim(union_feats)
else:
prod_rep = prod_rep * union_feats
rel_scores = self.rel_compress(prod_rep)
if self.use_bias:
rel_scores = rel_scores + self.freq_bias.index_with_labels(
pair_pred.long())
# make some changes: list to tensor or tensor to tuple
if self.training:
det_result.target_labels = torch.cat(det_result.target_labels,
dim=-1)
det_result.target_rel_labels = torch.cat(
det_result.target_rel_labels, dim=-1)
else:
refine_obj_scores = refine_obj_scores.split(num_objs, dim=0)
rel_scores = rel_scores.split(num_rels, dim=0)
det_result.refine_scores = refine_obj_scores
det_result.rel_scores = rel_scores
# add additional auxiliary loss
add_for_losses = {}
if not is_testing:
binary_loss_items = []
for bi_gt, bi_pred in zip(det_result.relmaps, binary_preds):
bi_gt = (bi_gt > 0).float()
binary_loss_items.append((bi_pred, bi_gt))
add_for_losses['loss_vctree_binary'] = binary_loss_items
det_result.add_losses = add_for_losses
# ranking prediction:
if self.with_relation_ranker:
det_result = self.relation_ranking_forward(prod_rep, det_result,
gt_result, num_rels,
is_testing)
return det_result
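# A minimal standalone sketch of the pair-feature construction used in
# `forward` above: head/tail representations are gathered with the
# subject/object indices from `rel_pair_idxes` and concatenated. The sizes
# below are made up for illustration and are not tied to any real config.
def _demo_pair_features():
    import torch
    hidden_dim, num_objs, num_rels = 8, 5, 3
    head_rep = torch.rand(num_objs, hidden_dim)
    tail_rep = torch.rand(num_objs, hidden_dim)
    rel_pair_idx = torch.tensor([[0, 1], [2, 4], [3, 0]])  # (num_rels, 2)
    prod_rep = torch.cat(
        (head_rep[rel_pair_idx[:, 0]], tail_rep[rel_pair_idx[:, 1]]), dim=-1)
    assert prod_rep.shape == (num_rels, 2 * hidden_dim)
    return prod_rep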
| 7,052 | 40.005814 | 87 | py |
OpenPSG | OpenPSG-main/openpsg/models/relation_heads/psgtr_head.py | # Copyright (c) OpenMMLab. All rights reserved.
import time
from collections import defaultdict
from inspect import signature
import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision
from mmcv.cnn import Conv2d, Linear, build_activation_layer
from mmcv.cnn.bricks.transformer import FFN, build_positional_encoding
from mmcv.ops import batched_nms
from mmcv.runner import force_fp32
from mmdet.core import (bbox_cxcywh_to_xyxy, bbox_xyxy_to_cxcywh,
build_assigner, build_sampler, multi_apply,
reduce_mean)
from mmdet.datasets.coco_panoptic import INSTANCE_OFFSET
from mmdet.models.builder import HEADS, build_loss
from mmdet.models.dense_heads import AnchorFreeHead
from mmdet.models.utils import build_transformer
# imports for tools
from packaging import version
if version.parse(torchvision.__version__) < version.parse('0.7'):
from torchvision.ops import _new_empty_tensor
from torchvision.ops.misc import _output_size
@HEADS.register_module()
class PSGTrHead(AnchorFreeHead):
_version = 2
def __init__(
self,
num_classes,
in_channels,
num_relations,
object_classes,
predicate_classes,
use_mask=True,
num_query=100,
num_reg_fcs=2,
transformer=None,
n_heads=8,
swin_backbone=None,
sync_cls_avg_factor=False,
bg_cls_weight=0.02,
positional_encoding=dict(type='SinePositionalEncoding',
num_feats=128,
normalize=True),
sub_loss_cls=dict(type='CrossEntropyLoss',
use_sigmoid=False,
loss_weight=1.0,
class_weight=1.0),
sub_loss_bbox=dict(type='L1Loss', loss_weight=5.0),
sub_loss_iou=dict(type='GIoULoss', loss_weight=2.0),
sub_focal_loss=dict(type='BCEFocalLoss', loss_weight=1.0),
sub_dice_loss=dict(type='DiceLoss', loss_weight=1.0),
obj_loss_cls=dict(type='CrossEntropyLoss',
use_sigmoid=False,
loss_weight=1.0,
class_weight=1.0),
obj_loss_bbox=dict(type='L1Loss', loss_weight=5.0),
obj_loss_iou=dict(type='GIoULoss', loss_weight=2.0),
obj_focal_loss=dict(type='BCEFocalLoss', loss_weight=1.0),
obj_dice_loss=dict(type='DiceLoss', loss_weight=1.0),
rel_loss_cls=dict(type='CrossEntropyLoss',
use_sigmoid=False,
loss_weight=2.0,
class_weight=1.0),
train_cfg=dict(assigner=dict(
type='HTriMatcher',
s_cls_cost=dict(type='ClassificationCost', weight=1.),
s_reg_cost=dict(type='BBoxL1Cost', weight=5.0),
s_iou_cost=dict(type='IoUCost', iou_mode='giou', weight=2.0),
o_cls_cost=dict(type='ClassificationCost', weight=1.),
o_reg_cost=dict(type='BBoxL1Cost', weight=5.0),
o_iou_cost=dict(type='IoUCost', iou_mode='giou', weight=2.0),
r_cls_cost=dict(type='ClassificationCost', weight=2.))),
test_cfg=dict(max_per_img=100),
init_cfg=None,
**kwargs):
super(AnchorFreeHead, self).__init__(init_cfg)
self.sync_cls_avg_factor = sync_cls_avg_factor
        # NOTE following the official DETR repo, bg_cls_weight means
# relative classification weight of the no-object class.
assert isinstance(bg_cls_weight, float), 'Expected ' \
'bg_cls_weight to have type float. Found ' \
f'{type(bg_cls_weight)}.'
self.bg_cls_weight = bg_cls_weight
assert isinstance(use_mask, bool), 'Expected ' \
'use_mask to have type bool. Found ' \
f'{type(use_mask)}.'
self.use_mask = use_mask
s_class_weight = sub_loss_cls.get('class_weight', None)
assert isinstance(s_class_weight, float), 'Expected ' \
'class_weight to have type float. Found ' \
f'{type(s_class_weight)}.'
s_class_weight = torch.ones(num_classes + 1) * s_class_weight
        # NOTE set background class as the last index
s_class_weight[-1] = bg_cls_weight
sub_loss_cls.update({'class_weight': s_class_weight})
o_class_weight = obj_loss_cls.get('class_weight', None)
assert isinstance(o_class_weight, float), 'Expected ' \
'class_weight to have type float. Found ' \
f'{type(o_class_weight)}.'
o_class_weight = torch.ones(num_classes + 1) * o_class_weight
        # NOTE set background class as the last index
o_class_weight[-1] = bg_cls_weight
obj_loss_cls.update({'class_weight': o_class_weight})
r_class_weight = rel_loss_cls.get('class_weight', None)
assert isinstance(r_class_weight, float), 'Expected ' \
'class_weight to have type float. Found ' \
f'{type(r_class_weight)}.'
r_class_weight = torch.ones(num_relations + 1) * r_class_weight
        # NOTE set background class as the first index for relations as they are 1-based
r_class_weight[0] = bg_cls_weight
rel_loss_cls.update({'class_weight': r_class_weight})
if 'bg_cls_weight' in rel_loss_cls:
rel_loss_cls.pop('bg_cls_weight')
if train_cfg:
assert 'assigner' in train_cfg, 'assigner should be provided '\
'when train_cfg is set.'
assigner = train_cfg['assigner']
assert sub_loss_cls['loss_weight'] == assigner['s_cls_cost']['weight'], \
'The classification weight for loss and matcher should be' \
'exactly the same.'
assert obj_loss_cls['loss_weight'] == assigner['o_cls_cost']['weight'], \
'The classification weight for loss and matcher should be' \
'exactly the same.'
assert rel_loss_cls['loss_weight'] == assigner['r_cls_cost']['weight'], \
'The classification weight for loss and matcher should be' \
'exactly the same.'
assert sub_loss_bbox['loss_weight'] == assigner['s_reg_cost'][
'weight'], 'The regression L1 weight for loss and matcher ' \
'should be exactly the same.'
assert obj_loss_bbox['loss_weight'] == assigner['o_reg_cost'][
'weight'], 'The regression L1 weight for loss and matcher ' \
'should be exactly the same.'
assert sub_loss_iou['loss_weight'] == assigner['s_iou_cost']['weight'], \
'The regression iou weight for loss and matcher should be' \
'exactly the same.'
assert obj_loss_iou['loss_weight'] == assigner['o_iou_cost']['weight'], \
'The regression iou weight for loss and matcher should be' \
'exactly the same.'
self.assigner = build_assigner(assigner)
# following DETR sampling=False, so use PseudoSampler
sampler_cfg = dict(type='PseudoSampler')
self.sampler = build_sampler(sampler_cfg, context=self)
self.num_query = num_query
self.num_classes = num_classes
self.num_relations = num_relations
self.object_classes = object_classes
self.predicate_classes = predicate_classes
self.in_channels = in_channels
self.num_reg_fcs = num_reg_fcs
self.train_cfg = train_cfg
self.test_cfg = test_cfg
self.fp16_enabled = False
self.swin = swin_backbone
self.obj_loss_cls = build_loss(obj_loss_cls)
self.obj_loss_bbox = build_loss(obj_loss_bbox)
self.obj_loss_iou = build_loss(obj_loss_iou)
self.sub_loss_cls = build_loss(sub_loss_cls)
self.sub_loss_bbox = build_loss(sub_loss_bbox)
self.sub_loss_iou = build_loss(sub_loss_iou)
if self.use_mask:
# self.obj_focal_loss = build_loss(obj_focal_loss)
self.obj_dice_loss = build_loss(obj_dice_loss)
# self.sub_focal_loss = build_loss(sub_focal_loss)
self.sub_dice_loss = build_loss(sub_dice_loss)
self.rel_loss_cls = build_loss(rel_loss_cls)
if self.obj_loss_cls.use_sigmoid:
self.obj_cls_out_channels = num_classes
else:
self.obj_cls_out_channels = num_classes + 1
if self.sub_loss_cls.use_sigmoid:
self.sub_cls_out_channels = num_classes
else:
self.sub_cls_out_channels = num_classes + 1
if rel_loss_cls['use_sigmoid']:
self.rel_cls_out_channels = num_relations
else:
self.rel_cls_out_channels = num_relations + 1
self.act_cfg = transformer.get('act_cfg',
dict(type='ReLU', inplace=True))
self.activate = build_activation_layer(self.act_cfg)
self.positional_encoding = build_positional_encoding(
positional_encoding)
self.transformer = build_transformer(transformer)
self.n_heads = n_heads
self.embed_dims = self.transformer.embed_dims
assert 'num_feats' in positional_encoding
num_feats = positional_encoding['num_feats']
assert num_feats * 2 == self.embed_dims, 'embed_dims should' \
f' be exactly 2 times of num_feats. Found {self.embed_dims}' \
f' and {num_feats}.'
self._init_layers()
def _init_layers(self):
"""Initialize layers of the transformer head."""
self.input_proj = Conv2d(self.in_channels,
self.embed_dims,
kernel_size=1)
self.query_embed = nn.Embedding(self.num_query, self.embed_dims)
self.obj_cls_embed = Linear(self.embed_dims, self.obj_cls_out_channels)
self.obj_box_embed = MLP(self.embed_dims, self.embed_dims, 4, 3)
self.sub_cls_embed = Linear(self.embed_dims, self.sub_cls_out_channels)
self.sub_box_embed = MLP(self.embed_dims, self.embed_dims, 4, 3)
self.rel_cls_embed = Linear(self.embed_dims, self.rel_cls_out_channels)
if self.use_mask:
self.sub_bbox_attention = MHAttentionMap(self.embed_dims,
self.embed_dims,
self.n_heads,
dropout=0.0)
self.obj_bbox_attention = MHAttentionMap(self.embed_dims,
self.embed_dims,
self.n_heads,
dropout=0.0)
if not self.swin:
self.sub_mask_head = MaskHeadSmallConv(
self.embed_dims + self.n_heads, [1024, 512, 256],
self.embed_dims)
self.obj_mask_head = MaskHeadSmallConv(
self.embed_dims + self.n_heads, [1024, 512, 256],
self.embed_dims)
elif self.swin:
self.sub_mask_head = MaskHeadSmallConv(
self.embed_dims + self.n_heads, self.swin, self.embed_dims)
self.obj_mask_head = MaskHeadSmallConv(
self.embed_dims + self.n_heads, self.swin, self.embed_dims)
def init_weights(self):
"""Initialize weights of the transformer head."""
# The initialization for transformer is important
self.transformer.init_weights()
def _load_from_state_dict(self, state_dict, prefix, local_metadata, strict,
missing_keys, unexpected_keys, error_msgs):
"""load checkpoints."""
version = local_metadata.get('version', None)
if (version is None or version < 2):
convert_dict = {
'.self_attn.': '.attentions.0.',
'.ffn.': '.ffns.0.',
'.multihead_attn.': '.attentions.1.',
'.decoder.norm.': '.decoder.post_norm.',
'.query_embedding.': '.query_embed.'
}
state_dict_keys = list(state_dict.keys())
for k in state_dict_keys:
for ori_key, convert_key in convert_dict.items():
if ori_key in k:
convert_key = k.replace(ori_key, convert_key)
state_dict[convert_key] = state_dict[k]
del state_dict[k]
super(AnchorFreeHead,
self)._load_from_state_dict(state_dict, prefix, local_metadata,
strict, missing_keys,
unexpected_keys, error_msgs)
def forward(self, feats, img_metas):
        # construct binary masks which are used for the transformer.
        # NOTE following the official DETR repo, non-zero values represent
        # ignored positions, while zero values mean valid positions.
        last_features = feats[-1]  # feats holds multi-level features; use the last level here
batch_size = last_features.size(0)
input_img_h, input_img_w = img_metas[0]['batch_input_shape']
masks = last_features.new_ones((batch_size, input_img_h, input_img_w))
for img_id in range(batch_size):
img_h, img_w, _ = img_metas[img_id]['img_shape']
masks[img_id, :img_h, :img_w] = 0
last_features = self.input_proj(last_features)
# interpolate masks to have the same spatial shape with feats
masks = F.interpolate(masks.unsqueeze(1),
size=last_features.shape[-2:]).to(
torch.bool).squeeze(1)
# position encoding
pos_embed = self.positional_encoding(masks) # [bs, embed_dim, h, w]
# outs_dec: [nb_dec, bs, num_query, embed_dim]
outs_dec, memory = self.transformer(last_features, masks,
self.query_embed.weight, pos_embed)
sub_outputs_class = self.sub_cls_embed(outs_dec)
sub_outputs_coord = self.sub_box_embed(outs_dec).sigmoid()
obj_outputs_class = self.obj_cls_embed(outs_dec)
obj_outputs_coord = self.obj_box_embed(outs_dec).sigmoid()
all_cls_scores = dict(sub=sub_outputs_class, obj=obj_outputs_class)
rel_outputs_class = self.rel_cls_embed(outs_dec)
all_cls_scores['rel'] = rel_outputs_class
if self.use_mask:
###########for segmentation#################
sub_bbox_mask = self.sub_bbox_attention(outs_dec[-1],
memory,
mask=masks)
obj_bbox_mask = self.obj_bbox_attention(outs_dec[-1],
memory,
mask=masks)
sub_seg_masks = self.sub_mask_head(last_features, sub_bbox_mask,
[feats[2], feats[1], feats[0]])
outputs_sub_seg_masks = sub_seg_masks.view(batch_size,
self.num_query,
sub_seg_masks.shape[-2],
sub_seg_masks.shape[-1])
obj_seg_masks = self.obj_mask_head(last_features, obj_bbox_mask,
[feats[2], feats[1], feats[0]])
outputs_obj_seg_masks = obj_seg_masks.view(batch_size,
self.num_query,
obj_seg_masks.shape[-2],
obj_seg_masks.shape[-1])
all_bbox_preds = dict(sub=sub_outputs_coord,
obj=obj_outputs_coord,
sub_seg=outputs_sub_seg_masks,
obj_seg=outputs_obj_seg_masks)
else:
all_bbox_preds = dict(sub=sub_outputs_coord, obj=obj_outputs_coord)
return all_cls_scores, all_bbox_preds
@force_fp32(apply_to=('all_cls_scores_list', 'all_bbox_preds_list'))
def loss(self,
all_cls_scores_list,
all_bbox_preds_list,
gt_rels_list,
gt_bboxes_list,
gt_labels_list,
gt_masks_list,
img_metas,
gt_bboxes_ignore=None):
        # NOTE by default only the outputs from the last feature scale are used.
all_cls_scores = all_cls_scores_list
all_bbox_preds = all_bbox_preds_list
assert gt_bboxes_ignore is None, \
'Only supports for gt_bboxes_ignore setting to None.'
all_s_cls_scores = all_cls_scores['sub']
all_o_cls_scores = all_cls_scores['obj']
all_s_bbox_preds = all_bbox_preds['sub']
all_o_bbox_preds = all_bbox_preds['obj']
num_dec_layers = len(all_s_cls_scores)
if self.use_mask:
all_s_mask_preds = all_bbox_preds['sub_seg']
all_o_mask_preds = all_bbox_preds['obj_seg']
all_s_mask_preds = [
all_s_mask_preds for _ in range(num_dec_layers)
]
all_o_mask_preds = [
all_o_mask_preds for _ in range(num_dec_layers)
]
all_gt_bboxes_list = [gt_bboxes_list for _ in range(num_dec_layers)]
all_gt_labels_list = [gt_labels_list for _ in range(num_dec_layers)]
all_gt_rels_list = [gt_rels_list for _ in range(num_dec_layers)]
all_gt_bboxes_ignore_list = [
gt_bboxes_ignore for _ in range(num_dec_layers)
]
all_gt_masks_list = [gt_masks_list for _ in range(num_dec_layers)]
img_metas_list = [img_metas for _ in range(num_dec_layers)]
all_r_cls_scores = [None for _ in range(num_dec_layers)]
all_r_cls_scores = all_cls_scores['rel']
if self.use_mask:
# s_losses_cls, o_losses_cls, r_losses_cls, s_losses_bbox, o_losses_bbox, s_losses_iou, o_losses_iou, s_focal_losses, s_dice_losses, o_focal_losses, o_dice_losses = multi_apply(
# self.loss_single, all_s_cls_scores, all_o_cls_scores, all_r_cls_scores, all_s_bbox_preds, all_o_bbox_preds,
# all_s_mask_preds, all_o_mask_preds,
# all_gt_rels_list,all_gt_bboxes_list, all_gt_labels_list,
# all_gt_masks_list, img_metas_list,
# all_gt_bboxes_ignore_list)
s_losses_cls, o_losses_cls, r_losses_cls, s_losses_bbox, o_losses_bbox, s_losses_iou, o_losses_iou, s_dice_losses, o_dice_losses = multi_apply(
self.loss_single, all_s_cls_scores, all_o_cls_scores,
all_r_cls_scores, all_s_bbox_preds, all_o_bbox_preds,
all_s_mask_preds, all_o_mask_preds, all_gt_rels_list,
all_gt_bboxes_list, all_gt_labels_list, all_gt_masks_list,
img_metas_list, all_gt_bboxes_ignore_list)
else:
all_s_mask_preds = [None for _ in range(num_dec_layers)]
all_o_mask_preds = [None for _ in range(num_dec_layers)]
s_losses_cls, o_losses_cls, r_losses_cls, s_losses_bbox, o_losses_bbox, s_losses_iou, o_losses_iou, s_dice_losses, o_dice_losses = multi_apply(
self.loss_single, all_s_cls_scores, all_o_cls_scores,
all_r_cls_scores, all_s_bbox_preds, all_o_bbox_preds,
all_s_mask_preds, all_o_mask_preds, all_gt_rels_list,
all_gt_bboxes_list, all_gt_labels_list, all_gt_masks_list,
img_metas_list, all_gt_bboxes_ignore_list)
loss_dict = dict()
# loss from the last decoder layer
loss_dict['s_loss_cls'] = s_losses_cls[-1]
loss_dict['o_loss_cls'] = o_losses_cls[-1]
loss_dict['r_loss_cls'] = r_losses_cls[-1]
loss_dict['s_loss_bbox'] = s_losses_bbox[-1]
loss_dict['o_loss_bbox'] = o_losses_bbox[-1]
loss_dict['s_loss_iou'] = s_losses_iou[-1]
loss_dict['o_loss_iou'] = o_losses_iou[-1]
if self.use_mask:
# loss_dict['s_focal_losses'] = s_focal_losses[-1]
# loss_dict['o_focal_losses'] = o_focal_losses[-1]
loss_dict['s_dice_losses'] = s_dice_losses[-1]
loss_dict['o_dice_losses'] = o_dice_losses[-1]
# loss from other decoder layers
num_dec_layer = 0
for s_loss_cls_i, o_loss_cls_i, r_loss_cls_i, \
s_loss_bbox_i, o_loss_bbox_i, \
s_loss_iou_i, o_loss_iou_i in zip(s_losses_cls[:-1], o_losses_cls[:-1], r_losses_cls[:-1],
s_losses_bbox[:-1], o_losses_bbox[:-1],
s_losses_iou[:-1], o_losses_iou[:-1]):
loss_dict[f'd{num_dec_layer}.s_loss_cls'] = s_loss_cls_i
loss_dict[f'd{num_dec_layer}.o_loss_cls'] = o_loss_cls_i
loss_dict[f'd{num_dec_layer}.r_loss_cls'] = r_loss_cls_i
loss_dict[f'd{num_dec_layer}.s_loss_bbox'] = s_loss_bbox_i
loss_dict[f'd{num_dec_layer}.o_loss_bbox'] = o_loss_bbox_i
loss_dict[f'd{num_dec_layer}.s_loss_iou'] = s_loss_iou_i
loss_dict[f'd{num_dec_layer}.o_loss_iou'] = o_loss_iou_i
num_dec_layer += 1
return loss_dict
def loss_single(self,
s_cls_scores,
o_cls_scores,
r_cls_scores,
s_bbox_preds,
o_bbox_preds,
s_mask_preds,
o_mask_preds,
gt_rels_list,
gt_bboxes_list,
gt_labels_list,
gt_masks_list,
img_metas,
gt_bboxes_ignore_list=None):
num_imgs = s_cls_scores.size(0)
s_cls_scores_list = [s_cls_scores[i] for i in range(num_imgs)]
o_cls_scores_list = [o_cls_scores[i] for i in range(num_imgs)]
r_cls_scores_list = [r_cls_scores[i] for i in range(num_imgs)]
s_bbox_preds_list = [s_bbox_preds[i] for i in range(num_imgs)]
o_bbox_preds_list = [o_bbox_preds[i] for i in range(num_imgs)]
if self.use_mask:
s_mask_preds_list = [s_mask_preds[i] for i in range(num_imgs)]
o_mask_preds_list = [o_mask_preds[i] for i in range(num_imgs)]
else:
s_mask_preds_list = [None for i in range(num_imgs)]
o_mask_preds_list = [None for i in range(num_imgs)]
cls_reg_targets = self.get_targets(
s_cls_scores_list, o_cls_scores_list, r_cls_scores_list,
s_bbox_preds_list, o_bbox_preds_list, s_mask_preds_list,
o_mask_preds_list, gt_rels_list, gt_bboxes_list, gt_labels_list,
gt_masks_list, img_metas, gt_bboxes_ignore_list)
(s_labels_list, o_labels_list, r_labels_list, s_label_weights_list,
o_label_weights_list, r_label_weights_list, s_bbox_targets_list,
o_bbox_targets_list, s_bbox_weights_list, o_bbox_weights_list,
s_mask_targets_list, o_mask_targets_list, num_total_pos,
num_total_neg, s_mask_preds_list, o_mask_preds_list) = cls_reg_targets
s_labels = torch.cat(s_labels_list, 0)
o_labels = torch.cat(o_labels_list, 0)
r_labels = torch.cat(r_labels_list, 0)
s_label_weights = torch.cat(s_label_weights_list, 0)
o_label_weights = torch.cat(o_label_weights_list, 0)
r_label_weights = torch.cat(r_label_weights_list, 0)
s_bbox_targets = torch.cat(s_bbox_targets_list, 0)
o_bbox_targets = torch.cat(o_bbox_targets_list, 0)
s_bbox_weights = torch.cat(s_bbox_weights_list, 0)
o_bbox_weights = torch.cat(o_bbox_weights_list, 0)
if self.use_mask:
s_mask_targets = torch.cat(s_mask_targets_list,
0).float().flatten(1)
o_mask_targets = torch.cat(o_mask_targets_list,
0).float().flatten(1)
s_mask_preds = torch.cat(s_mask_preds_list, 0).flatten(1)
o_mask_preds = torch.cat(o_mask_preds_list, 0).flatten(1)
num_matches = o_mask_preds.shape[0]
# mask loss
# s_focal_loss = self.sub_focal_loss(s_mask_preds,s_mask_targets,num_matches)
s_dice_loss = self.sub_dice_loss(
s_mask_preds, s_mask_targets,
num_matches)
# o_focal_loss = self.obj_focal_loss(o_mask_preds,o_mask_targets,num_matches)
o_dice_loss = self.obj_dice_loss(
o_mask_preds, o_mask_targets,
num_matches)
else:
s_dice_loss = None
o_dice_loss = None
# classification loss
s_cls_scores = s_cls_scores.reshape(-1, self.sub_cls_out_channels)
o_cls_scores = o_cls_scores.reshape(-1, self.obj_cls_out_channels)
r_cls_scores = r_cls_scores.reshape(-1, self.rel_cls_out_channels)
# construct weighted avg_factor to match with the official DETR repo
cls_avg_factor = num_total_pos * 1.0 + \
num_total_neg * self.bg_cls_weight
if self.sync_cls_avg_factor:
cls_avg_factor = reduce_mean(
s_cls_scores.new_tensor([cls_avg_factor]))
cls_avg_factor = max(cls_avg_factor, 1)
        # NOTE use num_total_pos as the average factor for subjects/objects, since no
        # classification loss is computed for unmatched queries
s_loss_cls = self.sub_loss_cls(s_cls_scores,
s_labels,
s_label_weights,
avg_factor=num_total_pos * 1.0)
o_loss_cls = self.obj_loss_cls(o_cls_scores,
o_labels,
o_label_weights,
avg_factor=num_total_pos * 1.0)
r_loss_cls = self.rel_loss_cls(r_cls_scores,
r_labels,
r_label_weights,
avg_factor=cls_avg_factor)
# Compute the average number of gt boxes across all gpus, for
# normalization purposes
num_total_pos = o_loss_cls.new_tensor([num_total_pos])
num_total_pos = torch.clamp(reduce_mean(num_total_pos), min=1).item()
# construct factors used for rescale bboxes
factors = []
for img_meta, bbox_pred in zip(img_metas, s_bbox_preds):
img_h, img_w, _ = img_meta['img_shape']
factor = bbox_pred.new_tensor([img_w, img_h, img_w,
img_h]).unsqueeze(0).repeat(
bbox_pred.size(0), 1)
factors.append(factor)
factors = torch.cat(factors, 0)
        # DETR regresses the relative positions of boxes (cxcywh) in the image,
        # thus the learning targets are normalized by the image size. So here
        # we need to re-scale them for calculating the IoU loss.
s_bbox_preds = s_bbox_preds.reshape(-1, 4)
s_bboxes = bbox_cxcywh_to_xyxy(s_bbox_preds) * factors
s_bboxes_gt = bbox_cxcywh_to_xyxy(s_bbox_targets) * factors
o_bbox_preds = o_bbox_preds.reshape(-1, 4)
o_bboxes = bbox_cxcywh_to_xyxy(o_bbox_preds) * factors
o_bboxes_gt = bbox_cxcywh_to_xyxy(o_bbox_targets) * factors
        # regression IoU loss, GIoU loss by default
s_loss_iou = self.sub_loss_iou(s_bboxes,
s_bboxes_gt,
s_bbox_weights,
avg_factor=num_total_pos)
o_loss_iou = self.obj_loss_iou(o_bboxes,
o_bboxes_gt,
o_bbox_weights,
avg_factor=num_total_pos)
# regression L1 loss
s_loss_bbox = self.sub_loss_bbox(s_bbox_preds,
s_bbox_targets,
s_bbox_weights,
avg_factor=num_total_pos)
o_loss_bbox = self.obj_loss_bbox(o_bbox_preds,
o_bbox_targets,
o_bbox_weights,
avg_factor=num_total_pos)
# return s_loss_cls, o_loss_cls, r_loss_cls, s_loss_bbox, o_loss_bbox, s_loss_iou, o_loss_iou, s_focal_loss, s_dice_loss, o_focal_loss, o_dice_loss
return s_loss_cls, o_loss_cls, r_loss_cls, s_loss_bbox, o_loss_bbox, s_loss_iou, o_loss_iou, s_dice_loss, o_dice_loss
def get_targets(self,
s_cls_scores_list,
o_cls_scores_list,
r_cls_scores_list,
s_bbox_preds_list,
o_bbox_preds_list,
s_mask_preds_list,
o_mask_preds_list,
gt_rels_list,
gt_bboxes_list,
gt_labels_list,
gt_masks_list,
img_metas,
gt_bboxes_ignore_list=None):
assert gt_bboxes_ignore_list is None, \
'Only supports for gt_bboxes_ignore setting to None.'
num_imgs = len(s_cls_scores_list)
gt_bboxes_ignore_list = [
gt_bboxes_ignore_list for _ in range(num_imgs)
]
(s_labels_list, o_labels_list, r_labels_list, s_label_weights_list,
o_label_weights_list, r_label_weights_list, s_bbox_targets_list,
o_bbox_targets_list, s_bbox_weights_list, o_bbox_weights_list,
s_mask_targets_list, o_mask_targets_list, pos_inds_list,
neg_inds_list, s_mask_preds_list, o_mask_preds_list) = multi_apply(
self._get_target_single, s_cls_scores_list, o_cls_scores_list,
r_cls_scores_list, s_bbox_preds_list, o_bbox_preds_list,
s_mask_preds_list, o_mask_preds_list, gt_rels_list,
gt_bboxes_list, gt_labels_list, gt_masks_list, img_metas,
gt_bboxes_ignore_list)
num_total_pos = sum((inds.numel() for inds in pos_inds_list))
num_total_neg = sum((inds.numel() for inds in neg_inds_list))
return (s_labels_list, o_labels_list, r_labels_list,
s_label_weights_list, o_label_weights_list,
r_label_weights_list, s_bbox_targets_list, o_bbox_targets_list,
s_bbox_weights_list, o_bbox_weights_list, s_mask_targets_list,
o_mask_targets_list, num_total_pos, num_total_neg,
s_mask_preds_list, o_mask_preds_list)
def _get_target_single(self,
s_cls_score,
o_cls_score,
r_cls_score,
s_bbox_pred,
o_bbox_pred,
s_mask_preds,
o_mask_preds,
gt_rels,
gt_bboxes,
gt_labels,
gt_masks,
img_meta,
gt_bboxes_ignore=None):
""""Compute regression and classification targets for one image.
Outputs from a single decoder layer of a single feature level are used.
Args:
s_cls_score (Tensor): Subject box score logits from a single decoder layer
for one image. Shape [num_query, cls_out_channels].
o_cls_score (Tensor): Object box score logits from a single decoder layer
for one image. Shape [num_query, cls_out_channels].
r_cls_score (Tensor): Relation score logits from a single decoder layer
for one image. Shape [num_query, cls_out_channels].
s_bbox_pred (Tensor): Sigmoid outputs of Subject bboxes from a single decoder layer
for one image, with normalized coordinate (cx, cy, w, h) and
shape [num_query, 4].
o_bbox_pred (Tensor): Sigmoid outputs of object bboxes from a single decoder layer
for one image, with normalized coordinate (cx, cy, w, h) and
shape [num_query, 4].
s_mask_preds (Tensor): Logits before sigmoid subject masks from a single decoder layer
for one image, with shape [num_query, H, W].
o_mask_preds (Tensor): Logits before sigmoid object masks from a single decoder layer
for one image, with shape [num_query, H, W].
gt_rels (Tensor): Ground truth relation triplets for one image with
shape (num_gts, 3) in [gt_sub_id, gt_obj_id, gt_rel_class] format.
gt_bboxes (Tensor): Ground truth bboxes for one image with
shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format.
gt_labels (Tensor): Ground truth class indices for one image
with shape (num_gts, ).
img_meta (dict): Meta information for one image.
gt_bboxes_ignore (Tensor, optional): Bounding boxes
which can be ignored. Default None.
Returns:
tuple[Tensor]: a tuple containing the following for one image.
- s/o/r_labels (Tensor): Labels of each image.
- s/o/r_label_weights (Tensor]): Label weights of each image.
- s/o_bbox_targets (Tensor): BBox targets of each image.
- s/o_bbox_weights (Tensor): BBox weights of each image.
- s/o_mask_targets (Tensor): Mask targets of each image.
- pos_inds (Tensor): Sampled positive indices for each image.
- neg_inds (Tensor): Sampled negative indices for each image.
- s/o_mask_preds (Tensor): Matched mask preds of each image.
"""
num_bboxes = s_bbox_pred.size(0)
gt_sub_bboxes = []
gt_obj_bboxes = []
gt_sub_labels = []
gt_obj_labels = []
gt_rel_labels = []
if self.use_mask:
gt_sub_masks = []
gt_obj_masks = []
assert len(gt_masks) == len(gt_bboxes)
for rel_id in range(gt_rels.size(0)):
gt_sub_bboxes.append(gt_bboxes[int(gt_rels[rel_id, 0])])
gt_obj_bboxes.append(gt_bboxes[int(gt_rels[rel_id, 1])])
gt_sub_labels.append(gt_labels[int(gt_rels[rel_id, 0])])
gt_obj_labels.append(gt_labels[int(gt_rels[rel_id, 1])])
gt_rel_labels.append(gt_rels[rel_id, 2])
if self.use_mask:
gt_sub_masks.append(gt_masks[int(gt_rels[rel_id,
0])].unsqueeze(0))
gt_obj_masks.append(gt_masks[int(gt_rels[rel_id,
1])].unsqueeze(0))
gt_sub_bboxes = torch.vstack(gt_sub_bboxes).type_as(gt_bboxes).reshape(
-1, 4)
gt_obj_bboxes = torch.vstack(gt_obj_bboxes).type_as(gt_bboxes).reshape(
-1, 4)
gt_sub_labels = torch.vstack(gt_sub_labels).type_as(gt_labels).reshape(
-1)
gt_obj_labels = torch.vstack(gt_obj_labels).type_as(gt_labels).reshape(
-1)
gt_rel_labels = torch.vstack(gt_rel_labels).type_as(gt_labels).reshape(
-1)
# assigner and sampler, only return subject&object assign result
s_assign_result, o_assign_result = self.assigner.assign(
s_bbox_pred, o_bbox_pred, s_cls_score, o_cls_score, r_cls_score,
gt_sub_bboxes, gt_obj_bboxes, gt_sub_labels, gt_obj_labels,
gt_rel_labels, img_meta, gt_bboxes_ignore)
s_sampling_result = self.sampler.sample(s_assign_result, s_bbox_pred,
gt_sub_bboxes)
o_sampling_result = self.sampler.sample(o_assign_result, o_bbox_pred,
gt_obj_bboxes)
pos_inds = o_sampling_result.pos_inds
        neg_inds = o_sampling_result.neg_inds  # indices of unmatched (no-relation) queries
# label targets
s_labels = gt_sub_bboxes.new_full(
(num_bboxes, ), self.num_classes,
dtype=torch.long) ### 0-based, class [num_classes] as background
s_labels[pos_inds] = gt_sub_labels[
s_sampling_result.pos_assigned_gt_inds]
s_label_weights = gt_sub_bboxes.new_zeros(num_bboxes)
s_label_weights[pos_inds] = 1.0
o_labels = gt_obj_bboxes.new_full(
(num_bboxes, ), self.num_classes,
dtype=torch.long) ### 0-based, class [num_classes] as background
o_labels[pos_inds] = gt_obj_labels[
o_sampling_result.pos_assigned_gt_inds]
o_label_weights = gt_obj_bboxes.new_zeros(num_bboxes)
o_label_weights[pos_inds] = 1.0
r_labels = gt_obj_bboxes.new_full(
(num_bboxes, ), 0,
dtype=torch.long) ### 1-based, class 0 as background
r_labels[pos_inds] = gt_rel_labels[
o_sampling_result.pos_assigned_gt_inds]
r_label_weights = gt_obj_bboxes.new_ones(num_bboxes)
if self.use_mask:
gt_sub_masks = torch.cat(gt_sub_masks, axis=0).type_as(gt_masks[0])
gt_obj_masks = torch.cat(gt_obj_masks, axis=0).type_as(gt_masks[0])
assert gt_sub_masks.size() == gt_obj_masks.size()
# mask targets for subjects and objects
s_mask_targets = gt_sub_masks[
s_sampling_result.pos_assigned_gt_inds,
...]
s_mask_preds = s_mask_preds[pos_inds]
o_mask_targets = gt_obj_masks[
o_sampling_result.pos_assigned_gt_inds, ...]
o_mask_preds = o_mask_preds[pos_inds]
s_mask_preds = interpolate(s_mask_preds[:, None],
size=gt_sub_masks.shape[-2:],
mode='bilinear',
align_corners=False).squeeze(1)
o_mask_preds = interpolate(o_mask_preds[:, None],
size=gt_obj_masks.shape[-2:],
mode='bilinear',
align_corners=False).squeeze(1)
else:
s_mask_targets = None
s_mask_preds = None
o_mask_targets = None
o_mask_preds = None
# bbox targets for subjects and objects
s_bbox_targets = torch.zeros_like(s_bbox_pred)
s_bbox_weights = torch.zeros_like(s_bbox_pred)
s_bbox_weights[pos_inds] = 1.0
o_bbox_targets = torch.zeros_like(o_bbox_pred)
o_bbox_weights = torch.zeros_like(o_bbox_pred)
o_bbox_weights[pos_inds] = 1.0
img_h, img_w, _ = img_meta['img_shape']
        # DETR regresses the relative positions of boxes (cxcywh) in the image.
        # Thus the learning targets should be normalized by the image size, and
        # the box format should be converted from the default x1y1x2y2 to cxcywh.
factor = o_bbox_pred.new_tensor([img_w, img_h, img_w,
img_h]).unsqueeze(0)
pos_gt_s_bboxes_normalized = s_sampling_result.pos_gt_bboxes / factor
pos_gt_s_bboxes_targets = bbox_xyxy_to_cxcywh(
pos_gt_s_bboxes_normalized)
s_bbox_targets[pos_inds] = pos_gt_s_bboxes_targets
pos_gt_o_bboxes_normalized = o_sampling_result.pos_gt_bboxes / factor
pos_gt_o_bboxes_targets = bbox_xyxy_to_cxcywh(
pos_gt_o_bboxes_normalized)
o_bbox_targets[pos_inds] = pos_gt_o_bboxes_targets
return (s_labels, o_labels, r_labels, s_label_weights, o_label_weights,
r_label_weights, s_bbox_targets, o_bbox_targets,
s_bbox_weights, o_bbox_weights, s_mask_targets, o_mask_targets,
pos_inds, neg_inds, s_mask_preds, o_mask_preds
) ###return the interpolated predicted masks
# over-write because img_metas are needed as inputs for bbox_head.
def forward_train(self,
x,
img_metas,
gt_rels,
gt_bboxes,
gt_labels=None,
gt_masks=None,
gt_bboxes_ignore=None,
proposal_cfg=None,
**kwargs):
"""Forward function for training mode.
Args:
x (list[Tensor]): Features from backbone.
img_metas (list[dict]): Meta information of each image, e.g.,
image size, scaling factor, etc.
gt_rels (Tensor): Ground truth relation triplets for one image with
shape (num_gts, 3) in [gt_sub_id, gt_obj_id, gt_rel_class] format.
gt_bboxes (Tensor): Ground truth bboxes of the image,
shape (num_gts, 4).
gt_labels (Tensor): Ground truth labels of each box,
shape (num_gts,).
gt_bboxes_ignore (Tensor): Ground truth bboxes to be
ignored, shape (num_ignored_gts, 4).
proposal_cfg (mmcv.Config): Test / postprocessing configuration,
if None, test_cfg would be used.
Returns:
dict[str, Tensor]: A dictionary of loss components.
"""
assert proposal_cfg is None, '"proposal_cfg" must be None'
outs = self(x, img_metas)
if gt_labels is None:
loss_inputs = outs + (gt_rels, gt_bboxes, gt_masks, img_metas)
else:
loss_inputs = outs + (gt_rels, gt_bboxes, gt_labels, gt_masks,
img_metas)
losses = self.loss(*loss_inputs, gt_bboxes_ignore=gt_bboxes_ignore)
return losses
@force_fp32(apply_to=('all_cls_scores_list', 'all_bbox_preds_list'))
def get_bboxes(self, cls_scores, bbox_preds, img_metas, rescale=False):
        # NOTE by default only outputs from the last feature level are used,
        # and only the outputs from the last decoder layer are used.
result_list = []
for img_id in range(len(img_metas)):
s_cls_score = cls_scores['sub'][-1, img_id, ...]
o_cls_score = cls_scores['obj'][-1, img_id, ...]
r_cls_score = cls_scores['rel'][-1, img_id, ...]
s_bbox_pred = bbox_preds['sub'][-1, img_id, ...]
o_bbox_pred = bbox_preds['obj'][-1, img_id, ...]
img_shape = img_metas[img_id]['img_shape']
scale_factor = img_metas[img_id]['scale_factor']
if self.use_mask:
s_mask_pred = bbox_preds['sub_seg'][img_id, ...]
o_mask_pred = bbox_preds['obj_seg'][img_id, ...]
else:
s_mask_pred = None
o_mask_pred = None
triplets = self._get_bboxes_single(s_cls_score, o_cls_score,
r_cls_score, s_bbox_pred,
o_bbox_pred, s_mask_pred,
o_mask_pred, img_shape,
scale_factor, rescale)
result_list.append(triplets)
return result_list
def _get_bboxes_single(self,
s_cls_score,
o_cls_score,
r_cls_score,
s_bbox_pred,
o_bbox_pred,
s_mask_pred,
o_mask_pred,
img_shape,
scale_factor,
rescale=False):
assert len(s_cls_score) == len(o_cls_score)
assert len(s_cls_score) == len(s_bbox_pred)
assert len(s_cls_score) == len(o_bbox_pred)
mask_size = (round(img_shape[0] / scale_factor[1]),
round(img_shape[1] / scale_factor[0]))
max_per_img = self.test_cfg.get('max_per_img', self.num_query)
        assert not self.sub_loss_cls.use_sigmoid
        assert not self.obj_loss_cls.use_sigmoid
        assert not self.rel_loss_cls.use_sigmoid
assert len(s_cls_score) == len(r_cls_score)
# 0-based label input for objects and self.num_classes as default background cls
s_logits = F.softmax(s_cls_score, dim=-1)[..., :-1]
o_logits = F.softmax(o_cls_score, dim=-1)[..., :-1]
s_scores, s_labels = s_logits.max(-1)
o_scores, o_labels = o_logits.max(-1)
r_lgs = F.softmax(r_cls_score, dim=-1)
r_logits = r_lgs[..., 1:]
r_scores, r_indexes = r_logits.reshape(-1).topk(max_per_img)
r_labels = r_indexes % self.num_relations + 1
triplet_index = r_indexes // self.num_relations
s_scores = s_scores[triplet_index]
s_labels = s_labels[triplet_index] + 1
s_bbox_pred = s_bbox_pred[triplet_index]
o_scores = o_scores[triplet_index]
o_labels = o_labels[triplet_index] + 1
o_bbox_pred = o_bbox_pred[triplet_index]
r_dists = r_lgs.reshape(
-1, self.num_relations +
1)[triplet_index] #### NOTE: to match the evaluation in vg
if self.use_mask:
s_mask_pred = s_mask_pred[triplet_index]
o_mask_pred = o_mask_pred[triplet_index]
s_mask_pred = F.interpolate(s_mask_pred.unsqueeze(1),
size=mask_size).squeeze(1)
o_mask_pred = F.interpolate(o_mask_pred.unsqueeze(1),
size=mask_size).squeeze(1)
s_mask_pred_logits = s_mask_pred
o_mask_pred_logits = o_mask_pred
s_mask_pred = torch.sigmoid(s_mask_pred) > 0.85
o_mask_pred = torch.sigmoid(o_mask_pred) > 0.85
        # triplet de-duplication
relation_classes = defaultdict(lambda: [])
        for k, (s_l, o_l, r_l) in enumerate(zip(s_labels, o_labels, r_labels)):
            relation_classes[(s_l.item(), o_l.item(), r_l.item())].append(k)
s_binary_masks = s_mask_pred.to(torch.float).flatten(1)
o_binary_masks = o_mask_pred.to(torch.float).flatten(1)
def dedup_triplets(triplets_ids, s_binary_masks, o_binary_masks, keep_tri):
while len(triplets_ids) > 1:
base_s_mask = s_binary_masks[triplets_ids[0]].unsqueeze(0)
base_o_mask = o_binary_masks[triplets_ids[0]].unsqueeze(0)
other_s_masks = s_binary_masks[triplets_ids[1:]]
other_o_masks = o_binary_masks[triplets_ids[1:]]
                # mask IoU between the first triplet in the group and the rest
                s_ious = base_s_mask.mm(other_s_masks.transpose(0, 1)) / (
                    (base_s_mask + other_s_masks) > 0).sum(-1)
                o_ious = base_o_mask.mm(other_o_masks.transpose(0, 1)) / (
                    (base_o_mask + other_o_masks) > 0).sum(-1)
                ids_left = []
                for s_iou, o_iou, other_id in zip(s_ious[0], o_ious[0],
                                                  triplets_ids[1:]):
                    if (s_iou > 0.5) & (o_iou > 0.5):
keep_tri[other_id] = False
else:
ids_left.append(other_id)
triplets_ids = ids_left
return keep_tri
keep_tri = torch.ones_like(r_labels,dtype=torch.bool)
for triplets_ids in relation_classes.values():
if len(triplets_ids)>1:
keep_tri = dedup_triplets(triplets_ids, s_binary_masks, o_binary_masks, keep_tri)
s_labels = s_labels[keep_tri]
o_labels = o_labels[keep_tri]
s_mask_pred = s_mask_pred[keep_tri]
o_mask_pred = o_mask_pred[keep_tri]
complete_labels = torch.cat((s_labels, o_labels), 0)
output_masks = torch.cat((s_mask_pred, o_mask_pred), 0)
r_scores = r_scores[keep_tri]
r_labels = r_labels[keep_tri]
r_dists = r_dists[keep_tri]
rel_pairs = torch.arange(keep_tri.sum()*2,
dtype=torch.int).reshape(2, -1).T
complete_r_labels = r_labels
complete_r_dists = r_dists
s_binary_masks = s_binary_masks[keep_tri]
o_binary_masks = o_binary_masks[keep_tri]
s_mask_pred_logits = s_mask_pred_logits[keep_tri]
o_mask_pred_logits = o_mask_pred_logits[keep_tri]
        # end triplet de-duplication
        # for panoptic postprocessing
        keep = (s_labels != (s_logits.shape[-1] - 1)) & (
            o_labels != (s_logits.shape[-1] - 1)) & (
                s_scores[keep_tri] > 0.5) & (o_scores[keep_tri] > 0.5) & (
                    r_scores > 0.3)  # score thresholds: 0.5 for sub/obj, 0.3 for relation
r_scores = r_scores[keep]
r_labels = r_labels[keep]
r_dists = r_dists[keep]
labels = torch.cat((s_labels[keep], o_labels[keep]), 0) - 1
masks = torch.cat((s_mask_pred[keep], o_mask_pred[keep]), 0)
binary_masks = masks.to(torch.float).flatten(1)
s_mask_pred_logits = s_mask_pred_logits[keep]
o_mask_pred_logits = o_mask_pred_logits[keep]
mask_logits = torch.cat((s_mask_pred_logits, o_mask_pred_logits), 0)
h, w = masks.shape[-2:]
if labels.numel() == 0:
pan_img = torch.ones(mask_size).cpu().to(torch.long)
pan_masks = pan_img.unsqueeze(0).cpu().to(torch.long)
pan_rel_pairs = torch.arange(len(labels), dtype=torch.int).to(masks.device).reshape(2, -1).T
rels = torch.tensor([0,0,0]).view(-1,3)
pan_labels = torch.tensor([0])
else:
stuff_equiv_classes = defaultdict(lambda: [])
thing_classes = defaultdict(lambda: [])
thing_dedup = defaultdict(lambda: [])
for k, label in enumerate(labels):
if label.item() >= 80:
stuff_equiv_classes[label.item()].append(k)
else:
thing_classes[label.item()].append(k)
pan_rel_pairs = torch.arange(len(labels), dtype=torch.int).to(masks.device)
def dedup_things(pred_ids, binary_masks):
while len(pred_ids) > 1:
base_mask = binary_masks[pred_ids[0]].unsqueeze(0)
other_masks = binary_masks[pred_ids[1:]]
                    # mask IoU between the first prediction in the group and the rest
                    ious = base_mask.mm(other_masks.transpose(0, 1)) / (
                        (base_mask + other_masks) > 0).sum(-1)
                    ids_left = []
                    thing_dedup[pred_ids[0]].append(pred_ids[0])
                    for iou, other_id in zip(ious[0], pred_ids[1:]):
                        if iou > 0.5:
thing_dedup[pred_ids[0]].append(other_id)
else:
ids_left.append(other_id)
pred_ids = ids_left
if len(pred_ids) == 1:
thing_dedup[pred_ids[0]].append(pred_ids[0])
# create dict that groups duplicate masks
for thing_pred_ids in thing_classes.values():
if len(thing_pred_ids) > 1:
dedup_things(thing_pred_ids, binary_masks)
else:
thing_dedup[thing_pred_ids[0]].append(thing_pred_ids[0])
def get_ids_area(masks, pan_rel_pairs, r_labels, r_dists, dedup=False):
# This helper function creates the final panoptic segmentation image
# It also returns the area of the masks that appears on the image
masks = masks.flatten(1)
m_id = masks.transpose(0, 1).softmax(-1)
if m_id.shape[-1] == 0:
# We didn't detect any mask :(
m_id = torch.zeros((h, w),
dtype=torch.long,
device=m_id.device)
else:
m_id = m_id.argmax(-1).view(h, w)
if dedup:
# Merge the masks corresponding to the same stuff class
for equiv in stuff_equiv_classes.values():
if len(equiv) > 1:
for eq_id in equiv:
m_id.masked_fill_(m_id.eq(eq_id), equiv[0])
pan_rel_pairs[eq_id] = equiv[0]
# Merge the masks corresponding to the same thing instance
for equiv in thing_dedup.values():
if len(equiv) > 1:
for eq_id in equiv:
m_id.masked_fill_(m_id.eq(eq_id), equiv[0])
pan_rel_pairs[eq_id] = equiv[0]
                m_ids_remain, _ = m_id.unique().sort()
                pan_rel_pairs = pan_rel_pairs.reshape(2, -1).T
                no_obj_filter = torch.zeros(pan_rel_pairs.shape[0],
                                            dtype=torch.bool)
                # keep only relation pairs whose subject and object masks both
                # survive in the final panoptic segmentation
                for triplet_id in range(pan_rel_pairs.shape[0]):
                    if (pan_rel_pairs[triplet_id, 0] in m_ids_remain
                            and pan_rel_pairs[triplet_id, 1] in m_ids_remain):
                        no_obj_filter[triplet_id] = True
                pan_rel_pairs = pan_rel_pairs[no_obj_filter]
                r_labels, r_dists = r_labels[no_obj_filter], r_dists[no_obj_filter]
pan_labels = []
pan_masks = []
for i, m_id_remain in enumerate(m_ids_remain):
pan_masks.append(m_id.eq(m_id_remain).unsqueeze(0))
pan_labels.append(labels[m_id_remain].unsqueeze(0))
m_id.masked_fill_(m_id.eq(m_id_remain), i)
pan_rel_pairs.masked_fill_(pan_rel_pairs.eq(m_id_remain), i)
pan_masks = torch.cat(pan_masks, 0)
pan_labels = torch.cat(pan_labels, 0)
seg_img = m_id * INSTANCE_OFFSET + pan_labels[m_id]
seg_img = seg_img.view(h, w).cpu().to(torch.long)
m_id = m_id.view(h, w).cpu()
area = []
for i in range(len(masks)):
area.append(m_id.eq(i).sum().item())
return area, seg_img, pan_rel_pairs, pan_masks, r_labels, r_dists, pan_labels
area, pan_img, pan_rel_pairs, pan_masks, r_labels, r_dists, pan_labels = get_ids_area(mask_logits, pan_rel_pairs, r_labels, r_dists, dedup=True)
if r_labels.numel() == 0:
rels = torch.tensor([0,0,0]).view(-1,3)
else:
rels = torch.cat((pan_rel_pairs,r_labels.unsqueeze(-1)),-1)
# if labels.numel() > 0:
        # # We now filter empty masks as long as we find some
# while True:
# filtered_small = torch.as_tensor(
# [area[i] <= 4 for i, c in enumerate(labels)],
# dtype=torch.bool,
# device=keep.device)
# if filtered_small.any().item():
# scores = scores[~filtered_small]
# labels = labels[~filtered_small]
# masks = masks[~filtered_small]
# area, pan_img = get_ids_area(masks, scores)
# else:
# break
s_det_bboxes = bbox_cxcywh_to_xyxy(s_bbox_pred)
s_det_bboxes[:, 0::2] = s_det_bboxes[:, 0::2] * img_shape[1]
s_det_bboxes[:, 1::2] = s_det_bboxes[:, 1::2] * img_shape[0]
s_det_bboxes[:, 0::2].clamp_(min=0, max=img_shape[1])
s_det_bboxes[:, 1::2].clamp_(min=0, max=img_shape[0])
if rescale:
s_det_bboxes /= s_det_bboxes.new_tensor(scale_factor)
s_det_bboxes = torch.cat((s_det_bboxes, s_scores.unsqueeze(1)), -1)
o_det_bboxes = bbox_cxcywh_to_xyxy(o_bbox_pred)
o_det_bboxes[:, 0::2] = o_det_bboxes[:, 0::2] * img_shape[1]
o_det_bboxes[:, 1::2] = o_det_bboxes[:, 1::2] * img_shape[0]
o_det_bboxes[:, 0::2].clamp_(min=0, max=img_shape[1])
o_det_bboxes[:, 1::2].clamp_(min=0, max=img_shape[0])
if rescale:
o_det_bboxes /= o_det_bboxes.new_tensor(scale_factor)
o_det_bboxes = torch.cat((o_det_bboxes, o_scores.unsqueeze(1)), -1)
det_bboxes = torch.cat((s_det_bboxes[keep_tri], o_det_bboxes[keep_tri]), 0)
if self.use_mask:
return det_bboxes, complete_labels, rel_pairs, output_masks, pan_rel_pairs, \
pan_img, complete_r_labels, complete_r_dists, r_labels, r_dists, pan_masks, rels, pan_labels
else:
return det_bboxes, labels, rel_pairs, r_labels, r_dists
def simple_test_bboxes(self, feats, img_metas, rescale=False):
# forward of this head requires img_metas
# start = time.time()
outs = self.forward(feats, img_metas)
# forward_time =time.time()
# print('------forward-----')
# print(forward_time - start)
results_list = self.get_bboxes(*outs, img_metas, rescale=rescale)
# print('-----get_bboxes-----')
# print(time.time() - forward_time)
return results_list
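# A minimal standalone sketch of the triplet ranking used in
# `_get_bboxes_single` above: the [num_query, num_relations] relation scores
# are flattened, a global top-k is taken, and the owning query index and the
# 1-based predicate label are recovered by integer division and modulo. The
# sizes below are made up for illustration.
def _demo_topk_triplets():
    import torch
    num_query, num_relations, max_per_img = 6, 4, 5
    r_logits = torch.rand(num_query, num_relations)
    r_scores, r_indexes = r_logits.reshape(-1).topk(max_per_img)
    r_labels = r_indexes % num_relations + 1     # 1-based predicate labels
    triplet_index = r_indexes // num_relations   # owning query of each triplet
    assert int(triplet_index.max()) < num_query and int(r_labels.min()) >= 1
    return r_scores, r_labels, triplet_index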
class MLP(nn.Module):
"""Very simple multi-layer perceptron (also called FFN) Copied from
hoitr."""
def __init__(self, input_dim, hidden_dim, output_dim, num_layers):
super().__init__()
self.num_layers = num_layers
h = [hidden_dim] * (num_layers - 1)
self.layers = nn.ModuleList(
nn.Linear(n, k) for n, k in zip([input_dim] + h, h + [output_dim]))
def forward(self, x):
for i, layer in enumerate(self.layers):
x = F.relu(layer(x)) if i < self.num_layers - 1 else layer(x)
return x
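# A small usage sketch for the MLP above, with made-up sizes: nn.Linear acts on
# the last dimension, so a decoder output of shape
# [nb_dec, bs, num_query, embed_dims] maps directly to box parameters of shape
# [nb_dec, bs, num_query, 4].
def _demo_mlp():
    import torch
    box_head = MLP(input_dim=256, hidden_dim=256, output_dim=4, num_layers=3)
    outs_dec = torch.rand(6, 2, 100, 256)    # hypothetical decoder output
    boxes = box_head(outs_dec).sigmoid()     # normalized (cx, cy, w, h)
    assert boxes.shape == (6, 2, 100, 4)
    return boxes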
def _expand(tensor, length: int):
return tensor.unsqueeze(1).repeat(1, int(length), 1, 1, 1).flatten(0, 1)
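# A small sketch of `_expand` with made-up sizes: a per-image feature map of
# shape [bs, C, H, W] is repeated once per query and flattened into
# [bs * num_query, C, H, W], matching the per-query mask decoding in
# MaskHeadSmallConv below.
def _demo_expand():
    import torch
    feat = torch.rand(2, 16, 8, 8)       # hypothetical [bs, C, H, W]
    expanded = _expand(feat, length=3)   # repeat for 3 queries per image
    assert expanded.shape == (2 * 3, 16, 8, 8)
    return expanded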
class MaskHeadSmallConv(nn.Module):
"""Simple convolutional head, using group norm.
    Upsampling is done using an FPN approach.
"""
def __init__(self, dim, fpn_dims, context_dim):
super().__init__()
inter_dims = [
dim, context_dim // 2, context_dim // 4, context_dim // 8,
context_dim // 16, context_dim // 64
]
self.lay1 = torch.nn.Conv2d(dim, dim, 3, padding=1)
self.gn1 = torch.nn.GroupNorm(8, dim)
self.lay2 = torch.nn.Conv2d(dim, inter_dims[1], 3, padding=1)
self.gn2 = torch.nn.GroupNorm(8, inter_dims[1])
self.lay3 = torch.nn.Conv2d(inter_dims[1], inter_dims[2], 3, padding=1)
self.gn3 = torch.nn.GroupNorm(8, inter_dims[2])
self.lay4 = torch.nn.Conv2d(inter_dims[2], inter_dims[3], 3, padding=1)
self.gn4 = torch.nn.GroupNorm(8, inter_dims[3])
self.lay5 = torch.nn.Conv2d(inter_dims[3], inter_dims[4], 3, padding=1)
self.gn5 = torch.nn.GroupNorm(8, inter_dims[4])
self.out_lay = torch.nn.Conv2d(inter_dims[4], 1, 3, padding=1)
self.dim = dim
self.adapter1 = torch.nn.Conv2d(fpn_dims[0], inter_dims[1], 1)
self.adapter2 = torch.nn.Conv2d(fpn_dims[1], inter_dims[2], 1)
self.adapter3 = torch.nn.Conv2d(fpn_dims[2], inter_dims[3], 1)
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_uniform_(m.weight, a=1)
nn.init.constant_(m.bias, 0)
def forward(self, x, bbox_mask, fpns):
x = torch.cat(
[_expand(x, bbox_mask.shape[1]),
bbox_mask.flatten(0, 1)], 1)
x = self.lay1(x)
x = self.gn1(x)
x = F.relu(x)
x = self.lay2(x)
x = self.gn2(x)
x = F.relu(x)
cur_fpn = self.adapter1(fpns[0])
if cur_fpn.size(0) != x.size(0):
cur_fpn = _expand(cur_fpn, x.size(0) // cur_fpn.size(0))
x = cur_fpn + F.interpolate(x, size=cur_fpn.shape[-2:], mode='nearest')
x = self.lay3(x)
x = self.gn3(x)
x = F.relu(x)
cur_fpn = self.adapter2(fpns[1])
if cur_fpn.size(0) != x.size(0):
cur_fpn = _expand(cur_fpn, x.size(0) // cur_fpn.size(0))
x = cur_fpn + F.interpolate(x, size=cur_fpn.shape[-2:], mode='nearest')
x = self.lay4(x)
x = self.gn4(x)
x = F.relu(x)
cur_fpn = self.adapter3(fpns[2])
if cur_fpn.size(0) != x.size(0):
cur_fpn = _expand(cur_fpn, x.size(0) // cur_fpn.size(0))
x = cur_fpn + F.interpolate(x, size=cur_fpn.shape[-2:], mode='nearest')
x = self.lay5(x)
x = self.gn5(x)
x = F.relu(x)
x = self.out_lay(x)
return x
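# A hedged shape sketch for MaskHeadSmallConv, using made-up tensor sizes that
# follow the ResNet-style FPN channels [1024, 512, 256] assumed in
# `_init_layers` above: the projected feature map and the per-query attention
# maps are fused, then progressively upsampled with the FPN features into one
# mask logit map per query.
def _demo_mask_head():
    import torch
    embed_dims, n_heads, num_query = 256, 8, 3
    head = MaskHeadSmallConv(embed_dims + n_heads, [1024, 512, 256], embed_dims)
    x = torch.rand(1, embed_dims, 16, 16)                   # projected last-level feature
    bbox_mask = torch.rand(1, num_query, n_heads, 16, 16)   # MHAttentionMap output
    fpns = [torch.rand(1, 1024, 32, 32),
            torch.rand(1, 512, 64, 64),
            torch.rand(1, 256, 128, 128)]
    masks = head(x, bbox_mask, fpns)
    assert masks.shape == (num_query, 1, 128, 128)
    return masks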
class MHAttentionMap(nn.Module):
"""This is a 2D attention module, which only returns the attention softmax
(no multiplication by value)"""
def __init__(self,
query_dim,
hidden_dim,
num_heads,
dropout=0.0,
bias=True):
super().__init__()
self.num_heads = num_heads
self.hidden_dim = hidden_dim
self.dropout = nn.Dropout(dropout)
self.q_linear = nn.Linear(query_dim, hidden_dim, bias=bias)
self.k_linear = nn.Linear(query_dim, hidden_dim, bias=bias)
nn.init.zeros_(self.k_linear.bias)
nn.init.zeros_(self.q_linear.bias)
nn.init.xavier_uniform_(self.k_linear.weight)
nn.init.xavier_uniform_(self.q_linear.weight)
self.normalize_fact = float(hidden_dim / self.num_heads)**-0.5
def forward(self, q, k, mask=None):
q = self.q_linear(q)
k = F.conv2d(k,
self.k_linear.weight.unsqueeze(-1).unsqueeze(-1),
self.k_linear.bias)
qh = q.view(q.shape[0], q.shape[1], self.num_heads,
self.hidden_dim // self.num_heads)
kh = k.view(k.shape[0], self.num_heads,
self.hidden_dim // self.num_heads, k.shape[-2],
k.shape[-1])
weights = torch.einsum('bqnc,bnchw->bqnhw', qh * self.normalize_fact,
kh)
if mask is not None:
weights.masked_fill_(mask.unsqueeze(1).unsqueeze(1), float('-inf'))
weights = F.softmax(weights.flatten(2), dim=-1).view(weights.size())
weights = self.dropout(weights)
return weights
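# A hedged shape sketch for MHAttentionMap with made-up sizes: queries of shape
# [bs, num_query, query_dim] attend over an encoder memory of shape
# [bs, query_dim, H, W], producing per-head attention maps of shape
# [bs, num_query, num_heads, H, W] that feed MaskHeadSmallConv above.
def _demo_attention_map():
    import torch
    attn = MHAttentionMap(query_dim=256, hidden_dim=256, num_heads=8)
    q = torch.rand(1, 100, 256)                       # decoder queries
    k = torch.rand(1, 256, 16, 16)                    # encoder memory
    mask = torch.zeros(1, 16, 16, dtype=torch.bool)   # all positions valid
    weights = attn(q, k, mask=mask)
    assert weights.shape == (1, 100, 8, 16, 16)
    return weights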
def interpolate(input,
size=None,
scale_factor=None,
mode='nearest',
align_corners=None):
"""Equivalent to nn.functional.interpolate, but with support for empty
batch sizes.
    This will eventually be supported natively by PyTorch, and this helper can
    go away.
    """
if version.parse(torchvision.__version__) < version.parse('0.7'):
if input.numel() > 0:
return torch.nn.functional.interpolate(input, size, scale_factor,
mode, align_corners)
output_shape = _output_size(2, input, size, scale_factor)
output_shape = list(input.shape[:-2]) + list(output_shape)
return _new_empty_tensor(input, output_shape)
else:
return torchvision.ops.misc.interpolate(input, size, scale_factor,
mode, align_corners)
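# A minimal standalone sketch of the mask-IoU test used by `dedup_triplets` and
# `dedup_things` in `_get_bboxes_single`: binary masks are flattened, the
# intersection comes from a matrix product, the union from the element-wise
# sum, and pairs with IoU > 0.5 are treated as duplicates. Sizes are made up.
def _demo_mask_iou_dedup():
    import torch
    masks = (torch.rand(3, 16, 16) > 0.5).to(torch.float).flatten(1)
    base, others = masks[0].unsqueeze(0), masks[1:]
    ious = base.mm(others.transpose(0, 1)) / ((base + others) > 0).sum(-1)
    return ious[0] > 0.5   # True -> duplicate of the first mask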
| 64,402 | 46.600148 | 189 | py |
OpenPSG | OpenPSG-main/openpsg/models/relation_heads/detr4seg_head.py | # Copyright (c) OpenMMLab. All rights reserved.
import time
from collections import defaultdict
from inspect import signature
import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision
from mmcv.cnn import Conv2d, Linear, build_activation_layer
from mmcv.cnn.bricks.transformer import FFN, build_positional_encoding
from mmcv.ops import batched_nms
from mmcv.runner import force_fp32
from mmdet.core import (bbox_cxcywh_to_xyxy, bbox_xyxy_to_cxcywh,
build_assigner, build_sampler, multi_apply,
reduce_mean)
from mmdet.datasets.coco_panoptic import INSTANCE_OFFSET
from mmdet.models.builder import HEADS, build_loss
from mmdet.models.dense_heads import AnchorFreeHead
from mmdet.models.utils import build_transformer
# imports for tools
from packaging import version
if version.parse(torchvision.__version__) < version.parse('0.7'):
from torchvision.ops import _new_empty_tensor
from torchvision.ops.misc import _output_size
coco_id = (1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 13, 14, 15, 16, 17, 18, 19, 20,
21, 22, 23, 24, 25, 27, 28, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40,
41, 42, 43, 44, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58,
59, 60, 61, 62, 63, 64, 65, 67, 70, 72, 73, 74, 75, 76, 77, 78, 79,
80, 81, 82, 84, 85, 86, 87, 88, 89, 90, 92, 93, 95, 100, 107, 109,
112, 118, 119, 122, 125, 128, 130, 133, 138, 141, 144, 145, 147,
148, 149, 151, 154, 155, 156, 159, 161, 166, 168, 171, 175, 176,
177, 178, 180, 181, 184, 185, 186, 187, 188, 189, 190, 191, 192,
193, 194, 195, 196, 197, 198, 199, 200)
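# The tuple above appears to map the head's contiguous label indices to COCO
# panoptic category ids (thing ids first, then stuff ids). A hedged sketch of
# how such a lookup table could be used; the dict names are illustrative only.
def _demo_coco_id_lookup():
    label2cat = {i: cat for i, cat in enumerate(coco_id)}
    cat2label = {cat: i for i, cat in enumerate(coco_id)}
    return label2cat, cat2label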
# default: 0-indexed labels, with the last index used as the background class
@HEADS.register_module()
class detr4segHead(AnchorFreeHead):
_version = 2
def __init__(
self,
num_classes,
in_channels,
object_classes,
num_query=100,
num_reg_fcs=2,
transformer=None,
n_heads=8,
swin_backbone=None,
sync_cls_avg_factor=False,
bg_cls_weight=0.1,
positional_encoding=dict(type='SinePositionalEncoding',
num_feats=128,
normalize=True),
loss_cls=dict(type='CrossEntropyLoss',
use_sigmoid=False,
loss_weight=1.0,
class_weight=1.0),
loss_bbox=dict(type='L1Loss', loss_weight=5.0),
loss_iou=dict(type='GIoULoss', loss_weight=2.0),
focal_loss=dict(type='BCEFocalLoss', loss_weight=1.0),
dice_loss=dict(type='DiceLoss', loss_weight=1.0),
train_cfg=dict(assigner=dict(
type='HungarianAssigner',
cls_cost=dict(type='ClassificationCost', weight=1.),
reg_cost=dict(type='BBoxL1Cost', weight=5.0),
iou_cost=dict(type='IoUCost', iou_mode='giou', weight=2.0))),
test_cfg=dict(max_per_img=100),
init_cfg=None,
**kwargs):
super(AnchorFreeHead, self).__init__(init_cfg)
self.sync_cls_avg_factor = sync_cls_avg_factor
        # NOTE following the official DETR repo, bg_cls_weight means
# relative classification weight of the no-object class.
assert isinstance(bg_cls_weight, float), 'Expected ' \
'bg_cls_weight to have type float. Found ' \
f'{type(bg_cls_weight)}.'
self.bg_cls_weight = bg_cls_weight
class_weight = loss_cls.get('class_weight', None)
assert isinstance(class_weight, float), 'Expected ' \
'class_weight to have type float. Found ' \
f'{type(class_weight)}.'
class_weight = torch.ones(num_classes + 1) * class_weight
        # set background class as the last index
class_weight[num_classes] = bg_cls_weight
loss_cls.update({'class_weight': class_weight})
if train_cfg:
assert 'assigner' in train_cfg, 'assigner should be provided '\
'when train_cfg is set.'
assigner = train_cfg['assigner']
assert loss_cls['loss_weight'] == assigner['cls_cost']['weight'], \
'The classification weight for loss and matcher should be' \
'exactly the same.'
assert loss_bbox['loss_weight'] == assigner['reg_cost'][
'weight'], 'The regression L1 weight for loss and matcher ' \
'should be exactly the same.'
assert loss_iou['loss_weight'] == assigner['iou_cost']['weight'], \
'The regression iou weight for loss and matcher should be' \
'exactly the same.'
self.assigner = build_assigner(assigner)
# DETR sampling=False, so use PseudoSampler
sampler_cfg = dict(type='PseudoSampler')
self.sampler = build_sampler(sampler_cfg, context=self)
self.num_query = num_query
self.num_classes = num_classes
self.in_channels = in_channels
self.num_reg_fcs = num_reg_fcs
self.train_cfg = train_cfg
self.test_cfg = test_cfg
self.fp16_enabled = False
self.swin = swin_backbone
self.CLASSES = object_classes
self.loss_cls = build_loss(loss_cls)
self.loss_bbox = build_loss(loss_bbox)
self.loss_iou = build_loss(loss_iou)
self.focal_loss = build_loss(focal_loss)
self.dice_loss = build_loss(dice_loss)
if self.loss_cls.use_sigmoid:
self.cls_out_channels = num_classes
else:
self.cls_out_channels = num_classes + 1
self.act_cfg = transformer.get('act_cfg',
dict(type='ReLU', inplace=True))
self.activate = build_activation_layer(self.act_cfg)
self.positional_encoding = build_positional_encoding(
positional_encoding)
self.transformer = build_transformer(transformer)
self.n_heads = n_heads
self.embed_dims = self.transformer.embed_dims
assert 'num_feats' in positional_encoding
num_feats = positional_encoding['num_feats']
assert num_feats * 2 == self.embed_dims, 'embed_dims should' \
f' be exactly 2 times of num_feats. Found {self.embed_dims}' \
f' and {num_feats}.'
self._init_layers()
def _init_layers(self):
"""Initialize layers of the transformer head."""
self.input_proj = Conv2d(self.in_channels,
self.embed_dims,
kernel_size=1)
self.query_embed = nn.Embedding(self.num_query, self.embed_dims)
self.class_embed = Linear(self.embed_dims, self.cls_out_channels)
self.bbox_embed = MLP(self.embed_dims, self.embed_dims, 4, 3)
self.bbox_attention = MHAttentionMap(self.embed_dims,
self.embed_dims,
self.n_heads,
dropout=0.0)
if not self.swin:
self.mask_head = MaskHeadSmallConv(self.embed_dims + self.n_heads,
[1024, 512, 256],
self.embed_dims)
elif self.swin:
self.mask_head = MaskHeadSmallConv(self.embed_dims + self.n_heads,
self.swin, self.embed_dims)
def init_weights(self):
"""Initialize weights of the transformer head."""
# The initialization for transformer is important
self.transformer.init_weights()
def _load_from_state_dict(self, state_dict, prefix, local_metadata, strict,
missing_keys, unexpected_keys, error_msgs):
"""load checkpoints."""
version = local_metadata.get('version', None)
if (version is None or version < 2):
convert_dict = {
'.self_attn.': '.attentions.0.',
'.ffn.': '.ffns.0.',
'.multihead_attn.': '.attentions.1.',
'.decoder.norm.': '.decoder.post_norm.',
'.query_embedding.': '.query_embed.'
}
state_dict_keys = list(state_dict.keys())
for k in state_dict_keys:
for ori_key, convert_key in convert_dict.items():
if ori_key in k:
convert_key = k.replace(ori_key, convert_key)
state_dict[convert_key] = state_dict[k]
del state_dict[k]
super(AnchorFreeHead,
self)._load_from_state_dict(state_dict, prefix, local_metadata,
strict, missing_keys,
unexpected_keys, error_msgs)
def forward(self, feats, img_metas):
"""Forward function.
Args:
feats (tuple[Tensor]): Features from the upstream network, each is
a 4D-tensor.
img_metas (list[dict]): List of image information.
Returns:
            all_cls_scores (Tensor): Classification logits, with shape
                [nb_dec, bs, num_query, cls_out_channels]. Note that
                cls_out_channels should include the background class.
            all_bbox_preds (dict[Tensor]): 'bbox' holds the sigmoid box
                outputs in normalized (cx, cy, w, h) format, with shape
                [nb_dec, bs, num_query, 4]; 'masks' holds the per-query
                mask logits with shape [bs, num_query, h, w].
"""
        # construct binary masks which are used by the transformer.
        # NOTE following the official DETR repo, non-zero values represent
        # ignored positions, while zero values mean valid positions.
        last_features = feats[-1]  # take the last-level backbone feature map
batch_size = last_features.size(0)
input_img_h, input_img_w = img_metas[0]['batch_input_shape']
masks = last_features.new_ones((batch_size, input_img_h, input_img_w))
for img_id in range(batch_size):
img_h, img_w, _ = img_metas[img_id]['img_shape']
masks[img_id, :img_h, :img_w] = 0
last_features = self.input_proj(last_features)
# interpolate masks to have the same spatial shape with feats
masks = F.interpolate(masks.unsqueeze(1),
size=last_features.shape[-2:]).to(
torch.bool).squeeze(1)
# position encoding
pos_embed = self.positional_encoding(masks) # [bs, embed_dim, h, w]
# outs_dec: [nb_dec, bs, num_query, embed_dim]
outs_dec, memory = self.transformer(last_features, masks,
self.query_embed.weight, pos_embed)
outputs_class = self.class_embed(outs_dec)
outputs_coord = self.bbox_embed(outs_dec).sigmoid()
all_cls_scores = outputs_class
        # ----- segmentation branch -----
bbox_mask = self.bbox_attention(outs_dec[-1], memory, mask=masks)
seg_masks = self.mask_head(last_features, bbox_mask,
[feats[2], feats[1], feats[0]])
seg_masks = seg_masks.view(batch_size, self.num_query,
seg_masks.shape[-2], seg_masks.shape[-1])
all_bbox_preds = dict(bbox=outputs_coord, masks=seg_masks)
return all_cls_scores, all_bbox_preds
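    # A minimal shape sketch of forward() above; the concrete sizes (batch 2,
    # 100 queries, 6 decoder layers, 256-dim embeddings) are assumptions for
    # illustration only, not values taken from the original config:
    #   feats[-1]               : [2, C_in, H, W]   last backbone feature map
    #   outs_dec                : [6, 2, 100, 256]  stacked decoder outputs
    #   all_cls_scores          : [6, 2, 100, cls_out_channels]
    #   all_bbox_preds['bbox']  : [6, 2, 100, 4]    sigmoid (cx, cy, w, h)
    #   all_bbox_preds['masks'] : [2, 100, h, w]    per-query mask logits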
@force_fp32(apply_to=('all_cls_scores_list', 'all_bbox_preds_list'))
def loss(self,
all_cls_scores_list,
all_bbox_preds_list,
gt_bboxes_list,
gt_labels_list,
gt_masks_list,
img_metas,
gt_bboxes_ignore=None):
""""Loss function.
Only outputs from the last feature level are used for computing
losses by default.
Args:
all_cls_scores_list (list[dict[Tensor]]): Classification outputs
for each feature level. Each is a 4D-tensor with shape
[nb_dec, bs, num_query, cls_out_channels].
all_bbox_preds_list (list[dict[Tensor]]): Sigmoid regression
outputs for each feature level. Each is a 4D-tensor with
normalized coordinate format (cx, cy, w, h) and shape
[nb_dec, bs, num_query, 4].
gt_bboxes_list (list[Tensor]): Ground truth bboxes for each image
with shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format.
gt_labels_list (list[Tensor]): Ground truth class indices for each
image with shape (num_gts, ).
img_metas (list[dict]): List of image meta information.
gt_bboxes_ignore (list[Tensor], optional): Bounding boxes
which can be ignored for each image. Default None.
Returns:
dict[str, Tensor]: A dictionary of loss components.
"""
        # NOTE: by default, only the outputs from the last feature scale are used.
all_cls_scores = all_cls_scores_list
all_bbox_preds = all_bbox_preds_list
assert gt_bboxes_ignore is None, \
'Only supports for gt_bboxes_ignore setting to None.'
all_mask_preds = all_bbox_preds['masks']
all_bbox_preds = all_bbox_preds['bbox']
num_dec_layers = len(all_cls_scores)
all_mask_preds = [all_mask_preds for _ in range(num_dec_layers)]
all_gt_bboxes_list = [gt_bboxes_list for _ in range(num_dec_layers)]
all_gt_labels_list = [gt_labels_list for _ in range(num_dec_layers)]
all_gt_bboxes_ignore_list = [
gt_bboxes_ignore for _ in range(num_dec_layers)
]
all_gt_masks_list = [gt_masks_list for _ in range(num_dec_layers)]
img_metas_list = [img_metas for _ in range(num_dec_layers)]
losses_cls, losses_bbox, losses_iou, dice_losses, focal_losses = multi_apply(
self.loss_single, all_cls_scores, all_bbox_preds, all_mask_preds,
all_gt_bboxes_list, all_gt_labels_list, all_gt_masks_list,
img_metas_list, all_gt_bboxes_ignore_list)
loss_dict = dict()
# loss from the last decoder layer
loss_dict['loss_cls'] = losses_cls[-1]
loss_dict['loss_bbox'] = losses_bbox[-1]
loss_dict['loss_iou'] = losses_iou[-1]
loss_dict['focal_losses'] = focal_losses[-1]
loss_dict['dice_losses'] = dice_losses[-1]
# loss from other decoder layers
num_dec_layer = 0
for loss_cls_i, loss_bbox_i, loss_iou_i in zip(losses_cls[:-1],
losses_bbox[:-1],
losses_iou[:-1]):
loss_dict[f'd{num_dec_layer}.loss_cls'] = loss_cls_i
loss_dict[f'd{num_dec_layer}.loss_bbox'] = loss_bbox_i
loss_dict[f'd{num_dec_layer}.loss_iou'] = loss_iou_i
num_dec_layer += 1
return loss_dict
def loss_single(self,
cls_scores,
bbox_preds,
mask_preds,
gt_bboxes_list,
gt_labels_list,
gt_masks_list,
img_metas,
gt_bboxes_ignore_list=None):
""""Loss function for outputs from a single decoder layer of a single
feature level.
Args:
cls_scores dict[Tensor]: Score logits from a single decoder layer
for all images. Shape [bs, num_query, cls_out_channels].
bbox_preds dict[Tensor]: Sigmoid outputs from a single decoder layer
for all images, with normalized coordinate (cx, cy, w, h) and
shape [bs, num_query, 4].
gt_bboxes_list (list[Tensor]): Ground truth bboxes for each image
with shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format.
gt_labels_list (list[Tensor]): Ground truth class indices for each
image with shape (num_gts, ).
img_metas (list[dict]): List of image meta information.
gt_bboxes_ignore_list (list[Tensor], optional): Bounding
boxes which can be ignored for each image. Default None.
Returns:
dict[str, Tensor]: A dictionary of loss components for outputs from
a single decoder layer.
"""
num_imgs = cls_scores.size(0)
cls_scores_list = [cls_scores[i] for i in range(num_imgs)]
bbox_preds_list = [bbox_preds[i] for i in range(num_imgs)]
mask_preds_list = [mask_preds[i] for i in range(num_imgs)]
cls_reg_targets = self.get_targets(cls_scores_list, bbox_preds_list,
mask_preds_list, gt_bboxes_list,
gt_labels_list, gt_masks_list,
img_metas, gt_bboxes_ignore_list)
(labels_list, label_weights_list, bbox_targets_list, bbox_weights_list,
mask_targets_list, num_total_pos, num_total_neg,
mask_preds_list) = cls_reg_targets
labels = torch.cat(labels_list, 0)
label_weights = torch.cat(label_weights_list, 0)
bbox_targets = torch.cat(bbox_targets_list, 0)
bbox_weights = torch.cat(bbox_weights_list, 0)
mask_targets = torch.cat(mask_targets_list, 0).float().flatten(1)
mask_preds = torch.cat(mask_preds_list, 0).flatten(1)
num_matches = mask_preds.shape[0]
# mask loss
focal_loss = self.focal_loss(mask_preds, mask_targets, num_matches)
dice_loss = self.dice_loss(
mask_preds, mask_targets,
num_matches) #,s_mask_weights,avg_factor=num_total_pos)
# classification loss
cls_scores = cls_scores.reshape(-1, self.cls_out_channels)
# construct weighted avg_factor to match with the official DETR repo
cls_avg_factor = num_total_pos * 1.0 + \
num_total_neg * self.bg_cls_weight
if self.sync_cls_avg_factor:
cls_avg_factor = reduce_mean(
cls_scores.new_tensor([cls_avg_factor]))
cls_avg_factor = max(cls_avg_factor, 1)
loss_cls = self.loss_cls(cls_scores,
labels,
label_weights,
avg_factor=cls_avg_factor)
# Compute the average number of gt boxes across all gpus, for
# normalization purposes
num_total_pos = loss_cls.new_tensor([num_total_pos])
num_total_pos = torch.clamp(reduce_mean(num_total_pos), min=1).item()
# construct factors used for rescale bboxes
factors = []
for img_meta, bbox_pred in zip(img_metas, bbox_preds):
img_h, img_w, _ = img_meta['img_shape']
factor = bbox_pred.new_tensor([img_w, img_h, img_w,
img_h]).unsqueeze(0).repeat(
bbox_pred.size(0), 1)
factors.append(factor)
factors = torch.cat(factors, 0)
        # DETR regresses the relative positions of boxes (cxcywh) in the image,
        # thus the learning target is normalized by the image size. So here
        # we need to re-scale them for calculating the IoU loss.
bbox_preds = bbox_preds.reshape(-1, 4)
bboxes = bbox_cxcywh_to_xyxy(bbox_preds) * factors
bboxes_gt = bbox_cxcywh_to_xyxy(bbox_targets) * factors
        # regression IoU loss, GIoU loss by default
loss_iou = self.loss_iou(bboxes,
bboxes_gt,
bbox_weights,
avg_factor=num_total_pos)
# regression L1 loss
loss_bbox = self.loss_bbox(bbox_preds,
bbox_targets,
bbox_weights,
avg_factor=num_total_pos)
return loss_cls, loss_bbox, loss_iou, dice_loss, focal_loss
def get_targets(self,
cls_scores_list,
bbox_preds_list,
mask_preds_list,
gt_bboxes_list,
gt_labels_list,
gt_masks_list,
img_metas,
gt_bboxes_ignore_list=None):
assert gt_bboxes_ignore_list is None, \
'Only supports for gt_bboxes_ignore setting to None.'
num_imgs = len(cls_scores_list)
gt_bboxes_ignore_list = [
gt_bboxes_ignore_list for _ in range(num_imgs)
]
(labels_list, label_weights_list, bbox_targets_list, bbox_weights_list,
mask_targets_list, pos_inds_list, neg_inds_list,
mask_preds_list) = multi_apply(self._get_target_single,
cls_scores_list, bbox_preds_list,
mask_preds_list, gt_bboxes_list,
gt_labels_list, gt_masks_list,
img_metas, gt_bboxes_ignore_list)
num_total_pos = sum((inds.numel() for inds in pos_inds_list))
num_total_neg = sum((inds.numel() for inds in neg_inds_list))
return (labels_list, label_weights_list, bbox_targets_list,
bbox_weights_list, mask_targets_list, num_total_pos,
num_total_neg, mask_preds_list)
def _get_target_single(self,
cls_score,
bbox_pred,
mask_preds,
gt_bboxes,
gt_labels,
gt_masks,
img_meta,
gt_bboxes_ignore=None):
num_bboxes = bbox_pred.size(0)
assert len(gt_masks) == len(gt_bboxes)
        # assigner and sampler
assign_result = self.assigner.assign(bbox_pred, cls_score, gt_bboxes,
gt_labels, img_meta,
gt_bboxes_ignore)
sampling_result = self.sampler.sample(assign_result, bbox_pred,
gt_bboxes)
pos_inds = sampling_result.pos_inds
        neg_inds = sampling_result.neg_inds  # unmatched (background) query indices
# label targets
labels = gt_bboxes.new_full((num_bboxes, ),
self.num_classes,
dtype=torch.long) ### 0-based
labels[pos_inds] = gt_labels[sampling_result.pos_assigned_gt_inds]
label_weights = gt_bboxes.new_ones(num_bboxes)
        # mask targets for the matched (positive) queries
mask_targets = gt_masks[sampling_result.pos_assigned_gt_inds,
...] ###FIXME some transform might be needed
mask_preds = mask_preds[pos_inds]
mask_preds = interpolate(mask_preds[:, None],
size=gt_masks.shape[-2:],
mode='bilinear',
align_corners=False).squeeze(1)
        # bbox targets for the matched (positive) queries
bbox_targets = torch.zeros_like(bbox_pred)
bbox_weights = torch.zeros_like(bbox_pred)
bbox_weights[pos_inds] = 1.0
img_h, img_w, _ = img_meta['img_shape']
        # DETR regresses the relative positions of boxes (cxcywh) in the image.
        # Thus the learning target should be normalized by the image size, and
        # the box format should be converted from the default x1y1x2y2 to cxcywh.
factor = bbox_pred.new_tensor([img_w, img_h, img_w,
img_h]).unsqueeze(0)
pos_gt_bboxes_normalized = sampling_result.pos_gt_bboxes / factor
pos_gt_bboxes_targets = bbox_xyxy_to_cxcywh(pos_gt_bboxes_normalized)
bbox_targets[pos_inds] = pos_gt_bboxes_targets
return (labels, label_weights, bbox_targets, bbox_weights,
mask_targets, pos_inds, neg_inds, mask_preds
) ###return the interpolated predicted masks
# over-write because img_metas are needed as inputs for bbox_head.
def forward_train(self,
x,
img_metas,
gt_bboxes,
gt_labels=None,
gt_masks=None,
gt_bboxes_ignore=None,
proposal_cfg=None,
**kwargs):
"""Forward function for training mode.
Args:
x (list[Tensor]): Features from backbone.
img_metas (list[dict]): Meta information of each image, e.g.,
image size, scaling factor, etc.
gt_bboxes (Tensor): Ground truth bboxes of the image,
shape (num_gts, 4).
gt_labels (Tensor): Ground truth labels of each box,
shape (num_gts,).
gt_bboxes_ignore (Tensor): Ground truth bboxes to be
ignored, shape (num_ignored_gts, 4).
proposal_cfg (mmcv.Config): Test / postprocessing configuration,
if None, test_cfg would be used.
Returns:
dict[str, Tensor]: A dictionary of loss components.
"""
assert proposal_cfg is None, '"proposal_cfg" must be None'
outs = self(x, img_metas)
if gt_labels is None:
loss_inputs = outs + (gt_bboxes, gt_masks, img_metas)
else:
loss_inputs = outs + (gt_bboxes, gt_labels, gt_masks, img_metas)
losses = self.loss(*loss_inputs, gt_bboxes_ignore=gt_bboxes_ignore)
return losses
@force_fp32(apply_to=('all_cls_scores_list', 'all_bbox_preds_list'))
def get_bboxes(self, cls_scores, bbox_preds, img_metas, rescale=False):
"""NOTE:Transform network outputs for a batch into psg predictions, but
still use the name of get_bboxes for now.
Args:
all_cls_scores_list (list[Tensor]): Classification outputs
for each feature level. Each is a 4D-tensor with shape
[nb_dec, bs, num_query, cls_out_channels].
all_bbox_preds_list (list[Tensor]): Sigmoid regression
outputs for each feature level. Each is a 4D-tensor with
normalized coordinate format (cx, cy, w, h) and shape
[nb_dec, bs, num_query, 4].
img_metas (list[dict]): Meta information of each image.
rescale (bool, optional): If True, return boxes in original
image space. Default False.
Returns:
list[list[Tensor, Tensor]]: Each item in result_list is 2-tuple. \
The first item is an (n, 5) tensor, where the first 4 columns \
are bounding box positions (tl_x, tl_y, br_x, br_y) and the \
5-th column is a score between 0 and 1. The second item is a \
(n,) tensor where each item is the predicted class label of \
the corresponding box.
"""
        # NOTE: by default only outputs from the last feature level and the
        # last decoder layer are used.
result_list = []
for img_id in range(len(img_metas)):
cls_score = cls_scores[-1, img_id, ...]
bbox_pred = bbox_preds['bbox'][-1, img_id, ...]
img_shape = img_metas[img_id]['img_shape']
scale_factor = img_metas[img_id]['scale_factor']
mask_pred = bbox_preds['masks'][img_id, ...]
triplets = self._get_bboxes_single(cls_score, bbox_pred, mask_pred,
img_shape, scale_factor,
rescale)
result_list.append(triplets)
return result_list
def _get_bboxes_single(self,
cls_score,
bbox_pred,
mask_pred,
img_shape,
scale_factor,
rescale=False):
"""Transform outputs from the last decoder layer into bbox predictions
for each image.
Args:
            cls_score (Tensor): Box score logits from the last decoder layer
                for a single image, with shape [num_query, cls_out_channels].
            bbox_pred (Tensor): Sigmoid box outputs from the last decoder
                layer for a single image, in normalized (cx, cy, w, h) format,
                with shape [num_query, 4].
            mask_pred (Tensor): Mask logits for a single image, with shape
                [num_query, h, w].
img_shape (tuple[int]): Shape of input image, (height, width, 3).
scale_factor (ndarray, optional): Scale factor of the image arange
as (w_scale, h_scale, w_scale, h_scale).
rescale (bool, optional): If True, return boxes in original image
space. Default False.
Returns:
tuple[Tensor]: Results of detected bboxes and labels.
- det_bboxes: Predicted bboxes with shape [num_query, 5], \
where the first 4 columns are bounding box positions \
(tl_x, tl_y, br_x, br_y) and the 5-th column are scores \
between 0 and 1.
- det_labels: Predicted labels of the corresponding box with \
shape [num_query].
"""
mask_size = (round(img_shape[0] / scale_factor[1]),
round(img_shape[1] / scale_factor[0]))
max_per_img = self.test_cfg.get('max_per_img', self.num_query)
        # softmax over classes; the background class is the last index
logits = F.softmax(cls_score, dim=-1)
scores, labels = logits.max(-1)
scores, bbox_index = scores.topk(max_per_img)
bbox_pred = bbox_pred[bbox_index]
labels = labels[bbox_index]
mask_pred = mask_pred[bbox_index]
        keep = (labels != logits.shape[-1] - 1) & (
            scores > 0.85)  # drop background and low-score (<= 0.85) queries
bbox_pred = bbox_pred[keep]
det_labels = labels[keep]
det_masks = mask_pred[keep]
scores = scores[keep]
det_masks = F.interpolate(det_masks.unsqueeze(1),
size=mask_size,
mode='bilinear').squeeze(1)
h, w = det_masks.shape[-2:]
assert len(det_labels) == len(bbox_pred)
det_bboxes = bbox_cxcywh_to_xyxy(bbox_pred)
det_bboxes[:, 0::2] = det_bboxes[:, 0::2] * img_shape[1]
det_bboxes[:, 1::2] = det_bboxes[:, 1::2] * img_shape[0]
det_bboxes[:, 0::2].clamp_(min=0, max=img_shape[1])
det_bboxes[:, 1::2].clamp_(min=0, max=img_shape[0])
if rescale:
det_bboxes /= det_bboxes.new_tensor(scale_factor)
det_bboxes = torch.cat((det_bboxes, scores.unsqueeze(1)), -1)
bbox_labels = det_labels
if det_labels.numel() == 0:
pan_img = torch.ones(mask_size).cpu().to(torch.long)
return det_bboxes, bbox_labels, pan_img
# It may be that we have several predicted masks for the same stuff class.
# In the following, we track the list of masks ids for each stuff class (they are merged later on)
det_masks = det_masks.flatten(1)
stuff_equiv_classes = defaultdict(lambda: [])
for k, label in enumerate(det_labels):
if label.item() >= 80:
stuff_equiv_classes[label.item()].append(k)
def get_ids_area(masks, scores, dedup=False):
# This helper function creates the final panoptic segmentation image
# It also returns the area of the masks that appears on the image
m_id = masks.transpose(0, 1).softmax(-1)
if m_id.shape[-1] == 0:
# We didn't detect any mask :(
m_id = torch.zeros((h, w),
dtype=torch.long,
device=m_id.device)
else:
m_id = m_id.argmax(-1).view(h, w)
if dedup:
# Merge the masks corresponding to the same stuff class
for equiv in stuff_equiv_classes.values():
if len(equiv) > 1:
for eq_id in equiv:
m_id.masked_fill_(m_id.eq(eq_id), equiv[0])
seg_img = m_id * INSTANCE_OFFSET + det_labels[m_id]
seg_img = seg_img.view(h, w).cpu().to(torch.long)
m_id = m_id.view(h, w).cpu()
area = []
for i in range(len(scores)):
area.append(m_id.eq(i).sum().item())
return area, seg_img
area, pan_img = get_ids_area(det_masks, scores, dedup=True)
if det_labels.numel() > 0:
            # We now filter out empty masks, as long as some masks remain
while True:
filtered_small = torch.as_tensor(
[area[i] <= 4 for i, c in enumerate(det_labels)],
dtype=torch.bool,
device=keep.device)
if filtered_small.any().item():
scores = scores[~filtered_small]
det_labels = det_labels[~filtered_small]
det_masks = det_masks[~filtered_small]
area, pan_img = get_ids_area(det_masks, scores)
else:
break
return det_bboxes, bbox_labels, pan_img
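    # A hedged decoding sketch for the panoptic map built above. Given the
    # encoding `m_id * INSTANCE_OFFSET + det_labels[m_id]` used in
    # get_ids_area(), and assuming labels stay below INSTANCE_OFFSET (the
    # usual mmdet convention):
    #   semantic_map = pan_img % INSTANCE_OFFSET   # per-pixel class label
    #   instance_map = pan_img // INSTANCE_OFFSET  # per-pixel query index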
def simple_test_bboxes(self, feats, img_metas, rescale=False):
"""Test det bboxes without test-time augmentation.
Args:
feats (tuple[torch.Tensor]): Multi-level features from the
upstream network, each is a 4D-tensor.
img_metas (list[dict]): List of image information.
rescale (bool, optional): Whether to rescale the results.
Defaults to False.
Returns:
            list[tuple[Tensor]]: Each item in result_list is a 3-tuple
                ``(det_bboxes, det_labels, pan_img)``. ``det_bboxes`` has
                shape (n, 5), where the 5 columns are
                (tl_x, tl_y, br_x, br_y, score); ``det_labels`` has shape
                (n,); ``pan_img`` is the panoptic segmentation map of the
                image.
"""
# forward of this head requires img_metas
outs = self.forward(feats, img_metas)
results_list = self.get_bboxes(*outs, img_metas, rescale=rescale)
return results_list
class MLP(nn.Module):
"""Very simple multi-layer perceptron (also called FFN) Copied from
hoitr."""
def __init__(self, input_dim, hidden_dim, output_dim, num_layers):
super().__init__()
self.num_layers = num_layers
h = [hidden_dim] * (num_layers - 1)
self.layers = nn.ModuleList(
nn.Linear(n, k) for n, k in zip([input_dim] + h, h + [output_dim]))
def forward(self, x):
for i, layer in enumerate(self.layers):
x = F.relu(layer(x)) if i < self.num_layers - 1 else layer(x)
return x
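# A minimal usage sketch for MLP (the sizes are assumptions for illustration;
# nn.Linear acts on the last dimension, so any number of leading dimensions
# works):
#   bbox_embed = MLP(input_dim=256, hidden_dim=256, output_dim=4, num_layers=3)
#   coords = bbox_embed(torch.randn(6, 2, 100, 256)).sigmoid()  # [6, 2, 100, 4]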
def _expand(tensor, length: int):
return tensor.unsqueeze(1).repeat(1, int(length), 1, 1, 1).flatten(0, 1)
class MaskHeadSmallConv(nn.Module):
"""Simple convolutional head, using group norm.
    Upsampling is done using an FPN approach.
"""
def __init__(self, dim, fpn_dims, context_dim):
super().__init__()
inter_dims = [
dim, context_dim // 2, context_dim // 4, context_dim // 8,
context_dim // 16, context_dim // 64
]
self.lay1 = torch.nn.Conv2d(dim, dim, 3, padding=1)
self.gn1 = torch.nn.GroupNorm(8, dim)
self.lay2 = torch.nn.Conv2d(dim, inter_dims[1], 3, padding=1)
self.gn2 = torch.nn.GroupNorm(8, inter_dims[1])
self.lay3 = torch.nn.Conv2d(inter_dims[1], inter_dims[2], 3, padding=1)
self.gn3 = torch.nn.GroupNorm(8, inter_dims[2])
self.lay4 = torch.nn.Conv2d(inter_dims[2], inter_dims[3], 3, padding=1)
self.gn4 = torch.nn.GroupNorm(8, inter_dims[3])
self.lay5 = torch.nn.Conv2d(inter_dims[3], inter_dims[4], 3, padding=1)
self.gn5 = torch.nn.GroupNorm(8, inter_dims[4])
self.out_lay = torch.nn.Conv2d(inter_dims[4], 1, 3, padding=1)
self.dim = dim
self.adapter1 = torch.nn.Conv2d(fpn_dims[0], inter_dims[1], 1)
self.adapter2 = torch.nn.Conv2d(fpn_dims[1], inter_dims[2], 1)
self.adapter3 = torch.nn.Conv2d(fpn_dims[2], inter_dims[3], 1)
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_uniform_(m.weight, a=1)
nn.init.constant_(m.bias, 0)
def forward(self, x, bbox_mask, fpns):
x = torch.cat(
[_expand(x, bbox_mask.shape[1]),
bbox_mask.flatten(0, 1)], 1)
x = self.lay1(x)
x = self.gn1(x)
x = F.relu(x)
x = self.lay2(x)
x = self.gn2(x)
x = F.relu(x)
cur_fpn = self.adapter1(fpns[0])
if cur_fpn.size(0) != x.size(0):
cur_fpn = _expand(cur_fpn, x.size(0) // cur_fpn.size(0))
x = cur_fpn + F.interpolate(x, size=cur_fpn.shape[-2:], mode='nearest')
x = self.lay3(x)
x = self.gn3(x)
x = F.relu(x)
cur_fpn = self.adapter2(fpns[1])
if cur_fpn.size(0) != x.size(0):
cur_fpn = _expand(cur_fpn, x.size(0) // cur_fpn.size(0))
x = cur_fpn + F.interpolate(x, size=cur_fpn.shape[-2:], mode='nearest')
x = self.lay4(x)
x = self.gn4(x)
x = F.relu(x)
cur_fpn = self.adapter3(fpns[2])
if cur_fpn.size(0) != x.size(0):
cur_fpn = _expand(cur_fpn, x.size(0) // cur_fpn.size(0))
x = cur_fpn + F.interpolate(x, size=cur_fpn.shape[-2:], mode='nearest')
x = self.lay5(x)
x = self.gn5(x)
x = F.relu(x)
x = self.out_lay(x)
return x
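# An illustrative shape walk-through of MaskHeadSmallConv; the sizes
# (embed_dims=256, n_heads=8, 100 queries, batch 1) are assumptions:
#   x          : [1, 256, H, W]        projected backbone feature (lowest res)
#   bbox_mask  : [1, 100, 8, H, W]     per-query attention maps
#   cat input  : [100, 256 + 8, H, W]  after _expand + flatten(0, 1)
#   output     : [100, 1, H', W']      single-channel mask logits, where
#                (H', W') is the resolution of the last FPN feature in `fpns`.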
class MHAttentionMap(nn.Module):
"""This is a 2D attention module, which only returns the attention softmax
(no multiplication by value)"""
def __init__(self,
query_dim,
hidden_dim,
num_heads,
dropout=0.0,
bias=True):
super().__init__()
self.num_heads = num_heads
self.hidden_dim = hidden_dim
self.dropout = nn.Dropout(dropout)
self.q_linear = nn.Linear(query_dim, hidden_dim, bias=bias)
self.k_linear = nn.Linear(query_dim, hidden_dim, bias=bias)
nn.init.zeros_(self.k_linear.bias)
nn.init.zeros_(self.q_linear.bias)
nn.init.xavier_uniform_(self.k_linear.weight)
nn.init.xavier_uniform_(self.q_linear.weight)
self.normalize_fact = float(hidden_dim / self.num_heads)**-0.5
def forward(self, q, k, mask=None):
q = self.q_linear(q)
k = F.conv2d(k,
self.k_linear.weight.unsqueeze(-1).unsqueeze(-1),
self.k_linear.bias)
qh = q.view(q.shape[0], q.shape[1], self.num_heads,
self.hidden_dim // self.num_heads)
kh = k.view(k.shape[0], self.num_heads,
self.hidden_dim // self.num_heads, k.shape[-2],
k.shape[-1])
weights = torch.einsum('bqnc,bnchw->bqnhw', qh * self.normalize_fact,
kh)
if mask is not None:
weights.masked_fill_(mask.unsqueeze(1).unsqueeze(1), float('-inf'))
weights = F.softmax(weights.flatten(2), dim=-1).view(weights.size())
weights = self.dropout(weights)
return weights
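# A minimal usage sketch for MHAttentionMap (shapes assumed for illustration
# only):
#   attn = MHAttentionMap(query_dim=256, hidden_dim=256, num_heads=8)
#   q = torch.randn(1, 100, 256)      # decoder output for one image
#   k = torch.randn(1, 256, 32, 32)   # projected feature map
#   w = attn(q, k)                    # [1, 100, 8, 32, 32]; the softmax is
#                                     # taken over the flattened (head, h, w) dims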
def interpolate(input,
size=None,
scale_factor=None,
mode='nearest',
align_corners=None):
"""Equivalent to nn.functional.interpolate, but with support for empty
batch sizes.
    This will eventually be supported natively by PyTorch, at which point this
    function can go away.
"""
if version.parse(torchvision.__version__) < version.parse('0.7'):
if input.numel() > 0:
return torch.nn.functional.interpolate(input, size, scale_factor,
mode, align_corners)
output_shape = _output_size(2, input, size, scale_factor)
output_shape = list(input.shape[:-2]) + list(output_shape)
return _new_empty_tensor(input, output_shape)
else:
return torchvision.ops.misc.interpolate(input, size, scale_factor,
mode, align_corners)
| 41,053 | 43.049356 | 108 | py |
OpenPSG | OpenPSG-main/openpsg/models/relation_heads/approaches/vctree_util.py | # ---------------------------------------------------------------
# vctree_util.py
# Set-up time: 2020/6/4 3:43 PM
# Copyright (c) 2020 ICT
# Licensed under The MIT License [see LICENSE for details]
# Written by Kenneth-Wong (Wenbin-Wang) @ VIPL.ICT
# Contact: wenbin.wang@vipl.ict.ac.cn [OR] nkwangwenbin@gmail.com
# ---------------------------------------------------------------
from collections import Counter
import torch
def generate_forest(pair_scores, det_result):
"""
generate a list of trees that covers all the objects in a batch
det_result:
pair_scores: [obj_num, obj_num]
output: list of trees, each present a chunk of overlapping objects
"""
output_forest = [
] # the list of trees, each one is a chunk of overlapping objects
labels = det_result.labels
bboxes = det_result.bboxes
for pair_score, label, bbox in zip(
pair_scores,
labels,
bboxes,
):
num_obj = pair_score.shape[0]
obj_label = label
assert pair_score.shape[0] == obj_label.shape[0]
assert pair_score.shape[0] == pair_score.shape[1]
node_scores = pair_score.mean(1).view(-1)
root_idx = int(node_scores.max(-1)[1])
root = ArbitraryTree(root_idx,
float(node_scores[root_idx]),
int(obj_label[root_idx]),
bbox[root_idx],
is_root=True)
node_container = []
remain_index = []
# put all nodes into node container
for idx in list(range(num_obj)):
if idx == root_idx:
continue
new_node = ArbitraryTree(idx, float(node_scores[idx]),
int(obj_label[idx]), bbox[idx])
node_container.append(new_node)
remain_index.append(idx)
# iteratively generate tree
gen_tree(node_container, pair_score, node_scores, root, remain_index)
output_forest.append(root)
return output_forest
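# A hedged usage sketch for generate_forest; the det_result fields and sizes
# below are illustrative assumptions, not real data:
#   pair_scores = [torch.rand(5, 5)]                  # one image, 5 objects
#   det_result.labels = [torch.randint(1, 10, (5,))]
#   det_result.bboxes = [torch.rand(5, 4)]
#   forest = generate_forest(pair_scores, det_result)    # one root per image
#   bi_forest = arbForest_to_biForest(forest)             # binary-tree version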
def gen_tree(node_container, pair_score, node_scores, root, remain_index):
"""Step 1: Divide all nodes into left child container and right child
container Step 2: From left child container and right child container,
select their respective sub roots.
pair_scores: [obj_num, obj_num]
node_scores: [obj_num]
"""
num_nodes = len(node_container)
device = pair_score.device
    # nothing to build if there are no remaining nodes
    if num_nodes == 0:
        return
    # greedily insert the remaining nodes
select_node = []
select_index = []
select_node.append(root)
select_index.append(root.index)
while len(node_container) > 0:
wid = len(remain_index)
select_indexs = torch.tensor(select_index,
device=device,
dtype=torch.int64)
remain_indexs = torch.tensor(remain_index,
device=device,
dtype=torch.int64)
select_score_map = pair_score[select_indexs][:, remain_indexs].view(-1)
best_id = select_score_map.max(0)[1]
depend_id = int(best_id) // wid
insert_id = int(best_id) % wid
best_depend_node = select_node[depend_id]
best_insert_node = node_container[insert_id]
best_depend_node.add_child(best_insert_node)
select_node.append(best_insert_node)
select_index.append(best_insert_node.index)
node_container.remove(best_insert_node)
remain_index.remove(best_insert_node.index)
def arbForest_to_biForest(forest):
"""
forest: a set of arbitrary Tree
output: a set of corresponding binary Tree
"""
output = []
for i in range(len(forest)):
result_tree = arTree_to_biTree(forest[i])
output.append(result_tree)
return output
def arTree_to_biTree(arTree):
root_node = arTree.generate_bi_tree()
arNode_to_biNode(arTree, root_node)
return root_node
def arNode_to_biNode(arNode, biNode):
if arNode.get_child_num() >= 1:
new_bi_node = arNode.children[0].generate_bi_tree()
biNode.add_left_child(new_bi_node)
arNode_to_biNode(arNode.children[0], biNode.left_child)
if arNode.get_child_num() > 1:
current_bi_node = biNode.left_child
for i in range(arNode.get_child_num() - 1):
new_bi_node = arNode.children[i + 1].generate_bi_tree()
current_bi_node.add_right_child(new_bi_node)
current_bi_node = current_bi_node.right_child
arNode_to_biNode(arNode.children[i + 1], current_bi_node)
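# arNode_to_biNode applies the usual left-child / right-sibling encoding: the
# first child of an arbitrary-tree node becomes the left child of the binary
# node, and every later sibling is chained through right children. A tiny
# illustration (node indices only):
#   arbitrary tree:  0 -> children [1, 2, 3]
#   binary tree:     0.left = 1,  1.right = 2,  2.right = 3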
def find_best_node(node_container):
max_node_score = -1
best_node = None
for i in range(len(node_container)):
if node_container[i].score > max_node_score:
max_node_score = node_container[i].score
best_node = node_container[i]
return best_node
class BasicBiTree(object):
def __init__(self, idx, is_root=False):
self.index = int(idx)
self.is_root = is_root
self.left_child = None
self.right_child = None
self.parent = None
self.num_child = 0
def add_left_child(self, child):
if self.left_child is not None:
            print('Left child already exists')
return
child.parent = self
self.num_child += 1
self.left_child = child
def add_right_child(self, child):
if self.right_child is not None:
            print('Right child already exists')
return
child.parent = self
self.num_child += 1
self.right_child = child
def get_total_child(self):
sum = 0
sum += self.num_child
if self.left_child is not None:
sum += self.left_child.get_total_child()
if self.right_child is not None:
sum += self.right_child.get_total_child()
return sum
def depth(self):
if hasattr(self, '_depth'):
return self._depth
if self.parent is None:
count = 1
else:
count = self.parent.depth() + 1
self._depth = count
return self._depth
def max_depth(self):
if hasattr(self, '_max_depth'):
return self._max_depth
count = 0
if self.left_child is not None:
left_depth = self.left_child.max_depth()
if left_depth > count:
count = left_depth
if self.right_child is not None:
right_depth = self.right_child.max_depth()
if right_depth > count:
count = right_depth
count += 1
self._max_depth = count
return self._max_depth
# by index
def is_descendant(self, idx):
left_flag = False
right_flag = False
# node is left child
if self.left_child is not None:
            if self.left_child.index == idx:
return True
else:
left_flag = self.left_child.is_descendant(idx)
# node is right child
if self.right_child is not None:
            if self.right_child.index == idx:
return True
else:
right_flag = self.right_child.is_descendant(idx)
# node is descendant
if left_flag or right_flag:
return True
else:
return False
# whether input node is under left sub tree
def is_left_descendant(self, idx):
if self.left_child is not None:
            if self.left_child.index == idx:
return True
else:
return self.left_child.is_descendant(idx)
else:
return False
# whether input node is under right sub tree
def is_right_descendant(self, idx):
if self.right_child is not None:
            if self.right_child.index == idx:
return True
else:
return self.right_child.is_descendant(idx)
else:
return False
class ArbitraryTree(object):
def __init__(self, idx, score, label=-1, box=None, is_root=False):
self.index = int(idx)
self.is_root = is_root
self.score = float(score)
self.children = []
self.label = label
self.embeded_label = None
self.box = box.view(-1) if box is not None else None # [x1,y1,x2,y2]
self.parent = None
self.node_order = -1 # the n_th node added to the tree
# define state and cell_state for lstm
self.chain_state_h = None
self.chain_state_c = None
self.chain_state_h_backward = None
self.chain_state_c_backward = None
self.tree_state_h = None
self.tree_state_c = None
self.tree_state_h_backward = None
self.tree_state_c_backward = None
def generate_bi_tree(self):
# generate a BiTree node, parent/child relationship are not inherited
return BiTree(self.index, self.score, self.label, self.box,
self.is_root)
def add_child(self, child):
child.parent = self
self.children.append(child)
def print(self):
print('index: ', self.index)
print('node_order: ', self.node_order)
print('num of child: ', len(self.children))
for node in self.children:
node.print()
def find_node_by_order(self, order, result_node):
if self.node_order == order:
result_node = self
elif len(self.children) > 0:
for i in range(len(self.children)):
result_node = self.children[i].find_node_by_order(
order, result_node)
return result_node
def find_node_by_index(self, index, result_node):
if self.index == index:
result_node = self
elif len(self.children) > 0:
for i in range(len(self.children)):
result_node = self.children[i].find_node_by_index(
index, result_node)
return result_node
def search_best_insert(self,
score_map,
best_score,
insert_node,
best_depend_node,
best_insert_node,
ignore_root=True):
if self.is_root and ignore_root:
pass
elif float(score_map[self.index,
insert_node.index]) > float(best_score):
best_score = score_map[self.index, insert_node.index]
best_depend_node = self
best_insert_node = insert_node
# iteratively search child
for i in range(self.get_child_num()):
best_score, best_depend_node, best_insert_node = \
self.children[i].search_best_insert(score_map, best_score, insert_node, best_depend_node,
best_insert_node)
return best_score, best_depend_node, best_insert_node
def get_child_num(self):
return len(self.children)
def get_total_child(self):
sum = 0
num_current_child = self.get_child_num()
sum += num_current_child
for i in range(num_current_child):
sum += self.children[i].get_total_child()
return sum
def max_depth(self):
if hasattr(self, '_max_depth'):
return self._max_depth
count = 0
if len(self.children):
for i in range(len(self.children)):
depth = self.children[i].max_depth()
if depth > count:
count = depth
count += 1
self._max_depth = count
return self._max_depth
def depth(self):
if hasattr(self, '_depth'):
return self._depth
if self.parent is None:
count = -1
else:
count = self.parent.depth() + 1
self._depth = count
return self._depth
def max_width(self):
if hasattr(self, '_max_width'):
return self._max_width
counter = Counter()
counter.update([self.depth()])
for i in range(len(self.children)):
counter.update([self.children[i].depth()])
self._max_width = counter.most_common(1)[0][0]
return self._max_width
def leafcount(self):
if hasattr(self, '_leafcount'):
return self._leafcount
self._leafcount = 0
for i in range(len(self.children)):
if self.children[i].get_child_num() == 0:
self._leafcount += 1
else:
self._leafcount += self.children[i].leafcount()
return self._leafcount
# only support binary tree
class BiTree(BasicBiTree):
def __init__(self, idx, node_score, label, box, is_root=False):
super(BiTree, self).__init__(idx, is_root)
self.state_c = None
self.state_h = None
self.state_c_backward = None
self.state_h_backward = None
# used to select node
self.node_score = float(node_score)
self.label = label
self.embeded_label = None
self.box = box.view(-1) # [x1,y1,x2,y2]
def bbox_intersection(box_a, box_b):
A = box_a.size(0)
B = box_b.size(0)
max_xy = torch.min(box_a[:, 2:].unsqueeze(1).expand(A, B, 2),
box_b[:, 2:].unsqueeze(0).expand(A, B, 2))
min_xy = torch.max(box_a[:, :2].unsqueeze(1).expand(A, B, 2),
box_b[:, :2].unsqueeze(0).expand(A, B, 2))
inter = torch.clamp((max_xy - min_xy + 1.0), min=0)
return inter[:, :, 0] * inter[:, :, 1]
def bbox_overlap(box_a, box_b):
inter = bbox_intersection(box_a, box_b)
area_a = ((box_a[:, 2] - box_a[:, 0] + 1.0) *
(box_a[:, 3] - box_a[:, 1] + 1.0)).unsqueeze(1).expand_as(
inter) # [A,B]
area_b = ((box_b[:, 2] - box_b[:, 0] + 1.0) *
(box_b[:, 3] - box_b[:, 1] + 1.0)).unsqueeze(0).expand_as(
inter) # [A,B]
union = area_a + area_b - inter
return inter / (union + 1e-9)
def bbox_area(bbox):
area = (bbox[:, 2] - bbox[:, 0]) * (bbox[:, 3] - bbox[:, 1])
return area.view(-1, 1)
def get_overlap_info(infostruct):
bboxes, img_shapes = infostruct.bboxes, infostruct.img_shape
overlap_info = []
for bbox, img_shape in zip(bboxes, img_shapes):
bbox = bbox[:, :4]
intersection = bbox_intersection(bbox, bbox).float() # num, num
overlap = bbox_overlap(bbox, bbox).float() # num, num
area = bbox_area(bbox).float() # num, 1
info1 = (intersection > 0.0).float().sum(1).view(-1, 1)
info2 = intersection.sum(1).view(-1, 1) / float(
img_shape[0] * img_shape[1])
info3 = overlap.sum(1).view(-1, 1)
info4 = info2 / (info1 + 1e-9)
info5 = info3 / (info1 + 1e-9)
info6 = area / float(img_shape[0] * img_shape[1])
info = torch.cat([info1, info2, info3, info4, info5, info6], dim=1)
overlap_info.append(info)
return torch.cat(overlap_info, dim=0)
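# For reference, the six columns produced above are, per box: (1) how many
# boxes it intersects (itself included), (2) summed intersection area / image
# area, (3) summed IoU with all boxes, (4) = (2) / (1), (5) = (3) / (1),
# (6) own box area / image area.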
| 15,131 | 32.330396 | 105 | py |
OpenPSG | OpenPSG-main/openpsg/models/relation_heads/approaches/imp.py | # ---------------------------------------------------------------
# imp.py
# Set-up time: 2020/5/21 11:26 PM
# Copyright (c) 2020 ICT
# Licensed under The MIT License [see LICENSE for details]
# Written by Kenneth-Wong (Wenbin-Wang) @ VIPL.ICT
# Contact: wenbin.wang@vipl.ict.ac.cn [OR] nkwangwenbin@gmail.com
# ---------------------------------------------------------------
import torch
from mmcv.cnn import kaiming_init
from torch import nn
from torch.nn import functional as F
from .motif_util import to_onehot
class IMPContext(nn.Module):
def __init__(self, config, obj_classes, rel_classes):
super(IMPContext, self).__init__()
self.cfg = config
in_channels = self.cfg.roi_dim
self.num_object_classes = len(obj_classes)
self.num_predicates = len(rel_classes)
self.hidden_dim = self.cfg.hidden_dim
self.num_iter = self.cfg.num_iter
# mode
if self.cfg.use_gt_box:
if self.cfg.use_gt_label:
self.mode = 'predcls'
else:
self.mode = 'sgcls'
else:
self.mode = 'sgdet'
self.rel_fc = nn.Linear(self.hidden_dim, self.num_predicates)
self.obj_fc = nn.Linear(self.hidden_dim, self.num_object_classes)
self.obj_unary = nn.Linear(in_channels, self.hidden_dim)
self.edge_unary = nn.Linear(in_channels, self.hidden_dim)
self.edge_gru = nn.GRUCell(input_size=self.hidden_dim,
hidden_size=self.hidden_dim)
self.node_gru = nn.GRUCell(input_size=self.hidden_dim,
hidden_size=self.hidden_dim)
self.sub_vert_w_fc = nn.Sequential(nn.Linear(self.hidden_dim * 2, 1),
nn.Sigmoid())
self.obj_vert_w_fc = nn.Sequential(nn.Linear(self.hidden_dim * 2, 1),
nn.Sigmoid())
self.out_edge_w_fc = nn.Sequential(nn.Linear(self.hidden_dim * 2, 1),
nn.Sigmoid())
self.in_edge_w_fc = nn.Sequential(nn.Linear(self.hidden_dim * 2, 1),
nn.Sigmoid())
def init_weights(self):
for module in [
self.sub_vert_w_fc, self.obj_vert_w_fc, self.out_edge_w_fc,
self.in_edge_w_fc
]:
for m in module:
if isinstance(m, nn.Linear):
kaiming_init(m, distribution='uniform', a=1)
for module in [
self.rel_fc, self.obj_fc, self.obj_unary, self.edge_unary
]:
kaiming_init(module, distribution='uniform', a=1)
def forward(self, x, union_features, det_result, logger=None):
num_objs = [len(b) for b in det_result.bboxes]
rel_pair_idxes = det_result.rel_pair_idxes
obj_rep = self.obj_unary(x)
rel_rep = F.relu(self.edge_unary(union_features))
obj_count = obj_rep.shape[0]
rel_count = rel_rep.shape[0]
# generate sub-rel-obj mapping
sub2rel = torch.zeros(obj_count, rel_count).to(obj_rep)
obj2rel = torch.zeros(obj_count, rel_count).to(obj_rep)
obj_offset = 0
rel_offset = 0
sub_global_inds = []
obj_global_inds = []
for pair_idx, num_obj in zip(rel_pair_idxes, num_objs):
num_rel = pair_idx.shape[0]
sub_idx = pair_idx[:, 0].contiguous().long().view(-1) + obj_offset
obj_idx = pair_idx[:, 1].contiguous().long().view(-1) + obj_offset
rel_idx = torch.arange(num_rel).to(
obj_rep.device).long().view(-1) + rel_offset
sub_global_inds.append(sub_idx)
obj_global_inds.append(obj_idx)
sub2rel[sub_idx, rel_idx] = 1.0
obj2rel[obj_idx, rel_idx] = 1.0
obj_offset += num_obj
rel_offset += num_rel
sub_global_inds = torch.cat(sub_global_inds, dim=0)
obj_global_inds = torch.cat(obj_global_inds, dim=0)
# iterative message passing
hx_obj = torch.zeros(obj_count, self.hidden_dim,
requires_grad=False).to(obj_rep)
hx_rel = torch.zeros(rel_count, self.hidden_dim,
requires_grad=False).to(obj_rep)
vert_factor = [self.node_gru(obj_rep, hx_obj)]
edge_factor = [self.edge_gru(rel_rep, hx_rel)]
for i in range(self.num_iter):
# compute edge context
sub_vert = vert_factor[i][sub_global_inds]
obj_vert = vert_factor[i][obj_global_inds]
weighted_sub = self.sub_vert_w_fc(
torch.cat((sub_vert, edge_factor[i]), 1)) * sub_vert
weighted_obj = self.obj_vert_w_fc(
torch.cat((obj_vert, edge_factor[i]), 1)) * obj_vert
edge_factor.append(
self.edge_gru(weighted_sub + weighted_obj, edge_factor[i]))
# Compute vertex context
pre_out = self.out_edge_w_fc(
torch.cat((sub_vert, edge_factor[i]), 1)) * edge_factor[i]
pre_in = self.in_edge_w_fc(torch.cat(
(obj_vert, edge_factor[i]), 1)) * edge_factor[i]
vert_ctx = sub2rel @ pre_out + obj2rel @ pre_in
vert_factor.append(self.node_gru(vert_ctx, vert_factor[i]))
if self.mode == 'predcls':
obj_labels = torch.cat(det_result.labels, dim=0)
obj_dists = to_onehot(obj_labels, self.num_object_classes)
else:
obj_dists = self.obj_fc(vert_factor[-1])
rel_dists = self.rel_fc(edge_factor[-1])
return obj_dists, rel_dists
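# A hedged usage sketch of IMPContext; config fields and tensor sizes are
# assumptions for illustration, the real values come from the relation head
# configuration:
#   ctx = IMPContext(config, obj_classes, rel_classes)
#   x              : [num_objs_total, roi_dim]   pooled object features
#   union_features : [num_rels_total, roi_dim]   pooled union-box features
#   obj_dists, rel_dists = ctx(x, union_features, det_result)
#   # obj_dists: [num_objs_total, num_obj_classes]
#   # rel_dists: [num_rels_total, num_predicates]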
| 5,667 | 38.915493 | 78 | py |
OpenPSG | OpenPSG-main/openpsg/models/relation_heads/approaches/pointnet.py | # ---------------------------------------------------------------
# pointnet.py
# Set-up time: 2020/10/6 23:24
# Copyright (c) 2020 ICT
# Licensed under The MIT License [see LICENSE for details]
# Written by Kenneth-Wong (Wenbin-Wang) @ VIPL.ICT
# Contact: wenbin.wang@vipl.ict.ac.cn [OR] nkwangwenbin@gmail.com
# ---------------------------------------------------------------
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
class STN2d(nn.Module):
def __init__(self, init_c=32):
super(STN2d, self).__init__()
self.init_c = init_c
self.conv1 = torch.nn.Conv1d(2, init_c, 1)
self.conv2 = torch.nn.Conv1d(init_c, init_c * 2, 1)
self.conv3 = torch.nn.Conv1d(init_c * 2, init_c * 4, 1)
self.fc1 = nn.Linear(init_c * 4, init_c * 2)
self.fc2 = nn.Linear(init_c * 2, init_c)
self.fc3 = nn.Linear(init_c, 4)
self.relu = nn.ReLU()
self.bn1 = nn.BatchNorm1d(init_c)
self.bn2 = nn.BatchNorm1d(init_c * 2)
self.bn3 = nn.BatchNorm1d(init_c * 4)
self.bn4 = nn.BatchNorm1d(init_c * 2)
self.bn5 = nn.BatchNorm1d(init_c)
def forward(self, x):
batchsize = x.size()[0]
x = F.relu(self.bn1(self.conv1(x)))
x = F.relu(self.bn2(self.conv2(x)))
x = F.relu(self.bn3(self.conv3(x)))
x = torch.max(x, 2, keepdim=True)[0]
x = x.view(batchsize, -1)
x = F.relu(self.bn4(self.fc1(x)))
x = F.relu(self.bn5(self.fc2(x)))
x = self.fc3(x)
iden = torch.eye(2).view(1, -1).repeat(batchsize, 1).to(x)
x = x + iden
x = x.view(-1, 2, 2)
return x
class STNkd(nn.Module):
def __init__(self, k=64):
super(STNkd, self).__init__()
self.conv1 = torch.nn.Conv1d(k, 64, 1)
self.conv2 = torch.nn.Conv1d(64, 128, 1)
self.conv3 = torch.nn.Conv1d(128, 256, 1)
self.fc1 = nn.Linear(256, 128)
self.fc2 = nn.Linear(128, 64)
self.fc3 = nn.Linear(64, k * k)
self.relu = nn.ReLU()
self.bn1 = nn.BatchNorm1d(64)
self.bn2 = nn.BatchNorm1d(128)
self.bn3 = nn.BatchNorm1d(256)
self.bn4 = nn.BatchNorm1d(128)
self.bn5 = nn.BatchNorm1d(64)
self.k = k
def forward(self, x):
batchsize = x.size()[0]
x = F.relu(self.bn1(self.conv1(x)))
x = F.relu(self.bn2(self.conv2(x)))
x = F.relu(self.bn3(self.conv3(x)))
x = torch.max(x, 2, keepdim=True)[0]
x = x.view(-1, 256)
x = F.relu(self.bn4(self.fc1(x)))
x = F.relu(self.bn5(self.fc2(x)))
x = self.fc3(x)
iden = torch.eye(self.k).view(1, -1).repeat(batchsize, 1).to(x)
x = x + iden
x = x.view(-1, self.k, self.k)
return x
class PointNetFeat(nn.Module):
def __init__(self,
init_c=32,
out_c=1024,
global_feat=True,
feature_transform=False):
super(PointNetFeat, self).__init__()
self.stn = STN2d()
self.init_c = init_c
self.out_c = out_c
self.conv1 = torch.nn.Conv1d(2, init_c, 1)
self.conv2 = torch.nn.Conv1d(init_c, init_c * 2, 1)
self.conv3 = torch.nn.Conv1d(init_c * 2, init_c * 4, 1)
self.bn1 = nn.BatchNorm1d(init_c)
self.bn2 = nn.BatchNorm1d(init_c * 2)
self.bn3 = nn.BatchNorm1d(init_c * 4)
self.global_feat = global_feat
if self.global_feat and out_c != init_c * 4:
self.fc = nn.Linear(init_c * 4, out_c)
self.feature_transform = feature_transform
if self.feature_transform:
self.fstn = STNkd(k=64)
def forward(self, x):
batchsize = x.size()[0]
n_pts = x.size()[2]
trans = self.stn(x)
x = x.transpose(2, 1)
x = torch.bmm(x, trans)
x = x.transpose(2, 1)
x = F.relu(self.bn1(self.conv1(x)))
if self.feature_transform:
trans_feat = self.fstn(x)
x = x.transpose(2, 1)
x = torch.bmm(x, trans_feat)
x = x.transpose(2, 1)
else:
trans_feat = None
pointfeat = x
x = F.relu(self.bn2(self.conv2(x)))
x = self.bn3(self.conv3(x))
x = torch.max(x, 2, keepdim=True)[0]
x = x.view(batchsize, -1)
if self.global_feat:
if hasattr(self, 'fc'):
x = self.fc(x)
return x, trans, trans_feat
else:
x = x.view(batchsize, -1, 1).repeat(1, 1, n_pts)
return torch.cat([x, pointfeat], 1), trans, trans_feat
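# An illustrative shape sketch for PointNetFeat; this variant consumes 2-D
# points rather than the usual 3-D point clouds, and the batch / point counts
# below are assumptions:
#   feat = PointNetFeat(init_c=32, out_c=1024, global_feat=True)
#   pts = torch.randn(4, 2, 500)          # [batch, xy, num_points]
#   x, trans, trans_feat = feat(pts)      # x: [4, 1024], trans: [4, 2, 2]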
class PointNetCls(nn.Module):
def __init__(self, k=2, feature_transform=False):
super(PointNetCls, self).__init__()
self.feature_transform = feature_transform
self.feat = PointNetFeat(global_feat=True,
feature_transform=feature_transform)
self.fc1 = nn.Linear(1024, 512)
self.fc2 = nn.Linear(512, 256)
self.fc3 = nn.Linear(256, k)
self.dropout = nn.Dropout(p=0.3)
self.bn1 = nn.BatchNorm1d(512)
self.bn2 = nn.BatchNorm1d(256)
self.relu = nn.ReLU()
def forward(self, x):
x, trans, trans_feat = self.feat(x)
x = F.relu(self.bn1(self.fc1(x)))
x = F.relu(self.bn2(self.dropout(self.fc2(x))))
x = self.fc3(x)
return F.log_softmax(x, dim=1), trans, trans_feat
class PointNetDenseCls(nn.Module):
def __init__(self, k=2, feature_transform=False):
super(PointNetDenseCls, self).__init__()
self.k = k
self.feature_transform = feature_transform
self.feat = PointNetFeat(global_feat=False,
feature_transform=feature_transform)
self.conv1 = torch.nn.Conv1d(1088, 512, 1)
self.conv2 = torch.nn.Conv1d(512, 256, 1)
self.conv3 = torch.nn.Conv1d(256, 128, 1)
self.conv4 = torch.nn.Conv1d(128, self.k, 1)
self.bn1 = nn.BatchNorm1d(512)
self.bn2 = nn.BatchNorm1d(256)
self.bn3 = nn.BatchNorm1d(128)
def forward(self, x):
x = x - torch.mean(x, dim=-1, keepdim=True)
x = x / torch.max(torch.sqrt(torch.sum(x**2, dim=1, keepdim=True)),
dim=-1,
keepdim=True)[0]
batchsize = x.size()[0]
n_pts = x.size()[2]
x, trans, trans_feat = self.feat(x)
x = F.relu(self.bn1(self.conv1(x)))
x = F.relu(self.bn2(self.conv2(x)))
x = F.relu(self.bn3(self.conv3(x)))
x = self.conv4(x)
x = x.transpose(2, 1).contiguous()
x = F.log_softmax(x.view(-1, self.k), dim=-1)
x = x.view(batchsize, n_pts, self.k)
return x, trans, trans_feat
def feature_transform_regularizer(trans):
d = trans.size()[1]
I = torch.eye(d)[None, :, :].to(trans)
    # workaround: PyTorch 1.4 does not support this torch.norm call on GPU directly
diff = torch.bmm(trans, trans.transpose(2, 1)) - I
loss = torch.mean(torch.norm(diff.cpu(), dim=(1, 2)).to(trans))
return loss
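# The regularizer above is the standard PointNet feature-transform penalty,
# i.e. the batch mean of ||A A^T - I||_F for each predicted transform A.
# A hedged usage sketch (the 0.001 weight follows the original PointNet
# recipe and is an assumption here):
#   _, _, trans_feat = PointNetFeat(feature_transform=True)(pts)
#   loss = task_loss + 0.001 * feature_transform_regularizer(trans_feat)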
| 7,176 | 33.671498 | 75 | py |
OpenPSG | OpenPSG-main/openpsg/models/relation_heads/approaches/dmp.py | # ---------------------------------------------------------------
# dmp.py
# Set-up time: 2020/10/7 22:23
# Copyright (c) 2020 ICT
# Licensed under The MIT License [see LICENSE for details]
# Written by Kenneth-Wong (Wenbin-Wang) @ VIPL.ICT
# Contact: wenbin.wang@vipl.ict.ac.cn [OR] nkwangwenbin@gmail.com
# ---------------------------------------------------------------
import torch
from torch import nn
from torch.nn import functional as F
from .motif_util import encode_box_info, obj_edge_vectors, to_onehot
def matmul(tensor3d, mat):
out = []
for i in range(tensor3d.size(-1)):
out.append(torch.mm(tensor3d[:, :, i], mat))
return torch.cat(out, -1)
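# `matmul` above multiplies every channel of a [N, N, C] tensor with the same
# [N, D] matrix and concatenates the results along the last dimension.
# A tiny shape check (values are irrelevant, only shapes matter):
#   t = torch.randn(5, 5, 2); m = torch.randn(5, 8)
#   out = matmul(t, m)   # [5, 16] == cat([t[:, :, 0] @ m, t[:, :, 1] @ m], -1)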
class DirectionAwareMessagePassing(nn.Module):
"""Adapted from the [CVPR 2020] GPS-Net: Graph Property Scensing Network
for Scene Graph Generation]"""
def __init__(self, config, obj_classes):
super(DirectionAwareMessagePassing, self).__init__()
self.cfg = config
self.obj_classes = obj_classes
self.num_obj_classes = len(obj_classes)
in_channels = self.cfg.roi_dim
self.use_gt_box = self.cfg.use_gt_box
self.use_gt_label = self.cfg.use_gt_label
# mode
if self.cfg.use_gt_box:
if self.cfg.use_gt_label:
self.mode = 'predcls'
else:
self.mode = 'sgcls'
else:
self.mode = 'sgdet'
# word embedding
self.embed_dim = self.cfg.embed_dim
self.obj_embed = nn.Embedding(self.num_obj_classes, self.embed_dim)
obj_embed_vecs = obj_edge_vectors(self.obj_classes,
wv_dir=self.cfg.glove_dir,
wv_dim=self.embed_dim)
with torch.no_grad():
self.obj_embed.weight.copy_(obj_embed_vecs, non_blocking=True)
# position embedding
self.pos_embed = nn.Sequential(*[
nn.Linear(9, 32),
nn.BatchNorm1d(32, momentum=0.001),
nn.Linear(32, 128),
nn.ReLU(inplace=True),
])
self.obj_dim = in_channels
self.obj_input_dim = self.obj_dim + self.embed_dim + 128
# 1024 + 200 + 128
# set the direction-aware attention mapping
self.ws = nn.Linear(self.obj_dim, self.obj_dim)
self.wo = nn.Linear(self.obj_dim, self.obj_dim)
self.wu = nn.Linear(self.obj_dim, self.obj_dim)
self.w = nn.Linear(self.obj_dim, 1)
# now begin to set the DMP
self.project_input = nn.Sequential(*[
nn.Linear(self.obj_input_dim, self.obj_dim),
nn.ReLU(inplace=True)
])
self.trans = nn.Sequential(*[
nn.Linear(self.obj_dim, self.obj_dim // 4),
nn.LayerNorm(self.obj_dim // 4),
nn.ReLU(inplace=True),
nn.Linear(self.obj_dim // 4, self.obj_dim)
])
self.W_t3 = nn.Sequential(*[
nn.Linear(self.obj_dim, self.obj_dim // 2),
nn.ReLU(inplace=True)
])
# object classifier
self.out_obj = nn.Linear(self.obj_dim, self.num_obj_classes)
def get_attention(self, obj_feat, union_feat, rel_pair_idx):
num_obj = obj_feat.shape[0]
atten_coeff = self.w(
self.ws(obj_feat[rel_pair_idx[:, 0]]) *
self.wo(obj_feat[rel_pair_idx[:, 1]]) * self.wu(union_feat))
atten_tensor = torch.zeros(num_obj, num_obj, 1).to(atten_coeff)
atten_tensor[rel_pair_idx[:, 0], rel_pair_idx[:, 1]] += atten_coeff
atten_tensor = F.sigmoid(atten_tensor)
atten_tensor = atten_tensor * (
1 - torch.eye(num_obj).unsqueeze(-1).to(atten_tensor))
atten_tensor_sum = torch.sum(atten_tensor, dim=1, keepdim=True)
# handle 1 object case, avoid divideByZero
if atten_tensor.shape[0] == 1:
atten_tensor_sum = torch.ones(
atten_tensor_sum.size()).to(atten_tensor_sum)
# handle 1 object case done
return atten_tensor / atten_tensor_sum
def forward(self, obj_feats, union_feats, det_result):
if self.training or self.use_gt_box:
# predcls or sgcls or training, just put obj_labels here
obj_labels = torch.cat(det_result.labels)
else:
obj_labels = None
if self.use_gt_label: # predcls
obj_embed = self.obj_embed(obj_labels.long())
else:
obj_dists = torch.cat(det_result.dists, dim=0).detach()
obj_embed = obj_dists @ self.obj_embed.weight
pos_embed = self.pos_embed(encode_box_info(det_result)) # N x 128
obj_rep = torch.cat((obj_feats, obj_embed, pos_embed),
-1) # N x (1024 + 200 + 128)
obj_rep = self.project_input(obj_rep) # N x 1024
rel_pair_idxes = det_result.rel_pair_idxes
num_rels = [r.shape[0] for r in det_result.rel_pair_idxes]
num_objs = [len(b) for b in det_result.bboxes]
neighbour_feats = []
split_obj_rep = obj_rep.split(num_objs)
split_union_rep = union_feats.split(num_rels)
for obj_feat, union_feat, rel_pair_idx in zip(split_obj_rep,
split_union_rep,
rel_pair_idxes):
atten_tensor = self.get_attention(obj_feat, union_feat,
rel_pair_idx) # N x N x 1
atten_tensor_t = torch.transpose(atten_tensor, 1, 0)
atten_tensor = torch.cat((atten_tensor, atten_tensor_t),
dim=-1) # N x N x 2
context_feats = matmul(atten_tensor, self.W_t3(obj_feat))
neighbour_feats.append(self.trans(context_feats))
obj_context_rep = F.relu(obj_rep + torch.cat(neighbour_feats, 0),
inplace=True)
if self.mode != 'predcls':
obj_scores = self.out_obj(obj_context_rep)
obj_dists = F.softmax(obj_scores, dim=1)
obj_preds = obj_dists[:, 1:].max(1)[1] + 1
else:
assert obj_labels is not None
obj_preds = obj_labels
obj_scores = to_onehot(obj_preds, self.num_obj_classes)
return obj_scores, obj_preds, obj_context_rep
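# A hedged usage sketch of DirectionAwareMessagePassing; shapes and config
# fields are assumptions for illustration:
#   dmp = DirectionAwareMessagePassing(config, obj_classes)
#   obj_feats   : [num_objs_total, roi_dim]   pooled object features
#   union_feats : [num_rels_total, roi_dim]   pooled union-box features
#   obj_scores, obj_preds, obj_ctx = dmp(obj_feats, union_feats, det_result)
#   # obj_scores: [num_objs_total, num_obj_classes]; obj_ctx is the refined
#   # object representation that a GPS-Net style relation classifier consumes.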
| 6,337 | 39.113924 | 76 | py |
OpenPSG | OpenPSG-main/openpsg/models/relation_heads/approaches/motif_util.py | # ---------------------------------------------------------------
# motif_util.py
# Set-up time: 2020/5/4 4:36 PM
# Copyright (c) 2020 ICT
# Licensed under The MIT License [see LICENSE for details]
# Written by Kenneth-Wong (Wenbin-Wang) @ VIPL.ICT
# Contact: wenbin.wang@vipl.ict.ac.cn [OR] nkwangwenbin@gmail.com
# ---------------------------------------------------------------
import array
import itertools
import os
import sys
import zipfile
import numpy as np
import six
import torch
from six.moves.urllib.request import urlretrieve
from tqdm import tqdm
def normalize_sigmoid_logits(orig_logits):
orig_logits = torch.sigmoid(orig_logits)
orig_logits = orig_logits / (orig_logits.sum(1).unsqueeze(-1) + 1e-12)
return orig_logits
def generate_attributes_target(attributes, device, max_num_attri,
num_attri_cat):
"""from list of attribute indexes to [1,0,1,0,0,1] form."""
assert max_num_attri == attributes.shape[1]
num_obj = attributes.shape[0]
with_attri_idx = (attributes.sum(-1) > 0).long()
attribute_targets = torch.zeros((num_obj, num_attri_cat),
device=device).float()
for idx in torch.nonzero(with_attri_idx).squeeze(1).tolist():
for k in range(max_num_attri):
att_id = int(attributes[idx, k])
if att_id == 0:
break
else:
attribute_targets[idx, att_id] = 1
return attribute_targets, with_attri_idx
def transpose_packed_sequence_inds(lengths):
"""Get a TxB indices from sorted lengths.
Fetch new_inds, split by new_lens, padding to max(new_lens), and stack.
Returns:
new_inds (np.array) [sum(lengths), ]
new_lens (list(np.array)): number of elements of each time step,
descending
"""
new_inds = []
new_lens = []
cum_add = np.cumsum([0] + lengths)
max_len = lengths[0]
length_pointer = len(lengths) - 1
for i in range(max_len):
while length_pointer > 0 and lengths[length_pointer] <= i:
length_pointer -= 1
new_inds.append(cum_add[:(length_pointer + 1)].copy())
cum_add[:(length_pointer + 1)] += 1
new_lens.append(length_pointer + 1)
new_inds = np.concatenate(new_inds, 0)
return new_inds, new_lens
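# A small worked example of transpose_packed_sequence_inds, computed by hand
# from the code above (lengths must already be sorted in descending order):
#   lengths  = [3, 2, 1]
#   new_inds = [0, 3, 5, 1, 4, 2]   # batch-major positions read time-major
#   new_lens = [3, 2, 1]            # number of active sequences at t = 0, 1, 2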
def sort_by_score(infostruct, scores):
"""We'll sort everything scorewise from Hi->low, BUT we need to keep images
together and sort LSTM from l.
:param im_inds: Which im we're on
:param scores: Goodness ranging between [0, 1]. Higher numbers come FIRST
:return: Permutation to put everything in the right order for the LSTM
Inverse permutation
Lengths for the TxB packed sequence.
"""
num_rois = [len(b) for b in infostruct.bboxes]
num_im = len(num_rois)
scores = scores.split(num_rois, dim=0)
ordered_scores = []
for i, (score, num_roi) in enumerate(zip(scores, num_rois)):
ordered_scores.append(score + 2.0 * float(num_roi * 2 * num_im - i))
ordered_scores = torch.cat(ordered_scores, dim=0)
_, perm = torch.sort(ordered_scores, 0, descending=True)
num_rois = sorted(num_rois, reverse=True)
inds, ls_transposed = transpose_packed_sequence_inds(
num_rois) # move it to TxB form
inds = torch.LongTensor(inds).to(scores[0].device)
ls_transposed = torch.LongTensor(ls_transposed)
perm = perm[inds] # (batch_num_box, )
_, inv_perm = torch.sort(perm)
return perm, inv_perm, ls_transposed
def to_onehot(vec, num_classes, fill=1000):
"""
Creates a [size, num_classes] torch FloatTensor where
one_hot[i, vec[i]] = fill
:param vec: 1d torch tensor
:param num_classes: int
:param fill: value that we want + and - things to be.
:return:
"""
onehot_result = vec.new(vec.size(0), num_classes).float().fill_(-fill)
arange_inds = vec.new(vec.size(0)).long()
torch.arange(0, vec.size(0), out=arange_inds)
onehot_result.view(-1)[vec.long() + num_classes * arange_inds] = fill
return onehot_result
def get_dropout_mask(dropout_probability, tensor_shape, device):
"""once get, it is fixed all the time."""
binary_mask = (torch.rand(tensor_shape) > dropout_probability)
# Scale mask by 1/keep_prob to preserve output statistics.
dropout_mask = binary_mask.float().to(device).div(1.0 -
dropout_probability)
return dropout_mask
def center_x(infostruct):
boxes = torch.cat(infostruct.bboxes, dim=0)
c_x = 0.5 * (boxes[:, 0] + boxes[:, 2])
return c_x.view(-1)
def encode_box_info(infostruct):
"""encode proposed box information (x1, y1, x2, y2) to (cx/wid, cy/hei,
w/wid, h/hei, x1/wid, y1/hei, x2/wid, y2/hei, wh/wid*hei)"""
bboxes, img_shapes = infostruct.bboxes, infostruct.img_shape
boxes_info = []
for bbox, img_shape in zip(bboxes, img_shapes):
wid = img_shape[1]
hei = img_shape[0]
wh = bbox[:, 2:4] - bbox[:, 0:2] + 1.0
xy = bbox[:, 0:2] + 0.5 * wh
w, h = wh[:, 0], wh[:, 1]
x, y = xy[:, 0], xy[:, 1]
x1, y1, x2, y2 = bbox[:, 0], bbox[:, 1], bbox[:, 2], bbox[:, 3]
assert wid * hei != 0
info = torch.stack([
w / wid, h / hei, x / wid, y / hei, x1 / wid, y1 / hei, x2 / wid,
y2 / hei, w * h / (wid * hei)
],
dim=-1).view(-1, 9)
boxes_info.append(info)
return torch.cat(boxes_info, dim=0)
def obj_edge_vectors(names, wv_dir, wv_type='glove.6B', wv_dim=300):
wv_dict, wv_arr, wv_size = load_word_vectors(wv_dir, wv_type, wv_dim)
vectors = torch.Tensor(len(names), wv_dim)
vectors.normal_(0, 1)
for i, token in enumerate(names):
wv_index = wv_dict.get(token, None)
if wv_index is not None:
vectors[i] = wv_arr[wv_index]
else:
# Try the longest word
lw_token = sorted(token.split(' '),
key=lambda x: len(x),
reverse=True)[0]
print('{} -> {} '.format(token, lw_token))
wv_index = wv_dict.get(lw_token, None)
if wv_index is not None:
vectors[i] = wv_arr[wv_index]
else:
print('fail on {}'.format(token))
return vectors
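# Usage sketch (added comment; the directory path is an assumption):
#   obj_classes = ['__background__', 'person', 'traffic light']
#   embed_vecs = obj_edge_vectors(obj_classes, wv_dir='data/glove',
#                                 wv_type='glove.6B', wv_dim=300)
# Multi-word names fall back to their longest word ('traffic light' ->
# 'traffic'); names still missing keep their random N(0, 1) initialization.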
def load_word_vectors(root, wv_type, dim):
"""Load word vectors from a path, trying .pt, .txt, and .zip extensions."""
URL = {
'glove.42B': 'http://nlp.stanford.edu/data/glove.42B.300d.zip',
'glove.840B': 'http://nlp.stanford.edu/data/glove.840B.300d.zip',
'glove.twitter.27B':
'http://nlp.stanford.edu/data/glove.twitter.27B.zip',
'glove.6B': 'http://nlp.stanford.edu/data/glove.6B.zip',
}
if isinstance(dim, int):
dim = str(dim) + 'd'
fname = os.path.join(root, wv_type + '.' + dim)
if os.path.isfile(fname + '.pt'):
fname_pt = fname + '.pt'
print('loading word vectors from', fname_pt)
try:
return torch.load(fname_pt, map_location=torch.device('cpu'))
except Exception as e:
print('Error loading the model from {}{}'.format(fname_pt, str(e)))
sys.exit(-1)
if os.path.isfile(fname + '.txt'):
fname_txt = fname + '.txt'
cm = open(fname_txt, 'rb')
cm = [line for line in cm]
elif os.path.basename(wv_type) in URL:
url = URL[wv_type]
print('downloading word vectors from {}'.format(url))
filename = os.path.basename(fname)
if not os.path.exists(root):
os.makedirs(root)
with tqdm(unit='B', unit_scale=True, miniters=1, desc=filename) as t:
fname, _ = urlretrieve(url, fname, reporthook=reporthook(t))
with zipfile.ZipFile(fname, 'r') as zf:
print('extracting word vectors into {}'.format(root))
zf.extractall(root)
if not os.path.isfile(fname + '.txt'):
raise RuntimeError('no word vectors of requested dimension found')
return load_word_vectors(root, wv_type, dim)
else:
raise RuntimeError('unable to load word vectors')
wv_tokens, wv_arr, wv_size = [], array.array('d'), None
if cm is not None:
for line in tqdm(
range(len(cm)),
desc='loading word vectors from {}'.format(fname_txt)):
entries = cm[line].strip().split(b' ')
word, entries = entries[0], entries[1:]
if wv_size is None:
wv_size = len(entries)
try:
if isinstance(word, six.binary_type):
word = word.decode('utf-8')
            except UnicodeDecodeError:
print('non-UTF8 token', repr(word), 'ignored')
continue
wv_arr.extend(float(x) for x in entries)
wv_tokens.append(word)
wv_dict = {word: i for i, word in enumerate(wv_tokens)}
wv_arr = torch.Tensor(wv_arr).view(-1, wv_size)
ret = (wv_dict, wv_arr, wv_size)
torch.save(ret, fname + '.pt')
return ret
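# Caching note (added comment): after a successful parse of the .txt file (or
# of the downloaded zip), the result is saved as '<wv_type>.<dim>.pt' (e.g.
# glove.6B.300d.pt), so later calls take the fast torch.load branch above.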
def reporthook(t):
"""https://github.com/tqdm/tqdm."""
last_b = [0]
def inner(b=1, bsize=1, tsize=None):
if tsize is not None:
t.total = tsize
t.update((b - last_b[0]) * bsize)
last_b[0] = b
return inner
def block_orthogonal(tensor, split_sizes, gain=1.0):
"""
An initializer which allows initializing model parameters in "blocks".
This is helpful in the case of recurrent models which use multiple
gates applied to linear projections, which can be computed efficiently
if they are concatenated together.
However, they are separate parameters which should be initialized
independently.
Parameters
----------
tensor : ``torch.Tensor``, required.
A tensor to initialize.
split_sizes : List[int], required.
A list of length ``tensor.ndim()`` specifying the size of the
blocks along that particular dimension. E.g. ``[10, 20]`` would
result in the tensor being split into chunks of size 10 along the
first dimension and 20 along the second.
gain : float, optional (default = 1.0)
The gain (scaling) applied to the orthogonal initialization.
"""
sizes = list(tensor.size())
if any([a % b != 0 for a, b in zip(sizes, split_sizes)]):
raise ValueError(
'tensor dimensions must be divisible by their respective '
'split_sizes. Found size: {} and split_sizes: {}'.format(
sizes, split_sizes))
indexes = [
list(range(0, max_size, split))
for max_size, split in zip(sizes, split_sizes)
]
# Iterate over all possible blocks within the tensor.
for block_start_indices in itertools.product(*indexes):
# A list of tuples containing the index to start at for this block
# and the appropriate step size (i.e split_size[i] for dimension i).
index_and_step_tuples = zip(block_start_indices, split_sizes)
# This is a tuple of slices corresponding to:
# tensor[index: index + step_size, ...]. This is
# required because we could have an arbitrary number
# of dimensions. The actual slices we need are the
# start_index: start_index + step for each dimension in the tensor.
block_slice = tuple([
slice(start_index, start_index + step)
for start_index, step in index_and_step_tuples
])
        # Only 2-D tensors are supported: build a square orthogonal matrix of
        # the larger block dimension and crop it to the block shape below.
assert len(block_slice) == 2
sizes = [x.stop - x.start for x in block_slice]
tensor_copy = tensor.new(max(sizes), max(sizes))
        torch.nn.init.orthogonal_(tensor_copy, gain=gain)
tensor[block_slice] = tensor_copy[0:sizes[0], 0:sizes[1]]
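# Typical use (added comment), mirroring DecoderRNN.init_weights in the motif
# head of this repo: a linear layer whose output stacks several LSTM gates,
# e.g. weight shape (6 * hidden_size, input_size), is initialized with
#   block_orthogonal(linear.weight.data, [hidden_size, input_size])
# so that every (hidden_size x input_size) gate block gets its own
# orthogonal initialization.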
| 11,894 | 36.40566 | 79 | py |
OpenPSG | OpenPSG-main/openpsg/models/relation_heads/approaches/matcher.py | # Copyright (c) OpenMMLab. All rights reserved.
import torch
from mmdet.core import AssignResult, BaseAssigner, bbox_cxcywh_to_xyxy
from mmdet.core.bbox.builder import BBOX_ASSIGNERS
from mmdet.core.bbox.match_costs import build_match_cost
try:
from scipy.optimize import linear_sum_assignment
except ImportError:
linear_sum_assignment = None
@BBOX_ASSIGNERS.register_module()
class HTriMatcher(BaseAssigner):
def __init__(self,
s_cls_cost=dict(type='ClassificationCost', weight=1.),
s_reg_cost=dict(type='BBoxL1Cost', weight=1.0),
s_iou_cost=dict(type='IoUCost', iou_mode='giou', weight=1.0),
o_cls_cost=dict(type='ClassificationCost', weight=1.),
o_reg_cost=dict(type='BBoxL1Cost', weight=1.0),
o_iou_cost=dict(type='IoUCost', iou_mode='giou', weight=1.0),
r_cls_cost=dict(type='ClassificationCost', weight=1.)):
self.s_cls_cost = build_match_cost(s_cls_cost)
self.s_reg_cost = build_match_cost(s_reg_cost)
self.s_iou_cost = build_match_cost(s_iou_cost)
self.o_cls_cost = build_match_cost(o_cls_cost)
self.o_reg_cost = build_match_cost(o_reg_cost)
self.o_iou_cost = build_match_cost(o_iou_cost)
self.r_cls_cost = build_match_cost(r_cls_cost)
def assign(self,
sub_bbox_pred,
obj_bbox_pred,
sub_cls_score,
obj_cls_score,
rel_cls_score,
gt_sub_bboxes,
gt_obj_bboxes,
gt_sub_labels,
gt_obj_labels,
gt_rel_labels,
img_meta,
gt_bboxes_ignore=None,
eps=1e-7):
assert gt_bboxes_ignore is None, \
'Only case when gt_bboxes_ignore is None is supported.'
num_gts, num_bboxes = gt_sub_bboxes.size(0), sub_bbox_pred.size(0)
# 1. assign -1 by default
assigned_gt_inds = sub_bbox_pred.new_full((num_bboxes, ),
-1,
dtype=torch.long)
assigned_s_labels = sub_bbox_pred.new_full((num_bboxes, ),
-1,
dtype=torch.long)
assigned_o_labels = sub_bbox_pred.new_full((num_bboxes, ),
-1,
dtype=torch.long)
if num_gts == 0 or num_bboxes == 0:
# No ground truth or boxes, return empty assignment
if num_gts == 0:
# No ground truth, assign all to background
assigned_gt_inds[:] = 0
return AssignResult(num_gts,
assigned_gt_inds,
None,
labels=assigned_s_labels), AssignResult(
num_gts,
assigned_gt_inds,
None,
labels=assigned_o_labels)
img_h, img_w, _ = img_meta['img_shape']
factor = gt_sub_bboxes.new_tensor([img_w, img_h, img_w,
img_h]).unsqueeze(0)
# 2. compute the weighted costs
# classification and bboxcost.
s_cls_cost = self.s_cls_cost(sub_cls_score, gt_sub_labels)
o_cls_cost = self.o_cls_cost(obj_cls_score, gt_obj_labels)
r_cls_cost = self.r_cls_cost(rel_cls_score, gt_rel_labels)
# regression L1 cost
normalize_gt_sub_bboxes = gt_sub_bboxes / factor
normalize_gt_obj_bboxes = gt_obj_bboxes / factor
s_reg_cost = self.s_reg_cost(sub_bbox_pred, normalize_gt_sub_bboxes)
o_reg_cost = self.o_reg_cost(obj_bbox_pred, normalize_gt_obj_bboxes)
        # regression IoU cost; GIoU is used by default, as in the official DETR.
sub_bboxes = bbox_cxcywh_to_xyxy(sub_bbox_pred) * factor
obj_bboxes = bbox_cxcywh_to_xyxy(obj_bbox_pred) * factor
s_iou_cost = self.s_iou_cost(sub_bboxes, gt_sub_bboxes)
o_iou_cost = self.o_iou_cost(obj_bboxes, gt_obj_bboxes)
# weighted sum of above three costs
beta_1, beta_2 = 1.2, 1
alpha_s, alpha_o, alpha_r = 1, 1, 1
cls_cost = (alpha_s * s_cls_cost + alpha_o * o_cls_cost +
alpha_r * r_cls_cost) / (alpha_s + alpha_o + alpha_r)
bbox_cost = (s_reg_cost + o_reg_cost + s_iou_cost + o_iou_cost) / 2
cost = beta_1 * cls_cost + beta_2 * bbox_cost
# 3. do Hungarian matching on CPU using linear_sum_assignment
cost = cost.detach().cpu()
if linear_sum_assignment is None:
raise ImportError('Please run "pip install scipy" '
'to install scipy first.')
matched_row_inds, matched_col_inds = linear_sum_assignment(cost)
matched_row_inds = torch.from_numpy(matched_row_inds).to(
sub_bbox_pred.device)
matched_col_inds = torch.from_numpy(matched_col_inds).to(
sub_bbox_pred.device)
# 4. assign backgrounds and foregrounds
# assign all indices to backgrounds first
assigned_gt_inds[:] = 0
# assign foregrounds based on matching results
assigned_gt_inds[matched_row_inds] = matched_col_inds + 1
assigned_s_labels[matched_row_inds] = gt_sub_labels[matched_col_inds]
assigned_o_labels[matched_row_inds] = gt_obj_labels[matched_col_inds]
return AssignResult(num_gts,
assigned_gt_inds,
None,
labels=assigned_s_labels), AssignResult(
num_gts,
assigned_gt_inds,
None,
labels=assigned_o_labels)
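# Illustrative note (added comment): the Hungarian step above solves a toy
# problem such as
#   cost = [[0.2, 0.9],
#           [0.8, 0.1],
#           [0.5, 0.6]]   # 3 predictions x 2 ground truths
#   scipy.optimize.linear_sum_assignment(cost) -> rows [0, 1], cols [0, 1]
# i.e. prediction 0 <-> gt 0 and prediction 1 <-> gt 1, while the unmatched
# prediction keeps its background assignment (assigned_gt_inds == 0).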
@BBOX_ASSIGNERS.register_module()
class IdMatcher(BaseAssigner):
def __init__(self,
sub_id_cost=dict(type='ClassificationCost', weight=1.),
obj_id_cost=dict(type='ClassificationCost', weight=1.),
r_cls_cost=dict(type='ClassificationCost', weight=1.)):
self.sub_id_cost = build_match_cost(sub_id_cost)
self.obj_id_cost = build_match_cost(obj_id_cost)
self.r_cls_cost = build_match_cost(r_cls_cost)
def assign(self,
sub_match_score,
obj_match_score,
rel_cls_score,
gt_sub_ids,
gt_obj_ids,
gt_rel_labels,
img_meta,
gt_bboxes_ignore=None,
eps=1e-7):
"""gt_ids are mapped from previous Hungarian matchinmg results.
~[0,99]
"""
assert gt_bboxes_ignore is None, \
'Only case when gt_bboxes_ignore is None is supported.'
num_gts, num_bboxes = gt_rel_labels.size(0), rel_cls_score.size(0)
# 1. assign -1 by default
assigned_gt_inds = rel_cls_score.new_full((num_bboxes, ),
-1,
dtype=torch.long)
assigned_s_labels = rel_cls_score.new_full((num_bboxes, ),
-1,
dtype=torch.long)
assigned_o_labels = rel_cls_score.new_full((num_bboxes, ),
-1,
dtype=torch.long)
if num_gts == 0 or num_bboxes == 0:
# No ground truth or boxes, return empty assignment
if num_gts == 0:
# No ground truth, assign all to background
assigned_gt_inds[:] = 0
return AssignResult(num_gts,
assigned_gt_inds,
None,
labels=assigned_s_labels), AssignResult(
num_gts,
assigned_gt_inds,
None,
labels=assigned_o_labels)
# 2. compute the weighted costs
# classification and bboxcost.
sub_id_cost = self.sub_id_cost(sub_match_score, gt_sub_ids)
obj_id_cost = self.obj_id_cost(obj_match_score, gt_obj_ids)
r_cls_cost = self.r_cls_cost(rel_cls_score, gt_rel_labels)
# weighted sum of above three costs
cost = sub_id_cost + obj_id_cost + r_cls_cost
# 3. do Hungarian matching on CPU using linear_sum_assignment
cost = cost.detach().cpu()
if linear_sum_assignment is None:
raise ImportError('Please run "pip install scipy" '
'to install scipy first.')
matched_row_inds, matched_col_inds = linear_sum_assignment(cost)
matched_row_inds = torch.from_numpy(matched_row_inds).to(
rel_cls_score.device)
matched_col_inds = torch.from_numpy(matched_col_inds).to(
rel_cls_score.device)
# 4. assign backgrounds and foregrounds
# assign all indices to backgrounds first
assigned_gt_inds[:] = 0
# assign foregrounds based on matching results
assigned_gt_inds[matched_row_inds] = matched_col_inds + 1
assigned_s_labels[matched_row_inds] = gt_sub_ids[matched_col_inds]
assigned_o_labels[matched_row_inds] = gt_obj_ids[matched_col_inds]
return AssignResult(num_gts,
assigned_gt_inds,
None,
labels=assigned_s_labels), AssignResult(
num_gts,
assigned_gt_inds,
None,
labels=assigned_o_labels)
| 9,976 | 45.404651 | 78 | py |
OpenPSG | OpenPSG-main/openpsg/models/relation_heads/approaches/relation_ranker.py | # ---------------------------------------------------------------
# relation_ranker.py
# Set-up time: 2021/5/11 16:21
# Copyright (c) 2020 ICT
# Licensed under The MIT License [see LICENSE for details]
# Written by Kenneth-Wong (Wenbin-Wang) @ VIPL.ICT
# Contact: wenbin.wang@vipl.ict.ac.cn [OR] nkwangwenbin@gmail.com
# ---------------------------------------------------------------
# from .transformer import Encoder, EncoderLayer, MultiHeadedAttention, PositionwiseFeedForward
import copy
import numpy as np
import torch
from mmcv.cnn import xavier_init
from torch import nn
from torch.nn import functional as F
from torch.nn.utils.rnn import PackedSequence
from .motif_util import center_x, sort_by_score
from .relation_util import Result
# class TransformerRanker(nn.Module):
# def __init__(self, num_head=8, input_dim=1024, hidden_dim=512, inner_dim=1024, dropout_rate=0.1, nl_layer=6, num_out=1):
# super(TransformerRanker, self).__init__()
# self.num_head = num_head
# self.input_dim = input_dim
# self.hidden_dim = hidden_dim
# self.inner_dim = inner_dim
# self.dropout_rate = dropout_rate
# self.nl_layer = nl_layer
# self.num_out = num_out
# c = copy.deepcopy
# attn = MultiHeadedAttention(self.num_head, self.hidden_dim, self.dropout_rate)
# ff = PositionwiseFeedForward(self.hidden_dim, self.inner_dim, self.dropout_rate)
# self.ranking_context = Encoder(EncoderLayer(self.hidden_dim, c(attn), c(ff), self.dropout_rate), self.nl_layer)
# self.proj = nn.Linear(self.input_dim, self.hidden_dim)
# self.rank_proj = nn.Linear(self.hidden_dim, self.num_out)
# def forward(self, union_feats, det_result=None, union_rois=None):
# rel_pair_idxes = det_result.rel_pair_idxes
# num_rels = [len(r) for r in rel_pair_idxes]
# return self.rank_proj(self.ranking_context(self.proj(union_feats), num_rels))
class LSTMRanker(nn.Module):
def __init__(self,
input_dim=1024,
hidden_dim=512,
dropout_rate=0.2,
nl_layer=1,
bidirectional=True,
num_out=1):
super(LSTMRanker, self).__init__()
self.input_dim = input_dim
self.hidden_dim = hidden_dim
self.dropout_rate = dropout_rate
self.nl_layer = nl_layer
self.bidirectional = bidirectional
self.num_out = num_out
self.ranking_ctx_rnn = torch.nn.LSTM(input_size=self.input_dim,
hidden_size=self.hidden_dim,
num_layers=self.nl_layer,
dropout=self.dropout_rate,
bidirectional=self.bidirectional)
self.rank_proj = nn.Linear(self.hidden_dim, self.num_out)
def sort_rois(self, result):
"""
:param batch_idx: tensor with what index we're on
:param confidence: tensor with confidences between [0,1)
:param boxes: tensor with (x1, y1, x2, y2)
:return: Permutation, inverse permutation, and the lengths transposed (same as _sort_by_score)
"""
c_x = center_x(result)
scores = c_x / (c_x.max() + 1)
return sort_by_score(result, scores)
def forward(self, union_feats, det_result, union_rois):
"""Forward pass through the object and edge context.
:param obj_priors:
:param obj_fmaps:
:param im_inds:
:param obj_labels:
:param boxes:
:return:
"""
rel_pair_idxes = det_result.rel_pair_idxes
num_rels = [len(r) for r in rel_pair_idxes]
result = Result(bboxes=union_rois.split(num_rels, 0))
perm, inv_perm, ls_transposed = self.sort_rois(result)
        rel_input_rep = union_feats[perm].contiguous()
        rel_input_packed = PackedSequence(rel_input_rep, ls_transposed)
rel_rank_rep = self.ranking_ctx_rnn(rel_input_packed)[0][0]
if self.bidirectional:
rel_rank_rep = torch.mean(
torch.stack((rel_rank_rep[:, :self.hidden_dim],
rel_rank_rep[:, self.hidden_dim:])), 0)
rel_rank_rep = rel_rank_rep[inv_perm].contiguous()
ranking_scores = self.rank_proj(rel_rank_rep)
return ranking_scores
class LinearRanker(nn.Module):
def __init__(self, input_dim=1024, hidden_dim=512, nl_layer=1, num_out=1):
super(LinearRanker, self).__init__()
self.input_dim = input_dim
self.hidden_dim = hidden_dim
self.nl_layer = nl_layer
self.num_out = num_out
ranking_net = []
for i in range(self.nl_layer):
dim = self.input_dim if i == 0 else self.hidden_dim
ranking_net += [
nn.Linear(dim, self.hidden_dim),
nn.ReLU(inplace=True)
]
ranking_net.append(nn.Linear(self.hidden_dim, self.num_out))
self.ranking_net = nn.Sequential(*ranking_net)
def forward(self, union_feats, det_result=None, union_rois=None):
"""Forward pass through the object and edge context.
:param obj_priors:
:param obj_fmaps:
:param im_inds:
:param obj_labels:
:param boxes:
:return:
"""
ranking_scores = self.ranking_net(union_feats)
return ranking_scores
def get_size_maps(size, boxes_int, form='rect'):
h, w = size
#boxes_int = bbox.long()
    boxes_w = boxes_int[:, 2] - boxes_int[:, 0] + 1
    boxes_h = boxes_int[:, 3] - boxes_int[:, 1] + 1
#boxes_w, boxes_h = bbox[:, 2] - bbox[:, 0] + 1, bbox[:, 3] - boxes_int[:, 1] + 1
areas = boxes_w * boxes_h
areas_ratios = areas.float() / (h * w)
##TODO: maybe there exists better area maps
# sigma1 = boxes_w / 6
# sigma2 = boxes_h / 6
# mus = torch.cat(((boxes_int[:, 0] + boxes_w // 2)[:, None], (boxes_int[:, 1] + boxes_h // 2)[:, None]), dim=-1)
# x = torch.arange(0, w).long().to(bbox.device)
# y = torch.arange(0, h).long().to(bbox.device)
# xx, yy = torch.meshgrid(x, y)
# # evaluate kernels at grid points
# xys = torch.cat((xx.view(-1, 1), yy.view(-1, 1)), dim=-1)
# for sid, (box_int, areas_ratio, mu, sig1, sig2) in enumerate(zip(boxes_int, areas_ratios, mus, sigma1, sigma2)):
# xxyy = xys.clone()
# xxyy -= mu
# x_term = xxyy[:, 0] ** 2 / sig1 ** 2
# y_term = xxyy[:, 1] ** 2 / sig2 ** 2
# exp_value = - (x_term + y_term) / 2
# area_map = torch.exp(exp_value)
# area_map = area_map.view((h, w))
# area_map = area_map / area_map.max() * areas_ratio
return areas_ratios
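# Note (added comment): despite its name, get_size_maps currently returns only
# the per-box area ratio w * h / (H * W); the Gaussian "area map" variant is
# kept above as commented-out code and the `form` argument is unused.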
def get_weak_key_rel_labels(det_result,
gt_result,
comb_factor=0.5,
area_form='rect'):
gt_bboxes = gt_result.bboxes
det_bboxes = det_result.bboxes
saliency_maps = det_result.saliency_maps
key_rel_labels = []
rel_pair_idxes = det_result.rel_pair_idxes
for rel_pair_idx, gt_bbox, det_bbox, saliency_map in zip(
rel_pair_idxes, gt_bboxes, det_bboxes, saliency_maps):
assert det_bbox.shape[0] == gt_bbox.shape[0]
det_bbox_int = det_bbox.clone()
det_bbox_int = det_bbox_int.long()
h, w = saliency_map.shape[1:]
det_bbox_int[:, 0::2] = torch.clamp(det_bbox_int[:, 0::2],
min=0,
max=w - 1)
det_bbox_int[:, 1::2] = torch.clamp(det_bbox_int[:, 1::2],
min=0,
max=h - 1)
object_saliency = torch.cat([
torch.mean(saliency_map[0, box[1]:box[3] + 1,
box[0]:box[2] + 1])[None]
for box in det_bbox_int
], 0).float().to(det_bbox_int.device)
object_area = get_size_maps(saliency_map.shape[1:], det_bbox_int,
area_form)
object_importance = object_saliency * comb_factor + (
1.0 - comb_factor) * object_area
pair_importance = object_importance[
rel_pair_idx[:, 0]] + object_importance[rel_pair_idx[:, 1]]
        pair_importance = F.softmax(pair_importance, dim=0)
key_rel_labels.append(pair_importance)
return key_rel_labels
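# Note (added comment): each object's importance is the convex blend
#   comb_factor * mean_saliency_inside_box + (1 - comb_factor) * area_ratio,
# and a relation's weak key-label is the softmax over the summed importance
# of its subject and object.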
| 8,696 | 41.014493 | 126 | py |
OpenPSG | OpenPSG-main/openpsg/models/relation_heads/approaches/relation_util.py | # ---------------------------------------------------------------
# relation_util.py
# Set-up time: 2020/5/7 11:13 PM
# Copyright (c) 2020 ICT
# Licensed under The MIT License [see LICENSE for details]
# Written by Kenneth-Wong (Wenbin-Wang) @ VIPL.ICT
# Contact: wenbin.wang@vipl.ict.ac.cn [OR] nkwangwenbin@gmail.com
# ---------------------------------------------------------------
import copy
from collections import defaultdict
# import anytree
import numpy as np
import torch
import torch.nn as nn
from mmdet.core.evaluation.bbox_overlaps import bbox_overlaps
from torch.nn import functional as F
class Result(object):
""" little container class for holding the detection result
od: object detector, rm: rel model"""
def __init__(
self,
bboxes=None, # gt bboxes / OD: det bboxes
dists=None, # OD: predicted dists
labels=None, # gt labels / OD: det labels
masks=None, # gt masks / OD: predicted masks
formatted_masks=None, # OD: Transform the masks for object detection evaluation
points=None, # gt points / OD: predicted points
rels=None, # gt rel triplets / OD: sampled triplets (training) with target rel labels
key_rels=None, # gt key rels
relmaps=None, # gt relmaps
refine_bboxes=None, # RM: refined object bboxes (score is changed)
formatted_bboxes=None, # OD: Transform the refine_bboxes for object detection evaluation
refine_scores=None, # RM: refined object scores (before softmax)
refine_dists=None, # RM: refined object dists (after softmax)
refine_labels=None, # RM: refined object labels
target_labels=None, # RM: assigned object labels for training the relation module.
rel_scores=None, # RM: predicted relation scores (before softmax)
rel_dists=None, # RM: predicted relation prob (after softmax)
triplet_scores=None, # RM: predicted triplet scores (the multiplication of sub-obj-rel scores)
ranking_scores=None, # RM: predicted ranking scores for rank the triplet
rel_pair_idxes=None, # gt rel_pair_idxes / RM: training/testing sampled rel_pair_idxes
rel_labels=None, # gt rel_labels / RM: predicted rel labels
target_rel_labels=None, # RM: assigned target rel labels
target_key_rel_labels=None, # RM: assigned target key rel labels
saliency_maps=None, # SAL: predicted or gt saliency map
attrs=None, # gt attr
rel_cap_inputs=None, # gt relational caption inputs
rel_cap_targets=None, # gt relational caption targets
rel_ipts=None, # gt relational importance scores
tgt_rel_cap_inputs=None, # RM: assigned target relational caption inputs
tgt_rel_cap_targets=None, # RM: assigned target relational caption targets
tgt_rel_ipts=None, # RM: assigned target relational importance scores
rel_cap_scores=None, # RM: predicted relational caption scores
rel_cap_seqs=None, # RM: predicted relational seqs
rel_cap_sents=None, # RM: predicted relational decoded captions
rel_ipt_scores=None, # RM: predicted relational caption ipt scores
cap_inputs=None,
cap_targets=None,
cap_scores=None,
cap_scores_from_triplet=None,
alphas=None,
rel_distribution=None,
obj_distribution=None,
word_obj_distribution=None,
cap_seqs=None,
cap_sents=None,
img_shape=None,
scenes=None, # gt scene labels
target_scenes=None, # target_scene labels
        add_losses=None,  # For recording losses other than the final object loss and rel loss,
        # e.g., auxiliary losses used in the causal head or VCTree
head_spec_losses=None, # For method-specific loss
pan_results=None,
):
self.__dict__.update(locals())
del self.__dict__['self']
def is_none(self):
return all(
[v is None for k, v in self.__dict__.items() if k != 'self'])
# HACK: To turn this object into an iterable
def __len__(self):
return 1
# HACK:
def __getitem__(self, i):
return self
# HACK:
def __iter__(self):
yield self
class PostProcessor(nn.Module):
"""Obtain the final relation information for evaluation."""
def __init__(self):
"""
Arguments:
"""
super(PostProcessor, self).__init__()
def forward(self, det_result, key_first=False):
"""
Arguments:
det_result
Returns:
det_result: add the
"""
if det_result.refine_scores is None:
return det_result
relation_logits, finetune_obj_logits = det_result.rel_scores, det_result.refine_scores
rel_pair_idxes = det_result.rel_pair_idxes
ranking_scores = det_result.ranking_scores
finetune_labels, finetune_dists, finetune_bboxes, \
rels, rel_dists, prop_rel_pair_idxes, prop_rel_labels, prop_rel_scores, triplet_scores = \
[], [], [], [], [], [], [], [], []
prop_ranking_scores = None if ranking_scores is None else []
for i, (rel_logit, obj_logit, rel_pair_idx, bbox) in enumerate(
zip(relation_logits, finetune_obj_logits, rel_pair_idxes,
det_result.bboxes)):
obj_class_prob = F.softmax(obj_logit, -1)
obj_class_prob[:, 0] = 0 # set background score to 0
num_obj_bbox = obj_class_prob.shape[0]
obj_scores, obj_pred = obj_class_prob[:, 1:].max(dim=1)
obj_pred = obj_pred + 1
assert obj_scores.shape[0] == num_obj_bbox
obj_class = obj_pred
finetune_labels.append(obj_class)
finetune_dists.append(obj_class_prob)
if bbox.shape[1] == 4:
bbox = torch.cat((bbox, obj_scores[:, None]), dim=-1)
else:
bbox[:, -1] = obj_scores
finetune_bboxes.append(bbox)
# sorting triples according to score production
obj_scores0 = obj_scores[rel_pair_idx[:, 0]]
obj_scores1 = obj_scores[rel_pair_idx[:, 1]]
rel_class_prob = F.softmax(rel_logit, -1)
rel_scores, rel_class = rel_class_prob[:, 1:].max(dim=1)
rel_class = rel_class + 1
            # TODO Kaihua: how about using a weighted sum here? e.g. rel*1 + obj*0.8 + obj*0.8
triple_scores = rel_scores * obj_scores0 * obj_scores1
if key_first and ranking_scores is not None:
triple_scores *= ranking_scores[i]
_, sorting_idx = torch.sort(triple_scores.view(-1),
dim=0,
descending=True)
triple_scores = triple_scores.view(-1)[sorting_idx].contiguous()
rel_pair_idx = rel_pair_idx[sorting_idx]
rel_class_prob = rel_class_prob[sorting_idx]
rel_labels = rel_class[sorting_idx]
rel_logit = rel_logit[sorting_idx]
if key_first and ranking_scores is not None:
prop_ranking_scores.append(ranking_scores[i][sorting_idx])
prop_rel_pair_idxes.append(rel_pair_idx)
prop_rel_labels.append(rel_labels)
prop_rel_scores.append(rel_logit)
rel = torch.cat((rel_pair_idx, rel_labels[:, None]), dim=-1)
rels.append(rel)
rel_dists.append(rel_class_prob)
triplet_scores.append(triple_scores)
det_result.refine_bboxes = finetune_bboxes
det_result.refine_dists = finetune_dists
det_result.refine_labels = finetune_labels
det_result.rels = rels
det_result.rel_dists = rel_dists
det_result.rel_pair_idxes = prop_rel_pair_idxes
det_result.triplet_scores = triplet_scores
det_result.rel_labels = prop_rel_labels
det_result.rel_scores = prop_rel_scores
det_result.ranking_scores = prop_ranking_scores
return det_result
class DemoPostProcessor(object):
"""This API is used for obtaining the final information for demonstrating
the scene graphs.
It's usually invoked after the PostProcessor. Especially applying NMS to
suppress the repetition.
"""
def __init__(self):
super(DemoPostProcessor, self).__init__()
def filter_AB_rels(self, det_result):
new_rel_pair_idxes = []
rel_pair_idxes = det_result.rel_pair_idxes
keep_rel_idxes = []
for idx, pair in enumerate(rel_pair_idxes):
subj, obj = pair[0], pair[1]
pair = pair.tolist()
if pair in new_rel_pair_idxes or [obj, subj] in new_rel_pair_idxes:
continue
new_rel_pair_idxes.append(pair)
keep_rel_idxes.append(idx)
new_rel_pair_idxes = np.array(new_rel_pair_idxes).astype(np.int32)
det_result.rel_pair_idxes = new_rel_pair_idxes
det_result.rel_labels = det_result.rel_labels[keep_rel_idxes]
if len(keep_rel_idxes) > 0:
det_result.rels = np.hstack(
(det_result.rel_pair_idxes, det_result.rel_labels[:, None]))
else:
det_result.rels = np.array([]).astype(np.int32)
det_result.rel_dists = det_result.rel_dists[keep_rel_idxes]
det_result.triplet_scores = det_result.triplet_scores[keep_rel_idxes]
return det_result
def filter_rels_by_duplicated_names(self, det_result):
new_rel_pair_idxes = []
rel_pair_idxes = det_result.rel_pair_idxes
refine_labels = det_result.refine_labels
keep_rel_idxes = []
for idx, pair in enumerate(rel_pair_idxes):
subj, obj = pair[0], pair[1]
if refine_labels[subj] == refine_labels[obj]:
continue
new_rel_pair_idxes.append(pair)
keep_rel_idxes.append(idx)
new_rel_pair_idxes = np.array(new_rel_pair_idxes).astype(np.int32)
det_result.rel_pair_idxes = new_rel_pair_idxes
det_result.rel_labels = det_result.rel_labels[keep_rel_idxes]
if len(keep_rel_idxes) > 0:
det_result.rels = np.hstack(
(det_result.rel_pair_idxes, det_result.rel_labels[:, None]))
else:
det_result.rels = np.array([]).astype(np.int32)
det_result.rel_dists = det_result.rel_dists[keep_rel_idxes]
det_result.triplet_scores = det_result.triplet_scores[keep_rel_idxes]
return det_result
def filter_nonoverlap_rels(self, det_result, must_overlap_predicates=None):
refine_bboxes = det_result.refine_bboxes
refine_labels = det_result.refine_labels
ious = bbox_overlaps(refine_bboxes[:, :-1], refine_bboxes[:, :-1])
# refine_logits = det_result.refine_scores # N * (C+1)
new_rel_pair_idxes = []
rel_pair_idxes = det_result.rel_pair_idxes
rel_labels = det_result.rel_labels
rel_dists = det_result.rel_dists
keep_rel_idxes = []
for idx, (pair, predicate) in enumerate(zip(rel_pair_idxes,
rel_labels)):
subj, obj = pair[0], pair[1]
iou = ious[subj, obj]
if must_overlap_predicates is not None and predicate in must_overlap_predicates and iou <= 0:
continue
new_rel_pair_idxes.append(pair)
keep_rel_idxes.append(idx)
new_rel_pair_idxes = np.array(new_rel_pair_idxes).astype(np.int32)
det_result.rel_pair_idxes = new_rel_pair_idxes
det_result.rel_labels = det_result.rel_labels[keep_rel_idxes]
if len(keep_rel_idxes) > 0:
det_result.rels = np.hstack(
(det_result.rel_pair_idxes, det_result.rel_labels[:, None]))
else:
det_result.rels = np.array([]).astype(np.int32)
det_result.rel_dists = det_result.rel_dists[keep_rel_idxes]
det_result.triplet_scores = det_result.triplet_scores[keep_rel_idxes]
return det_result
def filter_duplicate_triplets(self, det_result, vocab_objects,
vocab_predicates):
all_triplets = []
new_rel_pair_idxes = []
refine_labels = det_result.refine_labels
rel_pair_idxes = det_result.rel_pair_idxes
rel_labels = det_result.rel_labels
rel_dists = det_result.rel_dists
keep_rel_idxes = []
for idx, (pair, predicate) in enumerate(zip(rel_pair_idxes,
rel_labels)):
triplet = [
vocab_objects[refine_labels[pair[0]]],
vocab_predicates[predicate],
vocab_objects[refine_labels[pair[1]]]
]
if triplet in all_triplets:
continue
new_rel_pair_idxes.append(pair)
keep_rel_idxes.append(idx)
all_triplets.append(triplet)
new_rel_pair_idxes = np.array(new_rel_pair_idxes).astype(np.int32)
det_result.rel_pair_idxes = new_rel_pair_idxes
det_result.rel_labels = det_result.rel_labels[keep_rel_idxes]
if len(keep_rel_idxes) > 0:
det_result.rels = np.hstack(
(det_result.rel_pair_idxes, det_result.rel_labels[:, None]))
else:
det_result.rels = np.array([]).astype(np.int32)
det_result.rel_dists = det_result.rel_dists[keep_rel_idxes]
det_result.triplet_scores = det_result.triplet_scores[keep_rel_idxes]
return det_result
def filter_rels_by_num(self, det_result, num):
det_result.rel_pair_idxes = det_result.rel_pair_idxes[:num]
det_result.rel_labels = det_result.rel_labels[:num]
if len(det_result.rel_labels) > 0:
det_result.rels = np.hstack(
(det_result.rel_pair_idxes, det_result.rel_labels[:, None]))
else:
det_result.rels = np.array([]).astype(np.int32)
det_result.rel_dists = det_result.rel_dists[:num]
det_result.triplet_scores = det_result.triplet_scores[:num]
return det_result
def filtered_rels_by_mincover(self, det_result):
new_rel_pair_idxes = []
rel_pair_idxes = det_result.rel_pair_idxes
rel_labels = det_result.rel_labels
rel_dists = det_result.rel_dists
keep_rel_idxes = []
covered_objects = []
for idx, (pair, predicate) in enumerate(zip(rel_pair_idxes,
rel_labels)):
if pair[0] in covered_objects and pair[1] in covered_objects:
continue
if pair[0] not in covered_objects:
covered_objects.append(pair[0])
if pair[1] not in covered_objects:
covered_objects.append(pair[1])
new_rel_pair_idxes.append(pair)
keep_rel_idxes.append(idx)
new_rel_pair_idxes = np.array(new_rel_pair_idxes).astype(np.int32)
det_result.rel_pair_idxes = new_rel_pair_idxes
det_result.rel_labels = det_result.rel_labels[keep_rel_idxes]
if len(keep_rel_idxes) > 0:
det_result.rels = np.hstack(
(det_result.rel_pair_idxes, det_result.rel_labels[:, None]))
else:
det_result.rels = np.array([]).astype(np.int32)
det_result.rel_dists = det_result.rel_dists[keep_rel_idxes]
det_result.triplet_scores = det_result.triplet_scores[keep_rel_idxes]
return det_result
def clean_relations_via_objects(self, keep_obj_ids, det_result):
det_result.refine_labels = det_result.refine_labels[keep_obj_ids]
det_result.refine_bboxes = det_result.refine_bboxes[keep_obj_ids]
det_result.refine_dists = det_result.refine_dists[keep_obj_ids]
old_to_new = dict(
zip(keep_obj_ids.tolist(), list(range(len(keep_obj_ids)))))
new_rel_pair_idxes = []
rel_pair_idxes = det_result.rel_pair_idxes
keep_rel_idxes = []
for idx, rel in enumerate(rel_pair_idxes):
if rel[0] not in keep_obj_ids or rel[1] not in keep_obj_ids:
continue
new_rel_pair_idxes.append([old_to_new[rel[0]], old_to_new[rel[1]]])
keep_rel_idxes.append(idx)
new_rel_pair_idxes = np.array(new_rel_pair_idxes).astype(np.int32)
det_result.rel_pair_idxes = new_rel_pair_idxes
det_result.rel_labels = det_result.rel_labels[keep_rel_idxes]
if len(keep_rel_idxes) > 0:
det_result.rels = np.hstack(
(det_result.rel_pair_idxes, det_result.rel_labels[:, None]))
else:
det_result.rels = np.array([]).astype(np.int32)
det_result.rel_dists = det_result.rel_dists[keep_rel_idxes]
det_result.triplet_scores = det_result.triplet_scores[keep_rel_idxes]
return det_result
def forward(self,
det_result,
vocab_objects,
vocab_predicates,
object_thres=0.01,
nms_thres=0.1,
ignore_classes=None,
must_overlap_predicates=None,
max_rel_num=None):
# TODO: Here we only process the box. Any other things related with box, e.g., masks, points are not processed yet.
# directly ignore objects:
keep_obj_ids = np.where(
np.isin(det_result.refine_labels, ignore_classes) == 0)[0]
det_result = self.clean_relations_via_objects(keep_obj_ids, det_result)
if len(keep_obj_ids) == 0:
return det_result
# apply NMS
nms_keep_obj_ids, gathered = multiclass_nms_for_cluster(
det_result.refine_bboxes[:, :-1],
det_result.refine_bboxes[:, -1],
det_result.refine_labels,
nms_thres=nms_thres)
det_result = self.clean_relations_via_objects(nms_keep_obj_ids,
det_result)
if len(nms_keep_obj_ids) == 0:
return det_result
# NOTE: This may be not necessary: Suppress the low-score objects
score_keep_obj_ids = np.where(
det_result.refine_bboxes[:, -1] >= object_thres)[0]
det_result = self.clean_relations_via_objects(score_keep_obj_ids,
det_result)
if len(score_keep_obj_ids) == 0:
return det_result
# Filter the A-B & B-A pairs, keep the pairs with higher scores
det_result = self.filter_AB_rels(det_result)
# Filter the rels whose pairs must be overlapped
det_result = self.filter_nonoverlap_rels(det_result,
must_overlap_predicates)
# Filter the duplicate triplets
det_result = self.filter_duplicate_triplets(det_result, vocab_objects,
vocab_predicates)
# Filter the rels by min cover
det_result = self.filtered_rels_by_mincover(det_result)
# Filter the rel pairs with the same subj-obj names
det_result = self.filter_rels_by_duplicated_names(det_result)
# Control the number of the relations
num_obj = det_result.refine_bboxes.shape[0]
rel_num = max_rel_num if max_rel_num is not None else int(
num_obj * (num_obj - 1) / 2 - num_obj)
det_result = self.filter_rels_by_num(det_result, rel_num)
return det_result
def get_box_info(boxes, need_norm=True, size=None):
"""
input: [batch_size, (x1,y1,x2,y2)]
size: [h, w]
output: [batch_size, (x1,y1,x2,y2,cx,cy,w,h)]
"""
wh = boxes[:, 2:4] - boxes[:, :2] + 1.0
center_box = torch.cat((boxes[:, :2] + 0.5 * wh, wh), 1)
box_info = torch.cat((boxes, center_box), 1)
if need_norm:
box_info = box_info / float(max(max(size[0], size[1]), 100))
return box_info
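# Worked example (added comment): for an image with size = [100, 200] (h, w)
# and a single box (x1, y1, x2, y2) = (10, 20, 49, 59), wh = (40, 40) and the
# center is (30, 40), so the unnormalized row is
#   [10, 20, 49, 59, 30, 40, 40, 40]
# and with need_norm=True every entry is divided by max(max(h, w), 100) = 200.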
def get_box_pair_info(box1, box2):
"""
input:
box1 [batch_size, (x1,y1,x2,y2,cx,cy,w,h)]
box2 [batch_size, (x1,y1,x2,y2,cx,cy,w,h)]
output:
32-digits: [box1, box2, unionbox, intersectionbox]
"""
# union box
unionbox = box1[:, :4].clone()
unionbox[:, 0] = torch.min(box1[:, 0], box2[:, 0])
unionbox[:, 1] = torch.min(box1[:, 1], box2[:, 1])
unionbox[:, 2] = torch.max(box1[:, 2], box2[:, 2])
unionbox[:, 3] = torch.max(box1[:, 3], box2[:, 3])
union_info = get_box_info(unionbox, need_norm=False)
# intersection box
    intersection_box = box1[:, :4].clone()
    intersection_box[:, 0] = torch.max(box1[:, 0], box2[:, 0])
    intersection_box[:, 1] = torch.max(box1[:, 1], box2[:, 1])
    intersection_box[:, 2] = torch.min(box1[:, 2], box2[:, 2])
    intersection_box[:, 3] = torch.min(box1[:, 3], box2[:, 3])
    case1 = torch.nonzero(intersection_box[:, 2].contiguous().view(
        -1) < intersection_box[:, 0].contiguous().view(-1)).view(-1)
    case2 = torch.nonzero(intersection_box[:, 3].contiguous().view(
        -1) < intersection_box[:, 1].contiguous().view(-1)).view(-1)
    intersection_info = get_box_info(intersection_box, need_norm=False)
    if case1.numel() > 0:
        intersection_info[case1, :] = 0
    if case2.numel() > 0:
        intersection_info[case2, :] = 0
    return torch.cat((box1, box2, union_info, intersection_info), 1)
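# Layout note (added comment): box1, box2, the union box and the intersection
# box each contribute an 8-dim (x1, y1, x2, y2, cx, cy, w, h) vector, giving
# the 32-dim pairwise spatial feature; the intersection entries are zeroed
# when the two boxes do not overlap.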
def group_regions(result, prior_pairs, thres=0.9):
"""
Arguments:
result: (Result object)
prior_pairs: (List[list]): candidate pair that may be a group
obj_classes: (List): including the background
Returns:
dict: describing the region governing hierarchy
"""
# NOTE: Extract the RM refined ones.
bboxes, obj_labels = result.refine_bboxes, result.refine_labels
region_groups = []
for boxes, labels in zip(bboxes, obj_labels):
if isinstance(boxes, torch.Tensor):
boxes_np = boxes.cpu().numpy()
else:
boxes_np = boxes.copy()
num_obj = len(boxes_np)
if num_obj == 0:
region_groups.append(None)
continue
box_areas = (boxes_np[:, 2] - boxes_np[:, 0] +
1) * (boxes_np[:, 3] - boxes_np[:, 1] + 1)
intersect = bbox_overlaps(boxes, boxes, mode='iof')
if isinstance(labels, torch.Tensor):
labels_np = labels.cpu().numpy()
else:
labels_np = labels.copy()
region_group = defaultdict(list)
for i in range(num_obj):
for j in range(i):
subj_cls, obj_cls = labels_np[i], labels_np[j]
subj_area, obj_area = box_areas[i], box_areas[j]
if [subj_cls, obj_cls] in prior_pairs:
# this pair maybe the group ones, check the position
if subj_area > obj_area:
if intersect[j, i] > thres:
if j in region_group:
region_group[i] = list(
set(region_group[i] + region_group[j] +
[j]))
else:
region_group[i].append(j)
else:
if intersect[i, j] > thres:
if i in region_group:
region_group[j] = list(
set(region_group[j] + region_group[i] +
[i]))
region_group[j].append(i)
region_groups.append(dict(region_group))
return region_groups
def get_internal_labels(leaf_labels, hierarchy, vocab):
leaf_labels_np = leaf_labels.cpu().numpy()
internal_labels = [[] for _ in leaf_labels_np]
for idx, leaf_label in enumerate(leaf_labels_np):
leaf_name = vocab[leaf_label]
start_node = anytree.search.find(hierarchy,
lambda node: node.id == leaf_name)
iter_node = start_node
while iter_node.parent is not None:
iter_node = iter_node.parent
internal_labels[idx].append(vocab.index(iter_node.id))
internal_labels[idx] = torch.from_numpy(
internal_labels[idx]).to(leaf_labels)
return internal_labels
def get_pattern_labels(leaf_labels, hierarchy, vocab):
pattern_labels = []
for idx, leaf_label in enumerate(leaf_labels):
leaf_name = vocab[leaf_label]
start_node = anytree.search.find(hierarchy,
lambda node: node.id == leaf_name)
iter_node = start_node
while iter_node.parent.id != 'Root':
iter_node = iter_node.parent
pattern_labels.append(vocab.index(iter_node.id))
pattern_labels = np.array(pattern_labels, dtype=np.int32)
return pattern_labels
def _topdown_hook(root, output_vector, input_vector, vocab, reduce='avg'):
if len(root.children) == 0:
if root.id == 'Root':
return output_vector
else:
output_vector[vocab.index(root.id)] = input_vector[vocab.index(
root.id)]
return output_vector
else:
gather_values = []
for c in root.children:
output_vector = _topdown_hook(c, output_vector, input_vector,
vocab, reduce)
gather_values.append(output_vector[vocab.index(c.id)][None])
if reduce == 'avg':
op = torch.mean
elif reduce == 'sum':
op = torch.sum
elif reduce == 'max':
op = torch.max
elif reduce == 'min':
op = torch.min
else:
raise NotImplementedError
if root.id == 'Root':
return output_vector
else:
output_vector[vocab.index(root.id)] = op(torch.cat(gather_values))
return output_vector
def top_down_induce(x, hierarchy, vocab, reduce='avg', solveroot=None):
"""The first n elements of vector belong the the first n elements of vocab.
trick: the input vector name must be "x"!!!!
"""
vocab_vec = torch.zeros((x.shape[0], len(vocab))).to(x)
vocab_vec[:, :x.shape[1]] = x
if solveroot is not None:
for i in range(x.shape[1], len(vocab)):
vocab_vec[:, i] = eval(solveroot[i])
else:
vocab_vec = _topdown_hook(hierarchy,
vocab_vec,
x,
vocab,
reduce=reduce)
vocab_vec += 1e-7
return vocab_vec
def multiclass_nms_for_cluster(multi_bboxes,
multi_scores,
labels,
nms_thres=0.5):
"""NMS for multi-class bboxes.
Args:
multi_bboxes (np.array): shape (n, #class*4) or (n, 4)
multi_scores (np.array): shape (n, ),
score_thr (float): bbox threshold, bboxes with scores lower than it
will not be considered.
nms_cfg (float): NMS IoU threshold
max_num (int): if there are more than max_num bboxes after NMS,
only top max_num will be kept.
Returns:
tuple: (bboxes, labels), tensors of shape (k, 5) and (k, 1). Labels
are 0-based.
"""
# Modified from https://github.com/pytorch/vision/blob
# /505cd6957711af790211896d32b40291bea1bc21/torchvision/ops/boxes.py#L39.
# strategy: in order to perform NMS independently per class.
# we add an offset to all the boxes. The offset is dependent
# only on the class idx, and is large enough so that boxes
# from different classes do not overlap
max_coordinate = multi_bboxes.max()
offsets = labels * (max_coordinate + 1)
bboxes_for_nms = multi_bboxes + offsets[:, None]
order = np.argsort(multi_scores)[::-1]
num_box = len(multi_bboxes)
suppressed = np.zeros(num_box)
gathered = (np.ones(num_box) * -1).astype(np.int32)
ious = bbox_overlaps(bboxes_for_nms, bboxes_for_nms)
for i in range(num_box):
if suppressed[order[i]]:
continue
for j in range(i + 1, num_box):
if suppressed[order[j]]:
continue
iou = ious[order[i], order[j]]
if iou >= nms_thres:
suppressed[order[j]] = 1
gathered[order[j]] = order[i]
keep = np.where(suppressed == 0)[0]
return keep, gathered
| 28,570 | 41.707025 | 123 | py |
OpenPSG | OpenPSG-main/openpsg/models/relation_heads/approaches/sampling.py | # ---------------------------------------------------------------
# sampling.py
# Set-up time: 2020/5/7 下午4:31
# Copyright (c) 2020 ICT
# Licensed under The MIT License [see LICENSE for details]
# Written by Kenneth-Wong (Wenbin-Wang) @ VIPL.ICT
# Contact: wenbin.wang@vipl.ict.ac.cn [OR] nkwangwenbin@gmail.com
# ---------------------------------------------------------------
import numpy as np
import numpy.random as npr
import torch
from mmdet.core import bbox_overlaps
from torch.nn import functional as F
# from maskrcnn_benchmark.modeling.box_coder import BoxCoder
# from maskrcnn_benchmark.structures.boxlist_ops import boxlist_iou
# from maskrcnn_benchmark.modeling.utils import cat
class RelationSampler(object):
def __init__(self,
type,
pos_iou_thr,
require_overlap,
num_sample_per_gt_rel,
num_rel_per_image,
pos_fraction,
use_gt_box,
test_overlap=False,
key_sample=False):
self.type = type
self.pos_iou_thr = pos_iou_thr
self.require_overlap = require_overlap
self.num_sample_per_gt_rel = num_sample_per_gt_rel
self.num_rel_per_image = num_rel_per_image
self.pos_fraction = pos_fraction
self.use_gt_box = use_gt_box
self.test_overlap = test_overlap
self.key_sample = key_sample
def prepare_test_pairs(self, det_result):
# prepare object pairs for relation prediction
rel_pair_idxes = []
device = det_result.bboxes[0].device
for p in det_result.bboxes:
n = len(p)
cand_matrix = torch.ones(
(n, n), device=device) - torch.eye(n, device=device)
# mode==sgdet and require_overlap
# if (not self.use_gt_box) and self.test_overlap:
if self.test_overlap:
cand_matrix = cand_matrix.byte() & bbox_overlaps(
p[:, :4], p[:, :4]).gt(0).byte()
idxs = torch.nonzero(cand_matrix).view(-1, 2)
if len(idxs) > 0:
rel_pair_idxes.append(idxs)
else:
# if there is no candidate pairs, give a placeholder of [[0, 0]]
rel_pair_idxes.append(
torch.zeros((1, 2), dtype=torch.int64, device=device))
return rel_pair_idxes
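    # Illustrative note (added comment): for n detected boxes the candidate
    # matrix enumerates all n * (n - 1) ordered pairs (self-pairs excluded);
    # with test_overlap=True only pairs whose boxes actually intersect are
    # kept, and a dummy [0, 0] pair is emitted when nothing survives.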
def gtbox_relsample(self, det_result, gt_result):
assert self.use_gt_box
num_pos_per_img = int(self.num_rel_per_image * self.pos_fraction)
rel_idx_pairs = []
rel_labels = []
rel_sym_binarys = []
key_rel_labels = []
bboxes, labels = det_result.bboxes, det_result.labels
gt_bboxes, gt_labels, gt_relmaps, gt_rels, gt_keyrels = gt_result.bboxes, gt_result.labels, gt_result.relmaps, \
gt_result.rels, gt_result.key_rels
device = bboxes[0].device
if gt_keyrels is None:
gt_keyrels = [None] * len(gt_bboxes)
for img_id, (prp_box, prp_lab, tgt_box, tgt_lab, tgt_rel_matrix,
tgt_rel, tgt_keyrel) in enumerate(
zip(bboxes, labels, gt_bboxes, gt_labels, gt_relmaps,
gt_rels, gt_keyrels)):
num_prp = prp_box.shape[0]
assert num_prp == tgt_box.shape[0]
#tgt_pair_idxs = torch.nonzero(tgt_rel_matrix > 0)
tgt_pair_idxs = tgt_rel.long()[:, :2]
assert tgt_pair_idxs.shape[1] == 2
# generate the keyrel labels:
img_keyrel_labels = None
if tgt_keyrel is not None:
img_keyrel_labels = torch.zeros(
tgt_pair_idxs.shape[0]).long().to(tgt_pair_idxs.device)
img_keyrel_labels[tgt_keyrel.long()] = 1
# sort the rel pairs to coordinate with tgt_pair_idxs
#if tgt_keyrel is not None:
#perm = torch.from_numpy(np.lexsort((tgt_rel.cpu().numpy()[:, 1], tgt_rel.cpu().numpy()[:, 0]))).to(
# device)
#assert (torch.sum(tgt_rel[perm][:, :2] - tgt_pair_idxs) == 0)
#tgt_keyrel = tgt_keyrel[perm]
tgt_head_idxs = tgt_pair_idxs[:, 0].contiguous().view(-1)
tgt_tail_idxs = tgt_pair_idxs[:, 1].contiguous().view(-1)
tgt_rel_labs = tgt_rel.long()[:, -1].contiguous().view(-1)
# sym_binary_rels
binary_rel = torch.zeros((num_prp, num_prp), device=device).long()
binary_rel[tgt_head_idxs, tgt_tail_idxs] = 1
binary_rel[tgt_tail_idxs, tgt_head_idxs] = 1
rel_sym_binarys.append(binary_rel)
rel_possibility = torch.ones(
(num_prp, num_prp), device=device).long() - torch.eye(
num_prp, device=device).long()
rel_possibility[tgt_head_idxs, tgt_tail_idxs] = 0
rel_possibility[tgt_tail_idxs, tgt_head_idxs] = 0
tgt_bg_idxs = torch.nonzero(rel_possibility > 0)
# generate fg bg rel_pairs
if tgt_pair_idxs.shape[0] > num_pos_per_img:
perm = torch.randperm(tgt_pair_idxs.shape[0],
device=device)[:num_pos_per_img]
tgt_pair_idxs = tgt_pair_idxs[perm]
tgt_rel_labs = tgt_rel_labs[perm]
if img_keyrel_labels is not None:
img_keyrel_labels = img_keyrel_labels[perm]
num_fg = min(tgt_pair_idxs.shape[0], num_pos_per_img)
num_bg = self.num_rel_per_image - num_fg
perm = torch.randperm(tgt_bg_idxs.shape[0], device=device)[:num_bg]
tgt_bg_idxs = tgt_bg_idxs[perm]
img_rel_idxs = torch.cat((tgt_pair_idxs, tgt_bg_idxs), dim=0)
img_rel_labels = torch.cat(
(tgt_rel_labs.long(),
torch.zeros(tgt_bg_idxs.shape[0], device=device).long()),
dim=0).contiguous().view(-1)
if img_keyrel_labels is not None:
img_keyrel_labels = torch.cat(
(img_keyrel_labels.long(),
torch.ones(tgt_bg_idxs.shape[0], device=device).long() *
-1),
dim=0).contiguous().view(-1)
key_rel_labels.append(img_keyrel_labels)
rel_idx_pairs.append(img_rel_idxs)
rel_labels.append(img_rel_labels)
if self.key_sample:
return rel_labels, rel_idx_pairs, rel_sym_binarys, key_rel_labels
else:
return rel_labels, rel_idx_pairs, rel_sym_binarys
def detect_relsample(self, det_result, gt_result):
# corresponding to rel_assignments function in neural-motifs
"""
The input proposals are already processed by subsample function of box_head,
in this function, we should only care about fg box, and sample corresponding fg/bg relations
Note: this function keeps a state.
Arguments:
proposals (list[BoxList]) contain fields: labels, boxes(5 columns)
targets (list[BoxList]) contain fields: labels
"""
if self.type == 'Motif':
sampling_function = self.motif_rel_fg_bg_sampling
else:
raise NotImplementedError
bboxes, labels = det_result.bboxes, det_result.labels
gt_bboxes, gt_labels, gt_relmaps, gt_rels, gt_keyrels = gt_result.bboxes, gt_result.labels, gt_result.relmaps,\
gt_result.rels, gt_result.key_rels
device = bboxes[0].device
self.num_pos_per_img = int(self.num_rel_per_image * self.pos_fraction)
rel_idx_pairs = []
rel_labels = []
rel_sym_binarys = []
key_rel_labels = []
if gt_keyrels is None:
gt_keyrels = [None] * len(gt_bboxes)
for img_id, (prp_box, prp_lab, tgt_box, tgt_lab, tgt_rel_matrix,
tgt_rel, tgt_keyrel) in enumerate(
zip(bboxes, labels, gt_bboxes, gt_labels, gt_relmaps,
gt_rels, gt_keyrels)):
# IoU matching
ious = bbox_overlaps(tgt_box, prp_box[:, :4]) # [tgt, prp]
is_match = (tgt_lab[:, None] == prp_lab[None]) & (
ious > self.pos_iou_thr) # [tgt, prp]
# Proposal self IoU to filter non-overlap
prp_self_iou = bbox_overlaps(prp_box[:, :4],
prp_box[:, :4]) # [prp, prp]
if self.require_overlap and (not self.use_gt_box):
rel_possibility = (prp_self_iou > 0) & (
prp_self_iou < 1) # not self & intersect
else:
num_prp = prp_box.shape[0]
rel_possibility = torch.ones(
(num_prp, num_prp), device=device).long() - torch.eye(
num_prp, device=device).long()
# only select relations between fg proposals
rel_possibility[prp_lab == 0] = 0
rel_possibility[:, prp_lab == 0] = 0
img_rel_triplets, binary_rel = sampling_function(
device, tgt_rel_matrix, tgt_rel, tgt_keyrel, ious, is_match,
rel_possibility)
rel_idx_pairs.append(
img_rel_triplets[:, :2]) # (num_rel, 2), (sub_idx, obj_idx)
rel_labels.append(img_rel_triplets[:, 2]) # (num_rel, )
if tgt_keyrel is not None:
key_rel_labels.append(img_rel_triplets[:, -1])
rel_sym_binarys.append(binary_rel)
if self.key_sample:
return rel_labels, rel_idx_pairs, rel_sym_binarys, key_rel_labels
else:
return rel_labels, rel_idx_pairs, rel_sym_binarys
def motif_rel_fg_bg_sampling(self, device, tgt_rel_matrix, tgt_rel,
tgt_keyrel, ious, is_match, rel_possibility):
"""
prepare to sample fg relation triplet and bg relation triplet
tgt_rel_matrix: # [number_target, number_target]
ious: # [number_target, num_proposal]
is_match: # [number_target, num_proposal]
rel_possibility:# [num_proposal, num_proposal]
"""
tgt_pair_idxs = tgt_rel.long()[:, :2]
assert tgt_pair_idxs.shape[1] == 2
tgt_head_idxs = tgt_pair_idxs[:, 0].contiguous().view(-1)
tgt_tail_idxs = tgt_pair_idxs[:, 1].contiguous().view(-1)
tgt_rel_labs = tgt_rel.long()[:, -1].contiguous().view(-1)
# # sort the rel pairs to coordinate with tgt_pair_idxs
# if tgt_keyrel is not None:
# perm = torch.from_numpy(np.lexsort((tgt_rel.cpu().numpy()[:, 1]), tgt_rel.cpu().numpy()[:, 0])).to(
# device)
# assert (torch.sum(tgt_rel[perm][:, :2] - tgt_pair_idxs) == 0)
# tgt_keyrel = tgt_keyrel[perm]
# generate the keyrel labels:
img_keyrel_labels = None
if tgt_keyrel is not None:
img_keyrel_labels = torch.zeros(tgt_pair_idxs.shape[0]).long().to(
tgt_pair_idxs.device)
img_keyrel_labels[tgt_keyrel.long()] = 1
num_tgt_rels = tgt_rel_labs.shape[0]
# generate binary prp mask
num_prp = is_match.shape[-1]
binary_prp_head = is_match[
tgt_head_idxs] # num_tgt_rel, num_prp (matched prp head)
binary_prp_tail = is_match[
tgt_tail_idxs] # num_tgt_rel, num_prp (matched prp head)
binary_rel = torch.zeros((num_prp, num_prp), device=device).long()
fg_rel_triplets = []
for i in range(num_tgt_rels):
# generate binary prp mask
bi_match_head = torch.nonzero(binary_prp_head[i] > 0)
bi_match_tail = torch.nonzero(binary_prp_tail[i] > 0)
num_bi_head = bi_match_head.shape[0]
num_bi_tail = bi_match_tail.shape[0]
if num_bi_head > 0 and num_bi_tail > 0:
bi_match_head = bi_match_head.view(1, num_bi_head).expand(
num_bi_tail, num_bi_head).contiguous()
bi_match_tail = bi_match_tail.view(num_bi_tail, 1).expand(
num_bi_tail, num_bi_head).contiguous()
                # the binary rel matrix only encodes whether two proposals are related, so it is symmetric
binary_rel[bi_match_head.view(-1), bi_match_tail.view(-1)] = 1
binary_rel[bi_match_tail.view(-1), bi_match_head.view(-1)] = 1
tgt_head_idx = int(tgt_head_idxs[i])
tgt_tail_idx = int(tgt_tail_idxs[i])
tgt_rel_lab = int(tgt_rel_labs[i])
tgt_key_rel_lab = int(img_keyrel_labels[i]
) if img_keyrel_labels is not None else None
# find matching pair in proposals (might be more than one)
prp_head_idxs = torch.nonzero(is_match[tgt_head_idx]).squeeze(1)
prp_tail_idxs = torch.nonzero(is_match[tgt_tail_idx]).squeeze(1)
num_match_head = prp_head_idxs.shape[0]
num_match_tail = prp_tail_idxs.shape[0]
if num_match_head <= 0 or num_match_tail <= 0:
continue
# all combination pairs
prp_head_idxs = prp_head_idxs.view(-1, 1).expand(
num_match_head, num_match_tail).contiguous().view(-1)
prp_tail_idxs = prp_tail_idxs.view(1, -1).expand(
num_match_head, num_match_tail).contiguous().view(-1)
valid_pair = prp_head_idxs != prp_tail_idxs
if valid_pair.sum().item() <= 0:
continue
# remove self-pair
# remove selected pair from rel_possibility
prp_head_idxs = prp_head_idxs[valid_pair]
prp_tail_idxs = prp_tail_idxs[valid_pair]
rel_possibility[prp_head_idxs, prp_tail_idxs] = 0
# construct corresponding proposal triplets corresponding to i_th gt relation
fg_labels = torch.tensor([tgt_rel_lab] * prp_tail_idxs.shape[0],
dtype=torch.int64,
device=device).view(-1, 1)
fg_rel_i = torch.cat((prp_head_idxs.view(
-1, 1), prp_tail_idxs.view(-1, 1), fg_labels),
dim=-1).to(torch.int64)
if tgt_key_rel_lab is not None:
fg_key_labels = torch.tensor([tgt_key_rel_lab] *
prp_tail_idxs.shape[0],
dtype=torch.int64,
device=device).view(-1, 1)
fg_rel_i = torch.cat((fg_rel_i, fg_key_labels), dim=-1)
            # if one gt relation triplet matches too many proposal pairs, subsample them
            # NOTE that in the original motifs code, the selection is based on an IoU-product score
if fg_rel_i.shape[0] > self.num_sample_per_gt_rel:
ious_score = (ious[tgt_head_idx, prp_head_idxs] *
ious[tgt_tail_idx, prp_tail_idxs]
).view(-1).detach().cpu().numpy()
ious_score = ious_score / ious_score.sum()
perm = npr.choice(ious_score.shape[0],
p=ious_score,
size=self.num_sample_per_gt_rel,
replace=False)
fg_rel_i = fg_rel_i[perm]
if fg_rel_i.shape[0] > 0:
fg_rel_triplets.append(fg_rel_i)
# select fg relations
if len(fg_rel_triplets) == 0:
col = 4 if self.key_sample else 3
fg_rel_triplets = torch.zeros((0, col),
dtype=torch.int64,
device=device)
else:
fg_rel_triplets = torch.cat(fg_rel_triplets, dim=0).to(torch.int64)
if fg_rel_triplets.shape[0] > self.num_pos_per_img:
perm = torch.randperm(fg_rel_triplets.shape[0],
device=device)[:self.num_pos_per_img]
fg_rel_triplets = fg_rel_triplets[perm]
# select bg relations
bg_rel_inds = torch.nonzero(rel_possibility > 0).view(-1, 2)
bg_rel_labs = torch.zeros(bg_rel_inds.shape[0],
dtype=torch.int64,
device=device)
bg_rel_triplets = torch.cat((bg_rel_inds, bg_rel_labs.view(-1, 1)),
dim=-1).to(torch.int64)
if self.key_sample:
            # one -1 (ignore) key label per bg pair, matching the last column of fg_rel_triplets
            bg_key_labels = torch.full((bg_rel_inds.shape[0], 1),
                                       -1,
                                       dtype=torch.int64,
                                       device=device)
bg_rel_triplets = torch.cat((bg_rel_triplets, bg_key_labels),
dim=-1)
num_neg_per_img = min(
self.num_rel_per_image - fg_rel_triplets.shape[0],
bg_rel_triplets.shape[0])
if bg_rel_triplets.shape[0] > 0:
perm = torch.randperm(bg_rel_triplets.shape[0],
device=device)[:num_neg_per_img]
bg_rel_triplets = bg_rel_triplets[perm]
else:
bg_rel_triplets = torch.zeros((0, 4 if self.key_sample else 3),
dtype=torch.int64,
device=device)
# if both fg and bg is none
if fg_rel_triplets.shape[0] == 0 and bg_rel_triplets.shape[0] == 0:
col = 4 if self.key_sample else 3
bg_rel_triplets = torch.zeros((1, col),
dtype=torch.int64,
device=device)
if col == 4:
bg_rel_triplets[0, -1] = -1
return torch.cat((fg_rel_triplets, bg_rel_triplets), dim=0), binary_rel
| 18,035 | 46.968085 | 120 | py |
OpenPSG | OpenPSG-main/openpsg/models/relation_heads/approaches/motif.py | # ---------------------------------------------------------------
# motif.py
# Set-up time: 2020/5/4 4:31 PM
# Copyright (c) 2020 ICT
# Licensed under The MIT License [see LICENSE for details]
# Written by Kenneth-Wong (Wenbin-Wang) @ VIPL.ICT
# Contact: wenbin.wang@vipl.ict.ac.cn [OR] nkwangwenbin@gmail.com
# ---------------------------------------------------------------
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
import torch
from mmcv.cnn import kaiming_init
from torch import nn
from torch.nn import functional as F
from torch.nn.utils.rnn import PackedSequence
from .motif_util import (block_orthogonal, center_x, encode_box_info,
get_dropout_mask, obj_edge_vectors, sort_by_score,
to_onehot)
class FrequencyBias(nn.Module):
"""The goal of this is to provide a simplified way of computing
P(predicate.
| obj1, obj2, img).
"""
def __init__(self, cfg, statistics, eps=1e-3):
super(FrequencyBias, self).__init__()
pred_dist = statistics['pred_dist'].float()
assert pred_dist.size(0) == pred_dist.size(1)
self.num_objs = pred_dist.size(0)
self.num_rels = pred_dist.size(2)
pred_dist = pred_dist.view(-1, self.num_rels)
self.obj_baseline = nn.Embedding(self.num_objs * self.num_objs,
self.num_rels)
with torch.no_grad():
self.obj_baseline.weight.copy_(pred_dist, non_blocking=True)
def index_with_labels(self, labels):
"""
:param labels: [batch_size, 2]
:return:
"""
ret = self.obj_baseline(labels[:, 0] * self.num_objs + labels[:, 1])
if ret.isnan().any():
print('motif: nan')
return ret
def index_with_probability(self, pair_prob):
"""
:param labels: [batch_size, num_obj, 2]
:return:
"""
batch_size, num_obj, _ = pair_prob.shape
joint_prob = pair_prob[:, :, 0].contiguous().view(
batch_size, num_obj, 1) * pair_prob[:, :, 1].contiguous().view(
batch_size, 1, num_obj)
return joint_prob.view(batch_size,
num_obj * num_obj) @ self.obj_baseline.weight
def forward(self, labels):
# implement through index_with_labels
return self.index_with_labels(labels)
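# Illustrative use of FrequencyBias (hypothetical sizes: num_objs=151 and
# num_rels=51 are example values only; `cfg` and `statistics` are whatever the
# caller passes in):
#   >>> freq_bias = FrequencyBias(cfg, statistics)
#   >>> bias = freq_bias(torch.tensor([[5, 7]]))  # indexes row 5 * 151 + 7
#   >>> bias.shape                                # torch.Size([1, 51])
# Each (subject, object) label pair selects one row of the learned obj_baseline
# embedding as a per-predicate bias.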
class DecoderRNN(nn.Module):
def __init__(self, config, obj_classes, embed_dim, inputs_dim, hidden_dim,
rnn_drop):
super(DecoderRNN, self).__init__()
self.cfg = config
self.obj_classes = obj_classes
self.embed_dim = embed_dim
obj_embed_vecs = obj_edge_vectors(['start'] + self.obj_classes,
wv_dir=self.cfg.glove_dir,
wv_dim=embed_dim)
self.obj_embed = nn.Embedding(len(self.obj_classes) + 1, embed_dim)
with torch.no_grad():
self.obj_embed.weight.copy_(obj_embed_vecs, non_blocking=True)
self.hidden_size = hidden_dim
self.inputs_dim = inputs_dim
self.input_size = self.inputs_dim + self.embed_dim
self.nms_thresh = 0.5
self.rnn_drop = rnn_drop
self.input_linearity = torch.nn.Linear(self.input_size,
6 * self.hidden_size,
bias=True)
self.state_linearity = torch.nn.Linear(self.hidden_size,
5 * self.hidden_size,
bias=True)
self.out_obj = nn.Linear(self.hidden_size, len(self.obj_classes))
def init_weights(self):
# Use sensible default initializations for parameters.
block_orthogonal(self.input_linearity.weight.data,
[self.hidden_size, self.input_size])
block_orthogonal(self.state_linearity.weight.data,
[self.hidden_size, self.hidden_size])
self.state_linearity.bias.data.fill_(0.0)
# Initialize forget gate biases to 1.0 as per An Empirical
# Exploration of Recurrent Network Architectures, (Jozefowicz, 2015).
self.state_linearity.bias.data[self.hidden_size:2 *
self.hidden_size].fill_(1.0)
self.input_linearity.bias.data.fill_(0.0)
self.input_linearity.bias.data[self.hidden_size:2 *
self.hidden_size].fill_(1.0)
def lstm_equations(self,
timestep_input,
previous_state,
previous_memory,
dropout_mask=None):
"""Does the hairy LSTM math.
:param timestep_input:
:param previous_state:
:param previous_memory:
:param dropout_mask:
:return:
"""
# Do the projections for all the gates all at once.
projected_input = self.input_linearity(timestep_input)
projected_state = self.state_linearity(previous_state)
# Main LSTM equations using relevant chunks of the big linear
# projections of the hidden state and inputs.
input_gate = torch.sigmoid(
projected_input[:, 0 * self.hidden_size:1 * self.hidden_size] +
projected_state[:, 0 * self.hidden_size:1 * self.hidden_size])
forget_gate = torch.sigmoid(
projected_input[:, 1 * self.hidden_size:2 * self.hidden_size] +
projected_state[:, 1 * self.hidden_size:2 * self.hidden_size])
memory_init = torch.tanh(
projected_input[:, 2 * self.hidden_size:3 * self.hidden_size] +
projected_state[:, 2 * self.hidden_size:3 * self.hidden_size])
output_gate = torch.sigmoid(
projected_input[:, 3 * self.hidden_size:4 * self.hidden_size] +
projected_state[:, 3 * self.hidden_size:4 * self.hidden_size])
memory = input_gate * memory_init + forget_gate * previous_memory
timestep_output = output_gate * torch.tanh(memory)
highway_gate = torch.sigmoid(
projected_input[:, 4 * self.hidden_size:5 * self.hidden_size] +
projected_state[:, 4 * self.hidden_size:5 * self.hidden_size])
highway_input_projection = projected_input[:, 5 * self.hidden_size:6 *
self.hidden_size]
timestep_output = highway_gate * timestep_output + (
1 - highway_gate) * highway_input_projection
# Only do dropout if the dropout prob is > 0.0 and we are in training mode.
if dropout_mask is not None and self.training:
timestep_output = timestep_output * dropout_mask
return timestep_output, memory
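    # For reference, the chunk layout assumed by the slicing above:
    # input_linearity produces [input gate, forget gate, memory init,
    # output gate, highway gate, highway projection] and state_linearity
    # produces the first five of these, each of width hidden_size.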
def forward(self,
inputs,
initial_state=None,
labels=None,
boxes_for_nms=None):
if not isinstance(inputs, PackedSequence):
raise ValueError('inputs must be PackedSequence but got %s' %
(type(inputs)))
assert isinstance(inputs, PackedSequence)
sequence_tensor, batch_lengths, _, _ = inputs
batch_size = batch_lengths[0]
# We're just doing an LSTM decoder here so ignore states, etc
if initial_state is None:
previous_memory = sequence_tensor.new().resize_(
batch_size, self.hidden_size).fill_(0)
previous_state = sequence_tensor.new().resize_(
batch_size, self.hidden_size).fill_(0)
else:
assert len(initial_state) == 2
previous_memory = initial_state[1].squeeze(0)
previous_state = initial_state[0].squeeze(0)
previous_obj_embed = self.obj_embed.weight[0, None].expand(
batch_size, self.embed_dim)
if self.rnn_drop > 0.0:
dropout_mask = get_dropout_mask(self.rnn_drop,
previous_memory.size(),
previous_memory.device)
else:
dropout_mask = None
# Only accumulating label predictions here, discarding everything else
out_dists = []
out_commitments = []
end_ind = 0
for i, l_batch in enumerate(batch_lengths):
start_ind = end_ind
end_ind = end_ind + l_batch
if previous_memory.size(0) != l_batch:
previous_memory = previous_memory[:l_batch]
previous_state = previous_state[:l_batch]
previous_obj_embed = previous_obj_embed[:l_batch]
if dropout_mask is not None:
dropout_mask = dropout_mask[:l_batch]
timestep_input = torch.cat(
(sequence_tensor[start_ind:end_ind], previous_obj_embed), 1)
previous_state, previous_memory = self.lstm_equations(
timestep_input,
previous_state,
previous_memory,
dropout_mask=dropout_mask)
pred_dist = self.out_obj(previous_state)
out_dists.append(pred_dist)
if self.training:
labels_to_embed = labels[start_ind:end_ind].clone()
# Whenever labels are 0 set input to be our max prediction
nonzero_pred = pred_dist[:, 1:].max(1)[1] + 1
is_bg = (labels_to_embed == 0).nonzero()
if is_bg.dim() > 0:
labels_to_embed[is_bg.squeeze(1)] = nonzero_pred[
is_bg.squeeze(1)]
out_commitments.append(labels_to_embed)
previous_obj_embed = self.obj_embed(labels_to_embed + 1)
else:
# assert l_batch == 1
out_dist_sample = F.softmax(pred_dist, dim=1)
best_ind = out_dist_sample[:, 1:].max(1)[1] + 1
out_commitments.append(best_ind)
previous_obj_embed = self.obj_embed(best_ind + 1)
out_commitments = torch.cat(out_commitments, 0)
return torch.cat(out_dists, 0), out_commitments
class LSTMContext(nn.Module):
"""Modified from neural-motifs to encode contexts for each objects."""
def __init__(self, config, obj_classes, rel_classes):
super(LSTMContext, self).__init__()
self.cfg = config
self.obj_classes = obj_classes
self.rel_classes = rel_classes
self.num_obj_classes = len(obj_classes)
in_channels = self.cfg.roi_dim
self.use_gt_box = self.cfg.use_gt_box
self.use_gt_label = self.cfg.use_gt_label
# mode
if self.cfg.use_gt_box:
if self.cfg.use_gt_label:
self.mode = 'predcls'
else:
self.mode = 'sgcls'
else:
self.mode = 'sgdet'
# word embedding
self.embed_dim = self.cfg.embed_dim
self.obj_embed1 = nn.Embedding(self.num_obj_classes, self.embed_dim)
self.obj_embed2 = nn.Embedding(self.num_obj_classes, self.embed_dim)
obj_embed_vecs = obj_edge_vectors(self.obj_classes,
wv_dir=self.cfg.glove_dir,
wv_dim=self.embed_dim)
with torch.no_grad():
self.obj_embed1.weight.copy_(obj_embed_vecs, non_blocking=True)
self.obj_embed2.weight.copy_(obj_embed_vecs, non_blocking=True)
# position embedding
self.pos_embed = nn.Sequential(*[
nn.Linear(9, 32),
nn.BatchNorm1d(32, momentum=0.001),
nn.Linear(32, 128),
nn.ReLU(inplace=True),
])
# object & relation context
self.obj_dim = in_channels
self.dropout_rate = self.cfg.dropout_rate
self.hidden_dim = self.cfg.hidden_dim
self.nl_obj = self.cfg.context_object_layer
self.nl_edge = self.cfg.context_edge_layer
assert self.nl_obj > 0 and self.nl_edge > 0
# TODO
# AlternatingHighwayLSTM is invalid for pytorch 1.0
self.obj_ctx_rnn = torch.nn.LSTM(
input_size=self.obj_dim + self.embed_dim + 128,
hidden_size=self.hidden_dim,
num_layers=self.nl_obj,
dropout=self.dropout_rate if self.nl_obj > 1 else 0,
bidirectional=True)
self.decoder_rnn = DecoderRNN(self.cfg,
self.obj_classes,
embed_dim=self.embed_dim,
inputs_dim=self.hidden_dim +
self.obj_dim + self.embed_dim + 128,
hidden_dim=self.hidden_dim,
rnn_drop=self.dropout_rate)
self.edge_ctx_rnn = torch.nn.LSTM(
input_size=self.embed_dim + self.hidden_dim + self.obj_dim,
hidden_size=self.hidden_dim,
num_layers=self.nl_edge,
dropout=self.dropout_rate if self.nl_edge > 1 else 0,
bidirectional=True)
# map bidirectional hidden states of dimension self.hidden_dim*2 to self.hidden_dim
self.lin_obj_h = nn.Linear(self.hidden_dim * 2, self.hidden_dim)
self.lin_edge_h = nn.Linear(self.hidden_dim * 2, self.hidden_dim)
# untreated average features
self.average_ratio = 0.0005
self.effect_analysis = self.cfg.causal_effect_analysis
if self.effect_analysis:
self.register_buffer(
'untreated_dcd_feat',
torch.zeros(self.hidden_dim + self.obj_dim + self.embed_dim +
128))
self.register_buffer(
'untreated_obj_feat',
torch.zeros(self.obj_dim + self.embed_dim + 128))
self.register_buffer('untreated_edg_feat',
torch.zeros(self.embed_dim + self.obj_dim))
def init_weights(self):
self.decoder_rnn.init_weights()
for m in self.pos_embed:
if isinstance(m, nn.Linear):
kaiming_init(m, distribution='uniform', a=1)
kaiming_init(self.lin_obj_h, distribution='uniform', a=1)
kaiming_init(self.lin_edge_h, distribution='uniform', a=1)
def sort_rois(self, det_result):
c_x = center_x(det_result)
        # left-to-right order by box center x
scores = c_x / (c_x.max() + 1)
return sort_by_score(det_result, scores)
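    # Note: c_x / (c_x.max() + 1) maps the box centers into [0, 1), so
    # sort_by_score effectively orders the objects left-to-right before they
    # are fed to the LSTMs; the returned inv_perm restores the original order.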
def obj_ctx(self,
obj_feats,
det_result,
obj_labels=None,
ctx_average=False):
"""Object context and object classification.
:param obj_feats: [num_obj, img_dim + object embedding0 dim]
:param obj_labels: [num_obj] the GT labels of the image
:param box_priors: [num_obj, 4] boxes. We'll use this for NMS
:param boxes_per_cls
:return: obj_dists: [num_obj, #classes] new probability distribution.
obj_preds: argmax of that distribution.
obj_final_ctx: [num_obj, #feats] For later!
"""
# Sort by the confidence of the maximum detection.
perm, inv_perm, ls_transposed = self.sort_rois(det_result)
# Pass object features, sorted by score, into the encoder LSTM
obj_inp_rep = obj_feats[perm].contiguous()
input_packed = PackedSequence(obj_inp_rep, ls_transposed)
encoder_rep = self.obj_ctx_rnn(input_packed)[0][0]
encoder_rep = self.lin_obj_h(encoder_rep) # map to hidden_dim
# untreated decoder input
batch_size = encoder_rep.shape[0]
if (not self.training) and self.effect_analysis and ctx_average:
decoder_inp = self.untreated_dcd_feat.view(1, -1).expand(
batch_size, -1)
else:
decoder_inp = torch.cat((obj_inp_rep, encoder_rep), 1)
if self.training and self.effect_analysis:
self.untreated_dcd_feat = self.moving_average(
self.untreated_dcd_feat, decoder_inp)
# Decode in order
if self.mode != 'predcls':
decoder_inp = PackedSequence(decoder_inp, ls_transposed)
obj_dists, obj_preds = self.decoder_rnn(
decoder_inp, # obj_dists[perm],
labels=obj_labels[perm] if obj_labels is not None else None)
obj_preds = obj_preds[inv_perm]
obj_dists = obj_dists[inv_perm]
else:
assert obj_labels is not None
obj_preds = obj_labels
obj_dists = to_onehot(obj_preds, self.num_obj_classes)
encoder_rep = encoder_rep[inv_perm]
return obj_dists, obj_preds, encoder_rep, perm, inv_perm, ls_transposed
def edge_ctx(self, inp_feats, perm, inv_perm, ls_transposed):
"""Object context and object classification.
:param obj_feats: [num_obj, img_dim + object embedding0 dim]
:return: edge_ctx: [num_obj, #feats] For later!
"""
edge_input_packed = PackedSequence(inp_feats[perm], ls_transposed)
edge_reps = self.edge_ctx_rnn(edge_input_packed)[0][0]
edge_reps = self.lin_edge_h(edge_reps) # map to hidden_dim
edge_ctx = edge_reps[inv_perm]
return edge_ctx
def moving_average(self, holder, input):
assert len(input.shape) == 2
with torch.no_grad():
holder = holder * (1 - self.average_ratio
) + self.average_ratio * input.mean(0).view(-1)
return holder
def forward(self, x, det_result, all_average=False, ctx_average=False):
# labels will be used in DecoderRNN during training (for nms)
if self.training or self.use_gt_box: # predcls or sgcls or training, just put obj_labels here
obj_labels = torch.cat(det_result.labels)
else:
obj_labels = None
if self.use_gt_label: # predcls
obj_embed = self.obj_embed1(obj_labels.long())
else:
obj_dists = torch.cat(det_result.dists, dim=0).detach()
obj_embed = obj_dists @ self.obj_embed1.weight
pos_embed = self.pos_embed(encode_box_info(det_result)) # N x 128
batch_size = x.shape[0]
if all_average and self.effect_analysis and (
not self.training): # TDE: only in test mode
obj_pre_rep = self.untreated_obj_feat.view(1, -1).expand(
batch_size, -1)
else:
obj_pre_rep = torch.cat((x, obj_embed, pos_embed),
-1) # N x (1024 + 200 + 128)
# object level contextual feature
obj_dists, obj_preds, obj_ctx, perm, inv_perm, ls_transposed = self.obj_ctx(
obj_pre_rep, det_result, obj_labels, ctx_average=ctx_average)
# edge level contextual feature
obj_embed2 = self.obj_embed2(obj_preds.long())
if (all_average or ctx_average) and self.effect_analysis and (
not self.training): # TDE: Testing
obj_rel_rep = torch.cat((self.untreated_edg_feat.view(
1, -1).expand(batch_size, -1), obj_ctx),
dim=-1)
else:
obj_rel_rep = torch.cat((obj_embed2, x, obj_ctx), -1)
edge_ctx = self.edge_ctx(obj_rel_rep,
perm=perm,
inv_perm=inv_perm,
ls_transposed=ls_transposed)
# memorize average feature
if self.training and self.effect_analysis:
self.untreated_obj_feat = self.moving_average(
self.untreated_obj_feat, obj_pre_rep)
self.untreated_edg_feat = self.moving_average(
self.untreated_edg_feat, torch.cat((obj_embed2, x), -1))
return obj_dists, obj_preds, edge_ctx, None
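    # Rough shape flow through forward(), using the dims noted in the comments
    # above (e.g. roi_dim=1024, embed_dim=200; hidden_dim as configured):
    #   obj_pre_rep = cat(x, obj_embed, pos_embed)          -> N x (1024+200+128)
    #   obj_ctx_rnn (bi-LSTM) + lin_obj_h                   -> N x hidden_dim
    #   decoder_rnn on cat(obj_pre_rep, obj_ctx)            -> obj_dists, obj_preds
    #   edge_ctx_rnn on cat(obj_embed2, x, obj_ctx) + lin_edge_h -> N x hidden_dim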
| 19,918 | 41.112051 | 102 | py |
OpenPSG | OpenPSG-main/openpsg/models/relation_heads/approaches/__init__.py | from .dmp import DirectionAwareMessagePassing
from .imp import IMPContext
from .motif import FrequencyBias, LSTMContext
from .pointnet import PointNetFeat
from .relation_ranker import get_weak_key_rel_labels
from .relation_util import PostProcessor, Result
from .sampling import RelationSampler
from .vctree import VCTreeLSTMContext
| 333 | 36.111111 | 52 | py |
OpenPSG | OpenPSG-main/openpsg/models/relation_heads/approaches/treelstm_util.py | # ---------------------------------------------------------------
# treelstm_util.py
# Set-up time: 2020/6/4 4:42 PM
# Copyright (c) 2020 ICT
# Licensed under The MIT License [see LICENSE for details]
# Written by Kenneth-Wong (Wenbin-Wang) @ VIPL.ICT
# Contact: wenbin.wang@vipl.ict.ac.cn [OR] nkwangwenbin@gmail.com
# ---------------------------------------------------------------
import torch
import torch.nn as nn
import torch.nn.functional as F
from .motif_util import block_orthogonal, get_dropout_mask
class MultiLayer_BTreeLSTM(nn.Module):
"""Multilayer Bidirectional Tree LSTM Each layer contains one forward
lstm(leaves to root) and one backward lstm(root to leaves)"""
def __init__(self, in_dim, out_dim, num_layer, dropout=0.0):
super(MultiLayer_BTreeLSTM, self).__init__()
self.num_layer = num_layer
layers = []
layers.append(BidirectionalTreeLSTM(in_dim, out_dim, dropout))
for i in range(num_layer - 1):
layers.append(BidirectionalTreeLSTM(out_dim, out_dim, dropout))
self.multi_layer_lstm = nn.ModuleList(layers)
def forward(self, tree, features, num_obj):
for i in range(self.num_layer):
features = self.multi_layer_lstm[i](tree, features, num_obj)
return features
class BidirectionalTreeLSTM(nn.Module):
"""Bidirectional Tree LSTM Contains one forward lstm(leaves to root) and
one backward lstm(root to leaves) Dropout mask will be generated one time
for all trees in the forest, to make sure the consistency."""
def __init__(self, in_dim, out_dim, dropout=0.0):
super(BidirectionalTreeLSTM, self).__init__()
self.dropout = dropout
self.out_dim = out_dim
self.treeLSTM_foreward = OneDirectionalTreeLSTM(
in_dim, int(out_dim / 2), 'foreward', dropout)
self.treeLSTM_backward = OneDirectionalTreeLSTM(
in_dim, int(out_dim / 2), 'backward', dropout)
def forward(self, tree, features, num_obj):
foreward_output = self.treeLSTM_foreward(tree, features, num_obj)
backward_output = self.treeLSTM_backward(tree, features, num_obj)
final_output = torch.cat((foreward_output, backward_output), 1)
return final_output
class OneDirectionalTreeLSTM(nn.Module):
"""
One Way Tree LSTM
direction = forward | backward
"""
def __init__(self, in_dim, out_dim, direction, dropout=0.0):
super(OneDirectionalTreeLSTM, self).__init__()
self.dropout = dropout
self.out_dim = out_dim
if direction == 'foreward':
self.treeLSTM = BiTreeLSTM_Foreward(in_dim, out_dim)
elif direction == 'backward':
self.treeLSTM = BiTreeLSTM_Backward(in_dim, out_dim)
else:
print('Error Tree LSTM Direction')
def forward(self, tree, features, num_obj):
# calc dropout mask, same for all
if self.dropout > 0.0:
dropout_mask = get_dropout_mask(self.dropout, (1, self.out_dim),
features.device)
else:
dropout_mask = None
# tree lstm input
h_order = torch.tensor([0] * num_obj,
device=features.device,
dtype=torch.int64) # used to resume order
lstm_io = TreeLSTM_IO(None, h_order, 0, None, None, dropout_mask)
# run tree lstm forward (leaves to root)
self.treeLSTM(tree, features, lstm_io)
# resume order to the same as input
output = lstm_io.hidden[lstm_io.order.long()]
return output
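# How the ordering bookkeeping above works: each traversal appends hidden
# states to TreeLSTM_IO.hidden in visit order and records, for every object
# index, its position in that sequence via lstm_io.order, so indexing
# hidden[order] restores the original per-image object order regardless of the
# tree shape.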
class BiTreeLSTM_Foreward(nn.Module):
"""From leaves to root."""
def __init__(self,
feat_dim,
h_dim,
is_pass_embed=False,
embed_layer=None,
embed_out_layer=None):
super(BiTreeLSTM_Foreward, self).__init__()
self.feat_dim = feat_dim
self.h_dim = h_dim
self.is_pass_embed = is_pass_embed
self.embed_layer = embed_layer
self.embed_out_layer = embed_out_layer
self.px = nn.Linear(self.feat_dim, self.h_dim)
self.ioffux = nn.Linear(self.feat_dim, 6 * self.h_dim)
self.ioffuh_left = nn.Linear(self.h_dim, 6 * self.h_dim)
self.ioffuh_right = nn.Linear(self.h_dim, 6 * self.h_dim)
# initialization
with torch.no_grad():
block_orthogonal(self.px.weight, [self.h_dim, self.feat_dim])
block_orthogonal(self.ioffux.weight, [self.h_dim, self.feat_dim])
block_orthogonal(self.ioffuh_left.weight, [self.h_dim, self.h_dim])
block_orthogonal(self.ioffuh_right.weight,
[self.h_dim, self.h_dim])
self.px.bias.fill_(0.0)
self.ioffux.bias.fill_(0.0)
self.ioffuh_left.bias.fill_(0.0)
self.ioffuh_right.bias.fill_(0.0)
            # Bias the two forget gates towards remembering, following "An
            # Empirical Exploration of Recurrent Network Architectures"
            # (Jozefowicz, 2015); a value of 0.5 is used here rather than the
            # paper's suggested 1.0.
self.ioffuh_left.bias[2 * self.h_dim:4 * self.h_dim].fill_(0.5)
self.ioffuh_right.bias[2 * self.h_dim:4 * self.h_dim].fill_(0.5)
def node_forward(self, feat_inp, left_c, right_c, left_h, right_h,
dropout_mask):
projected_x = self.px(feat_inp)
ioffu = self.ioffux(feat_inp) + self.ioffuh_left(
left_h) + self.ioffuh_right(right_h)
i, o, f_l, f_r, u, r = torch.split(ioffu, ioffu.size(1) // 6, dim=1)
i, o, f_l, f_r, u, r = torch.sigmoid(i), torch.sigmoid(
o), torch.sigmoid(f_l), torch.sigmoid(f_r), torch.tanh(
u), torch.sigmoid(r)
c = torch.mul(i, u) + torch.mul(f_l, left_c) + torch.mul(f_r, right_c)
h = torch.mul(o, torch.tanh(c))
h_final = torch.mul(r, h) + torch.mul((1 - r), projected_x)
# Only do dropout if the dropout prob is > 0.0 and we are in training mode.
if dropout_mask is not None and self.training:
h_final = torch.mul(h_final, dropout_mask)
return c, h_final
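    # Gate layout used in node_forward above: i = input gate, o = output gate,
    # f_l / f_r = forget gates for the left / right child, u = candidate
    # memory, and r = highway gate that blends the cell output with the
    # projected input features (projected_x).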
def forward(self, tree, features, treelstm_io):
"""
tree: The root for a tree
features: [num_obj, featuresize]
treelstm_io.hidden: init as None, cat until it covers all objects as [num_obj, hidden_size]
treelstm_io.order: init as 0 for all [num_obj], update for recovering original order
"""
# recursively search child
if tree.left_child is not None:
self.forward(tree.left_child, features, treelstm_io)
if tree.right_child is not None:
self.forward(tree.right_child, features, treelstm_io)
# get c,h from left child
if tree.left_child is None:
left_c = torch.tensor([0.0] * self.h_dim,
device=features.device).float().view(1, -1)
left_h = torch.tensor([0.0] * self.h_dim,
device=features.device).float().view(1, -1)
# Only being used in decoder network
if self.is_pass_embed:
left_embed = self.embed_layer.weight[0]
else:
left_c = tree.left_child.state_c
left_h = tree.left_child.state_h
# Only being used in decoder network
if self.is_pass_embed:
left_embed = tree.left_child.embeded_label
# get c,h from right child
if tree.right_child is None:
right_c = torch.tensor([0.0] * self.h_dim,
device=features.device).float().view(1, -1)
right_h = torch.tensor([0.0] * self.h_dim,
device=features.device).float().view(1, -1)
# Only being used in decoder network
if self.is_pass_embed:
right_embed = self.embed_layer.weight[0]
else:
right_c = tree.right_child.state_c
right_h = tree.right_child.state_h
# Only being used in decoder network
if self.is_pass_embed:
right_embed = tree.right_child.embeded_label
# Only being used in decoder network
if self.is_pass_embed:
next_feature = torch.cat((features[tree.index].view(
1, -1), left_embed.view(1, -1), right_embed.view(1, -1)), 1)
else:
next_feature = features[tree.index].view(1, -1)
c, h = self.node_forward(next_feature, left_c, right_c, left_h,
right_h, treelstm_io.dropout_mask)
tree.state_c = c
tree.state_h = h
# record label prediction
# Only being used in decoder network
if self.is_pass_embed:
pass_embed_postprocess(h, self.embed_out_layer, self.embed_layer,
tree, treelstm_io, self.training)
# record hidden state
if treelstm_io.hidden is None:
treelstm_io.hidden = h.view(1, -1)
else:
treelstm_io.hidden = torch.cat((treelstm_io.hidden, h.view(1, -1)),
0)
treelstm_io.order[tree.index] = treelstm_io.order_count
treelstm_io.order_count += 1
return
class BiTreeLSTM_Backward(nn.Module):
"""from root to leaves."""
def __init__(self,
feat_dim,
h_dim,
is_pass_embed=False,
embed_layer=None,
embed_out_layer=None):
super(BiTreeLSTM_Backward, self).__init__()
self.feat_dim = feat_dim
self.h_dim = h_dim
self.is_pass_embed = is_pass_embed
self.embed_layer = embed_layer
self.embed_out_layer = embed_out_layer
self.px = nn.Linear(self.feat_dim, self.h_dim)
self.iofux = nn.Linear(self.feat_dim, 5 * self.h_dim)
self.iofuh = nn.Linear(self.h_dim, 5 * self.h_dim)
# initialization
with torch.no_grad():
block_orthogonal(self.px.weight, [self.h_dim, self.feat_dim])
block_orthogonal(self.iofux.weight, [self.h_dim, self.feat_dim])
block_orthogonal(self.iofuh.weight, [self.h_dim, self.h_dim])
self.px.bias.fill_(0.0)
self.iofux.bias.fill_(0.0)
self.iofuh.bias.fill_(0.0)
# Initialize forget gate biases to 1.0 as per An Empirical
# Exploration of Recurrent Network Architectures, (Jozefowicz, 2015).
self.iofuh.bias[2 * self.h_dim:3 * self.h_dim].fill_(1.0)
def node_backward(self, feat_inp, root_c, root_h, dropout_mask):
projected_x = self.px(feat_inp)
iofu = self.iofux(feat_inp) + self.iofuh(root_h)
i, o, f, u, r = torch.split(iofu, iofu.size(1) // 5, dim=1)
i, o, f, u, r = torch.sigmoid(i), torch.sigmoid(o), torch.sigmoid(
f), torch.tanh(u), torch.sigmoid(r)
c = torch.mul(i, u) + torch.mul(f, root_c)
h = torch.mul(o, torch.tanh(c))
h_final = torch.mul(r, h) + torch.mul((1 - r), projected_x)
# Only do dropout if the dropout prob is > 0.0 and we are in training mode.
if dropout_mask is not None and self.training:
h_final = torch.mul(h_final, dropout_mask)
return c, h_final
def forward(self, tree, features, treelstm_io):
"""
tree: The root for a tree
features: [num_obj, featuresize]
treelstm_io.hidden: init as None, cat until it covers all objects as [num_obj, hidden_size]
treelstm_io.order: init as 0 for all [num_obj], update for recovering original order
"""
if tree.parent is None:
root_c = torch.tensor([0.0] * self.h_dim,
device=features.device).float().view(1, -1)
root_h = torch.tensor([0.0] * self.h_dim,
device=features.device).float().view(1, -1)
if self.is_pass_embed:
root_embed = self.embed_layer.weight[0]
else:
root_c = tree.parent.state_c_backward
root_h = tree.parent.state_h_backward
if self.is_pass_embed:
root_embed = tree.parent.embeded_label
if self.is_pass_embed:
next_features = torch.cat(
(features[tree.index].view(1, -1), root_embed.view(1, -1)), 1)
else:
next_features = features[tree.index].view(1, -1)
c, h = self.node_backward(next_features, root_c, root_h,
treelstm_io.dropout_mask)
tree.state_c_backward = c
tree.state_h_backward = h
# record label prediction
# Only being used in decoder network
if self.is_pass_embed:
pass_embed_postprocess(h, self.embed_out_layer, self.embed_layer,
tree, treelstm_io, self.training)
# record hidden state
if treelstm_io.hidden is None:
treelstm_io.hidden = h.view(1, -1)
else:
treelstm_io.hidden = torch.cat((treelstm_io.hidden, h.view(1, -1)),
0)
treelstm_io.order[tree.index] = treelstm_io.order_count
treelstm_io.order_count += 1
# recursively update from root to leaves
if tree.left_child is not None:
self.forward(tree.left_child, features, treelstm_io)
if tree.right_child is not None:
self.forward(tree.right_child, features, treelstm_io)
return
def pass_embed_postprocess(h, embed_out_layer, embed_layer, tree, treelstm_io,
is_training):
"""Calculate districution and predict/sample labels Add to lstm_IO."""
pred_dist = embed_out_layer(h)
label_to_embed = F.softmax(pred_dist.view(-1), 0)[1:].max(0)[1] + 1
if is_training:
sampled_label = F.softmax(pred_dist.view(-1),
0)[1:].multinomial(1).detach() + 1
tree.embeded_label = embed_layer(sampled_label + 1)
else:
tree.embeded_label = embed_layer(label_to_embed + 1)
if treelstm_io.dists is None:
treelstm_io.dists = pred_dist.view(1, -1)
else:
treelstm_io.dists = torch.cat(
(treelstm_io.dists, pred_dist.view(1, -1)), 0)
if treelstm_io.commitments is None:
treelstm_io.commitments = label_to_embed.view(-1)
else:
treelstm_io.commitments = torch.cat(
(treelstm_io.commitments, label_to_embed.view(-1)), 0)
class TreeLSTM_IO(object):
def __init__(self, hidden_tensor, order_tensor, order_count, dists_tensor,
commitments_tensor, dropout_mask):
self.hidden = hidden_tensor # Float tensor [num_obj, self.out_dim]
self.order = order_tensor # Long tensor [num_obj]
self.order_count = order_count # int
self.dists = dists_tensor # FLoat tensor [num_obj, len(self.classes)]
self.commitments = commitments_tensor
self.dropout_mask = dropout_mask
| 15,090 | 41.271709 | 99 | py |
OpenPSG | OpenPSG-main/openpsg/models/relation_heads/approaches/vctree.py | # ---------------------------------------------------------------
# vctree.py
# Set-up time: 2020/6/4 10:22 AM
# Copyright (c) 2020 ICT
# Licensed under The MIT License [see LICENSE for details]
# Written by Kenneth-Wong (Wenbin-Wang) @ VIPL.ICT
# Contact: wenbin.wang@vipl.ict.ac.cn [OR] nkwangwenbin@gmail.com
# ---------------------------------------------------------------
import torch
from mmcv.cnn import xavier_init
from torch import nn
from torch.nn import functional as F
from .motif_util import (encode_box_info, get_dropout_mask, obj_edge_vectors,
to_onehot)
from .treelstm_util import (BiTreeLSTM_Backward, BiTreeLSTM_Foreward,
MultiLayer_BTreeLSTM, TreeLSTM_IO)
from .vctree_util import (arbForest_to_biForest, generate_forest,
get_overlap_info)
class DecoderTreeLSTM(nn.Module):
def __init__(self,
cfg,
classes,
embed_dim,
inputs_dim,
hidden_dim,
direction='backward',
dropout=0.2):
super(DecoderTreeLSTM, self).__init__()
"""
Initializes the RNN
:param embed_dim: Dimension of the embeddings
:param encoder_hidden_dim: Hidden dim of the encoder, for attention
:param hidden_dim: Hidden dim of the decoder
:param vocab_size: Number of words in the vocab
:param bos_token: To use during decoding (non teacher forcing mode))
:param bos: beginning of sentence token
:param unk: unknown token (not used)
direction = forward | backward
"""
self.cfg = cfg
self.classes = classes
self.hidden_size = hidden_dim
self.inputs_dim = inputs_dim
self.nms_thresh = 0.5
self.dropout = dropout
# generate embed layer
embed_vecs = obj_edge_vectors(['start'] + self.classes,
wv_dir=self.cfg.glove_dir,
wv_dim=embed_dim)
self.obj_embed = nn.Embedding(len(self.classes) + 1, embed_dim)
with torch.no_grad():
self.obj_embed.weight.copy_(embed_vecs, non_blocking=True)
# generate out layer
self.out = nn.Linear(self.hidden_size, len(self.classes))
if direction == 'backward':
self.input_size = inputs_dim + embed_dim
self.decoderLSTM = BiTreeLSTM_Backward(self.input_size,
self.hidden_size,
is_pass_embed=True,
embed_layer=self.obj_embed,
embed_out_layer=self.out)
elif direction == 'foreward':
self.input_size = inputs_dim + embed_dim * 2
self.decoderLSTM = BiTreeLSTM_Foreward(self.input_size,
self.hidden_size,
is_pass_embed=True,
embed_layer=self.obj_embed,
embed_out_layer=self.out)
else:
print('Error Decoder LSTM Direction')
def forward(self, tree, features, num_obj):
# generate dropout
if self.dropout > 0.0:
dropout_mask = get_dropout_mask(self.dropout,
(1, self.hidden_size),
features.device)
else:
dropout_mask = None
# generate tree lstm input/output class
h_order = torch.tensor([0] * num_obj, device=features.device)
lstm_io = TreeLSTM_IO(None, h_order, 0, None, None, dropout_mask)
self.decoderLSTM(tree, features, lstm_io)
out_h = lstm_io.hidden[lstm_io.order.long()]
out_dists = lstm_io.dists[lstm_io.order.long()]
out_commitments = lstm_io.commitments[lstm_io.order.long()]
return out_dists, out_commitments
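# In short: the decoder tree LSTM visits every node, and the attached
# embed_out_layer writes a class distribution (lstm_io.dists) and a committed
# label (lstm_io.commitments) per node; both are re-indexed by lstm_io.order so
# the returned tensors follow the original object order.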
class VCTreeLSTMContext(nn.Module):
"""Modified from neural-motifs to encode contexts for each objects."""
def __init__(self, config, obj_classes, rel_classes):
super(VCTreeLSTMContext, self).__init__()
self.cfg = config
self.obj_classes = obj_classes
self.rel_classes = rel_classes
self.num_obj_classes = len(obj_classes)
in_channels = self.cfg.roi_dim
self.use_gt_box = self.cfg.use_gt_box
self.use_gt_label = self.cfg.use_gt_label
# mode
if self.cfg.use_gt_box:
if self.cfg.use_gt_label:
self.mode = 'predcls'
else:
self.mode = 'sgcls'
else:
self.mode = 'sgdet'
# word embedding
self.embed_dim = self.cfg.embed_dim
obj_embed_vecs = obj_edge_vectors(self.obj_classes,
wv_dir=self.cfg.glove_dir,
wv_dim=self.embed_dim)
self.obj_embed1 = nn.Embedding(self.num_obj_classes, self.embed_dim)
self.obj_embed2 = nn.Embedding(self.num_obj_classes, self.embed_dim)
with torch.no_grad():
self.obj_embed1.weight.copy_(obj_embed_vecs, non_blocking=True)
self.obj_embed2.weight.copy_(obj_embed_vecs, non_blocking=True)
# position embedding
self.pos_embed = nn.Sequential(*[
nn.Linear(9, 32),
nn.BatchNorm1d(32, momentum=0.001),
nn.Linear(32, 128),
nn.ReLU(inplace=True),
])
# overlap embedding
self.overlap_embed = nn.Sequential(*[
nn.Linear(6, 128),
nn.BatchNorm1d(128, momentum=0.001),
nn.ReLU(inplace=True),
])
# box embed
self.box_embed = nn.Sequential(*[
nn.Linear(9, 128),
nn.BatchNorm1d(128, momentum=0.001),
nn.ReLU(inplace=True),
])
# object & relation context
self.obj_dim = in_channels
self.dropout_rate = self.cfg.dropout_rate
self.hidden_dim = self.cfg.hidden_dim
self.nl_obj = self.cfg.context_object_layer
self.nl_edge = self.cfg.context_edge_layer
assert self.nl_obj > 0 and self.nl_edge > 0
self.obj_reduce = nn.Linear(self.obj_dim, 128)
self.emb_reduce = nn.Linear(self.embed_dim, 128)
self.score_pre = nn.Linear(128 * 4, self.hidden_dim)
self.score_sub = nn.Linear(self.hidden_dim, self.hidden_dim)
self.score_obj = nn.Linear(self.hidden_dim, self.hidden_dim)
self.vision_prior = nn.Linear(self.hidden_dim * 3, 1)
self.obj_ctx_rnn = MultiLayer_BTreeLSTM(
in_dim=self.obj_dim + self.embed_dim + 128,
out_dim=self.hidden_dim,
num_layer=self.nl_obj,
dropout=self.dropout_rate if self.nl_obj > 1 else 0)
self.decoder_rnn = DecoderTreeLSTM(self.cfg,
self.obj_classes,
embed_dim=self.embed_dim,
inputs_dim=self.hidden_dim +
self.obj_dim + self.embed_dim + 128,
hidden_dim=self.hidden_dim,
dropout=self.dropout_rate)
self.edge_ctx_rnn = MultiLayer_BTreeLSTM(
in_dim=self.embed_dim + self.hidden_dim + self.obj_dim,
out_dim=self.hidden_dim,
num_layer=self.nl_edge,
dropout=self.dropout_rate if self.nl_edge > 1 else 0,
)
# untreated average features
self.average_ratio = 0.0005
self.effect_analysis = self.cfg.causal_effect_analysis
if self.effect_analysis:
self.register_buffer(
'untreated_dcd_feat',
torch.zeros(self.hidden_dim + self.obj_dim + self.embed_dim +
128))
self.register_buffer(
'untreated_obj_feat',
torch.zeros(self.obj_dim + self.embed_dim + 128))
self.register_buffer('untreated_edg_feat',
torch.zeros(self.embed_dim + self.obj_dim))
def init_weights(self):
for module in [self.pos_embed, self.overlap_embed, self.box_embed]:
for m in module:
if isinstance(m, nn.Linear):
xavier_init(m)
xavier_init(self.obj_reduce)
xavier_init(self.emb_reduce)
xavier_init(self.score_pre)
xavier_init(self.score_sub)
xavier_init(self.score_obj)
xavier_init(self.vision_prior)
def obj_ctx(self,
num_objs,
obj_feats,
obj_labels=None,
vc_forest=None,
ctx_average=False):
"""Object context and object classification.
:param num_objs:
:param obj_feats: [num_obj, img_dim + object embedding0 dim]
:param det_result:
:param vc_forest:
:param: ctx_average:
:param obj_labels: [num_obj] the GT labels of the image
:return: obj_dists: [num_obj, #classes] new probability distribution.
obj_preds: argmax of that distribution.
obj_final_ctx: [num_obj, #feats] For later!
"""
obj_feats = obj_feats.split(num_objs, dim=0)
obj_labels = obj_labels.split(
num_objs, dim=0) if obj_labels is not None else None
obj_ctxs = []
obj_preds = []
obj_dists = []
for i, (feat, tree) in enumerate(zip(obj_feats, vc_forest)):
encod_rep = self.obj_ctx_rnn(tree, feat, num_objs[i])
obj_ctxs.append(encod_rep)
# Decode in order
if self.mode != 'predcls':
if (not self.training
) and self.effect_analysis and ctx_average:
decoder_inp = self.untreated_dcd_feat.view(1, -1).expand(
encod_rep.shape[0], -1)
else:
decoder_inp = torch.cat((feat, encod_rep), 1)
if self.training and self.effect_analysis:
self.untreated_dcd_feat = self.moving_average(
self.untreated_dcd_feat, decoder_inp)
obj_dist, obj_pred = self.decoder_rnn(tree, decoder_inp,
num_objs[i])
else:
assert obj_labels is not None
obj_pred = obj_labels[i]
obj_dist = to_onehot(obj_pred, self.num_obj_classes)
obj_preds.append(obj_pred)
obj_dists.append(obj_dist)
obj_ctxs = torch.cat(obj_ctxs, dim=0)
obj_preds = torch.cat(obj_preds, dim=0)
obj_dists = torch.cat(obj_dists, dim=0)
return obj_ctxs, obj_preds, obj_dists
def edge_ctx(self, num_objs, obj_feats, forest):
"""Object context and object classification.
:param obj_feats: [num_obj, img_dim + object embedding0 dim]
:return: edge_ctx: [num_obj, #feats] For later!
"""
inp_feats = obj_feats.split(num_objs, dim=0)
edge_ctxs = []
for feat, tree, num_obj in zip(inp_feats, forest, num_objs):
edge_rep = self.edge_ctx_rnn(tree, feat, num_obj)
edge_ctxs.append(edge_rep)
edge_ctxs = torch.cat(edge_ctxs, dim=0)
return edge_ctxs
def forward(self, x, det_result, all_average=False, ctx_average=False):
num_objs = [len(b) for b in det_result.bboxes]
# labels will be used in DecoderRNN during training (for nms)
if self.training or self.cfg.use_gt_box:
obj_labels = torch.cat(det_result.labels)
else:
obj_labels = None
if self.cfg.use_gt_label:
obj_embed = self.obj_embed1(obj_labels.long())
            obj_dists = F.softmax(to_onehot(obj_labels, self.num_obj_classes),
                                  dim=-1)
else:
obj_dists = torch.cat(det_result.dists, dim=0).detach()
obj_embed = obj_dists @ self.obj_embed1.weight
box_info = encode_box_info(det_result)
pos_embed = self.pos_embed(box_info) # N x 128
batch_size = x.shape[0]
if all_average and self.effect_analysis and (not self.training):
obj_pre_rep = self.untreated_obj_feat.view(1, -1).expand(
batch_size, -1)
else:
obj_pre_rep = torch.cat((x, obj_embed, pos_embed), -1)
# construct VCTree
box_inp = self.box_embed(box_info)
pair_inp = self.overlap_embed(get_overlap_info(det_result))
# 128 + 128 + 128 + 128 = 512
bi_inp = torch.cat(
(self.obj_reduce(x.detach()), self.emb_reduce(
obj_embed.detach()), box_inp, pair_inp), -1)
bi_preds, vc_scores = self.vctree_score_net(num_objs, bi_inp,
obj_dists) # list of N x N
forest = generate_forest(vc_scores, det_result)
vc_forest = arbForest_to_biForest(forest)
# object level contextual feature
obj_ctxs, obj_preds, obj_dists = self.obj_ctx(num_objs,
obj_pre_rep,
obj_labels,
vc_forest,
ctx_average=ctx_average)
# edge level contextual feature
obj_embed2 = self.obj_embed2(obj_preds.long())
if (all_average or
ctx_average) and self.effect_analysis and (not self.training):
obj_rel_rep = torch.cat((self.untreated_edg_feat.view(
1, -1).expand(batch_size, -1), obj_ctxs),
dim=-1)
else:
obj_rel_rep = torch.cat((obj_embed2, x, obj_ctxs), -1)
edge_ctx = self.edge_ctx(num_objs, obj_rel_rep, vc_forest)
# memorize average feature
if self.training and self.effect_analysis:
self.untreated_obj_feat = self.moving_average(
self.untreated_obj_feat, obj_pre_rep)
self.untreated_edg_feat = self.moving_average(
self.untreated_edg_feat, torch.cat((obj_embed2, x), -1))
return obj_dists, obj_preds, edge_ctx, bi_preds
def moving_average(self, holder, input):
assert len(input.shape) == 2
with torch.no_grad():
holder = holder * (1 - self.average_ratio
) + self.average_ratio * input.mean(0).view(-1)
return holder
def vctree_score_net(self, num_objs, roi_feat, roi_dist):
roi_dist = roi_dist.detach()
# separate into each image
roi_feat = F.relu(self.score_pre(roi_feat)) # 512
sub_feat = F.relu(self.score_sub(roi_feat)) # 512
obj_feat = F.relu(self.score_obj(roi_feat)) # 512
sub_feats = sub_feat.split(num_objs, dim=0)
obj_feats = obj_feat.split(num_objs, dim=0)
roi_dists = roi_dist.split(num_objs, dim=0)
bi_preds = []
vc_scores = []
for sub, obj, dist in zip(sub_feats, obj_feats, roi_dists):
# only used to calculate loss
num_obj = sub.shape[0]
num_dim = sub.shape[-1]
sub = sub.view(1, num_obj, num_dim).expand(num_obj, num_obj,
num_dim) # N, N, 512
obj = obj.view(num_obj, 1, num_dim).expand(num_obj, num_obj,
num_dim) # N, N, 512
sub_dist = dist.view(1, num_obj,
-1).expand(num_obj, num_obj,
-1).unsqueeze(2) # N, N, 1, 151
obj_dist = dist.view(num_obj, 1,
-1).expand(num_obj, num_obj,
-1).unsqueeze(3) # N, N, 151, 1
joint_dist = (sub_dist * obj_dist).view(num_obj, num_obj,
-1) # N, N, (151, 151)
vis_prior = self.vision_prior(
torch.cat(
[sub * obj, sub, obj],
#co_prior.unsqueeze(-1)],
dim=-1).view(num_obj * num_obj,
-1)).view(num_obj, num_obj)
            joint_pred = torch.sigmoid(vis_prior)  # * co_prior
bi_preds.append(vis_prior)
vc_scores.append(joint_pred)
return bi_preds, vc_scores
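    # vctree_score_net in brief: for each image it scores every object pair,
    # producing an N x N matrix; vis_prior holds the raw pairwise logits
    # (returned as bi_preds) and vc_scores = sigmoid(vis_prior) are the
    # pairwise weights from which generate_forest() builds the VCTree.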
| 16,755 | 41.206549 | 79 | py |
OpenPSG | OpenPSG-main/openpsg/models/frameworks/dual_transformer.py | import torch
from mmcv.cnn import xavier_init
from mmcv.cnn.bricks.transformer import build_transformer_layer_sequence
from mmcv.runner.base_module import BaseModule
from mmdet.models.utils.builder import TRANSFORMER
@TRANSFORMER.register_module()
class DualTransformer(BaseModule):
"""Modify the DETR transformer with two decoders.
Args:
encoder (`mmcv.ConfigDict` | Dict): Config of
TransformerEncoder. Defaults to None.
decoder1 ((`mmcv.ConfigDict` | Dict)): Config of
TransformerDecoder. Defaults to None
decoder2 ((`mmcv.ConfigDict` | Dict)): Config of
TransformerDecoder. Defaults to None
init_cfg (obj:`mmcv.ConfigDict`): The Config for initialization.
Defaults to None.
"""
def __init__(self,
encoder=None,
decoder1=None,
decoder2=None,
init_cfg=None):
super(DualTransformer, self).__init__(init_cfg=init_cfg)
self.encoder = build_transformer_layer_sequence(encoder)
self.decoder1 = build_transformer_layer_sequence(decoder1)
self.decoder2 = build_transformer_layer_sequence(decoder2)
self.embed_dims = self.encoder.embed_dims
def init_weights(self):
# follow the official DETR to init parameters
for m in self.modules():
if hasattr(m, 'weight') and m.weight.dim() > 1:
xavier_init(m, distribution='uniform')
self._is_init = True
def forward(self, x, mask, query1_embed, query2_embed, pos_embed):
"""Forward function for `Transformer`.
Args:
x (Tensor): Input query with shape [bs, c, h, w] where
c = embed_dims.
mask (Tensor): The key_padding_mask used for encoder and decoders,
with shape [bs, h, w].
query1_embed (Tensor): The first query embedding for decoder, with
shape [num_query, c].
query2_embed (Tensor): The second query embedding for decoder, with
shape [num_query, c].
pos_embed (Tensor): The positional encoding for encoder and
decoders, with the same shape as `x`.
Returns:
            tuple[Tensor]: results of the two decoders and the encoder.

                - out_dec1: Output from the first decoder. If \
                    return_intermediate_dec is True it has shape \
                    [num_dec_layers, bs, num_query, embed_dims], else \
                    [1, bs, num_query, embed_dims].
                - out_dec2: Output from the second decoder, with the same \
                    shape as out_dec1.
                - memory: Output results from encoder, with shape \
                    [bs, embed_dims, h, w].
"""
bs, c, h, w = x.shape
# use `view` instead of `flatten` for dynamically exporting to ONNX
x = x.view(bs, c, -1).permute(2, 0, 1) # [bs, c, h, w] -> [h*w, bs, c]
pos_embed = pos_embed.view(bs, c, -1).permute(2, 0, 1)
query1_embed = query1_embed.unsqueeze(1).repeat(
1, bs, 1) # [num_query, dim] -> [num_query, bs, dim]
query2_embed = query2_embed.unsqueeze(1).repeat(
1, bs, 1) # [num_query, dim] -> [num_query, bs, dim]
mask = mask.view(bs, -1) # [bs, h, w] -> [bs, h*w]
memory = self.encoder(query=x,
key=None,
value=None,
query_pos=pos_embed,
query_key_padding_mask=mask)
target1 = torch.zeros_like(query1_embed)
target2 = torch.zeros_like(query2_embed)
# out_dec: [num_layers, num_query, bs, dim]
# first decoder
out_dec1 = self.decoder1(query=target1,
key=memory,
value=memory,
key_pos=pos_embed,
query_pos=query1_embed,
key_padding_mask=mask)
out_dec1 = out_dec1.transpose(1, 2)
# second decoder
out_dec2 = self.decoder2(query=target2,
key=memory,
value=memory,
key_pos=pos_embed,
query_pos=query2_embed,
key_padding_mask=mask)
out_dec2 = out_dec2.transpose(1, 2)
memory = memory.permute(1, 2, 0).reshape(bs, c, h, w)
return out_dec1, out_dec2, memory
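    # Minimal shape sketch for forward() (illustrative values, assuming
    # embed_dims = 256 and 100 queries per decoder):
    #   x:            (bs, 256, h, w)  feature map
    #   mask:         (bs, h, w)       key padding mask
    #   query*_embed: (100, 256)       learned query embeddings
    #   pos_embed:    same shape as x
    #   out_dec1/2:   (num_dec_layers, bs, 100, 256) if intermediate outputs
    #                 are kept, else (1, bs, 100, 256); memory: (bs, 256, h, w)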
| 4,484 | 45.237113 | 79 | py |
OpenPSG | OpenPSG-main/openpsg/models/frameworks/psgtr.py | # Copyright (c) OpenMMLab. All rights reserved.
import warnings
import mmcv
import numpy as np
import torch
import torch.nn.functional as F
from detectron2.utils.visualizer import VisImage, Visualizer
from mmdet.datasets.coco_panoptic import INSTANCE_OFFSET
from mmdet.models import DETECTORS, SingleStageDetector
from openpsg.models.relation_heads.approaches import Result
from openpsg.utils.utils import adjust_text_color, draw_text, get_colormap
def triplet2Result(triplets, use_mask, eval_pan_rels=True):
if use_mask:
bboxes, labels, rel_pairs, masks, pan_rel_pairs, pan_seg, complete_r_labels, complete_r_dists, \
r_labels, r_dists, pan_masks, rels, pan_labels \
= triplets
if isinstance(bboxes, torch.Tensor):
labels = labels.detach().cpu().numpy()
bboxes = bboxes.detach().cpu().numpy()
rel_pairs = rel_pairs.detach().cpu().numpy()
complete_r_labels = complete_r_labels.detach().cpu().numpy()
complete_r_dists = complete_r_dists.detach().cpu().numpy()
r_labels = r_labels.detach().cpu().numpy()
r_dists = r_dists.detach().cpu().numpy()
if isinstance(pan_seg, torch.Tensor):
pan_seg = pan_seg.detach().cpu().numpy()
pan_rel_pairs = pan_rel_pairs.detach().cpu().numpy()
masks = masks.detach().cpu().numpy()
pan_masks = pan_masks.detach().cpu().numpy()
rels = rels.detach().cpu().numpy()
pan_labels = pan_labels.detach().cpu().numpy()
if eval_pan_rels:
return Result(refine_bboxes=bboxes,
labels=pan_labels+1,
formatted_masks=dict(pan_results=pan_seg),
                          rel_pair_idxes=pan_rel_pairs,  # rel_pairs if not eval_pan_rels
rel_dists=r_dists,
rel_labels=r_labels,
pan_results=pan_seg,
masks=pan_masks,
rels=rels)
else:
return Result(refine_bboxes=bboxes,
labels=labels,
formatted_masks=dict(pan_results=pan_seg),
rel_pair_idxes=rel_pairs,
rel_dists=complete_r_dists,
rel_labels=complete_r_labels,
pan_results=pan_seg,
masks=masks)
else:
bboxes, labels, rel_pairs, r_labels, r_dists = triplets
labels = labels.detach().cpu().numpy()
bboxes = bboxes.detach().cpu().numpy()
rel_pairs = rel_pairs.detach().cpu().numpy()
r_labels = r_labels.detach().cpu().numpy()
r_dists = r_dists.detach().cpu().numpy()
return Result(
refine_bboxes=bboxes,
labels=labels,
formatted_masks=dict(pan_results=None),
rel_pair_idxes=rel_pairs,
rel_dists=r_dists,
rel_labels=r_labels,
pan_results=None,
)
@DETECTORS.register_module()
class PSGTr(SingleStageDetector):
def __init__(self,
backbone,
bbox_head,
train_cfg=None,
test_cfg=None,
pretrained=None,
init_cfg=None):
super(PSGTr, self).__init__(backbone, None, bbox_head, train_cfg,
test_cfg, pretrained, init_cfg)
self.CLASSES = self.bbox_head.object_classes
self.PREDICATES = self.bbox_head.predicate_classes
self.num_classes = self.bbox_head.num_classes
# over-write `forward_dummy` because:
# the forward of bbox_head requires img_metas
def forward_dummy(self, img):
"""Used for computing network flops.
See `mmdetection/tools/analysis_tools/get_flops.py`
"""
warnings.warn('Warning! MultiheadAttention in DETR does not '
'support flops computation! Do not use the '
'results in your papers!')
batch_size, _, height, width = img.shape
dummy_img_metas = [
dict(batch_input_shape=(height, width),
img_shape=(height, width, 3)) for _ in range(batch_size)
]
x = self.extract_feat(img)
outs = self.bbox_head(x, dummy_img_metas)
return outs
def forward_train(self,
img,
img_metas,
gt_rels,
gt_bboxes,
gt_labels,
gt_masks,
gt_bboxes_ignore=None):
super(SingleStageDetector, self).forward_train(img, img_metas)
x = self.extract_feat(img)
if self.bbox_head.use_mask:
BS, C, H, W = img.shape
new_gt_masks = []
for each in gt_masks:
mask = torch.tensor(each.to_ndarray(), device=x[0].device)
_, h, w = mask.shape
padding = (0, W - w, 0, H - h)
mask = F.interpolate(F.pad(mask, padding).unsqueeze(1),
size=(H // 2, W // 2),
mode='nearest').squeeze(1)
# mask = F.pad(mask, padding)
new_gt_masks.append(mask)
gt_masks = new_gt_masks
losses = self.bbox_head.forward_train(x, img_metas, gt_rels, gt_bboxes,
gt_labels, gt_masks,
gt_bboxes_ignore)
return losses
def simple_test(self, img, img_metas, rescale=False):
feat = self.extract_feat(img)
results_list = self.bbox_head.simple_test(feat,
img_metas,
rescale=rescale)
sg_results = [
triplet2Result(triplets, self.bbox_head.use_mask)
for triplets in results_list
]
return sg_results
| 6,062 | 39.152318 | 104 | py |
OpenPSG | OpenPSG-main/openpsg/models/frameworks/detr4seg.py | # Copyright (c) OpenMMLab. All rights reserved.
import imghdr
import random
import time
import warnings
import cv2
import matplotlib.pyplot as plt
import mmcv
import numpy as np
import torch
import torch.nn.functional as F
from detectron2.utils.visualizer import VisImage, Visualizer
from mmdet.core import bbox2result
from mmdet.datasets.coco_panoptic import INSTANCE_OFFSET
from mmdet.models import DETECTORS, SingleStageDetector
from openpsg.models.relation_heads.approaches import Result
from openpsg.utils.utils import adjust_text_color, draw_text, get_colormap
def seg2Result(segs):
bboxes, labels, pan_seg = segs
if isinstance(bboxes, torch.Tensor):
pan_seg = pan_seg.detach().cpu().numpy()
labels = labels.detach().cpu().numpy()
bboxes = bboxes.detach().cpu().numpy()
# return dict(pan_results=pan_seg)
return Result(
refine_bboxes=bboxes,
labels=labels,
formatted_masks=dict(pan_results=pan_seg),
pan_results=pan_seg,
)
@DETECTORS.register_module()
class DETR4seg(SingleStageDetector):
def __init__(self,
backbone,
bbox_head,
train_cfg=None,
test_cfg=None,
pretrained=None,
init_cfg=None):
super(DETR4seg, self).__init__(backbone, None, bbox_head, train_cfg,
test_cfg, pretrained, init_cfg)
self.obc = self.bbox_head.CLASSES
self.num_classes = len(self.obc)
print(self.num_classes)
# over-write `forward_dummy` because:
# the forward of bbox_head requires img_metas
def forward_dummy(self, img):
"""Used for computing network flops.
See `mmdetection/tools/analysis_tools/get_flops.py`
"""
warnings.warn('Warning! MultiheadAttention in DETR does not '
'support flops computation! Do not use the '
'results in your papers!')
batch_size, _, height, width = img.shape
dummy_img_metas = [
dict(batch_input_shape=(height, width),
img_shape=(height, width, 3)) for _ in range(batch_size)
]
x = self.extract_feat(img)
outs = self.bbox_head(x, dummy_img_metas)
return outs
def forward_train(self,
img,
img_metas,
gt_bboxes,
gt_labels,
gt_masks,
gt_bboxes_ignore=None):
"""
Args:
img (Tensor): Input images of shape (N, C, H, W).
Typically these should be mean centered and std scaled.
img_metas (list[dict]): A List of image info dict where each dict
has: 'img_shape', 'scale_factor', 'flip', and may also contain
'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'.
For details on the values of these keys see
:class:`mmdet.datasets.pipelines.Collect`.
gt_bboxes (list[Tensor]): Each item are the truth boxes for each
image in [tl_x, tl_y, br_x, br_y] format.
gt_labels (list[Tensor]): Class indices corresponding to each box
gt_bboxes_ignore (None | list[Tensor]): Specify which bounding
boxes can be ignored when computing the loss.
Returns:
dict[str, Tensor]: A dictionary of loss components.
"""
super(SingleStageDetector, self).forward_train(img, img_metas)
x = self.extract_feat(img)
BS, C, H, W = img.shape
new_gt_masks = []
for each in gt_masks:
mask = torch.tensor(each.resize(
(H // 2, W // 2), interpolation='bilinear').to_ndarray(),
device=x[0].device)
_, h, w = mask.shape
# padding = (
# 0,W-w,
# 0,H-h
# )
# mask = F.pad(mask,padding)
new_gt_masks.append(mask)
gt_masks = new_gt_masks
losses = self.bbox_head.forward_train(x, img_metas, gt_bboxes,
gt_labels, gt_masks,
gt_bboxes_ignore)
return losses
def simple_test(self, img, img_metas, rescale=False):
"""Test function without test-time augmentation.
Args:
img (torch.Tensor): Images with shape (N, C, H, W).
img_metas (list[dict]): List of image information.
rescale (bool, optional): Whether to rescale the results.
Defaults to False.
Returns:
list[list[np.ndarray]]: BBox results of each image and classes.
The outer list corresponds to each image. The inner list
corresponds to each class.
"""
feat = self.extract_feat(img)
results_list = self.bbox_head.simple_test(feat,
img_metas,
rescale=rescale)
bbox_results = [seg2Result(segs) for segs in results_list]
return bbox_results
###### TODO: nms in test
def aug_test(self, imgs, img_metas, rescale=False):
"""Test function with test time augmentation.
Args:
imgs (list[Tensor]): the outer list indicates test-time
augmentations and inner Tensor should have a shape NxCxHxW,
which contains all images in the batch.
img_metas (list[list[dict]]): the outer list indicates test-time
augs (multiscale, flip, etc.) and the inner list indicates
images in a batch. each dict has image information.
rescale (bool, optional): Whether to rescale the results.
Defaults to False.
Returns:
list[list[np.ndarray]]: BBox results of each image and classes.
The outer list corresponds to each image. The inner list
corresponds to each class.
"""
assert hasattr(self.bbox_head, 'aug_test'), \
f'{self.bbox_head.__class__.__name__}' \
' does not support test-time augmentation'
feats = self.extract_feats(imgs)
results_list = self.bbox_head.aug_test(feats,
img_metas,
rescale=rescale)
sg_results = [seg2Result(triplets) for triplets in results_list]
return sg_results
# TODO
# over-write `onnx_export` because:
# (1) the forward of bbox_head requires img_metas
# (2) the different behavior (e.g. construction of `masks`) between
# torch and ONNX model, during the forward of bbox_head
def onnx_export(self, img, img_metas):
"""Test function for exporting to ONNX, without test time augmentation.
Args:
img (torch.Tensor): input images.
img_metas (list[dict]): List of image information.
Returns:
tuple[Tensor, Tensor]: dets of shape [N, num_det, 5]
and class labels of shape [N, num_det].
"""
x = self.extract_feat(img)
# forward of this head requires img_metas
outs = self.bbox_head.forward_onnx(x, img_metas)
# get shape as tensor
img_shape = torch._shape_as_tensor(img)[2:]
img_metas[0]['img_shape_for_onnx'] = img_shape
det_bboxes, det_labels = self.bbox_head.onnx_export(*outs, img_metas)
return det_bboxes, det_labels
def show_result(
self,
img,
result,
score_thr=0.3,
bbox_color=(72, 101, 241),
text_color=(72, 101, 241),
mask_color=None,
thickness=2,
font_size=13,
win_name='',
show=False,
wait_time=0,
out_file=None,
):
"""Draw `result` over `img`.
Args:
img (str or Tensor): The image to be displayed.
result (Tensor or tuple): The results to draw over `img`
bbox_result or (bbox_result, segm_result).
score_thr (float, optional): Minimum score of bboxes to be shown.
Default: 0.3.
bbox_color (str or tuple(int) or :obj:`Color`):Color of bbox lines.
The tuple of color should be in BGR order. Default: 'green'
text_color (str or tuple(int) or :obj:`Color`):Color of texts.
The tuple of color should be in BGR order. Default: 'green'
mask_color (None or str or tuple(int) or :obj:`Color`):
Color of masks. The tuple of color should be in BGR order.
Default: None
thickness (int): Thickness of lines. Default: 2
font_size (int): Font size of texts. Default: 13
win_name (str): The window name. Default: ''
wait_time (float): Value of waitKey param.
Default: 0.
show (bool): Whether to show the image.
Default: False.
out_file (str or None): The filename to write the image.
Default: None.
Returns:
img (Tensor): Only if not `show` or `out_file`
"""
# Load image
img = mmcv.imread(img)
img = img.copy() # (H, W, 3)
img_h, img_w = img.shape[:-1]
if True:
# Draw masks
pan_results = result.pan_results
ids = np.unique(pan_results)[::-1]
legal_indices = ids != self.num_classes # for VOID label
ids = ids[legal_indices]
# # Get predicted labels
# labels = np.array([id % INSTANCE_OFFSET for id in ids], dtype=np.int64)
# labels = [self.obc[l] for l in labels]
# (N_m, H, W)
segms = pan_results[None] == ids[:, None, None]
# Resize predicted masks
segms = [
mmcv.image.imresize(m.astype(float), (img_w, img_h))
for m in segms
]
# Choose colors for each instance in coco
colormap_coco = get_colormap(len(segms))
colormap_coco = (np.array(colormap_coco) / 255).tolist()
viz = Visualizer(img)
viz.overlay_instances(
# labels=labels,
masks=segms,
assigned_colors=colormap_coco,
)
viz_img = viz.get_output().get_image()
else:
# Draw bboxes
bboxes = result.refine_bboxes[:, :4]
# Choose colors for each instance in coco
colormap_coco = get_colormap(len(bboxes))
colormap_coco = (np.array(colormap_coco) / 255).tolist()
# 1-index
labels = [self.CLASSES[l - 1] for l in result.labels]
viz = Visualizer(img)
viz.overlay_instances(
labels=labels,
boxes=bboxes,
assigned_colors=colormap_coco,
)
viz_img = viz.get_output().get_image()
viz_final = viz_img
if out_file is not None:
mmcv.imwrite(viz_final, out_file)
if not (show or out_file):
return viz_final
| 11,322 | 35.525806 | 85 | py |
OpenPSG | OpenPSG-main/openpsg/models/frameworks/sg_panoptic_fpn.py | import mmcv
import numpy as np
import torch
import torch.nn.functional as F
from detectron2.utils.visualizer import VisImage, Visualizer
from mmdet.core import BitmapMasks, bbox2roi, build_assigner, multiclass_nms
from mmdet.datasets.coco_panoptic import INSTANCE_OFFSET
from mmdet.models import DETECTORS, PanopticFPN
from mmdet.models.builder import build_head
from openpsg.models.relation_heads.approaches import Result
from openpsg.utils.utils import adjust_text_color, draw_text, get_colormap
@DETECTORS.register_module()
class SceneGraphPanopticFPN(PanopticFPN):
def __init__(
self,
backbone,
neck=None,
rpn_head=None,
roi_head=None,
train_cfg=None,
test_cfg=None,
pretrained=None,
init_cfg=None,
# for panoptic segmentation
semantic_head=None,
panoptic_fusion_head=None,
# for scene graph
relation_head=None,
):
super(SceneGraphPanopticFPN, self).__init__(
backbone=backbone,
neck=neck,
rpn_head=rpn_head,
roi_head=roi_head,
train_cfg=train_cfg,
test_cfg=test_cfg,
pretrained=pretrained,
init_cfg=init_cfg,
semantic_head=semantic_head,
panoptic_fusion_head=panoptic_fusion_head,
)
# Init relation head
if relation_head is not None:
self.relation_head = build_head(relation_head)
# Cache the detection results to speed up the sgdet training.
self.rpn_results = dict()
self.det_results = dict()
@property
def with_relation(self):
return hasattr(self,
'relation_head') and self.relation_head is not None
def simple_test_sg_bboxes(self,
x,
img_metas,
proposals=None,
rescale=False):
"""Test without Augmentation; convert panoptic segments to bounding
boxes."""
if proposals is None:
proposal_list = self.rpn_head.simple_test_rpn(x, img_metas)
else:
proposal_list = proposals
bboxes, scores = self.roi_head.simple_test_bboxes(x,
img_metas,
proposal_list,
None,
rescale=rescale)
pan_cfg = self.test_cfg.panoptic
# class-wise predictions
det_bboxes = []
det_labels = []
for bbox, score in zip(bboxes, scores):
det_bbox, det_label = multiclass_nms(bbox, score,
pan_cfg.score_thr,
pan_cfg.nms,
pan_cfg.max_per_img)
det_bboxes.append(det_bbox)
det_labels.append(det_label)
mask_results = self.simple_test_mask(x,
img_metas,
det_bboxes,
det_labels,
rescale=rescale)
masks = mask_results['masks'] # List[(1, H, W)]
# (B, N_stuff_classes, H, W)
# Resizes all to same size; uses img_metas[0] only
# seg_preds_alt = self.semantic_head.simple_test(x, img_metas, rescale)
# seg_preds = self.semantic_head.forward(x)['seg_preds']
seg_preds = [
self.semantic_head.simple_test(
[f[i][None, ...] for f in x],
[img_metas[i]],
rescale,
)[0] for i in range(len(img_metas))
]
results = []
for i in range(len(det_bboxes)):
# Shape mismatch
# i: 1
# img: [16, 3, 800, 1216]
# masks[i]: [1, 800, 1120]
# seg_preds[i]: [54, 800, 1216], problem here??
pan_results = self.panoptic_fusion_head.simple_test(
det_bboxes[i], det_labels[i], masks[i], seg_preds[i])
pan_results = pan_results.int().detach().cpu().numpy() # (H, W)
# Convert panoptic segments to bboxes
ids = np.unique(pan_results)[::-1]
legal_indices = ids != self.num_classes # for VOID label
ids = ids[legal_indices] # exclude VOID label
# Extract class labels, (N), 1-index?
labels = np.array([id % INSTANCE_OFFSET for id in ids],
dtype=np.int64) + 1
# labels = np.array([id % INSTANCE_OFFSET for id in ids],
# dtype=np.int64)
# Binary masks for each object, (N, H, W)
segms = pan_results[None] == ids[:, None, None]
# Convert to bboxes
height, width = segms.shape[1:]
masks_to_bboxes = BitmapMasks(segms, height, width).get_bboxes()
# Convert to torch tensor
# (N_b, 4)
masks_to_bboxes = torch.tensor(masks_to_bboxes).to(det_bboxes[0])
labels = torch.tensor(labels).to(det_labels[0])
result = dict(pan_results=pan_results,
masks=segms,
bboxes=masks_to_bboxes,
labels=labels)
results.append(result)
return results
def forward_train(
self,
img,
img_metas,
# all_gt_bboxes,
# all_gt_labels,
gt_bboxes,
gt_labels,
gt_bboxes_ignore=None,
gt_masks=None,
proposals=None,
gt_rels=None,
gt_keyrels=None,
gt_relmaps=None,
gt_scenes=None,
rescale=False,
**kwargs,
):
# img: (B, C, H, W)
x = self.extract_feat(img)
################################################################
# Specifically for Relation Prediction / SGG. #
# The detector part must perform as if at test mode. #
################################################################
if self.with_relation:
# # assert gt_rels is not None and gt_relmaps is not None
# if self.relation_head.with_visual_mask and (not self.with_mask):
# raise ValueError("The basic detector did not provide masks.")
# NOTE: Change gt to 1-index here:
gt_labels = [label + 1 for label in gt_labels]
"""
            NOTE: (for VG) When the gt masks are None but the head needs
            masks, we use the gt_box and gt_label (if needed) to generate
            fake masks.
"""
(
bboxes,
labels,
target_labels,
dists, # Can this be `None`?
pan_masks,
pan_results,
points,
) = self.detector_simple_test(
x,
img_metas,
# all_gt_bboxes,
# all_gt_labels,
gt_bboxes,
gt_labels,
gt_masks,
proposals,
use_gt_box=self.relation_head.use_gt_box,
use_gt_label=self.relation_head.use_gt_label,
rescale=rescale,
)
# Filter out empty predictions
idxes_to_filter = [i for i, b in enumerate(bboxes) if len(b) == 0]
param_need_filter = [
bboxes, labels, dists, target_labels, gt_bboxes, gt_labels,
gt_rels, img_metas, gt_scenes, gt_keyrels, points, pan_results,
gt_masks, gt_relmaps, pan_masks
]
for idx, param in enumerate(param_need_filter):
if param_need_filter[idx]:
param_need_filter[idx] = [
x for i, x in enumerate(param)
if i not in idxes_to_filter
]
(bboxes, labels, dists, target_labels, gt_bboxes, gt_labels,
gt_rels, img_metas, gt_scenes, gt_keyrels, points, pan_results,
gt_masks, gt_relmaps, pan_masks) = param_need_filter
# Filter done
if idxes_to_filter and len(gt_bboxes) == 16:
print('sg_panoptic_fpn: not filtered!')
filtered_x = []
for idx in range(len(x)):
filtered_x.append(
torch.stack([
e for i, e in enumerate(x[idx])
if i not in idxes_to_filter
]))
x = filtered_x
gt_result = Result(
# bboxes=all_gt_bboxes,
# labels=all_gt_labels,
bboxes=gt_bboxes,
labels=gt_labels,
rels=gt_rels,
relmaps=gt_relmaps,
masks=gt_masks,
rel_pair_idxes=[rel[:, :2].clone() for rel in gt_rels]
if gt_rels is not None else None,
rel_labels=[rel[:, -1].clone() for rel in gt_rels]
if gt_rels is not None else None,
key_rels=gt_keyrels if gt_keyrels is not None else None,
img_shape=[meta['img_shape'] for meta in img_metas],
scenes=gt_scenes,
)
det_result = Result(
bboxes=bboxes,
labels=labels,
dists=dists,
masks=pan_masks,
pan_results=pan_results,
points=points,
target_labels=target_labels,
target_scenes=gt_scenes,
img_shape=[meta['img_shape'] for meta in img_metas],
)
det_result = self.relation_head(x, img_metas, det_result,
gt_result)
# Loss performed here
return self.relation_head.loss(det_result)
def forward_test(self, imgs, img_metas, **kwargs):
"""
Args:
imgs (List[Tensor]): the outer list indicates test-time
augmentations and inner Tensor should have a shape NxCxHxW,
which contains all images in the batch.
img_metas (List[List[dict]]): the outer list indicates test-time
augs (multiscale, flip, etc.) and the inner list indicates
images in a batch.
"""
for var, name in [(imgs, 'imgs'), (img_metas, 'img_metas')]:
if not isinstance(var, list):
raise TypeError(f'{name} must be a list, but got {type(var)}')
num_augs = len(imgs)
if num_augs != len(img_metas):
raise ValueError(f'num of augmentations ({len(imgs)}) '
f'!= num of image meta ({len(img_metas)})')
# NOTE the batched image size information may be useful, e.g.
# in DETR, this is needed for the construction of masks, which is
# then used for the transformer_head.
for img, img_meta in zip(imgs, img_metas):
batch_size = len(img_meta)
for img_id in range(batch_size):
img_meta[img_id]['batch_input_shape'] = tuple(img.size()[-2:])
key_first = kwargs.pop('key_first', False)
# if relation_mode:
assert num_augs == 1
return self.relation_simple_test(imgs[0],
img_metas[0],
key_first=key_first,
**kwargs)
# if num_augs == 1:
# # proposals (List[List[Tensor]]): the outer list indicates
# # test-time augs (multiscale, flip, etc.) and the inner list
# # indicates images in a batch.
# # The Tensor should have a shape Px4, where P is the number of
# # proposals.
# if "proposals" in kwargs:
# kwargs["proposals"] = kwargs["proposals"][0]
# return self.simple_test(imgs[0], img_metas[0], **kwargs)
# else:
# assert imgs[0].size(0) == 1, (
# "aug test does not support "
# "inference with batch size "
# f"{imgs[0].size(0)}"
# )
# # TODO: support test augmentation for predefined proposals
# assert "proposals" not in kwargs
# return self.aug_test(imgs, img_metas, **kwargs)
def detector_simple_test(
self,
x,
img_meta,
gt_bboxes,
gt_labels,
gt_masks,
proposals=None,
use_gt_box=False,
use_gt_label=False,
rescale=False,
is_testing=False,
):
"""Test without augmentation. Used in SGG.
Return:
det_bboxes: (list[Tensor]): The boxes may have 5 columns (sgdet)
or 4 columns (predcls/sgcls).
det_labels: (list[Tensor]): 1D tensor, det_labels (sgdet) or
gt_labels (predcls/sgcls).
det_dists: (list[Tensor]): 2D tensor, N x Nc, the bg column is 0.
detected dists (sgdet/sgcls), or None (predcls).
masks: (list[list[Tensor]]): Mask is associated with box. Thus,
                in predcls/sgcls mode, it will first return the gt_masks.
But some datasets do not contain gt_masks. We try to use the
gt box to obtain the masks.
"""
assert self.with_bbox, 'Bbox head must be implemented.'
pan_seg_masks = None
if use_gt_box and use_gt_label: # predcls
# if is_testing:
# det_results = self.simple_test_sg_bboxes(x, img_meta,
# rescale=True)
# pan_results = [r['pan_results'] for r in det_results]
target_labels = gt_labels
pan_seg_masks = gt_masks
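            # In predcls mode the gt boxes, labels and masks are passed
            # through unchanged, so the relation head only predicts
            # predicates for the given ground-truth objects.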
return gt_bboxes, gt_labels, target_labels, None, gt_masks, None, None
# NOTE: Sgcls should not be performed
elif use_gt_box and not use_gt_label: # sgcls
"""The self implementation return 1-based det_labels."""
target_labels = gt_labels
_, det_labels, det_dists = self.detector_simple_test_det_bbox(
x, img_meta, proposals=gt_bboxes, rescale=rescale)
return gt_bboxes, det_labels, target_labels, det_dists, gt_masks, None, None
elif not use_gt_box and not use_gt_label: # sgdet
"""It returns 1-based det_labels."""
# get target labels for the det bboxes: make use of the
# bbox head assigner
if not is_testing: # excluding the testing phase
det_results = self.simple_test_sg_bboxes(x,
img_meta,
rescale=rescale)
det_bboxes = [r['bboxes'] for r in det_results]
det_labels = [r['labels'] for r in det_results] # 1-index
pan_results = None
target_labels = []
# MaxIOUAssigner
bbox_assigner = build_assigner(self.train_cfg.rcnn.assigner)
for i in range(len(img_meta)):
assign_result = bbox_assigner.assign(
# gt_labels: 1-index
det_bboxes[i],
gt_bboxes[i],
gt_labels=gt_labels[i] - 1,
)
target_labels.append(assign_result.labels + 1)
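                    # assign_result.labels is 0-indexed and uses -1 for
                    # unassigned boxes; adding 1 restores the 1-indexed label
                    # convention, with 0 meaning background/unmatched.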
# assign_result.labels[assign_result.labels == -1] = 0
# target_labels.append(assign_result.labels)
else:
det_results = self.simple_test_sg_bboxes(x,
img_meta,
rescale=rescale)
det_bboxes = [r['bboxes'] for r in det_results]
det_labels = [r['labels'] for r in det_results] # 1-index
pan_seg_masks = [r['masks'] for r in det_results]
# to reshape pan_seg_masks
mask_size = (img_meta[0]['ori_shape'][0],
img_meta[0]['ori_shape'][1])
pan_seg_masks = F.interpolate(
torch.Tensor(pan_seg_masks[0]).unsqueeze(1),
size=mask_size).squeeze(1).bool()
pan_seg_masks = [pan_seg_masks.numpy()]
                # TODO: why does the number of bboxes/masks differ between the two tests?
det_results_for_pan = self.simple_test_sg_bboxes(x,
img_meta,
rescale=True)
pan_results = [r['pan_results'] for r in det_results_for_pan]
# pan_seg_masks = [r['masks'] for r in det_results_for_pan]
target_labels = None
# det_dists: Tuple[(N_b, N_c + 1)]
# Temp set as one-hot labels
det_dists = [
F.one_hot(det_label,
num_classes=self.num_classes + 1).to(det_bboxes[0])
for det_label in det_labels
]
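            # Since det_labels are 1-indexed, column 0 of the one-hot
            # vectors (the background column) always stays zero.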
# det_dists = [
# F.one_hot(det_label - 1, num_classes=self.num_classes)
# .to(det_bboxes[0])
# for det_label in det_labels
# ]
det_bboxes = [
torch.cat([b, b.new_ones(len(b), 1)], dim=-1)
for b in det_bboxes
]
# det_bboxes: List[B x (N_b, 5)]
# det_labels: List[B x (N_b)]
# target_labels: List[B x (N_b)]
# det_dists: List[B x (N_b, N_c + 1)]
return det_bboxes, det_labels, target_labels, \
det_dists, pan_seg_masks, pan_results, None
def detector_simple_test_det_bbox(self,
x,
img_meta,
proposals=None,
rescale=False):
"""Run the detector in test mode, given gt_bboxes, return the labels, dists
Return:
det_labels: 1 based.
"""
num_levels = len(x)
if proposals is None:
proposal_list = self.rpn_head.simple_test_rpn(x, img_meta)
else:
proposal_list = proposals
"""Support multi-image per batch"""
det_bboxes, det_labels, score_dists = [], [], []
for img_id in range(len(img_meta)):
x_i = tuple([x[i][img_id][None] for i in range(num_levels)])
# img_meta_i = [img_meta[img_id]]
proposal_list_i = [proposal_list[img_id]]
det_labels_i, score_dists_i = self.simple_test_given_bboxes(
x_i, proposal_list_i)
det_bboxes.append(proposal_list[img_id])
det_labels.append(det_labels_i)
score_dists.append(score_dists_i)
return det_bboxes, det_labels, score_dists
def detector_simple_test_det_bbox_mask(self, x, img_meta, rescale=False):
"""Run the detector in test mode, return the detected boxes, labels,
dists, and masks."""
"""RPN phase"""
# num_levels = len(x)
# proposal_list =
# self.rpn_head.simple_test_rpn(x, img_meta, self.test_cfg.rpn)
proposal_list = self.rpn_head.simple_test_rpn(x, img_meta)
# List[Tensor(1000, 5)]
"""Support multi-image per batch"""
# det_bboxes, det_labels, score_dists = [], [], []
# # img_meta: List[metadata]
# for img_id in range(len(img_meta)):
# x_i = tuple([x[i][img_id][None] for i in range(num_levels)])
# img_meta_i = [img_meta[img_id]]
# proposal_list_i = [proposal_list[img_id]]
# (
# det_bboxes_i,
# det_labels_i,
# score_dists_i,
# ) = self.roi_head.simple_test_bboxes(
# x_i,
# img_meta_i,
# proposal_list_i,
# self.test_cfg.rcnn,
# rescale=rescale,
# # return_dist=True,
# )
# det_bboxes.append(det_bboxes_i)
# det_labels.append(det_labels_i + 1)
# score_dists.append(score_dists_i)
det_bboxes, det_labels = self.roi_head.simple_test_bboxes(
x,
img_meta,
proposal_list,
self.test_cfg.rcnn,
rescale=rescale,
# return_dist=True,
)
det_labels, score_dists = zip(*det_labels)
# det_bboxes: (N_b, 5)
# det_labels: (N_b)
# score_dists: (N_b, N_c + 1)
return det_bboxes, det_labels, score_dists, None, None
def simple_test_given_bboxes(self, x, proposals):
"""For SGG~SGCLS mode: Given gt boxes, extract its predicted scores and
score dists.
Without any post-process.
"""
rois = bbox2roi(proposals)
roi_feats = self.roi_head.bbox_roi_extractor(
x[:len(self.roi_head.bbox_roi_extractor.featmap_strides)], rois)
if self.with_shared_head:
roi_feats = self.roi_head.shared_head(roi_feats)
cls_score, _ = self.roi_head.bbox_head(roi_feats)
cls_score[:, 1:] = F.softmax(cls_score[:, 1:], dim=1)
_, labels = torch.max(cls_score[:, 1:], dim=1)
labels += 1
cls_score[:, 0] = 0
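        # Zero the background column so the returned distribution follows the
        # "bg column is 0" convention documented in detector_simple_test.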
return labels, cls_score
def relation_simple_test(
self,
img,
img_meta,
# all_gt_bboxes=None,
# all_gt_labels=None,
gt_bboxes=None,
gt_labels=None,
gt_rels=None,
gt_masks=None,
gt_scenes=None,
rescale=False,
ignore_classes=None,
key_first=False,
):
"""
:param img:
:param img_meta:
        :param gt_bboxes: Usually, during forward (train/val/test), it should
            not be None. But for demo (inference), it should be None. The
            same holds for gt_labels.
        :param gt_labels:
        :param gt_rels: Make sure gt_rels is never passed into the forward
            process in any mode. It is only used to visualize the results.
        :param gt_masks:
        :param rescale:
        :param ignore_classes: In practice, you may want to ignore some
            object classes.
:return:
"""
# Extract the outer list: Since the aug test is
# temporarily not supported.
# if all_gt_bboxes is not None:
# all_gt_bboxes = all_gt_bboxes[0]
# if all_gt_labels is not None:
# all_gt_labels = all_gt_labels[0]
if gt_bboxes is not None:
gt_bboxes = gt_bboxes[0]
if gt_labels is not None:
gt_labels = gt_labels[0]
if gt_masks is not None:
gt_masks = gt_masks[0]
x = self.extract_feat(img)
"""
        NOTE: (for VG) When the gt masks are None but the head needs masks,
        we use the gt_box and gt_label (if needed) to generate fake masks.
"""
# NOTE: Change to 1-index here:
gt_labels = [label + 1 for label in gt_labels]
# Rescale should be forbidden here since the bboxes and masks will
# be used in relation module.
bboxes, labels, target_labels, dists, pan_masks, pan_results, points \
= self.detector_simple_test(
x,
img_meta,
gt_bboxes,
gt_labels,
gt_masks,
use_gt_box=self.relation_head.use_gt_box,
use_gt_label=self.relation_head.use_gt_label,
rescale=False,
is_testing=True,
)
# saliency_maps = (
# self.saliency_detector_test(img, img_meta) if \
# self.with_saliency else None
# )
det_result = Result(
bboxes=bboxes,
labels=labels,
dists=dists,
masks=pan_masks,
pan_results=pan_results,
points=points,
target_labels=target_labels,
# saliency_maps=saliency_maps,
img_shape=[meta['img_shape'] for meta in img_meta],
)
# If empty prediction
if len(bboxes[0]) == 0:
return det_result
det_result = self.relation_head(x,
img_meta,
det_result,
is_testing=True,
ignore_classes=ignore_classes)
"""
Transform the data type, and rescale the bboxes and masks if needed
(for visual, do not rescale, for evaluation, rescale).
"""
scale_factor = img_meta[0]['scale_factor']
det_result = self.relation_head.get_result(det_result,
scale_factor,
rescale=rescale,
key_first=key_first)
if pan_masks is not None:
det_result.masks = np.array(pan_masks[0])
return det_result
def show_result(
self,
img,
result,
score_thr=0.3,
bbox_color=(72, 101, 241),
text_color=(72, 101, 241),
mask_color=None,
thickness=2,
font_size=13,
win_name='',
show=False,
wait_time=0,
out_file=None,
):
"""Draw `result` over `img`.
Args:
img (str or Tensor): The image to be displayed.
result (Tensor or tuple): The results to draw over `img`
bbox_result or (bbox_result, segm_result).
score_thr (float, optional): Minimum score of bboxes to be shown.
Default: 0.3.
bbox_color (str or tuple(int) or :obj:`Color`):Color of bbox lines.
The tuple of color should be in BGR order. Default: 'green'
text_color (str or tuple(int) or :obj:`Color`):Color of texts.
The tuple of color should be in BGR order. Default: 'green'
mask_color (None or str or tuple(int) or :obj:`Color`):
Color of masks. The tuple of color should be in BGR order.
Default: None
thickness (int): Thickness of lines. Default: 2
font_size (int): Font size of texts. Default: 13
win_name (str): The window name. Default: ''
wait_time (float): Value of waitKey param.
Default: 0.
show (bool): Whether to show the image.
Default: False.
out_file (str or None): The filename to write the image.
Default: None.
Returns:
img (Tensor): Only if not `show` or `out_file`
"""
self.CLASSES = [
'airplane',
'apple',
'backpack',
'banana',
'baseball bat',
'baseball glove',
'bear',
'bed',
'bench',
'bicycle',
'bird',
'boat',
'book',
'bottle',
'bowl',
'broccoli',
'bus',
'cake',
'car',
'carrot',
'cat',
'cell phone',
'chair',
'clock',
'couch',
'cow',
'cup',
'dining table',
'dog',
'donut',
'elephant',
'fire hydrant',
'fork',
'frisbee',
'giraffe',
'hair drier',
'handbag',
'horse',
'hot dog',
'keyboard',
'kite',
'knife',
'laptop',
'microwave',
'motorcycle',
'mouse',
'orange',
'oven',
'parking meter',
'person',
'pizza',
'potted plant',
'refrigerator',
'remote',
'sandwich',
'scissors',
'sheep',
'sink',
'skateboard',
'skis',
'snowboard',
'spoon',
'sports ball',
'stop sign',
'suitcase',
'surfboard',
'teddy bear',
'tennis racket',
'tie',
'toaster',
'toilet',
'toothbrush',
'traffic light',
'train',
'truck',
'tv',
'umbrella',
'vase',
'wine glass',
'zebra',
'banner',
'blanket',
'bridge',
'building',
'cabinet',
'cardboard',
'ceiling',
'counter',
'curtain',
'dirt',
'door',
'fence',
'floor',
'floor-wood',
'flower',
'food',
'fruit',
'grass',
'gravel',
'house',
'light',
'mirror',
'mountain',
'net',
'paper',
'pavement',
'pillow',
'platform',
'playingfield',
'railroad',
'river',
'road',
'rock',
'roof',
'rug',
'sand',
'sea',
'shelf',
'sky',
'snow',
'stairs',
'table',
'tent',
'towel',
'tree',
'wall-brick',
'wall',
'wall-stone',
'wall-tile',
'wall-wood',
'water',
'window-blind',
'window',
]
# Load image
img = mmcv.imread(img)
img = img.copy() # (H, W, 3)
img_h, img_w = img.shape[:-1]
if True:
# Draw masks
pan_results = result.pan_results
ids = np.unique(pan_results)[::-1]
legal_indices = ids != self.num_classes # for VOID label
ids = ids[legal_indices]
# Get predicted labels
labels = np.array([id % INSTANCE_OFFSET for id in ids],
dtype=np.int64)
labels = [self.CLASSES[label] for label in labels]
# (N_m, H, W)
segms = pan_results[None] == ids[:, None, None]
# Resize predicted masks
segms = [
mmcv.image.imresize(m.astype(float), (img_w, img_h))
for m in segms
]
# Choose colors for each instance in coco
colormap_coco = get_colormap(len(segms))
colormap_coco = (np.array(colormap_coco) / 255).tolist()
viz = Visualizer(img)
viz.overlay_instances(
labels=labels,
masks=segms,
assigned_colors=colormap_coco,
)
viz_img = viz.get_output().get_image()
else:
# Draw bboxes
bboxes = result.refine_bboxes[:, :4]
# Choose colors for each instance in coco
colormap_coco = get_colormap(len(bboxes))
colormap_coco = (np.array(colormap_coco) / 255).tolist()
# 1-index
labels = [self.CLASSES[label - 1] for label in result.labels]
viz = Visualizer(img)
viz.overlay_instances(
labels=labels,
boxes=bboxes,
assigned_colors=colormap_coco,
)
viz_img = viz.get_output().get_image()
# Draw relations
# Filter out relations
n_rel_topk = 20
# Exclude background class
rel_dists = result.rel_dists[:, 1:]
# rel_dists = result.rel_dists
rel_scores = rel_dists.max(1)
# rel_scores = result.triplet_scores
# Extract relations with top scores
rel_topk_idx = np.argpartition(rel_scores, -n_rel_topk)[-n_rel_topk:]
rel_labels_topk = rel_dists[rel_topk_idx].argmax(1)
rel_pair_idxes_topk = result.rel_pair_idxes[rel_topk_idx]
relations = np.concatenate(
[rel_pair_idxes_topk, rel_labels_topk[..., None]], axis=1)
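        # Each row of `relations` is (subject_idx, object_idx, predicate
        # label), where the indices point into the instances drawn above.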
n_rels = len(relations)
top_padding = 20
bottom_padding = 20
left_padding = 20
text_size = 10
text_padding = 5
text_height = text_size + 2 * text_padding
row_padding = 10
height = (top_padding + bottom_padding + n_rels *
(text_height + row_padding) - row_padding)
width = viz_img.shape[1]
curr_x = left_padding
curr_y = top_padding
# Adjust colormaps
colormap_coco = [adjust_text_color(c, viz) for c in colormap_coco]
viz_graph = VisImage(np.full((height, width, 3), 255))
for i, r in enumerate(relations):
s_idx, o_idx, rel_id = r
s_label = labels[s_idx]
o_label = labels[o_idx]
# Becomes 0-index
rel_label = self.PREDICATES[rel_id]
# Draw subject text
text_width = draw_text(
viz_img=viz_graph,
text=s_label,
x=curr_x,
y=curr_y,
color=colormap_coco[s_idx],
size=text_size,
padding=text_padding,
# font=font,
)
curr_x += text_width
# Draw relation text
text_width = draw_text(
viz_img=viz_graph,
text=rel_label,
x=curr_x,
y=curr_y,
size=text_size,
padding=text_padding,
box_color='gainsboro',
# font=font,
)
curr_x += text_width
# Draw object text
text_width = draw_text(
viz_img=viz_graph,
text=o_label,
x=curr_x,
y=curr_y,
color=colormap_coco[o_idx],
size=text_size,
padding=text_padding,
# font=font,
)
curr_x += text_width
curr_x = left_padding
curr_y += text_height + row_padding
viz_graph = viz_graph.get_image()
viz_final = np.vstack([viz_img, viz_graph])
if out_file is not None:
mmcv.imwrite(viz_final, out_file)
if not (show or out_file):
return viz_final
| 35,038 | 34.003996 | 88 | py |
OpenPSG | OpenPSG-main/openpsg/models/frameworks/sg_rcnn.py | import mmcv
import numpy as np
import torch
import torch.nn.functional as F
from detectron2.utils.visualizer import VisImage, Visualizer
from mmdet.core import bbox2roi, build_assigner
from mmdet.models import DETECTORS, TwoStageDetector
from mmdet.models.builder import build_head
from openpsg.models.relation_heads.approaches import Result
from openpsg.utils.utils import adjust_text_color, draw_text, get_colormap
@DETECTORS.register_module()
class SceneGraphRCNN(TwoStageDetector):
def __init__(
self,
backbone,
rpn_head,
roi_head,
train_cfg,
test_cfg,
neck=None,
pretrained=None,
init_cfg=None,
relation_head=None,
):
super(SceneGraphRCNN, self).__init__(
backbone=backbone,
neck=neck,
rpn_head=rpn_head,
roi_head=roi_head,
train_cfg=train_cfg,
test_cfg=test_cfg,
pretrained=pretrained,
init_cfg=init_cfg,
)
# Init relation head
if relation_head is not None:
self.relation_head = build_head(relation_head)
# Cache the detection results to speed up the sgdet training.
self.rpn_results = dict()
self.det_results = dict()
@property
def with_relation(self):
return hasattr(self,
'relation_head') and self.relation_head is not None
def forward_train(
self,
img,
img_metas,
gt_bboxes,
gt_labels,
gt_bboxes_ignore=None,
gt_masks=None,
proposals=None,
gt_rels=None,
gt_keyrels=None,
gt_relmaps=None,
gt_scenes=None,
rescale=False,
**kwargs,
):
x = self.extract_feat(img)
################################################################
# Specifically for Relation Prediction / SGG. #
# The detector part must perform as if at test mode. #
################################################################
if self.with_relation:
# # assert gt_rels is not None and gt_relmaps is not None
# if self.relation_head.with_visual_mask and (not self.with_mask):
# raise ValueError("The basic detector did not provide masks.")
# Change to 1-index here:
gt_labels = [l + 1 for l in gt_labels]
"""
            NOTE: (for VG) When the gt masks are None but the head needs
            masks, we use the gt_box and gt_label (if needed) to generate
            fake masks.
"""
(
bboxes,
labels,
target_labels,
dists,
masks,
points,
) = self.detector_simple_test(
x,
img_metas,
gt_bboxes,
gt_labels,
gt_masks,
proposals,
use_gt_box=self.relation_head.use_gt_box,
use_gt_label=self.relation_head.use_gt_label,
rescale=rescale,
)
# saliency_maps = (
# self.saliency_detector_test(img, img_meta)
# if self.with_saliency
# else None
# )
gt_result = Result(
bboxes=gt_bboxes,
labels=gt_labels,
rels=gt_rels,
relmaps=gt_relmaps,
masks=gt_masks,
rel_pair_idxes=[rel[:, :2].clone() for rel in gt_rels]
if gt_rels is not None else None,
rel_labels=[rel[:, -1].clone() for rel in gt_rels]
if gt_rels is not None else None,
key_rels=gt_keyrels if gt_keyrels is not None else None,
img_shape=[meta['img_shape'] for meta in img_metas],
scenes=gt_scenes,
)
det_result = Result(
bboxes=bboxes,
labels=labels,
dists=dists,
masks=masks,
points=points,
target_labels=target_labels,
target_scenes=gt_scenes,
img_shape=[meta['img_shape'] for meta in img_metas],
)
det_result = self.relation_head(x, img_metas, det_result,
gt_result)
return self.relation_head.loss(det_result)
def forward_test(self, imgs, img_metas, **kwargs):
"""
Args:
imgs (List[Tensor]): the outer list indicates test-time
augmentations and inner Tensor should have a shape NxCxHxW,
which contains all images in the batch.
img_metas (List[List[dict]]): the outer list indicates test-time
augs (multiscale, flip, etc.) and the inner list indicates
images in a batch.
"""
for var, name in [(imgs, 'imgs'), (img_metas, 'img_metas')]:
if not isinstance(var, list):
raise TypeError(f'{name} must be a list, but got {type(var)}')
num_augs = len(imgs)
if num_augs != len(img_metas):
raise ValueError(f'num of augmentations ({len(imgs)}) '
f'!= num of image meta ({len(img_metas)})')
# NOTE the batched image size information may be useful, e.g.
# in DETR, this is needed for the construction of masks, which is
# then used for the transformer_head.
for img, img_meta in zip(imgs, img_metas):
batch_size = len(img_meta)
for img_id in range(batch_size):
img_meta[img_id]['batch_input_shape'] = tuple(img.size()[-2:])
key_first = kwargs.pop('key_first', False)
# if relation_mode:
assert num_augs == 1
return self.relation_simple_test(imgs[0],
img_metas[0],
key_first=key_first,
**kwargs)
# if num_augs == 1:
# # proposals (List[List[Tensor]]): the outer list indicates
# # test-time augs (multiscale, flip, etc.) and the inner list
# # indicates images in a batch.
# # The Tensor should have a shape Px4, where P is the number of
# # proposals.
# if "proposals" in kwargs:
# kwargs["proposals"] = kwargs["proposals"][0]
# return self.simple_test(imgs[0], img_metas[0], **kwargs)
# else:
# assert imgs[0].size(0) == 1, (
# "aug test does not support "
# "inference with batch size "
# f"{imgs[0].size(0)}"
# )
# # TODO: support test augmentation for predefined proposals
# assert "proposals" not in kwargs
# return self.aug_test(imgs, img_metas, **kwargs)
def detector_simple_test(
self,
x,
img_meta,
gt_bboxes,
gt_labels,
gt_masks,
proposals=None,
use_gt_box=False,
use_gt_label=False,
rescale=False,
is_testing=False,
):
"""Test without augmentation. Used in SGG.
Return:
            det_bboxes: (list[Tensor]): The boxes may have 5 columns (sgdet)
                or 4 columns (predcls/sgcls).
            det_labels: (list[Tensor]): 1D tensor, det_labels (sgdet) or
                gt_labels (predcls/sgcls).
            det_dists: (list[Tensor]): 2D tensor, N x Nc, the bg column is 0.
                detected dists (sgdet/sgcls), or None (predcls).
            masks: (list[list[Tensor]]): Mask is associated with box. Thus,
                in predcls/sgcls mode, it will first return the gt_masks.
                But some datasets do not contain gt_masks. We try to use the
                gt box to obtain the masks.
"""
assert self.with_bbox, 'Bbox head must be implemented.'
if use_gt_box and use_gt_label: # predcls
target_labels = gt_labels
return gt_bboxes, gt_labels, target_labels, None, None, None
elif use_gt_box and not use_gt_label: # sgcls
"""The self implementation return 1-based det_labels."""
target_labels = gt_labels
_, det_labels, det_dists = self.detector_simple_test_det_bbox(
x, img_meta, proposals=gt_bboxes, rescale=rescale)
return gt_bboxes, det_labels, target_labels, det_dists, None, None
elif not use_gt_box and not use_gt_label:
"""It returns 1-based det_labels."""
(
det_bboxes,
det_labels,
det_dists,
_,
_,
) = self.detector_simple_test_det_bbox_mask(x,
img_meta,
rescale=rescale)
# get target labels for the det bboxes: make use of the bbox head assigner
if not is_testing: # excluding the testing phase
target_labels = []
bbox_assigner = build_assigner(self.train_cfg.rcnn.assigner)
for i in range(len(img_meta)):
assign_result = bbox_assigner.assign(
det_bboxes[i],
gt_bboxes[i],
gt_labels=gt_labels[i] - 1)
target_labels.append(assign_result.labels + 1)
else:
target_labels = None
# det_bboxes: List[B x (N_b, 5)], last dim is probability
# det_labels: List[B x (N_b)]
# target_labels: List[B x (N_b)]
# det_dists: List[B x (N_b, N_c + 1)], doesn't sum to 1
return det_bboxes, det_labels, target_labels, det_dists, None, None
def detector_simple_test_det_bbox(self,
x,
img_meta,
proposals=None,
rescale=False):
"""Run the detector in test mode, given gt_bboxes, return the labels, dists
Return:
det_labels: 1 based.
"""
num_levels = len(x)
if proposals is None:
proposal_list = self.rpn_head.simple_test_rpn(x, img_meta)
else:
proposal_list = proposals
"""Support multi-image per batch"""
det_bboxes, det_labels, score_dists = [], [], []
for img_id in range(len(img_meta)):
x_i = tuple([x[i][img_id][None] for i in range(num_levels)])
img_meta_i = [img_meta[img_id]]
proposal_list_i = [proposal_list[img_id]]
det_labels_i, score_dists_i = self.simple_test_given_bboxes(
x_i, proposal_list_i)
det_bboxes.append(proposal_list[img_id])
det_labels.append(det_labels_i)
score_dists.append(score_dists_i)
return det_bboxes, det_labels, score_dists
def detector_simple_test_det_bbox_mask(self, x, img_meta, rescale=False):
"""Run the detector in test mode, return the detected boxes, labels,
dists, and masks."""
"""RPN phase"""
num_levels = len(x)
# proposal_list = self.rpn_head.simple_test_rpn(x, img_meta, self.test_cfg.rpn)
proposal_list = self.rpn_head.simple_test_rpn(x, img_meta)
# List[Tensor(1000, 5)]
"""Support multi-image per batch"""
# det_bboxes, det_labels, score_dists = [], [], []
# # img_meta: List[metadata]
# for img_id in range(len(img_meta)):
# x_i = tuple([x[i][img_id][None] for i in range(num_levels)])
# img_meta_i = [img_meta[img_id]]
# proposal_list_i = [proposal_list[img_id]]
# (
# det_bboxes_i,
# det_labels_i,
# score_dists_i,
# ) = self.roi_head.simple_test_bboxes(
# x_i,
# img_meta_i,
# proposal_list_i,
# self.test_cfg.rcnn,
# rescale=rescale,
# # return_dist=True,
# )
# det_bboxes.append(det_bboxes_i)
# det_labels.append(det_labels_i + 1)
# score_dists.append(score_dists_i)
det_bboxes, det_labels = self.roi_head.simple_test_bboxes(
x,
img_meta,
proposal_list,
self.test_cfg.rcnn,
rescale=rescale,
# return_dist=True,
)
det_labels, score_dists = zip(*det_labels)
# det_labels = [l + 1 for l in det_labels]
# det_bboxes: (N_b, 5)
# det_labels: (N_b)
# score_dists: (N_b, N_c + 1)
return det_bboxes, det_labels, score_dists, None, None
# if not self.with_mask:
# return det_bboxes, det_labels, score_dists, None, None
# else:
# if self.bbox_head.__class__.__name__ == "ExtrDetWeightSharedFCBBoxHead":
# det_weight = self.bbox_head.det_weight_hook()
# else:
# det_weight = None
# segm_masks = []
# points = []
# for img_id in range(len(img_meta)):
# x_i = tuple([x[i][img_id][None] for i in range(num_levels)])
# img_meta_i = [img_meta[img_id]]
# test_result = self.simple_test_mask(
# x_i,
# img_meta_i,
# det_bboxes[img_id],
# det_labels[img_id] - 1,
# rescale=rescale,
# det_weight=det_weight,
# with_point=self.with_point,
# )
# if isinstance(test_result, tuple):
# segm_masks_i, points_i = test_result
# points.append(points_i)
# else:
# segm_masks_i = test_result
# segm_masks.append(segm_masks_i)
# return det_bboxes, det_labels, score_dists, segm_masks, points
def simple_test_given_bboxes(self, x, proposals):
"""For SGG~SGCLS mode: Given gt boxes, extract its predicted scores and
score dists.
Without any post-process.
"""
rois = bbox2roi(proposals)
roi_feats = self.roi_head.bbox_roi_extractor(
x[:len(self.roi_head.bbox_roi_extractor.featmap_strides)], rois)
if self.with_shared_head:
roi_feats = self.roi_head.shared_head(roi_feats)
cls_score, _ = self.roi_head.bbox_head(roi_feats)
cls_score[:, 1:] = F.softmax(cls_score[:, 1:], dim=1)
_, labels = torch.max(cls_score[:, 1:], dim=1)
labels += 1
cls_score[:, 0] = 0
return labels, cls_score
def relation_simple_test(
self,
img,
img_meta,
gt_bboxes=None,
gt_labels=None,
gt_rels=None,
gt_masks=None,
gt_scenes=None,
rescale=False,
ignore_classes=None,
key_first=False,
):
"""
:param img:
:param img_meta:
        :param gt_bboxes: Usually, during forward (train/val/test), it should
            not be None. But for demo (inference), it should be None. The
            same holds for gt_labels.
        :param gt_labels:
        :param gt_rels: Make sure gt_rels is never passed into the forward
            process in any mode. It is only used to visualize the results.
        :param gt_masks:
        :param rescale:
        :param ignore_classes: In practice, you may want to ignore some
            object classes.
:return:
"""
# Extract the outer list: Since the aug test is temporarily not supported.
if gt_bboxes is not None:
gt_bboxes = gt_bboxes[0]
if gt_labels is not None:
gt_labels = gt_labels[0]
if gt_masks is not None:
gt_masks = gt_masks[0]
x = self.extract_feat(img)
# if self.relation_head.with_visual_mask and (not self.with_mask):
# raise ValueError("The basic detector did not provide masks.")
"""
        NOTE: (for VG) When the gt masks are None but the head needs masks,
        we use the gt_box and gt_label (if needed) to generate fake masks.
"""
# Change to 1-index here:
gt_labels = [l + 1 for l in gt_labels]
# Rescale should be forbidden here since the bboxes and masks will be used in relation module.
bboxes, labels, target_labels, dists, masks, points = self.detector_simple_test(
x,
img_meta,
gt_bboxes,
gt_labels,
gt_masks,
use_gt_box=self.relation_head.use_gt_box,
use_gt_label=self.relation_head.use_gt_label,
rescale=False,
is_testing=True,
)
# saliency_maps = (
# self.saliency_detector_test(img, img_meta) if self.with_saliency else None
# )
det_result = Result(
bboxes=bboxes,
labels=labels,
dists=dists,
masks=masks,
points=points,
target_labels=target_labels,
# saliency_maps=saliency_maps,
img_shape=[meta['img_shape'] for meta in img_meta],
)
det_result = self.relation_head(x,
img_meta,
det_result,
is_testing=True,
ignore_classes=ignore_classes)
"""
Transform the data type, and rescale the bboxes and masks if needed
(for visual, do not rescale, for evaluation, rescale).
"""
scale_factor = img_meta[0]['scale_factor']
return self.relation_head.get_result(det_result,
scale_factor,
rescale=rescale,
key_first=key_first)
def show_result(
self,
img,
result,
score_thr=0.3,
bbox_color=(72, 101, 241),
text_color=(72, 101, 241),
mask_color=None,
thickness=2,
font_size=13,
win_name='',
show=False,
wait_time=0,
out_file=None,
):
"""Draw `result` over `img`.
Args:
img (str or Tensor): The image to be displayed.
result (Tensor or tuple): The results to draw over `img`
bbox_result or (bbox_result, segm_result).
score_thr (float, optional): Minimum score of bboxes to be shown.
Default: 0.3.
bbox_color (str or tuple(int) or :obj:`Color`):Color of bbox lines.
The tuple of color should be in BGR order. Default: 'green'
text_color (str or tuple(int) or :obj:`Color`):Color of texts.
The tuple of color should be in BGR order. Default: 'green'
mask_color (None or str or tuple(int) or :obj:`Color`):
Color of masks. The tuple of color should be in BGR order.
Default: None
thickness (int): Thickness of lines. Default: 2
font_size (int): Font size of texts. Default: 13
win_name (str): The window name. Default: ''
wait_time (float): Value of waitKey param.
Default: 0.
show (bool): Whether to show the image.
Default: False.
out_file (str or None): The filename to write the image.
Default: None.
Returns:
img (Tensor): Only if not `show` or `out_file`
"""
img = mmcv.imread(img)
img = img.copy() # (H, W, 3)
# TODO: Use threshold to filter out
# Draw bboxes
bboxes = result.refine_bboxes[:, :4]
# Choose colors for each instance in coco
colormap_coco = get_colormap(len(bboxes))
colormap_coco = (np.array(colormap_coco) / 255).tolist()
# 1-index
        labels = [self.CLASSES[label - 1] for label in result.labels]
viz = Visualizer(img)
viz.overlay_instances(
labels=labels,
boxes=bboxes,
assigned_colors=colormap_coco,
)
viz_img = viz.get_output().get_image()
# Draw relations
# Filter out relations
n_rel_topk = 20
# To exclude background class?
rel_dists = result.rel_dists[:, 1:]
# rel_dists = result.rel_dists
rel_scores = rel_dists.max(1)
# rel_scores = result.triplet_scores
# Extract relations with top scores
rel_topk_idx = np.argpartition(rel_scores, -n_rel_topk)[-n_rel_topk:]
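        # np.argpartition returns the indices of the top-k scores in
        # arbitrary order; a full sort is unnecessary since every selected
        # relation is drawn anyway.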
rel_labels_topk = rel_dists[rel_topk_idx].argmax(1)
rel_pair_idxes_topk = result.rel_pair_idxes[rel_topk_idx]
relations = np.concatenate(
[rel_pair_idxes_topk, rel_labels_topk[..., None]], axis=1)
n_rels = len(relations)
top_padding = 20
bottom_padding = 20
left_padding = 20
text_size = 10
text_padding = 5
text_height = text_size + 2 * text_padding
row_padding = 10
height = (top_padding + bottom_padding + n_rels *
(text_height + row_padding) - row_padding)
width = viz_img.shape[1]
curr_x = left_padding
curr_y = top_padding
# Adjust colormaps
colormap_coco = [adjust_text_color(c, viz) for c in colormap_coco]
viz_graph = VisImage(np.full((height, width, 3), 255))
for i, r in enumerate(relations):
s_idx, o_idx, rel_id = r
# # Filter for specific relations
# if rel_ids_to_keep:
# if rel_id not in rel_ids_to_keep:
# continue
# elif rel_ids_to_filter:
# if rel_id in rel_ids_to_filter:
# continue
s_label = labels[s_idx]
o_label = labels[o_idx]
# Becomes 0-index
rel_label = self.PREDICATES[rel_id]
# Draw subject text
text_width = draw_text(
viz_img=viz_graph,
text=s_label,
x=curr_x,
y=curr_y,
color=colormap_coco[s_idx],
size=text_size,
padding=text_padding,
# font=font,
)
curr_x += text_width
# Draw relation text
text_width = draw_text(
viz_img=viz_graph,
text=rel_label,
x=curr_x,
y=curr_y,
size=text_size,
padding=text_padding,
box_color='gainsboro',
# font=font,
)
curr_x += text_width
# Draw object text
text_width = draw_text(
viz_img=viz_graph,
text=o_label,
x=curr_x,
y=curr_y,
color=colormap_coco[o_idx],
size=text_size,
padding=text_padding,
# font=font,
)
curr_x += text_width
curr_x = left_padding
curr_y += text_height + row_padding
viz_graph = viz_graph.get_image()
viz_final = np.vstack([viz_img, viz_graph])
if out_file is not None:
mmcv.imwrite(viz_final, out_file)
if not (show or out_file):
return viz_final
| 23,736 | 35.462366 | 112 | py |
OpenPSG | OpenPSG-main/openpsg/models/frameworks/__init__.py | from .sg_panoptic_fpn import SceneGraphPanopticFPN
from .sg_rcnn import SceneGraphRCNN
| 87 | 28.333333 | 50 | py |
OpenPSG | OpenPSG-main/openpsg/models/roi_heads/scene_graph_roi_head.py | """
See: https://mmdetection.readthedocs.io/en/v2.19.0/tutorials/customize_models.html
"""
from mmdet.models import HEADS, StandardRoIHead
@HEADS.register_module()
class SceneGraphRoIHead(StandardRoIHead):
def __init__(self, param, **kwargs):
super().__init__(**kwargs)
        self.param = param
| 316 | 25.416667 | 82 | py |
OpenPSG | OpenPSG-main/openpsg/models/roi_heads/__init__.py | from .bbox_heads import SceneGraphBBoxHead
| 43 | 21 | 42 | py |
OpenPSG | OpenPSG-main/openpsg/models/roi_heads/bbox_heads/__init__.py | from .sg_bbox_head import SceneGraphBBoxHead
| 45 | 22 | 44 | py |
OpenPSG | OpenPSG-main/openpsg/models/roi_heads/bbox_heads/sg_bbox_head.py | import torch.nn.functional as F
from mmcv.runner import force_fp32
from mmdet.models import Shared2FCBBoxHead
from mmdet.models.builder import HEADS
from openpsg.utils.utils import multiclass_nms_alt
@HEADS.register_module()
class SceneGraphBBoxHead(Shared2FCBBoxHead):
@force_fp32(apply_to=('cls_score', 'bbox_pred'))
def get_bboxes(
self,
rois,
cls_score,
bbox_pred,
img_shape,
scale_factor,
rescale=False,
cfg=None,
):
"""Transform network output for a batch into bbox predictions.
Args:
rois (Tensor): Boxes to be transformed. Has shape (num_boxes, 5).
last dimension 5 arrange as (batch_index, x1, y1, x2, y2).
cls_score (Tensor): Box scores, has shape
(num_boxes, num_classes + 1).
bbox_pred (Tensor, optional): Box energies / deltas.
has shape (num_boxes, num_classes * 4).
img_shape (Sequence[int], optional): Maximum bounds for boxes,
specifies (H, W, C) or (H, W).
scale_factor (ndarray): Scale factor of the
image arrange as (w_scale, h_scale, w_scale, h_scale).
rescale (bool): If True, return boxes in original image space.
Default: False.
cfg (obj:`ConfigDict`): `test_cfg` of Bbox Head. Default: None
Returns:
tuple[Tensor, Tensor]:
First tensor is `det_bboxes`, has the shape
(num_boxes, 5) and last
dimension 5 represent (tl_x, tl_y, br_x, br_y, score).
Second tensor is the labels with shape (num_boxes, ).
"""
# some loss (Seesaw loss..) may have custom activation
if self.custom_cls_channels:
scores = self.loss_cls.get_activation(cls_score)
else:
scores = F.softmax(cls_score,
dim=-1) if cls_score is not None else None
# bbox_pred would be None in some detector when with_reg is False,
# e.g. Grid R-CNN.
if bbox_pred is not None:
bboxes = self.bbox_coder.decode(rois[..., 1:],
bbox_pred,
max_shape=img_shape)
else:
bboxes = rois[:, 1:].clone()
if img_shape is not None:
bboxes[:, [0, 2]].clamp_(min=0, max=img_shape[1])
bboxes[:, [1, 3]].clamp_(min=0, max=img_shape[0])
if rescale and bboxes.size(0) > 0:
scale_factor = bboxes.new_tensor(scale_factor)
bboxes = (bboxes.view(bboxes.size(0), -1, 4) / scale_factor).view(
bboxes.size()[0], -1)
if cfg is None:
return bboxes, scores
else:
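            # Unlike the stock bbox head, this override calls
            # multiclass_nms_alt with return_dist=True so that the per-class
            # score distribution is kept alongside the NMS output for
            # downstream relation prediction.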
det_bboxes, det_labels = multiclass_nms_alt(
bboxes,
scores,
cfg.score_thr,
cfg.nms,
cfg.max_per_img,
return_dist=True,
)
return det_bboxes, det_labels
| 3,107 | 36.445783 | 78 | py |
OpenPSG | OpenPSG-main/openpsg/models/losses/seg_losses.py | import torch
import torch.nn as nn
import torch.nn.functional as F
from mmdet.models.builder import LOSSES
from mmdet.models.losses.utils import weighted_loss
#@mmcv.jit(derivate=True, coderize=True)
@weighted_loss
def dice_loss(input, target, mask=None, eps=0.001):
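    # Soft Dice per sample over the flattened H*W maps:
    # d = 2 * sum(x * y) / (sum(x^2) + sum(y^2)), with eps for numerical
    # stability; returns 1 - d. The optional binary mask restricts the
    # computation to valid pixels.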
N, H, W = input.shape
input = input.contiguous().view(N, H * W)
target = target.contiguous().view(N, H * W).float()
if mask is not None:
mask = mask.contiguous().view(N, H * W).float()
input = input * mask
target = target * mask
a = torch.sum(input * target, 1)
b = torch.sum(input * input, 1) + eps
c = torch.sum(target * target, 1) + eps
d = (2 * a) / (b + c)
#print('1-d max',(1-d).max())
return 1 - d
@LOSSES.register_module()
class psgtrDiceLoss(nn.Module):
def __init__(self, eps=1e-6, reduction='mean', loss_weight=1.0):
super(psgtrDiceLoss, self).__init__()
self.eps = eps
self.reduction = reduction
self.loss_weight = loss_weight
self.count = 0
def forward(self, inputs, targets, num_matches):
inputs = inputs.sigmoid()
inputs = inputs.flatten(1)
targets = targets.flatten(1)
numerator = 2 * (inputs * targets).sum(1)
denominator = inputs.sum(-1) + targets.sum(-1)
loss = 1 - (numerator + 1) / (denominator + 1)
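        # The +1 smoothing in numerator and denominator keeps the loss finite
        # and well-behaved when a predicted or target mask is empty.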
return self.loss_weight * loss.sum() / num_matches
@LOSSES.register_module()
class MultilabelCrossEntropy(torch.nn.Module):
def __init__(self, reduction='mean', loss_weight=1.0):
super().__init__()
self.reduction = reduction
self.loss_weight = loss_weight
def forward(self, inputs, targets):
assert (targets.sum(1) != 0).all()
loss = -(F.log_softmax(inputs, dim=1) *
targets).sum(1) / targets.sum(1)
loss = loss.mean()
return self.loss_weight * loss
@LOSSES.register_module()
class MultilabelLogRegression(torch.nn.Module):
def __init__(self, reduction='mean', loss_weight=1.0):
super().__init__()
self.reduction = reduction
self.loss_weight = loss_weight
def forward(self, inputs, targets):
assert (targets.sum(1) != 0).all()
loss_1 = -(torch.log((inputs + 1) / 2 + 1e-14) * targets).sum()
loss_0 = -(torch.log(1 - (inputs + 1) / 2 + 1e-14) *
(1 - targets)).sum()
# loss = loss.mean()
return self.loss_weight * (loss_1 + loss_0) / (targets.sum() +
(1 - targets).sum())
@LOSSES.register_module()
class LogRegression(torch.nn.Module):
def __init__(self, reduction='mean', loss_weight=1.0):
super().__init__()
self.reduction = reduction
self.loss_weight = loss_weight
def forward(self, inputs, targets):
positive_rate = 50
loss_1 = -(torch.log(
(inputs + 1) / 2 + 1e-14) * targets).sum() * positive_rate
loss_0 = -(torch.log(1 - (inputs + 1) / 2 + 1e-14) *
(1 - targets)).sum()
return self.loss_weight * (loss_1 + loss_0) / (targets.sum() +
(1 - targets).sum())
# def forward(self, inputs, targets):
# loss_1 = -(torch.log((inputs + 1) / 2 + 1e-14) * targets).sum()
# return self.loss_weight * loss_1
# def forward(self, inputs, targets):
# inputs = (inputs + 1) / 2 + 1e-14
# loss = F.mse_loss(inputs, targets.float(), reduction='mean')
# return self.loss_weight * loss
@LOSSES.register_module()
class BCEFocalLoss(torch.nn.Module):
def __init__(self, gamma=2, alpha=0.25, reduction='sum', loss_weight=1.0):
super().__init__()
self.gamma = gamma
self.alpha = alpha
self.reduction = reduction
self.loss_weight = loss_weight
def forward(self, inputs, targets, num_matches):
prob = inputs.sigmoid()
ce_loss = F.binary_cross_entropy_with_logits(inputs,
targets,
reduction='none')
p_t = prob * targets + (1 - prob) * (1 - targets)
loss = ce_loss * ((1 - p_t)**self.gamma)
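        # (1 - p_t) ** gamma down-weights well-classified pixels, focusing
        # the loss on hard examples (standard focal-loss modulation).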
if self.alpha >= 0:
alpha_t = self.alpha * targets + (1 - self.alpha) * (1 - targets)
loss = alpha_t * loss
return self.loss_weight * loss.mean(1).sum() / num_matches
# pt = torch.sigmoid(_input)
# bs = len(pt)
# target = target.type(torch.long)
# # print(pt.shape, target.shape)
# alpha = self.alpha
# loss = - alpha * (1 - pt) ** self.gamma * target * torch.log(pt) - \
# (1 - alpha) * pt ** self.gamma * (1 - target) * torch.log(1 - pt)
# # print('loss_shape',loss.shape)
# if self.reduction == 'elementwise_mean':
# loss = torch.mean(loss)
# elif self.reduction == 'sum':
# loss = torch.sum(loss)
# return loss*self.loss_weight/bs
| 5,046 | 34.542254 | 79 | py |
OpenPSG | OpenPSG-main/openpsg/models/losses/__init__.py | from .seg_losses import BCEFocalLoss, dice_loss, psgtrDiceLoss
__all__ = ['BCEFocalLoss', 'dice_loss', 'psgtrDiceLoss']
| 121 | 29.5 | 62 | py |
OpenPSG | OpenPSG-main/openpsg/datasets/psg.py | import os.path as osp
import random
from collections import defaultdict
import mmcv
import numpy as np
import torch
from detectron2.data.detection_utils import read_image
from mmdet.datasets import DATASETS, CocoPanopticDataset
from mmdet.datasets.coco_panoptic import COCOPanoptic
from mmdet.datasets.pipelines import Compose
from panopticapi.utils import rgb2id
from openpsg.evaluation import sgg_evaluation
from openpsg.models.relation_heads.approaches import Result
@DATASETS.register_module()
class PanopticSceneGraphDataset(CocoPanopticDataset):
def __init__(
self,
ann_file,
pipeline,
classes=None,
data_root=None,
img_prefix='',
seg_prefix=None,
proposal_file=None,
test_mode=False,
filter_empty_gt=True,
file_client_args=dict(backend='disk'),
# New args
split: str = 'train', # {"train", "test"}
all_bboxes: bool = False, # load all bboxes (thing, stuff) for SG
):
self.ann_file = ann_file
self.data_root = data_root
self.img_prefix = img_prefix
self.seg_prefix = seg_prefix
self.proposal_file = proposal_file
self.test_mode = test_mode
self.filter_empty_gt = filter_empty_gt
self.file_client = mmcv.FileClient(**file_client_args)
# join paths if data_root is specified
if self.data_root is not None:
if not osp.isabs(self.ann_file):
self.ann_file = osp.join(self.data_root, self.ann_file)
if not (self.img_prefix is None or osp.isabs(self.img_prefix)):
self.img_prefix = osp.join(self.data_root, self.img_prefix)
if not (self.seg_prefix is None or osp.isabs(self.seg_prefix)):
self.seg_prefix = osp.join(self.data_root, self.seg_prefix)
if not (self.proposal_file is None
or osp.isabs(self.proposal_file)):
self.proposal_file = osp.join(self.data_root,
self.proposal_file)
self.proposal_file = None
self.proposals = None
self.all_bboxes = all_bboxes
self.split = split
# Load dataset
dataset = mmcv.load(ann_file)
for d in dataset['data']:
# NOTE: 0-index for object class labels
# for s in d['segments_info']:
# s['category_id'] += 1
# for a in d['annotations']:
# a['category_id'] += 1
# NOTE: 1-index for predicate class labels
for r in d['relations']:
r[2] += 1
# NOTE: Filter out images with zero relations.
# Comment out this part for competition files
dataset['data'] = [
d for d in dataset['data'] if len(d['relations']) != 0
]
# Get split
assert split in {'train', 'test'}
if split == 'train':
self.data = [
d for d in dataset['data']
if d['image_id'] not in dataset['test_image_ids']
]
# self.data = self.data[:1000] # for quick debug
elif split == 'test':
self.data = [
d for d in dataset['data']
if d['image_id'] in dataset['test_image_ids']
]
# self.data = self.data[:1000] # for quick debug
# Init image infos
self.data_infos = []
for d in self.data:
self.data_infos.append({
'filename': d['file_name'],
'height': d['height'],
'width': d['width'],
'id': d['image_id'],
})
self.img_ids = [d['id'] for d in self.data_infos]
# Define classes, 0-index
# NOTE: Class ids should range from 0 to (num_classes - 1)
self.THING_CLASSES = dataset['thing_classes']
self.STUFF_CLASSES = dataset['stuff_classes']
self.CLASSES = self.THING_CLASSES + self.STUFF_CLASSES
self.PREDICATES = dataset['predicate_classes']
# NOTE: For evaluation
self.coco = self._init_cocoapi()
self.cat_ids = self.coco.get_cat_ids()
self.cat2label = {cat_id: i for i, cat_id in enumerate(self.cat_ids)}
self.categories = self.coco.cats
# processing pipeline
self.pipeline = Compose(pipeline)
if not self.test_mode:
self._set_group_flag()
def _init_cocoapi(self):
auxcoco = COCOPanoptic()
annotations = []
# Create mmdet coco panoptic data format
for d in self.data:
annotation = {
'file_name': d['pan_seg_file_name'],
'image_id': d['image_id'],
}
segments_info = []
for a, s in zip(d['annotations'], d['segments_info']):
segments_info.append({
'id':
s['id'],
'category_id':
s['category_id'],
'iscrowd':
s['iscrowd'],
'area':
int(s['area']),
# Convert from xyxy to xywh
'bbox': [
a['bbox'][0],
a['bbox'][1],
a['bbox'][2] - a['bbox'][0],
a['bbox'][3] - a['bbox'][1],
],
})
annotation['segments_info'] = segments_info
annotations.append(annotation)
thing_categories = [{
'id': i,
'name': name,
'isthing': 1
} for i, name in enumerate(self.THING_CLASSES)]
stuff_categories = [{
'id': i + len(self.THING_CLASSES),
'name': name,
'isthing': 0
} for i, name in enumerate(self.STUFF_CLASSES)]
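        # Stuff category ids continue after the thing ids so thing and stuff
        # classes share a single contiguous label space.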
# Create `dataset` attr for for `createIndex` method
auxcoco.dataset = {
'images': self.data_infos,
'annotations': annotations,
'categories': thing_categories + stuff_categories,
}
auxcoco.createIndex()
auxcoco.img_ann_map = auxcoco.imgToAnns
auxcoco.cat_img_map = auxcoco.catToImgs
return auxcoco
def get_ann_info(self, idx):
d = self.data[idx]
# Process bbox annotations
gt_bboxes_ignore = np.zeros((0, 4), dtype=np.float32)
if self.all_bboxes:
# NOTE: Get all the bbox annotations (thing + stuff)
gt_bboxes = np.array([a['bbox'] for a in d['annotations']],
dtype=np.float32)
gt_labels = np.array([a['category_id'] for a in d['annotations']],
dtype=np.int64)
else:
gt_bboxes = []
gt_labels = []
# FIXME: Do we have to filter out `is_crowd`?
# Do not train on `is_crowd`,
# i.e just follow the mmdet dataset classes
# Or treat them as stuff classes?
# Can try and train on datasets with iscrowd
# and without and see the difference
for a, s in zip(d['annotations'], d['segments_info']):
# NOTE: Only thing bboxes are loaded
if s['isthing']:
gt_bboxes.append(a['bbox'])
gt_labels.append(a['category_id'])
if gt_bboxes:
gt_bboxes = np.array(gt_bboxes, dtype=np.float32)
gt_labels = np.array(gt_labels, dtype=np.int64)
else:
gt_bboxes = np.zeros((0, 4), dtype=np.float32)
gt_labels = np.array([], dtype=np.int64)
# Process segment annotations
gt_mask_infos = []
for s in d['segments_info']:
gt_mask_infos.append({
'id': s['id'],
'category': s['category_id'],
'is_thing': s['isthing']
})
# Process relationship annotations
gt_rels = d['relations'].copy()
# Filter out dupes!
if self.split == 'train':
all_rel_sets = defaultdict(list)
for (o0, o1, r) in gt_rels:
all_rel_sets[(o0, o1)].append(r)
gt_rels = [(k[0], k[1], np.random.choice(v))
for k, v in all_rel_sets.items()]
gt_rels = np.array(gt_rels, dtype=np.int32)
else:
# for test or val set, filter the duplicate triplets,
# but allow multiple labels for each pair
all_rel_sets = []
for (o0, o1, r) in gt_rels:
if (o0, o1, r) not in all_rel_sets:
all_rel_sets.append((o0, o1, r))
gt_rels = np.array(all_rel_sets, dtype=np.int32)
# add relation to target
num_box = len(gt_mask_infos)
relation_map = np.zeros((num_box, num_box), dtype=np.int64)
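        # relation_map[i, j] stores the 1-indexed predicate label for the
        # ordered object pair (i, j); 0 means no annotated relation.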
for i in range(gt_rels.shape[0]):
            # If a relation already exists for this ordered pair, randomly
            # keep either the old or the new predicate.
if relation_map[int(gt_rels[i, 0]), int(gt_rels[i, 1])] > 0:
if random.random() > 0.5:
relation_map[int(gt_rels[i, 0]),
int(gt_rels[i, 1])] = int(gt_rels[i, 2])
else:
relation_map[int(gt_rels[i, 0]),
int(gt_rels[i, 1])] = int(gt_rels[i, 2])
ann = dict(
bboxes=gt_bboxes,
labels=gt_labels,
rels=gt_rels,
rel_maps=relation_map,
bboxes_ignore=gt_bboxes_ignore,
masks=gt_mask_infos,
seg_map=d['pan_seg_file_name'],
)
return ann
def pre_pipeline(self, results):
"""Prepare results dict for pipeline."""
super().pre_pipeline(results)
results['rel_fields'] = []
def prepare_test_img(self, idx):
# For SGG, since the forward process may need gt_bboxes/gt_labels,
# we should also load annotation as if in the training mode.
return self.prepare_train_img(idx)
def evaluate(
self,
results,
metric='predcls',
logger=None,
jsonfile_prefix=None,
classwise=True,
multiple_preds=False,
iou_thrs=0.5,
nogc_thres_num=None,
detection_method='bbox',
**kwargs,
):
"""Overwritten evaluate API:
        For each metric in metrics, it checks whether to invoke panoptic
        segmentation (PS) or scene graph (SG) evaluation. If the metric is
        not an SG metric, the superclass evaluate method is invoked to
        perform panoptic segmentation evaluation; otherwise, scene graph
        evaluation is performed.
"""
metrics = metric if isinstance(metric, list) else [metric]
# Available metrics
allowed_sg_metrics = ['predcls', 'sgcls', 'sgdet']
allowed_od_metrics = ['PQ']
sg_metrics, od_metrics = [], []
for m in metrics:
if m in allowed_od_metrics:
od_metrics.append(m)
elif m in allowed_sg_metrics:
sg_metrics.append(m)
else:
raise ValueError('Unknown metric {}.'.format(m))
if len(od_metrics) > 0:
# invoke object detection evaluation.
# Temporarily for bbox
if not isinstance(results[0], Result):
# it may be the results from the son classes
od_results = results
else:
od_results = [{'pan_results': r.pan_results} for r in results]
return super().evaluate(
od_results,
metric,
logger,
jsonfile_prefix,
classwise=classwise,
**kwargs,
)
if len(sg_metrics) > 0:
"""Invoke scene graph evaluation.
            Prepare the ground truth and predictions, transforming the
            predictions from key-wise to image-wise. The values in both
            gt_results and det_results are numpy arrays.
"""
if not hasattr(self, 'test_gt_results'):
print('\nLoading testing groundtruth...\n')
prog_bar = mmcv.ProgressBar(len(self))
gt_results = []
for i in range(len(self)):
ann = self.get_ann_info(i)
# NOTE: Change to object class labels 1-index here
ann['labels'] += 1
# load gt pan_seg masks
segment_info = ann['masks']
gt_img = read_image(self.img_prefix + '/' + ann['seg_map'],
format='RGB')
gt_img = gt_img.copy() # (H, W, 3)
seg_map = rgb2id(gt_img)
# get separate masks
gt_masks = []
labels_coco = []
for _, s in enumerate(segment_info):
label = self.CLASSES[s['category']]
labels_coco.append(label)
gt_masks.append(seg_map == s['id'])
# load gt pan seg masks done
gt_results.append(
Result(
bboxes=ann['bboxes'],
labels=ann['labels'],
rels=ann['rels'],
relmaps=ann['rel_maps'],
rel_pair_idxes=ann['rels'][:, :2],
rel_labels=ann['rels'][:, -1],
masks=gt_masks,
))
prog_bar.update()
print('\n')
self.test_gt_results = gt_results
return sgg_evaluation(
sg_metrics,
groundtruths=self.test_gt_results,
predictions=results,
iou_thrs=iou_thrs,
logger=logger,
ind_to_predicates=['__background__'] + self.PREDICATES,
multiple_preds=multiple_preds,
# predicate_freq=self.predicate_freq,
nogc_thres_num=nogc_thres_num,
detection_method=detection_method,
)
def get_statistics(self):
freq_matrix = self.get_freq_matrix()
eps = 1e-3
freq_matrix += eps
pred_dist = np.log(freq_matrix / freq_matrix.sum(2)[:, :, None] + eps)
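        # pred_dist is the log of the empirical predicate distribution
        # conditioned on each (subject, object) class pair; eps avoids
        # log(0). It is typically consumed as a frequency prior by relation
        # heads.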
result = {
'freq_matrix': torch.from_numpy(freq_matrix),
'pred_dist': torch.from_numpy(pred_dist).float(),
}
if result['pred_dist'].isnan().any():
print('check pred_dist: nan')
return result
def get_freq_matrix(self):
num_obj_classes = len(self.CLASSES)
num_rel_classes = len(self.PREDICATES)
freq_matrix = np.zeros(
(num_obj_classes, num_obj_classes, num_rel_classes + 1),
            dtype=np.float64)  # np.float is a deprecated alias for the builtin float
progbar = mmcv.ProgressBar(len(self.data))
for d in self.data:
segments = d['segments_info']
relations = d['relations']
for rel in relations:
object_index = segments[rel[0]]['category_id']
subject_index = segments[rel[1]]['category_id']
relation_index = rel[2]
freq_matrix[object_index, subject_index, relation_index] += 1
progbar.update()
return freq_matrix
| 15,541 | 34.083521 | 79 | py |
OpenPSG | OpenPSG-main/openpsg/datasets/sg.py | import os.path as osp
import random
from collections import defaultdict
import mmcv
import numpy as np
from mmdet.datasets import DATASETS, CocoDataset
from mmdet.datasets.api_wrappers import COCO
from mmdet.datasets.pipelines import Compose
from openpsg.evaluation import sgg_evaluation
from openpsg.models.relation_heads.approaches import Result
@DATASETS.register_module()
class SceneGraphDataset(CocoDataset):
def __init__(
self,
ann_file,
pipeline,
classes=None,
data_root=None,
img_prefix='',
seg_prefix=None,
proposal_file=None,
test_mode=False,
filter_empty_gt=True,
file_client_args=dict(backend='disk'),
# New args
split: str = 'train', # {"train", "test"}
):
self.ann_file = ann_file
self.data_root = data_root
self.img_prefix = img_prefix
self.seg_prefix = seg_prefix
self.proposal_file = proposal_file
self.test_mode = test_mode
self.filter_empty_gt = filter_empty_gt
self.file_client = mmcv.FileClient(**file_client_args)
# join paths if data_root is specified
if self.data_root is not None:
if not osp.isabs(self.ann_file):
self.ann_file = osp.join(self.data_root, self.ann_file)
if not (self.img_prefix is None or osp.isabs(self.img_prefix)):
self.img_prefix = osp.join(self.data_root, self.img_prefix)
if not (self.seg_prefix is None or osp.isabs(self.seg_prefix)):
self.seg_prefix = osp.join(self.data_root, self.seg_prefix)
if not (self.proposal_file is None
or osp.isabs(self.proposal_file)):
self.proposal_file = osp.join(self.data_root,
self.proposal_file)
self.proposal_file = None
self.proposals = None
self.split = split
# Load dataset
dataset = mmcv.load(ann_file)
for d in dataset['data']:
# NOTE: 0-index for object class labels
# for s in d['segments_info']:
# s['category_id'] += 1
# for a in d['annotations']:
# a['category_id'] += 1
# NOTE: 1-index for predicate class labels
for r in d['relations']:
r[2] += 1
# NOTE: Filter out images with zero relations
dataset['data'] = [
d for d in dataset['data'] if len(d['relations']) != 0
]
# Get split
assert split in {'train', 'test'}
if split == 'train':
self.data = [
d for d in dataset['data']
if d['image_id'] not in dataset['test_image_ids']
]
elif split == 'test':
self.data = [
d for d in dataset['data']
if d['image_id'] in dataset['test_image_ids']
]
# Init image infos
self.data_infos = []
for d in self.data:
self.data_infos.append({
'filename': d['file_name'],
'height': d['height'],
'width': d['width'],
'id': d['image_id'],
})
self.img_ids = [d['id'] for d in self.data_infos]
# Define classes, 0-index
# NOTE: Class ids should range from 0 to (num_classes - 1)
self.CLASSES = dataset['thing_classes'] + dataset['stuff_classes']
self.PREDICATES = dataset['predicate_classes']
# NOTE: For od evaluation
self.cat_ids = list(range(0, len(self.CLASSES)))
self.coco = self._init_cocoapi()
# processing pipeline
self.pipeline = Compose(pipeline)
if not self.test_mode:
self._set_group_flag()
def _init_cocoapi(self):
auxcoco = COCO()
anns = []
# Create COCO data format
for d in self.data:
for a in d['annotations']:
anns.append({
'area':
float((a['bbox'][2] - a['bbox'][0] + 1) *
(a['bbox'][3] - a['bbox'][1] + 1)),
# Convert from xyxy to xywh
'bbox': [
a['bbox'][0],
a['bbox'][1],
a['bbox'][2] - a['bbox'][0],
a['bbox'][3] - a['bbox'][1],
],
'category_id':
a['category_id'],
'id':
len(anns),
'image_id':
d['image_id'],
'iscrowd':
0,
})
auxcoco.dataset = {
'images':
self.data_infos,
'categories': [{
'id': i,
'name': name
} for i, name in enumerate(self.CLASSES)],
'annotations':
anns,
}
auxcoco.createIndex()
return auxcoco
def get_ann_info(self, idx):
d = self.data[idx]
gt_bboxes_ignore = np.zeros((0, 4), dtype=np.float32)
# Process bbox annotations
gt_bboxes = np.array([a['bbox'] for a in d['annotations']],
dtype=np.float32)
gt_labels = np.array([a['category_id'] for a in d['annotations']],
dtype=np.int64)
# Process relationship annotations
gt_rels = d['relations'].copy()
# Filter out dupes!
if self.split == 'train':
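            # During training, keep exactly one (randomly sampled) predicate per
            # subject-object pair so each pair appears at most once in gt_rels.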
all_rel_sets = defaultdict(list)
for (o0, o1, r) in gt_rels:
all_rel_sets[(o0, o1)].append(r)
gt_rels = [(k[0], k[1], np.random.choice(v))
for k, v in all_rel_sets.items()]
gt_rels = np.array(gt_rels, dtype=np.int32)
else:
# for test or val set, filter the duplicate triplets, but allow multiple labels for each pair
all_rel_sets = []
for (o0, o1, r) in gt_rels:
if (o0, o1, r) not in all_rel_sets:
all_rel_sets.append((o0, o1, r))
gt_rels = np.array(all_rel_sets, dtype=np.int32)
# add relation to target
num_box = len(gt_bboxes)
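        # relation_map[i, j] stores the predicate label between box i and box j;
        # 0 means no annotated relation.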
relation_map = np.zeros((num_box, num_box), dtype=np.int64)
for i in range(gt_rels.shape[0]):
            # If a relation already exists for this pair, overwrite it with the
            # new label (both branches below assign the same value).
if relation_map[int(gt_rels[i, 0]), int(gt_rels[i, 1])] > 0:
if random.random() > 0.5:
relation_map[int(gt_rels[i, 0]),
int(gt_rels[i, 1])] = int(gt_rels[i, 2])
else:
relation_map[int(gt_rels[i, 0]),
int(gt_rels[i, 1])] = int(gt_rels[i, 2])
ann = dict(
bboxes=gt_bboxes,
labels=gt_labels,
rels=gt_rels,
rel_maps=relation_map,
bboxes_ignore=gt_bboxes_ignore,
masks=None,
seg_map=None,
)
return ann
def pre_pipeline(self, results):
"""Prepare results dict for pipeline."""
super().pre_pipeline(results)
results['rel_fields'] = []
def prepare_test_img(self, idx):
# For SGG, since the forward process may need gt_bboxes/gt_labels,
# we should also load annotation as if in the training mode.
return self.prepare_train_img(idx)
def evaluate(
self,
results,
metric='predcls',
logger=None,
jsonfile_prefix=None,
classwise=False,
multiple_preds=False,
iou_thrs=0.5,
nogc_thres_num=None,
**kwargs,
):
"""
        **kwargs: contains the parameters specific to OD evaluation, e.g., proposal_nums.
        Overwritten evaluate API:
        For each metric in metrics, check whether to invoke OD or SG evaluation.
        If the metric is an OD metric, the evaluate method of the super class is
        invoked to perform object detection evaluation; otherwise, scene graph
        evaluation is performed.
"""
metrics = metric if isinstance(metric, list) else [metric]
# Available metrics
allowed_sg_metrics = ['predcls', 'sgcls', 'sgdet']
allowed_od_metrics = ['bbox', 'segm', 'proposal', 'proposal_fast']
sg_metrics, od_metrics = [], []
for m in metrics:
if m in allowed_od_metrics:
od_metrics.append(m)
elif m in allowed_sg_metrics:
sg_metrics.append(m)
else:
raise ValueError('Unknown metric {}.'.format(m))
if len(od_metrics) > 0:
# invoke object detection evaluation.
# Temporarily for bbox
if not isinstance(results[0], Result):
                # it may be the results from subclasses
od_results = results
else:
od_results = [(r.formatted_bboxes, r.formatted_masks)
for r in results]
return super().evaluate(
od_results,
metric,
logger,
jsonfile_prefix,
classwise=classwise,
iou_thrs=None,
**kwargs,
)
if len(sg_metrics) > 0:
"""Invoke scenen graph evaluation.
prepare the groundtruth and predictions. Transform the predictions
of key-wise to image-wise. Both the value in gt_results and
det_results are numpy array.
"""
if not hasattr(self, 'test_gt_results'):
                print('\nLoading testing groundtruth...\n')
prog_bar = mmcv.ProgressBar(len(self))
gt_results = []
for i in range(len(self)):
ann = self.get_ann_info(i)
# NOTE: Change to object class labels 1-index here
ann['labels'] += 1
gt_results.append(
Result(
bboxes=ann['bboxes'],
labels=ann['labels'],
rels=ann['rels'],
relmaps=ann['rel_maps'],
rel_pair_idxes=ann['rels'][:, :2],
rel_labels=ann['rels'][:, -1],
))
prog_bar.update()
print('\n')
self.test_gt_results = gt_results
return sgg_evaluation(
sg_metrics,
groundtruths=self.test_gt_results,
predictions=results,
iou_thrs=iou_thrs,
logger=logger,
ind_to_predicates=['__background__'] + self.PREDICATES,
multiple_preds=multiple_preds,
# predicate_freq=self.predicate_freq,
nogc_thres_num=nogc_thres_num,
)
| 11,107 | 33.390093 | 105 | py |
OpenPSG | OpenPSG-main/openpsg/datasets/__init__.py | from .builder import DATASETS, PIPELINES, build_dataset
from .pipelines import (LoadPanopticSceneGraphAnnotations,
LoadSceneGraphAnnotations,
PanopticSceneGraphFormatBundle, SceneGraphFormatBundle)
from .psg import PanopticSceneGraphDataset
from .sg import SceneGraphDataset
__all__ = [
'PanopticSceneGraphFormatBundle', 'SceneGraphFormatBundle',
'build_dataset', 'LoadPanopticSceneGraphAnnotations',
'LoadSceneGraphAnnotations', 'PanopticSceneGraphDataset',
'SceneGraphDataset', 'DATASETS', 'PIPELINES'
]
| 571 | 39.857143 | 79 | py |
OpenPSG | OpenPSG-main/openpsg/datasets/builder.py | # Copyright (c) OpenMMLab. All rights reserved.
import platform
from mmcv.utils import Registry, build_from_cfg
from mmdet.datasets import DATASETS as MMDET_DATASETS
from mmdet.datasets.builder import _concat_dataset
if platform.system() != 'Windows':
# https://github.com/pytorch/pytorch/issues/973
import resource
rlimit = resource.getrlimit(resource.RLIMIT_NOFILE)
base_soft_limit = rlimit[0]
hard_limit = rlimit[1]
soft_limit = min(max(4096, base_soft_limit), hard_limit)
resource.setrlimit(resource.RLIMIT_NOFILE, (soft_limit, hard_limit))
OBJECTSAMPLERS = Registry('Object sampler')
DATASETS = Registry('dataset')
PIPELINES = Registry('pipeline')
def build_dataset(cfg, default_args=None):
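    # Mirrors mmdet's build_dataset, but resolves dataset types registered in the
    # local DATASETS registry first and falls back to mmdet's registry otherwise.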
from mmdet.datasets.dataset_wrappers import (ClassBalancedDataset,
ConcatDataset, RepeatDataset)
if isinstance(cfg, (list, tuple)):
dataset = ConcatDataset([build_dataset(c, default_args) for c in cfg])
elif cfg['type'] == 'ConcatDataset':
dataset = ConcatDataset(
[build_dataset(c, default_args) for c in cfg['datasets']],
cfg.get('separate_eval', True))
elif cfg['type'] == 'RepeatDataset':
dataset = RepeatDataset(build_dataset(cfg['dataset'], default_args),
cfg['times'])
elif cfg['type'] == 'ClassBalancedDataset':
dataset = ClassBalancedDataset(
build_dataset(cfg['dataset'], default_args), cfg['oversample_thr'])
elif isinstance(cfg.get('ann_file'), (list, tuple)):
dataset = _concat_dataset(cfg, default_args)
elif cfg['type'] in DATASETS._module_dict.keys():
dataset = build_from_cfg(cfg, DATASETS, default_args)
else:
dataset = build_from_cfg(cfg, MMDET_DATASETS, default_args)
return dataset
| 1,841 | 40.863636 | 79 | py |
OpenPSG | OpenPSG-main/openpsg/datasets/pipelines/formatting.py | from mmcv.parallel import DataContainer as DC
from mmdet.datasets import PIPELINES
from mmdet.datasets.pipelines import DefaultFormatBundle, to_tensor
@PIPELINES.register_module()
class SceneGraphFormatBundle(DefaultFormatBundle):
def __call__(self, results):
results = super().__call__(results)
if 'rel_fields' in results and len(results['rel_fields']) > 0:
for key in results['rel_fields']:
results[key] = DC(to_tensor(results[key]))
if 'gt_scenes' in results:
results['gt_scenes'] = DC(to_tensor(results['gt_scenes']))
return results
@PIPELINES.register_module()
class PanopticSceneGraphFormatBundle(SceneGraphFormatBundle):
def __call__(self, results):
results = super().__call__(results)
for key in ['all_gt_bboxes', 'all_gt_labels']:
if key not in results:
continue
results[key] = DC(to_tensor(results[key]))
return results
| 982 | 30.709677 | 70 | py |
OpenPSG | OpenPSG-main/openpsg/datasets/pipelines/loading.py | import os.path as osp
import mmcv
import numpy as np
from mmcv.parallel import DataContainer as DC
from mmdet.core import BitmapMasks
from mmdet.datasets import PIPELINES
from mmdet.datasets.pipelines import (DefaultFormatBundle, LoadAnnotations,
to_tensor)
from mmdet.datasets.pipelines.loading import LoadPanopticAnnotations
try:
from panopticapi.utils import rgb2id
except ImportError:
rgb2id = None
@PIPELINES.register_module()
class RelsFormatBundle(DefaultFormatBundle):
"""Transfer gt_rels to tensor too."""
def __call__(self, results):
results = super().__call__(results)
if 'gt_rels' in results:
results['gt_rels'] = DC(to_tensor(results['gt_rels']))
return results
@PIPELINES.register_module()
class LoadSceneGraphAnnotations(LoadAnnotations):
def __init__(
self,
with_bbox=True,
with_label=True,
with_mask=False,
with_seg=False,
poly2mask=True,
file_client_args=dict(backend='disk'),
# New args
with_rel=False,
):
super().__init__(
with_bbox=with_bbox,
with_label=with_label,
with_mask=with_mask,
with_seg=with_seg,
poly2mask=poly2mask,
file_client_args=dict(backend='disk'),
)
self.with_rel = with_rel
def _load_rels(self, results):
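        # Copy the relation triplets and dense relation map into `results`, and
        # register them under 'rel_fields' so later pipeline steps (e.g. the
        # format bundle) can pick them up.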
ann_info = results['ann_info']
results['gt_rels'] = ann_info['rels']
results['gt_relmaps'] = ann_info['rel_maps']
assert 'rel_fields' in results
results['rel_fields'] += ['gt_rels', 'gt_relmaps']
return results
def __call__(self, results):
results = super().__call__(results)
if self.with_rel:
results = self._load_rels(results)
return results
def __repr__(self):
repr_str = super().__repr__()
repr_str += f', with_rel={self.with_rel})'
return repr_str
@PIPELINES.register_module()
class LoadPanopticSceneGraphAnnotations(LoadPanopticAnnotations):
def __init__(
self,
with_bbox=True,
with_label=True,
with_mask=True,
with_seg=True,
file_client_args=dict(backend='disk'),
# New args
with_rel=False,
):
super().__init__(
with_bbox=with_bbox,
with_label=with_label,
with_mask=with_mask,
with_seg=with_seg,
file_client_args=dict(backend='disk'),
)
self.with_rel = with_rel
def _load_rels(self, results):
ann_info = results['ann_info']
results['gt_rels'] = ann_info['rels']
results['gt_relmaps'] = ann_info['rel_maps']
assert 'rel_fields' in results
results['rel_fields'] += ['gt_rels', 'gt_relmaps']
return results
def _load_masks_and_semantic_segs(self, results):
"""Private function to load mask and semantic segmentation annotations.
In gt_semantic_seg, the foreground label is from `0` to
`num_things - 1`, the background label is from `num_things` to
`num_things + num_stuff - 1`, 255 means the ignored label (`VOID`).
Args:
results (dict): Result dict from :obj:`mmdet.CustomDataset`.
Returns:
dict: The dict contains loaded mask and semantic segmentation
annotations. `BitmapMasks` is used for mask annotations.
"""
if self.file_client is None:
self.file_client = mmcv.FileClient(**self.file_client_args)
filename = osp.join(results['seg_prefix'],
results['ann_info']['seg_map'])
img_bytes = self.file_client.get(filename)
pan_png = mmcv.imfrombytes(img_bytes,
flag='color',
channel_order='rgb').squeeze()
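        # rgb2id collapses the RGB-encoded panoptic PNG into a 2-D map of
        # per-pixel segment ids.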
pan_png = rgb2id(pan_png)
gt_masks = []
gt_seg = np.zeros_like(pan_png) + 255 # 255 as ignore
for mask_info in results['ann_info']['masks']:
mask = (pan_png == mask_info['id'])
gt_seg = np.where(mask, mask_info['category'], gt_seg)
# # The legal thing masks
# if mask_info.get('is_thing'):
# gt_masks.append(mask.astype(np.uint8))
gt_masks.append(mask.astype(np.uint8)) # get all masks
if self.with_mask:
h, w = results['img_info']['height'], results['img_info']['width']
gt_masks = BitmapMasks(gt_masks, h, w)
# print('origin_size')
# print(gt_masks)
results['gt_masks'] = gt_masks
results['mask_fields'].append('gt_masks')
if self.with_seg:
results['gt_semantic_seg'] = gt_seg
results['seg_fields'].append('gt_semantic_seg')
return results
def __call__(self, results):
results = super().__call__(results)
if self.with_rel:
results = self._load_rels(results)
return results
def __repr__(self):
repr_str = super().__repr__()
repr_str += f', with_rel={self.with_rel})'
return repr_str
| 5,278 | 29.33908 | 79 | py |
OpenPSG | OpenPSG-main/openpsg/datasets/pipelines/rel_randomcrop.py | import random
import numpy as np
from mmdet.datasets import PIPELINES
from mmdet.datasets.pipelines import RandomCrop
@PIPELINES.register_module()
class RelRandomCrop(RandomCrop):
"""Random crop the image & bboxes & masks & scene relations."""
def _crop_data(self, results, crop_size, allow_negative_crop):
"""Function to randomly crop images, bounding boxes, masks, semantic
segmentation maps, relations.
        Filter out incomplete relations and adjust the annotations of the remaining relations.
Args:
results (dict): Result dict from loading pipeline.
crop_size (tuple): Expected absolute size after cropping, (h, w).
allow_negative_crop (bool): Whether to allow a crop that does not
contain any bbox area. Default to False.
Returns:
dict: Randomly cropped results, 'img_shape' key in result dict is
updated according to crop size.
"""
assert crop_size[0] > 0 and crop_size[1] > 0
for key in results.get('img_fields', ['img']):
img = results[key]
margin_h = max(img.shape[0] - crop_size[0], 0)
margin_w = max(img.shape[1] - crop_size[1], 0)
offset_h = np.random.randint(0, margin_h + 1)
offset_w = np.random.randint(0, margin_w + 1)
crop_y1, crop_y2 = offset_h, offset_h + crop_size[0]
crop_x1, crop_x2 = offset_w, offset_w + crop_size[1]
# crop the image
img = img[crop_y1:crop_y2, crop_x1:crop_x2, ...]
img_shape = img.shape
results[key] = img
results['img_shape'] = img_shape
# crop bboxes accordingly and clip to the image boundary
for key in results.get('bbox_fields', []):
# e.g. gt_bboxes and gt_bboxes_ignore
bbox_offset = np.array([offset_w, offset_h, offset_w, offset_h],
dtype=np.float32)
bboxes = results[key] - bbox_offset
if self.bbox_clip_border:
bboxes[:, 0::2] = np.clip(bboxes[:, 0::2], 0, img_shape[1])
bboxes[:, 1::2] = np.clip(bboxes[:, 1::2], 0, img_shape[0])
valid_inds = (bboxes[:, 2] > bboxes[:, 0]) & (bboxes[:, 3] >
bboxes[:, 1])
if key == 'gt_bboxes':
rels_left = []
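                # Keep only relations whose subject and object boxes survive the
                # crop, remapping their indices into the filtered box list.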
for rel in results.get('gt_rels', []):
if valid_inds[rel[0]] and valid_inds[rel[1]]:
rels_left.append([
sum(valid_inds[:rel[0]]),
sum(valid_inds[:rel[1]]), rel[2]
])
rels_left = np.array(rels_left)
# If the crop does not contain any triplets and
# allow_negative_crop is False, skip this image.
if len(rels_left) == 0 and not allow_negative_crop:
return None
results['gt_rels'] = rels_left
results[key] = bboxes[valid_inds, :]
# label fields. e.g. gt_labels and gt_labels_ignore
label_key = self.bbox2label.get(key)
if label_key in results:
results[label_key] = results[label_key][valid_inds]
# mask fields, e.g. gt_masks and gt_masks_ignore
mask_key = self.bbox2mask.get(key)
if mask_key in results:
results[mask_key] = results[mask_key][
valid_inds.nonzero()[0]].crop(
np.asarray([crop_x1, crop_y1, crop_x2, crop_y2]))
if self.recompute_bbox:
results[key] = results[mask_key].get_bboxes()
# crop semantic seg
for key in results.get('seg_fields', []):
results[key] = results[key][crop_y1:crop_y2, crop_x1:crop_x2]
return results
| 3,923 | 44.627907 | 92 | py |
OpenPSG | OpenPSG-main/openpsg/datasets/pipelines/__init__.py | from .formatting import PanopticSceneGraphFormatBundle, SceneGraphFormatBundle
from .loading import (LoadPanopticSceneGraphAnnotations,
LoadSceneGraphAnnotations)
__all__ = [
'PanopticSceneGraphFormatBundle', 'SceneGraphFormatBundle',
'LoadPanopticSceneGraphAnnotations', 'LoadSceneGraphAnnotations'
]
| 333 | 36.111111 | 78 | py |
OpenPSG | OpenPSG-main/openpsg/utils/utils.py | from typing import Tuple
import os.path as osp
import PIL
import mmcv
import mmcv.ops as ops
import numpy as np
import torch
from detectron2.utils.colormap import colormap
from detectron2.utils.visualizer import VisImage, Visualizer
from mmdet.datasets.coco_panoptic import INSTANCE_OFFSET
import matplotlib.pyplot as plt
# from mmcv.ops.nms import batched_nms
def enumerate_by_image(im_inds):
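    # Yield (image_index, start, end) for every contiguous run of entries in
    # im_inds that belong to the same image.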
im_inds_np = im_inds.cpu().numpy()
initial_ind = int(im_inds_np[0])
s = 0
for i, val in enumerate(im_inds_np):
if val != initial_ind:
yield initial_ind, s, i
initial_ind = int(val)
s = i
yield initial_ind, s, len(im_inds_np)
def get_colormap(num_colors: int):
return (np.resize(colormap(), (num_colors, 3))).tolist()
def adjust_text_color(color: Tuple[float, float, float],
viz: Visualizer) -> Tuple[float, float, float]:
color = viz._change_color_brightness(color, brightness_factor=0.7)
color = np.maximum(color, 0.2)
color[np.argmax(color)] = max(0.8, np.max(color))
return color
def draw_text(
viz_img: VisImage = None,
text: str = None,
x: float = None,
y: float = None,
color: Tuple[float, float, float] = [0, 0, 0],
size: float = 10,
padding: float = 5,
box_color: str = 'black',
font: str = None,
) -> float:
text_obj = viz_img.ax.text(
x,
y,
text,
size=size,
# family="sans-serif",
bbox={
'facecolor': box_color,
'alpha': 0.8,
'pad': padding,
'edgecolor': 'none',
},
verticalalignment='top',
horizontalalignment='left',
color=color,
zorder=10,
rotation=0,
)
viz_img.get_image()
text_dims = text_obj.get_bbox_patch().get_extents()
return text_dims.width
def multiclass_nms_alt(
multi_bboxes,
multi_scores,
score_thr,
nms_cfg,
max_num=-1,
score_factors=None,
return_dist=False,
):
"""NMS for multi-class bboxes.
Args:
multi_bboxes (Tensor): shape (n, #class*4) or (n, 4)
multi_scores (Tensor): shape (n, #class), where the last column
contains scores of the background class, but this will be ignored.
score_thr (float): bbox threshold, bboxes with scores lower than it
will not be considered.
        nms_cfg (dict): NMS config, including the NMS type and IoU threshold.
max_num (int): if there are more than max_num bboxes after NMS,
only top max_num will be kept.
score_factors (Tensor): The factors multiplied to scores before
applying NMS
return_dist (bool): whether to return score dist.
Returns:
tuple: (bboxes, labels), tensors of shape (k, 5) and (k). Labels
are 0-based.
"""
num_classes = multi_scores.size(1) - 1
# exclude background category
if multi_bboxes.shape[1] > 4:
# (N_b, N_c, 4)
bboxes = multi_bboxes.view(multi_scores.size(0), -1, 4)
else:
bboxes = multi_bboxes[:, None].expand(-1, num_classes, 4)
scores = multi_scores[:, :-1]
# filter out boxes with low scores
valid_mask = scores > score_thr # (N_b, N_c)
valid_box_idxes = torch.nonzero(valid_mask)[:, 0].view(-1)
bboxes = bboxes[valid_mask]
if score_factors is not None:
scores = scores * score_factors[:, None]
score_dists = scores[valid_box_idxes, :]
# add bg column for later use.
score_dists = torch.cat(
(torch.zeros(score_dists.size(0), 1).to(score_dists), score_dists),
dim=-1)
scores = scores[valid_mask]
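    # The column index of each surviving (box, class) entry is its 0-based label.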
labels = valid_mask.nonzero()[:, 1]
if bboxes.numel() == 0:
bboxes = multi_bboxes.new_zeros((0, 5))
labels = multi_bboxes.new_zeros((0, ), dtype=torch.long)
if return_dist:
return bboxes, (labels, multi_bboxes.new_zeros(
(0, num_classes + 1)))
else:
return bboxes, labels
# Modified from https://github.com/pytorch/vision/blob
# /505cd6957711af790211896d32b40291bea1bc21/torchvision/ops/boxes.py#L39.
# strategy: in order to perform NMS independently per class.
# we add an offset to all the boxes. The offset is dependent
# only on the class idx, and is large enough so that boxes
# from different classes do not overlap
max_coordinate = bboxes.max()
offsets = labels.to(bboxes) * (max_coordinate + 1)
bboxes_for_nms = bboxes + offsets[:, None]
nms_cfg_ = nms_cfg.copy()
nms_type = nms_cfg_.pop('type', 'nms')
nms_op = getattr(ops, nms_type)
dets, keep = nms_op(bboxes_for_nms, scores, **nms_cfg_)
bboxes = bboxes[keep]
scores = dets[:, -1] # soft_nms will modify scores
labels = labels[keep]
score_dists = score_dists[keep]
if keep.size(0) > max_num:
_, inds = scores.sort(descending=True)
inds = inds[:max_num]
bboxes = bboxes[inds]
scores = scores[inds]
labels = labels[inds]
score_dists = score_dists[inds]
if return_dist:
# score_dists has bg_column
return torch.cat([bboxes, scores[:, None]],
1), (labels.view(-1), score_dists)
else:
return torch.cat([bboxes, scores[:, None]], 1), labels.view(-1)
CLASSES = [
'person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train',
'truck', 'boat', 'traffic light', 'fire hydrant', 'stop sign',
'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep', 'cow',
'elephant', 'bear', 'zebra', 'giraffe', 'backpack', 'umbrella', 'handbag',
'tie', 'suitcase', 'frisbee', 'skis', 'snowboard', 'sports ball', 'kite',
'baseball bat', 'baseball glove', 'skateboard', 'surfboard',
'tennis racket', 'bottle', 'wine glass', 'cup', 'fork', 'knife', 'spoon',
'bowl', 'banana', 'apple', 'sandwich', 'orange', 'broccoli', 'carrot',
'hot dog', 'pizza', 'donut', 'cake', 'chair', 'couch', 'potted plant',
'bed', 'dining table', 'toilet', 'tv', 'laptop', 'mouse', 'remote',
'keyboard', 'cell phone', 'microwave', 'oven', 'toaster', 'sink',
'refrigerator', 'book', 'clock', 'vase', 'scissors', 'teddy bear',
'hair drier', 'toothbrush', 'banner', 'blanket', 'bridge', 'cardboard',
'counter', 'curtain', 'door-stuff', 'floor-wood', 'flower', 'fruit',
'gravel', 'house', 'light', 'mirror-stuff', 'net', 'pillow', 'platform',
'playingfield', 'railroad', 'river', 'road', 'roof', 'sand', 'sea',
'shelf', 'snow', 'stairs', 'tent', 'towel', 'wall-brick', 'wall-stone',
'wall-tile', 'wall-wood', 'water-other', 'window-blind', 'window-other',
'tree-merged', 'fence-merged', 'ceiling-merged', 'sky-other-merged',
'cabinet-merged', 'table-merged', 'floor-other-merged', 'pavement-merged',
'mountain-merged', 'grass-merged', 'dirt-merged', 'paper-merged',
'food-other-merged', 'building-other-merged', 'rock-merged',
'wall-other-merged', 'rug-merged', 'background'
]
PREDICATES = [
'over',
'in front of',
'beside',
'on',
'in',
'attached to',
'hanging from',
'on back of',
'falling off',
'going down',
'painted on',
'walking on',
'running on',
'crossing',
'standing on',
'lying on',
'sitting on',
'flying over',
'jumping over',
'jumping from',
'wearing',
'holding',
'carrying',
'looking at',
'guiding',
'kissing',
'eating',
'drinking',
'feeding',
'biting',
'catching',
'picking',
'playing with',
'chasing',
'climbing',
'cleaning',
'playing',
'touching',
'pushing',
'pulling',
'opening',
'cooking',
'talking to',
'throwing',
'slicing',
'driving',
'riding',
'parked on',
'driving on',
'about to hit',
'kicking',
'swinging',
'entering',
'exiting',
'enclosing',
'leaning on',
]
def show_result(img,
result,
is_one_stage,
num_rel=20,
show=False,
out_dir=None,
out_file=None):
# Load image
img = mmcv.imread(img)
img = img.copy() # (H, W, 3)
img_h, img_w = img.shape[:-1]
# Decrease contrast
img = PIL.Image.fromarray(img)
converter = PIL.ImageEnhance.Color(img)
img = converter.enhance(0.01)
if out_file is not None:
mmcv.imwrite(np.asarray(img), 'bw'+out_file)
# Draw masks
pan_results = result.pan_results
ids = np.unique(pan_results)[::-1]
num_classes = 133
legal_indices = (ids != num_classes) # for VOID label
ids = ids[legal_indices]
# Get predicted labels
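    # Panoptic ids encode the semantic class as id % INSTANCE_OFFSET.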
labels = np.array([id % INSTANCE_OFFSET for id in ids], dtype=np.int64)
labels = [CLASSES[l] for l in labels]
    # For psgtr (one-stage): relation object labels are 1-indexed
rel_obj_labels = result.labels
rel_obj_labels = [CLASSES[l - 1] for l in rel_obj_labels]
# (N_m, H, W)
segms = pan_results[None] == ids[:, None, None]
# Resize predicted masks
segms = [
mmcv.image.imresize(m.astype(float), (img_w, img_h)) for m in segms
]
# One stage segmentation
masks = result.masks
# Choose colors for each instance in coco
colormap_coco = get_colormap(len(masks)) if is_one_stage else get_colormap(len(segms))
colormap_coco = (np.array(colormap_coco) / 255).tolist()
    # Visualize masks
viz = Visualizer(img)
viz.overlay_instances(
labels=rel_obj_labels if is_one_stage else labels,
masks=masks if is_one_stage else segms,
assigned_colors=colormap_coco,
)
viz_img = viz.get_output().get_image()
if out_file is not None:
mmcv.imwrite(viz_img, out_file)
# Draw relations
# Filter out relations
n_rel_topk = num_rel
# Exclude background class
rel_dists = result.rel_dists[:, 1:]
# rel_dists = result.rel_dists
rel_scores = rel_dists.max(1)
# rel_scores = result.triplet_scores
# Extract relations with top scores
rel_topk_idx = np.argpartition(rel_scores, -n_rel_topk)[-n_rel_topk:]
rel_labels_topk = rel_dists[rel_topk_idx].argmax(1)
rel_pair_idxes_topk = result.rel_pair_idxes[rel_topk_idx]
relations = np.concatenate(
[rel_pair_idxes_topk, rel_labels_topk[..., None]], axis=1)
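    # Each row of `relations` is [subject_idx, object_idx, predicate_label] for a
    # top-scoring pair.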
n_rels = len(relations)
top_padding = 20
bottom_padding = 20
left_padding = 20
text_size = 10
text_padding = 5
text_height = text_size + 2 * text_padding
row_padding = 10
height = (top_padding + bottom_padding + n_rels *
(text_height + row_padding) - row_padding)
width = img_w
curr_x = left_padding
curr_y = top_padding
# # Adjust colormaps
# colormap_coco = [adjust_text_color(c, viz) for c in colormap_coco]
viz_graph = VisImage(np.full((height, width, 3), 255))
for i, r in enumerate(relations):
s_idx, o_idx, rel_id = r
s_label = rel_obj_labels[s_idx]
o_label = rel_obj_labels[o_idx]
rel_label = PREDICATES[rel_id]
viz = Visualizer(img)
viz.overlay_instances(
labels=[s_label, o_label],
masks=[masks[s_idx], masks[o_idx]],
assigned_colors=[colormap_coco[s_idx], colormap_coco[o_idx]],
)
viz_masked_img = viz.get_output().get_image()
viz_graph = VisImage(np.full((40, width, 3), 255))
curr_x = 2
curr_y = 2
text_size = 25
text_padding = 20
font = 36
text_width = draw_text(
viz_img=viz_graph,
text=s_label,
x=curr_x,
y=curr_y,
color=colormap_coco[s_idx],
size=text_size,
padding=text_padding,
font=font,
)
curr_x += text_width
# Draw relation text
text_width = draw_text(
viz_img=viz_graph,
text=rel_label,
x=curr_x,
y=curr_y,
size=text_size,
padding=text_padding,
box_color='gainsboro',
font=font,
)
curr_x += text_width
# Draw object text
text_width = draw_text(
viz_img=viz_graph,
text=o_label,
x=curr_x,
y=curr_y,
color=colormap_coco[o_idx],
size=text_size,
padding=text_padding,
font=font,
)
output_viz_graph = np.vstack([viz_masked_img, viz_graph.get_image()])
if out_file is not None:
mmcv.imwrite(output_viz_graph, osp.join(out_dir, '{}.jpg'.format(i)))
# if out_file is not None:
# mmcv.imwrite(output_viz_graph, out_file)
if not (show or out_file):
        # Return the last composed visualization when not showing or writing to disk.
        return output_viz_graph
# def multiclass_nms_alt(
# multi_bboxes,
# multi_scores,
# score_thr,
# nms_cfg,
# max_num=-1,
# score_factors=None,
# return_inds=False,
# return_dist=False,
# ):
# """NMS for multi-class bboxes.
# Args:
# multi_bboxes (Tensor): shape (n, #class*4) or (n, 4)
# multi_scores (Tensor): shape (n, #class), where the last column
# contains scores of the background class, but this will be ignored.
# score_thr (float): bbox threshold, bboxes with scores lower than it
# will not be considered.
# nms_thr (float): NMS IoU threshold
# max_num (int, optional): if there are more than max_num bboxes after
# NMS, only top max_num will be kept. Default to -1.
# score_factors (Tensor, optional): The factors multiplied to scores
# before applying NMS. Default to None.
# return_inds (bool, optional): Whether return the indices of kept
# bboxes. Default to False.
# Returns:
# tuple: (dets, labels, indices (optional)), tensors of shape (k, 5),
# (k), and (k). Dets are boxes with scores. Labels are 0-based.
# """
# num_classes = multi_scores.size(1) - 1
# # exclude background category
# if multi_bboxes.shape[1] > 4:
# bboxes = multi_bboxes.view(multi_scores.size(0), -1, 4)
# else:
# bboxes = multi_bboxes[:, None].expand(multi_scores.size(0), num_classes, 4)
# scores = multi_scores[:, :-1]
# valid_box_idxes = torch.nonzero(scores > score_thr)[:, 0].view(-1)
# labels = torch.arange(num_classes, dtype=torch.long, device=scores.device)
# labels = labels.view(1, -1).expand_as(scores)
# bboxes = bboxes.reshape(-1, 4)
# scores = scores.reshape(-1) # flattened
# labels = labels.reshape(-1)
# if not torch.onnx.is_in_onnx_export():
# # NonZero not supported in TensorRT
# # remove low scoring boxes
# valid_mask = scores > score_thr
# # multiply score_factor after threshold to preserve more bboxes, improve
# # mAP by 1% for YOLOv3
# if score_factors is not None:
# # expand the shape to match original shape of score
# score_factors = score_factors.view(-1, 1).expand(
# multi_scores.size(0), num_classes
# )
# score_factors = score_factors.reshape(-1)
# scores = scores * score_factors
# score_dists = scores.reshape(-1, num_classes)
# scores = score_dists[valid_box_idxes, :]
# # add bg column for later use.
# score_dists = torch.cat(
# (torch.zeros(score_dists.size(0), 1).to(score_dists), score_dists), dim=-1
# )
# if not torch.onnx.is_in_onnx_export():
# # NonZero not supported in TensorRT
# inds = valid_mask.nonzero(as_tuple=False).squeeze(1)
# bboxes, scores, labels = bboxes[inds], scores[inds], labels[inds]
# else:
# # TensorRT NMS plugin has invalid output filled with -1
# # add dummy data to make detection output correct.
# bboxes = torch.cat([bboxes, bboxes.new_zeros(1, 4)], dim=0)
# scores = torch.cat([scores, scores.new_zeros(1)], dim=0)
# labels = torch.cat([labels, labels.new_zeros(1)], dim=0)
# if bboxes.numel() == 0:
# if torch.onnx.is_in_onnx_export():
# raise RuntimeError(
# "[ONNX Error] Can not record NMS "
# "as it has not been executed this time"
# )
# dets = torch.cat([bboxes, scores[:, None]], -1)
# if return_inds:
# return dets, labels, inds
# elif return_dist:
# return dets, (labels, multi_bboxes.new_zeros((0, num_classes + 1)))
# else:
# return dets, labels
# dets, keep = batched_nms(bboxes, scores, labels, nms_cfg)
# # NOTE: `keep` is for each class independently right?
# if max_num > 0:
# dets = dets[:max_num]
# keep = keep[:max_num]
# if return_inds:
# return dets, labels[keep], inds[keep]
# else:
# return dets, labels[keep]
| 16,860 | 31.676357 | 90 | py |
OpenPSG | OpenPSG-main/openpsg/utils/__init__.py | from .utils import * # noqa: F401,F403
from .vis_tools import * # noqa: F401,F403
| 85 | 20.5 | 43 | py |