repo | file | code | file_length | avg_line_length | max_line_length | extension_type |
---|---|---|---|---|---|---|
BVQI | BVQI-master/prompt_tuning.py | import os, glob
import argparse
import pickle as pkl
import random
from copy import deepcopy
import open_clip
import numpy as np
import torch
import torch.nn as nn
import yaml
from scipy.stats import pearsonr, spearmanr
from scipy.stats import kendalltau as kendallr
from tqdm import tqdm
from buona_vista import datasets
from load_features import get_features
class TextEncoder(nn.Module):
def __init__(self, clip_model):
super().__init__()
self.transformer = clip_model.transformer
self.positional_embedding = clip_model.positional_embedding
self.ln_final = clip_model.ln_final
self.text_projection = clip_model.text_projection
self.dtype = clip_model.transformer.get_cast_dtype()
self.attn_mask = clip_model.attn_mask
def forward(self, prompts, tokenized_prompts):
x = prompts + self.positional_embedding.type(self.dtype)
x = x.permute(1, 0, 2) # NLD -> LND
x = self.transformer(x, attn_mask=self.attn_mask)
x = x.permute(1, 0, 2) # LND -> NLD
x = self.ln_final(x).type(self.dtype)
# x.shape = [batch_size, n_ctx, transformer.width]
# take features from the eot embedding (eot_token is the highest number in each sequence)
x = x[torch.arange(x.shape[0]), tokenized_prompts.argmax(dim=-1)] @ self.text_projection
return x
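# Illustrative sketch (not part of the original script): the EOT pooling in
# TextEncoder.forward relies on the EOT token having the largest id in each
# tokenized sequence, so `tokenized_prompts.argmax(dim=-1)` picks the EOT position
# per prompt. The helper below is a hypothetical usage example; it assumes the
# OpenCLIP RN50 "openai" weights can be downloaded and runs on CPU.
def _demo_text_encoder():
    demo_model, _, _ = open_clip.create_model_and_transforms("RN50", pretrained="openai")
    demo_tokenizer = open_clip.get_tokenizer("RN50")
    tokens = demo_tokenizer(["a high quality photo", "a low quality photo"])
    encoder = TextEncoder(demo_model)
    with torch.no_grad():
        token_embeds = demo_model.token_embedding(tokens)
        text_feats = encoder(token_embeds, tokens)
    print(text_feats.shape)  # expected: torch.Size([2, 1024]) for the RN50 checkpoint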
class MLP(nn.Module):
def __init__(self, in_channels, hidden_channels, out_channels):
super().__init__()
self.in_ln = nn.Linear(in_channels, hidden_channels, bias=False)
self.out_ln = nn.Linear(hidden_channels, out_channels, bias=False)
self.gelu = nn.GELU()
self.dropout = nn.Dropout(0.5)
self.bn = nn.BatchNorm2d(1, affine=False)
def forward(self, x):
bef_norm = self.out_ln(self.dropout(self.gelu(self.in_ln(x)))).squeeze(-1)
return (torch.sigmoid(self.bn(bef_norm[:, None, :, :])))
class FFN(nn.Module):
def __init__(self, in_channels):
super().__init__()
self.ln = nn.Linear(in_channels, 1, bias=False)
self.bn = nn.BatchNorm2d(1, affine=False)
def forward(self, x):
bef_norm = self.ln(x).squeeze(-1)
return (torch.sigmoid(self.bn(bef_norm[:, None, :, :])))
class VisualFeatureDataset(torch.utils.data.Dataset):
def __init__(self, dataset_name, indices=None):
super().__init__()
        if indices is None:
indices = range(len(sn[dataset_name]))
print("Using all indices:", indices)
self.temporal = [tn2[dataset_name][ind] for ind in indices]
self.spatial = [sn[dataset_name][ind] for ind in indices]
self.clip_visual_features = [visual_features[dataset_name][ind] for ind in indices]
self.gts = [gts[dataset_name][ind] for ind in indices]
def __getitem__(self, index):
return self.clip_visual_features[index], self.spatial[index], self.temporal[index], self.gts[index]
def __len__(self):
return len(self.gts)
class FastVisualFeatureDataset(torch.utils.data.Dataset):
def __init__(self, dataset_name, indices=None):
super().__init__()
        if indices is None:
indices = range(len(sn[dataset_name]))
print("Using all indices:", indices)
self.temporal = [tn2[dataset_name][ind] for ind in indices]
self.spatial = [sn[dataset_name][ind] for ind in indices]
self.clip_visual_features = [visual_features[dataset_name][ind] for ind in indices]
self.fast_visual_features = [fast_visual_features[dataset_name]["feats"][ind] for ind in indices]
self.gts = [gts[dataset_name][ind] for ind in indices]
def __getitem__(self, index):
return self.clip_visual_features[index], self.spatial[index], self.temporal[index], self.gts[index], self.fast_visual_features[index].reshape(4,1,768)
def __len__(self):
return len(self.gts)
class SimpleFeatureDataset(torch.utils.data.Dataset):
def __init__(self, dataset_name, indices):
super().__init__()
#self.temporal = [tn2[dataset_name][ind] for ind in indices]
#self.spatial = [sn[dataset_name][ind] for ind in indices]
self.clip_visual_features = [visual_features[dataset_name][ind] for ind in indices]
self.gts = [gts[dataset_name][ind] for ind in indices]
def __getitem__(self, index):
return self.clip_visual_features[index], self.gts[index]
def __len__(self):
return len(self.gts)
class BVQI(nn.Module):
"""
Modified CLIP, which combined prompt tuning and feature adaptation.
The spatial and temporal naturalnesses are fed as final features.
Implcit features is also optional fed into the model.
"""
def __init__(self, text_tokens, embedding, n_pairs=2,implicit=False, optimizable_encoder=None):
super().__init__()
self.n_pairs = n_pairs
self.device = "cuda"
self.implicit = implicit
if self.implicit:
self.implicit_mlp = MLP(1024,64,1)
self.tokenized_prompts = text_tokens
#self.text_encoder = TextEncoder(clip_model)
if optimizable_encoder is not None:
print("Optimizing the text encoder.")
self.optimizable_encoder = deepcopy(text_encoder)
for param in self.optimizable_encoder.parameters():
param.requires_grad = True
if n_ctx > 0:
self.ctx = nn.Parameter(embedding[:, 1:1+n_ctx].clone())
else:
self.register_buffer("ctx", embedding[:, 1:1, :])
print("Disabled Context Prompt")
self.register_buffer("prefix", embedding[:, :1, :].clone()) # SOS
self.register_buffer("suffix", embedding[:, 1 + n_ctx:, :].clone())# CLS, EOS
self.prefix.requires_grad = False
self.suffix.requires_grad = False
self.dropout = nn.Dropout(0.5)
self.final_ln = nn.Linear(n_pairs+2+implicit,1,bias=False)
print(self.final_ln)
torch.nn.init.constant_(self.final_ln.weight, 1)
n_prompts = self.get_text_prompts()
self.text_feats = text_encoder(n_prompts.cuda(), self.tokenized_prompts)
def get_text_prompts(self):
return torch.cat(
[
self.prefix, # (n_cls, 1, dim)
self.ctx, # (n_cls, n_ctx, dim)
self.suffix, # (n_cls, *, dim)
],
dim=1,
)
def forward(self, vis_feat, sn_ind=None, tn_ind=None, train=True):
n_prompts = self.get_text_prompts()
if train:
if hasattr(self, "optimizable_encoder"):
text_feats = self.optimizable_encoder(n_prompts, self.tokenized_prompts)
else:
text_feats = text_encoder(n_prompts, self.tokenized_prompts)
self.text_feats = text_feats
else:
text_feats = self.text_feats
vis_feats = vis_feat[:,1:].to(self.device)
if self.implicit:
sa_ind = [self.implicit_mlp(vis_feats).mean((-1,-2,-3))]
else:
sa_ind = []
self.vis_feats = vis_feats
logits = 2 * self.dropout(self.vis_feats) @ text_feats.T
final_feats = [sn_ind.to(self.device), tn_ind.to(self.device)]
for k in range(self.n_pairs):
pn_pair = logits[..., 2 * k : 2 * k + 2].float() #.softmax(-1)[...,0]
sa_ind += [torch.sigmoid(pn_pair[...,0] - pn_pair[...,1]).mean((-1,-2))]
final_feats += sa_ind
final_feats = torch.stack(final_feats, -1).float()
return final_feats, self.final_ln(final_feats).flatten()
def metrics(self, feats, outputs, gt):
np_feats = feats.mean(-1).detach().cpu().numpy()
np_outputs = outputs.detach().cpu().numpy()
np_gt = gt.numpy()
return spearmanr(np_feats, np_gt)[0], spearmanr(np_outputs, np_gt)[0]
def plcc_loss(y_pred, y):
sigma_hat, m_hat = torch.std_mean(y_pred, unbiased=False)
y_pred = (y_pred - m_hat) / (sigma_hat + 1e-8)
sigma, m = torch.std_mean(y, unbiased=False)
y = (y - m) / (sigma + 1e-8)
loss0 = torch.nn.functional.mse_loss(y_pred, y) / 4
rho = torch.mean(y_pred * y)
loss1 = torch.nn.functional.mse_loss(rho * y_pred, y) / 4
return ((loss0 + loss1) / 2).float()
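# A minimal numeric sketch of plcc_loss (illustration only, not used by training):
# both inputs are z-normalized, so perfectly correlated predictions give a loss
# near 0 and perfectly anti-correlated predictions give a loss near 0.5.
def _demo_plcc_loss():
    y = torch.tensor([0.1, 0.4, 0.6, 0.9])
    print(plcc_loss(y.clone(), y).item())  # ~0.0 (Pearson rho ~ +1)
    print(plcc_loss(-y, y).item())         # ~0.5 (Pearson rho ~ -1)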
def max_plcc_loss(y_pred, y):
return sum(plcc_loss(y_pred[:,i], y) for i in range(y_pred.shape[-1])) / y_pred.shape[-1]
def rescale(x):
x = np.array(x)
print("Mean:", x.mean(), "Std", x.std())
x = (x - x.mean()) / x.std()
return 1 / (1 + np.exp(-x))
def count_parameters(model):
for name, module in model.named_children():
print(name, "|", sum(p.numel() for p in module.parameters() if p.requires_grad))
return sum(p.numel() for p in model.parameters() if p.requires_grad)
def encode_text_prompts(prompts):
text_tokens = tokenizer(prompts).to("cuda")
with torch.no_grad():
embedding = model.token_embedding(text_tokens)
text_features = model.encode_text(text_tokens).float()
return text_tokens, embedding, text_features
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Hyper-parameters')
parser.add_argument('--n_pairs', type=int, default=2, help='Number of pairs')
parser.add_argument("-i", '--implicit', action="store_true", help='Use implicit prompts')
    parser.add_argument('-c', '--n_ctx', type=int, default=1, help='Number of context tokens')
args = parser.parse_args()
n_pairs = args.n_pairs
implicit = args.implicit
n_ctx = args.n_ctx
with open("buona_vista_sa_index.yml", "r") as f:
opt = yaml.safe_load(f)
val_datasets = {}
for name, dataset in opt["data"].items():
val_datasets[name] = getattr(datasets, dataset["type"])(dataset["args"])
print("Loading model")
model, _, preprocess = open_clip.create_model_and_transforms("RN50",pretrained="openai")
model = model.to("cuda")
tokenizer = open_clip.get_tokenizer("RN50")
print("Loading features")
results = {}
gts, paths = {}, {}
for val_name, val_dataset in val_datasets.items():
gts[val_name] = [val_dataset.video_infos[i]["label"] for i in range(len(val_dataset))]
for val_name, val_dataset in val_datasets.items():
paths[val_name] = [val_dataset.video_infos[i]["filename"] for i in range(len(val_dataset))]
if not glob.glob("CLIP_vis_features.pt"):
visual_features = get_features()
visual_features = torch.load("CLIP_vis_features.pt")
backend = "Matlab" # Matlab | Pytorch
if backend == "Matlab":
with open("naturalnesses_matlab_results.pkl","rb") as f:
matlab_results = pkl.load(f)
sn = matlab_results["spatial"]
tn2 = matlab_results["temporal"]
else:
sn, tn2 = {}, {}
for val_name in visual_features:
with open(f"spatial_naturalness_{val_name}.pkl","rb") as infile:
sn[val_name] = pkl.load(infile)["pr_labels"]
with open("temporal_naturalness_pubs.pkl","rb") as infile:
tn = pkl.load(infile)
tn2[val_name] = tn[f"{val_name}"]["tn_index"]
context = " ".join(["X"] * n_ctx)
prompts = [
f"a {context} high quality photo",
f"a {context} low quality photo",
f"a {context} good photo",
f"a {context} bad photo",
]
print(n_pairs, implicit)
text_encoder = TextEncoder(model)
print(f'The model has {count_parameters(model):,} trainable parameters')
text_tokens, embedding, text_feats = encode_text_prompts(prompts)
snames = ["val-cvd2014", "val-kv1k", "val-livevqc", "val-ytugc", ]
print("Start training")
for sname in snames:
best_srccs, best_plccs = [], []
        cross_snames = []  # [name for name in snames if name != sname]
best_srccs_cross, best_plccs_cross = {}, {}
for cname in cross_snames:
best_srccs_cross[cname], best_plccs_cross[cname] = [], []
for split in range(10):
bvqi = BVQI(text_tokens, embedding, n_pairs=n_pairs, implicit=implicit).cuda()
print(f'The model has {count_parameters(bvqi):,} trainable parameters')
optimizer = torch.optim.AdamW(bvqi.parameters(),lr=1e-3)
random.seed((split+1)*42)
train_indices = random.sample(range(len(gts[sname])), int(0.8 * len(gts[sname])))
train_dataset = VisualFeatureDataset(sname, indices=train_indices)
train_dataloader = torch.utils.data.DataLoader(train_dataset, batch_size=16, shuffle=True)
val_indices = [ind for ind in range(len(gts[sname])) if ind not in train_indices]
val_dataset = VisualFeatureDataset(sname, indices=val_indices)
val_dataloader = torch.utils.data.DataLoader(val_dataset, batch_size=16)
cross_test_dataloaders = {}
for cname in cross_snames:
test_dataset = VisualFeatureDataset(cname)
cross_test_dataloaders[cname] = torch.utils.data.DataLoader(test_dataset, batch_size=16)
val_prs, val_gts = [], []
for data in (val_dataloader):
with torch.no_grad():
vis_feat, sn_ind, tn_ind, gt = data
_, res = bvqi(vis_feat, sn_ind, tn_ind, train=False)
val_prs.extend(list(res.cpu().numpy()))
val_gts.extend(list(gt.cpu().numpy()))
print(f"Split {split}, Bef Training SRCC:", spearmanr(val_prs,val_gts)[0], "Bef Training PLCC:", pearsonr(val_prs,val_gts)[0])
best_srcc, best_plcc = -1, -1
srccs_cross, plccs_cross = {}, {}
for epoch in tqdm(range(30)):
#print(f"Epoch {epoch}:")
bvqi.train()
for data in (train_dataloader):
optimizer.zero_grad()
vis_feat, sn_ind, tn_ind, gt = data
feats, res = bvqi(vis_feat, sn_ind, tn_ind)
loss = plcc_loss(res, gt.cuda().float()) #+ 0.3 * rank_loss(res, gt.cuda().float())
#aux_loss = max_plcc_loss(feats[...,2:], gt.cuda().float())
#loss += 0.3 * aux_loss
loss.backward()
optimizer.step()
bvqi.eval()
#val_prs, val_gts = [], []
#for data in (train_dataloader):
# with torch.no_grad():
# vis_feat, sn_ind, tn_ind, gt = data
# _, res = bvqi(vis_feat, sn_ind, tn_ind)
# val_prs.extend(list(res.cpu().numpy()))
# val_gts.extend(list(gt.cpu().numpy()))
#print("Train Spearman:", spearmanr(val_prs,val_gts)[0], "Train Pearson:", pearsonr(val_prs,val_gts)[0])
val_prs, val_gts = [], []
for data in (val_dataloader):
with torch.no_grad():
vis_feat, sn_ind, tn_ind, gt = data
_, res = bvqi(vis_feat, sn_ind, tn_ind, train=False)
val_prs.extend(list(res.cpu().numpy()))
val_gts.extend(list(gt.cpu().numpy()))
srcc, plcc = spearmanr(val_prs,val_gts)[0], pearsonr(val_prs,val_gts)[0]
if srcc + plcc > best_srcc + best_plcc:
best_srcc = srcc
best_plcc = plcc
test_prs, test_gts = {}, {}
for cname, test_dataloader in cross_test_dataloaders.items():
test_prs[cname], test_gts[cname] = [], []
for data in (test_dataloader):
with torch.no_grad():
vis_feat, sn_ind, tn_ind, gt = data
_, res = bvqi(vis_feat, sn_ind, tn_ind, train=False)
test_prs[cname].extend(list(res.cpu().numpy()))
test_gts[cname].extend(list(gt.cpu().numpy()))
csrcc, cplcc = spearmanr(test_prs[cname],test_gts[cname])[0], pearsonr(test_prs[cname],test_gts[cname])[0]
srccs_cross[cname] = csrcc
plccs_cross[cname] = cplcc
#print("Val Spearman:", srcc, "Val Pearson:", plcc, "Best Spearman:", best_srcc, "Best Pearson:", best_plcc, )
best_srccs.append(best_srcc)
best_plccs.append(best_plcc)
print("Best SRCC:", best_srcc, "Best PLCC:", best_plcc)
for cname in cross_snames:
print(f"{cname} SRCC:", srccs_cross[cname], f"{cname} PLCC:", plccs_cross[cname])
best_srccs_cross[cname] += [srccs_cross[cname]]
best_plccs_cross[cname] += [plccs_cross[cname]]
print(f"After training in 10 splits with seeds {[(i+1)*42 for i in range(10)]}:")
print(sname, "Avg Best SRCC:", np.mean(best_srccs), "Avg Best PLCC:", np.mean(best_plccs))
print(f"Cross dataset performance:")
print("Cross SRCC", [(key, np.mean(values)) for key, values in best_srccs_cross.items()])
print("Cross PLCC", [(key, np.mean(values)) for key, values in best_plccs_cross.items()])
| 17,790 | 39.251131 | 158 | py |
BVQI | BVQI-master/load_features.py | import os
import argparse
import pickle as pkl
import random
import open_clip
import numpy as np
import torch
import torch.nn as nn
import yaml
from scipy.stats import pearsonr, spearmanr
from scipy.stats import kendalltau as kendallr
from tqdm import tqdm
from buona_vista import datasets
import wandb
def rescale(x):
x = np.array(x)
print("Mean:", x.mean(), "Std", x.std())
x = (x - x.mean()) / x.std()
return 1 / (1 + np.exp(-x))
def get_features(save_features=True):
with open("buona_vista_sa_index.yml", "r") as f:
opt = yaml.safe_load(f)
val_datasets = {}
for name, dataset in opt["data"].items():
val_datasets[name] = getattr(datasets, dataset["type"])(dataset["args"])
print(open_clip.list_pretrained())
model, _, _ = open_clip.create_model_and_transforms("RN50",pretrained="openai")
model = model.to("cuda")
print("loading succeed")
texts = [
"a high quality photo",
"a low quality photo",
"a good photo",
"a bad photo",
]
tokenizer = open_clip.get_tokenizer("RN50")
text_tokens = tokenizer(texts).to("cuda")
print(f"Prompt_loading_succeed, {texts}")
results = {}
gts, paths = {}, {}
for val_name, val_dataset in val_datasets.items():
gts[val_name] = [val_dataset.video_infos[i]["label"] for i in range(len(val_dataset))]
for val_name, val_dataset in val_datasets.items():
paths[val_name] = [val_dataset.video_infos[i]["filename"] for i in range(len(val_dataset))]
visual_features = {}
for val_name, val_dataset in val_datasets.items():
if val_name != "val-ltrain" and val_name != "val-l1080p":
visual_features[val_name] = []
val_loader = torch.utils.data.DataLoader(
val_dataset, batch_size=1, num_workers=opt["num_workers"], pin_memory=True,
)
for i, data in enumerate(tqdm(val_loader, desc=f"Evaluating in dataset [{val_name}].")):
video_input = data["aesthetic"].to("cuda").squeeze(0).transpose(0,1)
with torch.no_grad():
video_features = model.encode_image(video_input)
visual_features[val_name].append(video_features.cpu())
if save_features:
torch.save(visual_features, "CLIP_vis_features.pt")
return visual_features
if __name__ == "__main__":
get_features() | 2,446 | 28.841463 | 100 | py |
BVQI | BVQI-master/pyiqa/test.py | import logging
from os import path as osp
import torch
from pyiqa.data import build_dataloader, build_dataset
from pyiqa.models import build_model
from pyiqa.utils import get_env_info, get_root_logger, get_time_str, make_exp_dirs
from pyiqa.utils.options import dict2str, parse_options
def test_pipeline(root_path):
    # parse options, set distributed setting, set random seed
opt, _ = parse_options(root_path, is_train=False)
torch.backends.cudnn.benchmark = True
# torch.backends.cudnn.deterministic = True
# mkdir and initialize loggers
make_exp_dirs(opt)
log_file = osp.join(opt["path"]["log"], f"test_{opt['name']}_{get_time_str()}.log")
logger = get_root_logger(
logger_name="pyiqa", log_level=logging.INFO, log_file=log_file
)
logger.info(get_env_info())
logger.info(dict2str(opt))
# create test dataset and dataloader
test_loaders = []
for _, dataset_opt in sorted(opt["datasets"].items()):
test_set = build_dataset(dataset_opt)
test_loader = build_dataloader(
test_set,
dataset_opt,
num_gpu=opt["num_gpu"],
dist=opt["dist"],
sampler=None,
seed=opt["manual_seed"],
)
logger.info(f"Number of test images in {dataset_opt['name']}: {len(test_set)}")
test_loaders.append(test_loader)
# create model
model = build_model(opt)
for test_loader in test_loaders:
test_set_name = test_loader.dataset.opt["name"]
logger.info(f"Testing {test_set_name}...")
model.validation(
test_loader,
current_iter=opt["name"],
tb_logger=None,
save_img=opt["val"]["save_img"],
)
if __name__ == "__main__":
root_path = osp.abspath(osp.join(__file__, osp.pardir, osp.pardir))
test_pipeline(root_path)
| 1,864 | 30.083333 | 87 | py |
BVQI | BVQI-master/pyiqa/train_nsplits.py | import datetime
import logging
import os
import time
from os import path as osp
import numpy as np
import torch
from pyiqa.data.prefetch_dataloader import CPUPrefetcher, CUDAPrefetcher
from pyiqa.models import build_model
from pyiqa.train import create_train_val_dataloader, init_tb_loggers, train_pipeline
from pyiqa.utils import (
AvgTimer,
MessageLogger,
get_env_info,
get_root_logger,
get_time_str,
make_exp_dirs,
mkdir_and_rename,
)
from pyiqa.utils.options import copy_opt_file, dict2str, make_paths, parse_options
def train_nsplits(root_path):
torch.backends.cudnn.benchmark = True
opt, args = parse_options(root_path, is_train=True)
n_splits = opt["split_num"]
save_path = opt["save_final_results_path"]
os.makedirs(os.path.dirname(save_path), exist_ok=True)
all_split_results = []
prefix_name = opt["name"]
for i in range(n_splits):
# update split specific options
opt["name"] = prefix_name + f"_Split{i:02d}"
make_paths(opt, root_path)
for k in opt["datasets"].keys():
opt["datasets"][k]["split_index"] = i + 1
tmp_results = train_pipeline(root_path, opt, args)
all_split_results.append(tmp_results)
with open(save_path, "w") as sf:
datasets = list(all_split_results[0].keys())
metrics = list(all_split_results[0][datasets[0]].keys())
print(datasets, metrics)
sf.write("Val Datasets\tSplits\t{}\n".format("\t".join(metrics)))
for ds in datasets:
all_results = []
for i in range(n_splits):
results_msg = f"{ds}\t{i:02d}\t"
tmp_metric_results = []
for mt in metrics:
tmp_metric_results.append(all_split_results[i][ds][mt]["val"])
results_msg += f"{all_split_results[i][ds][mt]['val']:04f}\t"
results_msg += f"@{all_split_results[i][ds][mt]['iter']:05d}\n"
sf.write(results_msg)
all_results.append(tmp_metric_results)
results_avg = np.array(all_results).mean(axis=0)
results_std = np.array(all_results).std(axis=0)
sf.write(f"Overall results in {ds}: {results_avg}\t{results_std}\n")
if __name__ == "__main__":
root_path = osp.abspath(osp.join(__file__, osp.pardir, osp.pardir))
train_nsplits(root_path)
| 2,387 | 34.641791 | 84 | py |
BVQI | BVQI-master/pyiqa/train.py | import datetime
import logging
import math
import os
import time
from os import path as osp
import torch
from pyiqa.data import build_dataloader, build_dataset
from pyiqa.data.data_sampler import EnlargedSampler
from pyiqa.data.prefetch_dataloader import CPUPrefetcher, CUDAPrefetcher
from pyiqa.models import build_model
from pyiqa.utils import (
AvgTimer,
MessageLogger,
check_resume,
get_env_info,
get_root_logger,
get_time_str,
init_tb_logger,
init_wandb_logger,
make_exp_dirs,
mkdir_and_rename,
scandir,
)
from pyiqa.utils.options import copy_opt_file, dict2str, parse_options
def init_tb_loggers(opt):
# initialize wandb logger before tensorboard logger to allow proper sync
if (
(opt["logger"].get("wandb") is not None)
and (opt["logger"]["wandb"].get("project") is not None)
and ("debug" not in opt["name"])
):
assert (
opt["logger"].get("use_tb_logger") is True
), "should turn on tensorboard when using wandb"
init_wandb_logger(opt)
tb_logger = None
if opt["logger"].get("use_tb_logger") and "debug" not in opt["name"]:
tb_logger = init_tb_logger(
log_dir=osp.join(opt["root_path"], "tb_logger", opt["name"])
)
return tb_logger
def create_train_val_dataloader(opt, logger):
# create train and val dataloaders
train_loader, val_loaders = None, []
for phase, dataset_opt in opt["datasets"].items():
if phase == "train":
dataset_enlarge_ratio = dataset_opt.get("dataset_enlarge_ratio", 1)
train_set = build_dataset(dataset_opt)
train_sampler = EnlargedSampler(
train_set,
opt["world_size"],
opt["rank"],
dataset_enlarge_ratio,
dataset_opt.get("use_shuffle", True),
)
train_loader = build_dataloader(
train_set,
dataset_opt,
num_gpu=opt["num_gpu"],
dist=opt["dist"],
sampler=train_sampler,
seed=opt["manual_seed"],
)
num_iter_per_epoch = math.ceil(
len(train_set)
* dataset_enlarge_ratio
/ (dataset_opt["batch_size_per_gpu"] * opt["world_size"])
)
total_epochs = opt["train"].get("total_epoch", None)
if total_epochs is not None:
total_epochs = int(total_epochs)
total_iters = total_epochs * (num_iter_per_epoch)
opt["train"]["total_iter"] = total_iters
else:
total_iters = int(opt["train"]["total_iter"])
total_epochs = math.ceil(total_iters / (num_iter_per_epoch))
logger.info(
"Training statistics:"
f"\n\tNumber of train images: {len(train_set)}"
f"\n\tDataset enlarge ratio: {dataset_enlarge_ratio}"
f'\n\tBatch size per gpu: {dataset_opt["batch_size_per_gpu"]}'
f'\n\tWorld size (gpu number): {opt["world_size"]}'
f"\n\tRequire iter number per epoch: {num_iter_per_epoch}"
f"\n\tTotal epochs: {total_epochs}; iters: {total_iters}."
)
elif phase.split("_")[0] == "val":
val_set = build_dataset(dataset_opt)
val_loader = build_dataloader(
val_set,
dataset_opt,
num_gpu=opt["num_gpu"],
dist=opt["dist"],
sampler=None,
seed=opt["manual_seed"],
)
logger.info(
f'Number of val images/folders in {dataset_opt["name"]}: {len(val_set)}'
)
val_loaders.append(val_loader)
else:
raise ValueError(f"Dataset phase {phase} is not recognized.")
return train_loader, train_sampler, val_loaders, total_epochs, total_iters
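# Worked example of the iteration bookkeeping above (purely hypothetical numbers):
# with len(train_set) = 10_000, dataset_enlarge_ratio = 1, batch_size_per_gpu = 32
# and world_size = 2, num_iter_per_epoch = ceil(10_000 / (32 * 2)) = 157; if the
# config sets total_iter = 31_400 instead of total_epoch, then
# total_epochs = ceil(31_400 / 157) = 200.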
def load_resume_state(opt):
resume_state_path = None
if opt["auto_resume"]:
state_path = osp.join("experiments", opt["name"], "training_states")
if osp.isdir(state_path):
states = list(
scandir(state_path, suffix="state", recursive=False, full_path=False)
)
if len(states) != 0:
states = [float(v.split(".state")[0]) for v in states]
resume_state_path = osp.join(state_path, f"{max(states):.0f}.state")
opt["path"]["resume_state"] = resume_state_path
else:
if opt["path"].get("resume_state"):
resume_state_path = opt["path"]["resume_state"]
if resume_state_path is None:
resume_state = None
else:
device_id = torch.cuda.current_device()
resume_state = torch.load(
resume_state_path, map_location=lambda storage, loc: storage.cuda(device_id)
)
check_resume(opt, resume_state["iter"])
return resume_state
def train_pipeline(root_path, opt=None, args=None):
# parse options, set distributed setting, set random seed
if opt is None and args is None:
opt, args = parse_options(root_path, is_train=True)
opt["root_path"] = root_path
torch.backends.cudnn.benchmark = True
# torch.backends.cudnn.deterministic = True
# load resume states if necessary
resume_state = load_resume_state(opt)
# mkdir for experiments and logger
if resume_state is None:
make_exp_dirs(opt)
if (
opt["logger"].get("use_tb_logger")
and "debug" not in opt["name"]
and opt["rank"] == 0
):
os.makedirs(osp.join(opt["root_path"], "tb_logger_archived"), exist_ok=True)
mkdir_and_rename(osp.join(opt["root_path"], "tb_logger", opt["name"]))
# copy the yml file to the experiment root
copy_opt_file(args.opt, opt["path"]["experiments_root"])
    # WARNING: do not use get_root_logger in the code above, including the called functions.
    # Otherwise the logger will not be properly initialized.
log_file = osp.join(opt["path"]["log"], f"train_{opt['name']}_{get_time_str()}.log")
logger = get_root_logger(
logger_name="pyiqa", log_level=logging.INFO, log_file=log_file
)
logger.info(get_env_info())
logger.info(dict2str(opt))
# initialize wandb and tb loggers
tb_logger = init_tb_loggers(opt)
# create train and validation dataloaders
result = create_train_val_dataloader(opt, logger)
train_loader, train_sampler, val_loaders, total_epochs, total_iters = result
# create model
model = build_model(opt)
if resume_state: # resume training
model.resume_training(resume_state) # handle optimizers and schedulers
logger.info(
f"Resuming training from epoch: {resume_state['epoch']}, "
f"iter: {resume_state['iter']}."
)
start_epoch = resume_state["epoch"]
current_iter = resume_state["iter"]
else:
start_epoch = 0
current_iter = 0
# create message logger (formatted outputs)
msg_logger = MessageLogger(opt, current_iter, tb_logger)
# dataloader prefetcher
prefetch_mode = opt["datasets"]["train"].get("prefetch_mode")
if prefetch_mode is None or prefetch_mode == "cpu":
prefetcher = CPUPrefetcher(train_loader)
elif prefetch_mode == "cuda":
prefetcher = CUDAPrefetcher(train_loader, opt)
logger.info(f"Use {prefetch_mode} prefetch dataloader")
if opt["datasets"]["train"].get("pin_memory") is not True:
raise ValueError("Please set pin_memory=True for CUDAPrefetcher.")
else:
raise ValueError(
f"Wrong prefetch_mode {prefetch_mode}."
"Supported ones are: None, 'cuda', 'cpu'."
)
# training
logger.info(f"Start training from epoch: {start_epoch}, iter: {current_iter}")
data_timer, iter_timer = AvgTimer(), AvgTimer()
start_time = time.time()
for epoch in range(start_epoch, total_epochs + 1):
train_sampler.set_epoch(epoch)
prefetcher.reset()
train_data = prefetcher.next()
while train_data is not None:
data_timer.record()
current_iter += 1
if current_iter > total_iters:
break
# update learning rate
# model.update_learning_rate(current_iter, warmup_iter=opt['train'].get('warmup_iter', -1))
# training
model.feed_data(train_data)
model.optimize_parameters(current_iter)
iter_timer.record()
if current_iter == 1:
# reset start time in msg_logger for more accurate eta_time
# not work in resume mode
msg_logger.reset_start_time()
# log
if current_iter % opt["logger"]["print_freq"] == 0:
log_vars = {"epoch": epoch, "iter": current_iter}
log_vars.update({"lrs": model.get_current_learning_rate()})
log_vars.update(
{
"time": iter_timer.get_avg_time(),
"data_time": data_timer.get_avg_time(),
}
)
log_vars.update(model.get_current_log())
msg_logger(log_vars)
# log images
log_img_freq = opt["logger"].get("log_imgs_freq", 1e99)
if current_iter % log_img_freq == 0:
visual_imgs = model.get_current_visuals()
if tb_logger and visual_imgs is not None:
for k, v in visual_imgs.items():
tb_logger.add_images(
f"ckpt_imgs/{k}", v.clamp(0, 1), current_iter
)
# save models and training states
save_ckpt_freq = opt["logger"].get("save_checkpoint_freq", 9e9)
if current_iter % save_ckpt_freq == 0:
logger.info("Saving models and training states.")
model.save(epoch, current_iter)
if current_iter % opt["logger"]["save_latest_freq"] == 0:
logger.info("Saving latest models and training states.")
model.save(epoch, -1)
# validation
if opt.get("val") is not None and (
current_iter % opt["val"]["val_freq"] == 0
):
if len(val_loaders) > 1:
logger.warning(
"Multiple validation datasets are *only* supported by SRModel."
)
for val_loader in val_loaders:
model.validation(
val_loader, current_iter, tb_logger, opt["val"]["save_img"]
)
data_timer.start()
iter_timer.start()
train_data = prefetcher.next()
# end of iter
# use epoch based learning rate scheduler
model.update_learning_rate(
epoch + 2, warmup_iter=opt["train"].get("warmup_iter", -1)
)
# end of epoch
consumed_time = str(datetime.timedelta(seconds=int(time.time() - start_time)))
logger.info(f"End of training. Time consumed: {consumed_time}")
logger.info("Save the latest model.")
model.save(epoch=-1, current_iter=-1) # -1 stands for the latest
if opt.get("val") is not None:
for val_loader in val_loaders:
model.validation(
val_loader, current_iter, tb_logger, opt["val"]["save_img"]
)
if tb_logger:
tb_logger.close()
return model.best_metric_results
if __name__ == "__main__":
root_path = osp.abspath(osp.join(__file__, osp.pardir, osp.pardir))
train_pipeline(root_path)
| 11,816 | 36.39557 | 103 | py |
BVQI | BVQI-master/pyiqa/matlab_utils/functions.py | import math
import numpy as np
import torch
import torch.nn.functional as F
from pyiqa.archs.arch_util import ExactPadding2d, symm_pad, to_2tuple
def fspecial(size=None, sigma=None, channels=1, filter_type="gaussian"):
r"""Function same as 'fspecial' in MATLAB, only support gaussian now.
Args:
size (int or tuple): size of window
sigma (float): sigma of gaussian
channels (int): channels of output
"""
if filter_type == "gaussian":
shape = to_2tuple(size)
m, n = [(ss - 1.0) / 2.0 for ss in shape]
y, x = np.ogrid[-m : m + 1, -n : n + 1]
h = np.exp(-(x * x + y * y) / (2.0 * sigma * sigma))
h[h < np.finfo(h.dtype).eps * h.max()] = 0
sumh = h.sum()
if sumh != 0:
h /= sumh
h = torch.from_numpy(h).float().repeat(channels, 1, 1, 1)
return h
else:
raise NotImplementedError(
f"Only support gaussian filter now, got {filter_type}"
)
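# Illustrative check (a sketch, not part of the library API): a 3x3 gaussian
# kernel with sigma=0.5 is normalized to sum to 1, mirroring MATLAB's
# fspecial('gaussian', 3, 0.5).
def _demo_fspecial():
    k = fspecial(size=3, sigma=0.5, channels=1)
    print(k.shape)         # torch.Size([1, 1, 3, 3])
    print(k.sum().item())  # ~1.0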
def conv2d(input, weight, bias=None, stride=1, padding="same", dilation=1, groups=1):
"""Matlab like conv2, weights needs to be flipped.
Args:
input (tensor): (b, c, h, w)
weight (tensor): (out_ch, in_ch, kh, kw), conv weight
bias (bool or None): bias
stride (int or tuple): conv stride
padding (str): padding mode
dilation (int): conv dilation
"""
kernel_size = weight.shape[2:]
pad_func = ExactPadding2d(kernel_size, stride, dilation, mode=padding)
weight = torch.flip(weight, dims=(-1, -2))
return F.conv2d(
pad_func(input), weight, bias, stride, dilation=dilation, groups=groups
)
def imfilter(input, weight, bias=None, stride=1, padding="same", dilation=1, groups=1):
"""imfilter same as matlab.
Args:
input (tensor): (b, c, h, w) tensor to be filtered
weight (tensor): (out_ch, in_ch, kh, kw) filter kernel
padding (str): padding mode
dilation (int): dilation of conv
groups (int): groups of conv
"""
kernel_size = weight.shape[2:]
pad_func = ExactPadding2d(kernel_size, stride, dilation, mode=padding)
return F.conv2d(
pad_func(input), weight, bias, stride, dilation=dilation, groups=groups
)
def filter2(input, weight, shape="same"):
if shape == "same":
return imfilter(input, weight, groups=input.shape[1])
elif shape == "valid":
return F.conv2d(input, weight, stride=1, padding=0, groups=input.shape[1])
else:
raise NotImplementedError(f"Shape type {shape} is not implemented.")
def dct(x, norm=None):
"""
Discrete Cosine Transform, Type II (a.k.a. the DCT)
For the meaning of the parameter `norm`, see:
https://docs.scipy.org/doc/scipy-0.14.0/reference/generated/scipy.fftpack.dct.html
Args:
x: the input signal
norm: the normalization, None or 'ortho'
Return:
the DCT-II of the signal over the last dimension
"""
x_shape = x.shape
N = x_shape[-1]
x = x.contiguous().view(-1, N)
v = torch.cat([x[:, ::2], x[:, 1::2].flip([1])], dim=-1)
Vc = torch.view_as_real(torch.fft.fft(v, dim=-1))
k = -torch.arange(N, dtype=x.dtype, device=x.device)[None, :] * np.pi / (2 * N)
W_r = torch.cos(k)
W_i = torch.sin(k)
V = Vc[:, :, 0] * W_r - Vc[:, :, 1] * W_i
if norm == "ortho":
V[:, 0] /= np.sqrt(N) * 2
V[:, 1:] /= np.sqrt(N / 2) * 2
V = 2 * V.view(*x_shape)
return V
def dct2d(x, norm="ortho"):
"""
    2-dimensional Discrete Cosine Transform, Type II (a.k.a. the DCT)
For the meaning of the parameter `norm`, see:
https://docs.scipy.org/doc/scipy-0.14.0/reference/generated/scipy.fftpack.dct.html
:param x: the input signal
:param norm: the normalization, None or 'ortho'
:return: the DCT-II of the signal over the last 2 dimensions
"""
X1 = dct(x, norm=norm)
X2 = dct(X1.transpose(-1, -2), norm=norm)
return X2.transpose(-1, -2)
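# Illustrative cross-check (a sketch; assumes SciPy with the scipy.fft module is
# installed): dct2d with norm='ortho' should agree with scipy.fft.dctn on a small
# random block.
def _demo_dct2d():
    from scipy.fft import dctn
    x = torch.rand(1, 1, 8, 8, dtype=torch.float64)
    ours = dct2d(x, norm="ortho")
    ref = torch.from_numpy(dctn(x.numpy(), axes=(-2, -1), norm="ortho"))
    print(torch.allclose(ours, ref, atol=1e-8))  # expected: True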
def fitweibull(x, iters=50, eps=1e-2):
"""Simulate wblfit function in matlab.
ref: https://github.com/mlosch/python-weibullfit/blob/master/weibull/backend_pytorch.py
Fits a 2-parameter Weibull distribution to the given data using maximum-likelihood estimation.
:param x (tensor): (B, N), batch of samples from an (unknown) distribution. Each value must satisfy x > 0.
:param iters: Maximum number of iterations
:param eps: Stopping criterion. Fit is stopped ff the change within two iterations is smaller than eps.
:param use_cuda: Use gpu
:return: Tuple (Shape, Scale) which can be (NaN, NaN) if a fit is impossible.
Impossible fits may be due to 0-values in x.
"""
ln_x = torch.log(x)
k = 1.2 / torch.std(ln_x, dim=1, keepdim=True)
k_t_1 = k
for t in range(iters):
# Partial derivative df/dk
x_k = x ** k.repeat(1, x.shape[1])
x_k_ln_x = x_k * ln_x
ff = torch.sum(x_k_ln_x, dim=-1, keepdim=True)
fg = torch.sum(x_k, dim=-1, keepdim=True)
f1 = torch.mean(ln_x, dim=-1, keepdim=True)
f = ff / fg - f1 - (1.0 / k)
ff_prime = torch.sum(x_k_ln_x * ln_x, dim=-1, keepdim=True)
fg_prime = ff
f_prime = (ff_prime / fg - (ff / fg * fg_prime / fg)) + (1.0 / (k * k))
# Newton-Raphson method k = k - f(k;x)/f'(k;x)
k = k - f / f_prime
error = torch.abs(k - k_t_1).max().item()
if error < eps:
break
k_t_1 = k
# Lambda (scale) can be calculated directly
lam = torch.mean(x ** k.repeat(1, x.shape[1]), dim=-1, keepdim=True) ** (1.0 / k)
return torch.cat((k, lam), dim=1) # Shape (SC), Scale (FE)
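# Illustrative sanity check (hypothetical, not part of the API): draw samples from
# a known Weibull distribution via inverse-CDF sampling and recover the parameters
# approximately.
def _demo_fitweibull():
    true_shape, true_scale = 2.0, 1.5
    u = torch.rand(1, 10000).clamp(1e-6, 1 - 1e-6)
    samples = true_scale * (-torch.log(1 - u)) ** (1.0 / true_shape)
    print(fitweibull(samples))  # roughly tensor([[2.0, 1.5]])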
def cov(tensor, rowvar=True, bias=False):
r"""Estimate a covariance matrix (np.cov)
Ref: https://gist.github.com/ModarTensai/5ab449acba9df1a26c12060240773110
"""
tensor = tensor if rowvar else tensor.transpose(-1, -2)
tensor = tensor - tensor.mean(dim=-1, keepdim=True)
if tensor.shape[-1] > 1:
factor = 1 / (tensor.shape[-1] - int(not bool(bias)))
else:
factor = 1
return factor * tensor @ tensor.transpose(-1, -2)
def nancov(x):
r"""Calculate nancov for batched tensor, rows that contains nan value
will be removed.
Args:
x (tensor): (B, row_num, feat_dim)
Return:
cov (tensor): (B, feat_dim, feat_dim)
"""
assert (
len(x.shape) == 3
), f"Shape of input should be (batch_size, row_num, feat_dim), but got {x.shape}"
b, rownum, feat_dim = x.shape
nan_mask = torch.isnan(x).any(dim=2, keepdim=True)
cov_x = []
for i in range(b):
x_no_nan = x[i].masked_select(~nan_mask[i]).reshape(-1, feat_dim)
cov_x.append(cov(x_no_nan, rowvar=False))
return torch.stack(cov_x)
def nanmean(v, *args, inplace=False, **kwargs):
r"""nanmean same as matlab function: calculate mean values by removing all nan."""
if not inplace:
v = v.clone()
is_nan = torch.isnan(v)
v[is_nan] = 0
return v.sum(*args, **kwargs) / (~is_nan).float().sum(*args, **kwargs)
def im2col(x, kernel, mode="sliding"):
r"""simple im2col as matlab
Args:
x (Tensor): shape (b, c, h, w)
kernel (int): kernel size
mode (string):
- sliding (default): rearranges sliding image neighborhoods of kernel size into columns with no zero-padding
- distinct: rearranges discrete image blocks of kernel size into columns, zero pad right and bottom if necessary
Return:
flatten patch (Tensor): (b, h * w / kernel **2, kernel * kernel)
"""
b, c, h, w = x.shape
kernel = to_2tuple(kernel)
if mode == "sliding":
stride = 1
elif mode == "distinct":
stride = kernel
h2 = math.ceil(h / stride[0])
w2 = math.ceil(w / stride[1])
pad_row = (h2 - 1) * stride[0] + kernel[0] - h
pad_col = (w2 - 1) * stride[1] + kernel[1] - w
x = F.pad(x, (0, pad_col, 0, pad_row))
else:
raise NotImplementedError(f"Type {mode} is not implemented yet.")
patches = F.unfold(x, kernel, dilation=1, stride=stride)
b, _, pnum = patches.shape
patches = patches.transpose(1, 2).reshape(b, pnum, -1)
return patches
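# Small illustrative example (sketch only): 'distinct' mode tiles a 4x4 image into
# four non-overlapping 2x2 blocks, one per row of the output.
def _demo_im2col():
    x = torch.arange(16.0).reshape(1, 1, 4, 4)
    cols = im2col(x, 2, mode="distinct")
    print(cols.shape)  # torch.Size([1, 4, 4]): 4 blocks of 2*2 = 4 pixels each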
def blockproc(
x, kernel, fun, border_size=None, pad_partial=False, pad_method="zero", **func_args
):
r"""blockproc function like matlab
    Difference:
        - Partial blocks are discarded (if any) for fast GPU processing.
    Args:
        x (tensor): shape (b, c, h, w)
        kernel (int or tuple): block size
        fun (function): function to process each block
border_size (int or tuple): border pixels to each block
pad_partial: pad partial blocks to make them full-sized, default False
pad_method: [zero, replicate, symmetric] how to pad partial block when pad_partial is set True
Return:
results (tensor): concatenated results of each block
"""
assert len(x.shape) == 4, f"Shape of input has to be (b, c, h, w) but got {x.shape}"
kernel = to_2tuple(kernel)
if pad_partial:
b, c, h, w = x.shape
stride = kernel
h2 = math.ceil(h / stride[0])
w2 = math.ceil(w / stride[1])
pad_row = (h2 - 1) * stride[0] + kernel[0] - h
pad_col = (w2 - 1) * stride[1] + kernel[1] - w
padding = (0, pad_col, 0, pad_row)
if pad_method == "zero":
x = F.pad(x, padding, mode="constant")
elif pad_method == "symmetric":
x = symm_pad(x, padding)
else:
x = F.pad(x, padding, mode=pad_method)
if border_size is not None:
raise NotImplementedError("Blockproc with border is not implemented yet")
else:
b, c, h, w = x.shape
block_size_h, block_size_w = kernel
num_block_h = math.floor(h / block_size_h)
num_block_w = math.floor(w / block_size_w)
# extract blocks in (row, column) manner, i.e., stored with column first
blocks = F.unfold(x, kernel, stride=kernel)
blocks = blocks.reshape(b, c, *kernel, num_block_h, num_block_w)
blocks = blocks.permute(5, 4, 0, 1, 2, 3).reshape(
num_block_h * num_block_w * b, c, *kernel
)
results = fun(blocks, func_args)
results = results.reshape(
num_block_h * num_block_w, b, *results.shape[1:]
).transpose(0, 1)
return results
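# Illustrative usage (a sketch under the current interface, where `fun` receives
# the stacked blocks and the keyword-argument dict): compute the mean of every
# 8x8 block of a batch of 32x32 single-channel images.
def _demo_blockproc():
    x = torch.rand(2, 1, 32, 32)
    block_means = blockproc(x, 8, lambda blocks, _: blocks.mean(dim=(1, 2, 3)))
    print(block_means.shape)  # torch.Size([2, 16]): 4 x 4 = 16 blocks per image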
| 10,439 | 33.569536 | 124 | py |
BVQI | BVQI-master/pyiqa/matlab_utils/math_util.py | r"""Mathematical utilities
Created by: https://github.com/tomrunia/PyTorchSteerablePyramid/blob/master/steerable/math_utils.py
Modified by: Jiadi Mo (https://github.com/JiadiMo)
"""
from __future__ import absolute_import, division, print_function
import numpy as np
import torch
def abs(x):
return torch.sqrt(x[..., 0] ** 2 + x[..., 1] ** 2 + 1e-12)
def roll_n(X, axis, n):
f_idx = tuple(
slice(None, None, None) if i != axis else slice(0, n, None)
for i in range(X.dim())
)
b_idx = tuple(
slice(None, None, None) if i != axis else slice(n, None, None)
for i in range(X.dim())
)
front = X[f_idx]
back = X[b_idx]
return torch.cat([back, front], axis)
def batch_fftshift2d(x):
"""Args:
x: An complex tensor. Shape :math:`(N, C, H, W)`.
Pytroch version >= 1.8.0
"""
real, imag = x.real, x.imag
for dim in range(1, len(real.size())):
n_shift = real.size(dim) // 2
if real.size(dim) % 2 != 0:
n_shift += 1 # for odd-sized images
real = roll_n(real, axis=dim, n=n_shift)
imag = roll_n(imag, axis=dim, n=n_shift)
return torch.stack((real, imag), -1) # last dim=2 (real&imag)
def batch_ifftshift2d(x):
"""Args:
x: An input tensor. Shape :math:`(N, C, H, W, 2)`.
Return:
An complex tensor. Shape :math:`(N, C, H, W)`.
"""
real, imag = torch.unbind(x, -1)
for dim in range(len(real.size()) - 1, 0, -1):
real = roll_n(real, axis=dim, n=real.size(dim) // 2)
imag = roll_n(imag, axis=dim, n=imag.size(dim) // 2)
return torch.complex(real, imag) # convert to complex (real&imag)
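# Minimal round-trip sketch (illustration only): batch_fftshift2d followed by
# batch_ifftshift2d recovers the original spectrum for even-sized images.
def _demo_fftshift_roundtrip():
    spectrum = torch.fft.fft2(torch.rand(1, 1, 8, 8))
    shifted = batch_fftshift2d(spectrum)        # (1, 1, 8, 8, 2), real/imag stacked
    recovered = batch_ifftshift2d(shifted)      # complex tensor again
    print(torch.allclose(spectrum, recovered))  # expected: True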
def prepare_grid(m, n):
x = np.linspace(
-(m // 2) / (m / 2), (m // 2) / (m / 2) - (1 - m % 2) * 2 / m, num=m
)
y = np.linspace(
-(n // 2) / (n / 2), (n // 2) / (n / 2) - (1 - n % 2) * 2 / n, num=n
)
xv, yv = np.meshgrid(y, x)
angle = np.arctan2(yv, xv)
rad = np.sqrt(xv ** 2 + yv ** 2)
rad[m // 2][n // 2] = rad[m // 2][n // 2 - 1]
log_rad = np.log2(rad)
return log_rad, angle
def rcosFn(width, position):
    N = 256  # arbitrary
X = np.pi * np.array(range(-N - 1, 2)) / 2 / N
Y = np.cos(X) ** 2
Y[0] = Y[1]
Y[N + 2] = Y[N + 1]
X = position + 2 * width / np.pi * (X + np.pi / 4)
return X, Y
def pointOp(im, Y, X):
out = np.interp(im.flatten(), X, Y)
return np.reshape(out, im.shape)
def getlist(coeff):
straight = [bands for scale in coeff[1:-1] for bands in scale]
straight = [coeff[0]] + straight + [coeff[-1]]
return straight
| 2,611 | 26.787234 | 99 | py |
BVQI | BVQI-master/pyiqa/matlab_utils/scfpyr_util.py | r"""Complex-valued steerable pyramid
Created by: https://github.com/tomrunia/PyTorchSteerablePyramid
Modified by: Jiadi Mo (https://github.com/JiadiMo)
Refer to:
    - Official MATLAB code from https://github.com/LabForComputationalVision/matlabPyrTools/blob/master/buildSCFpyr.m;
- Original Python code from https://github.com/LabForComputationalVision/pyPyrTools/blob/master/pyPyrTools/SCFpyr.py;
"""
from __future__ import absolute_import, division, print_function
import numpy as np
import torch
from scipy.special import factorial
from . import math_util
pointOp = math_util.pointOp
################################################################################
################################################################################
class SCFpyr_PyTorch(object):
"""
    This is a modified version of buildSFpyr that constructs a
    complex-valued steerable pyramid using Hilbert-transform pairs
    of filters. Note that the imaginary parts will *not* be steerable.
    Requires PyTorch >= 1.8.0.
"""
def __init__(self, height=5, nbands=4, scale_factor=2, device=None):
self.height = height # including low-pass and high-pass
self.nbands = nbands # number of orientation bands
self.scale_factor = scale_factor
self.device = torch.device("cpu") if device is None else device
# Cache constants
self.lutsize = 1024
self.Xcosn = (
np.pi
* np.array(range(-(2 * self.lutsize + 1), (self.lutsize + 2)))
/ self.lutsize
)
self.alpha = (self.Xcosn + np.pi) % (2 * np.pi) - np.pi
        # use the builtin complex type (np.complex was removed in NumPy >= 1.24)
        self.complex_fact_construct = np.power(complex(0, -1), self.nbands - 1)
        self.complex_fact_reconstruct = np.power(complex(0, 1), self.nbands - 1)
################################################################################
# Construction of Steerable Pyramid
def build(self, im_batch):
"""Decomposes a batch of images into a complex steerable pyramid.
The pyramid typically has ~4 levels and 4-8 orientations.
Args:
im_batch (torch.Tensor): Batch of images of shape [N,C,H,W]
Returns:
pyramid: list containing torch.Tensor objects storing the pyramid
"""
assert (
im_batch.device == self.device
), "Devices invalid (pyr = {}, batch = {})".format(self.device, im_batch.device)
assert im_batch.dtype == torch.float32, "Image batch must be torch.float32"
assert im_batch.dim() == 4, "Image batch must be of shape [N,C,H,W]"
assert (
im_batch.shape[1] == 1
), "Second dimension must be 1 encoding grayscale image"
im_batch = im_batch.squeeze(1) # flatten channels dim
height, width = im_batch.shape[1], im_batch.shape[2]
# Check whether image size is sufficient for number of levels
if self.height > int(np.floor(np.log2(min(width, height))) - 2):
raise RuntimeError(
"Cannot build {} levels, image too small.".format(self.height)
)
# Prepare a grid
log_rad, angle = math_util.prepare_grid(height, width)
# Radial transition function (a raised cosine in log-frequency):
Xrcos, Yrcos = math_util.rcosFn(1, -0.5)
Yrcos = np.sqrt(Yrcos)
YIrcos = np.sqrt(1 - Yrcos ** 2)
lo0mask = pointOp(log_rad, YIrcos, Xrcos)
hi0mask = pointOp(log_rad, Yrcos, Xrcos)
# Note that we expand dims to support broadcasting later
lo0mask = torch.from_numpy(lo0mask).float()[None, :, :, None].to(self.device)
hi0mask = torch.from_numpy(hi0mask).float()[None, :, :, None].to(self.device)
# Fourier transform (2D) and shifting
batch_dft = torch.fft.fft2(im_batch)
batch_dft = math_util.batch_fftshift2d(batch_dft)
# Low-pass
lo0dft = batch_dft * lo0mask
# Start recursively building the pyramids
coeff = self._build_levels(lo0dft, log_rad, angle, Xrcos, Yrcos, self.height)
# High-pass
hi0dft = batch_dft * hi0mask
hi0 = math_util.batch_ifftshift2d(hi0dft)
hi0 = torch.fft.ifft2(hi0)
hi0_real = hi0.real
coeff.insert(0, hi0_real)
return coeff
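    # For reference (a descriptive sketch, matching the code above rather than any
    # external spec): the returned `coeff` list is laid out as
    #   coeff[0]    -> real-valued high-pass residual,
    #   coeff[1:-1] -> one list of `nbands` oriented band-pass tensors per level,
    #                  each of shape (N, H_l, W_l, 2) stacking real and imaginary parts,
    #   coeff[-1]   -> real-valued low-pass residual.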
def _build_levels(self, lodft, log_rad, angle, Xrcos, Yrcos, height):
if height <= 0:
# Low-pass
lo0 = math_util.batch_ifftshift2d(lodft)
lo0 = torch.fft.ifft2(lo0)
lo0_real = lo0.real
coeff = [lo0_real]
else:
Xrcos = Xrcos - np.log2(self.scale_factor)
####################################################################
####################### Orientation bandpass #######################
####################################################################
himask = pointOp(log_rad, Yrcos, Xrcos)
himask = torch.from_numpy(himask[None, :, :, None]).float().to(self.device)
order = self.nbands - 1
const = (
np.power(2, 2 * order)
* np.square(factorial(order))
/ (self.nbands * factorial(2 * order))
)
Ycosn = (
2
* np.sqrt(const)
* np.power(np.cos(self.Xcosn), order)
* (np.abs(self.alpha) < np.pi / 2)
) # [n,]
# Loop through all orientation bands
orientations = []
for b in range(self.nbands):
anglemask = pointOp(angle, Ycosn, self.Xcosn + np.pi * b / self.nbands)
anglemask = anglemask[None, :, :, None] # for broadcasting
anglemask = torch.from_numpy(anglemask).float().to(self.device)
# Bandpass filtering
banddft = lodft * anglemask * himask
# Now multiply with complex number
# (x+yi)(u+vi) = (xu-yv) + (xv+yu)i
banddft = torch.unbind(banddft, -1)
banddft_real = (
self.complex_fact_construct.real * banddft[0]
- self.complex_fact_construct.imag * banddft[1]
)
banddft_imag = (
self.complex_fact_construct.real * banddft[1]
+ self.complex_fact_construct.imag * banddft[0]
)
banddft = torch.stack((banddft_real, banddft_imag), -1)
band = math_util.batch_ifftshift2d(banddft)
band = torch.fft.ifft2(band)
orientations.append(torch.stack((band.real, band.imag), -1))
####################################################################
######################## Subsample lowpass #########################
####################################################################
# Don't consider batch_size and imag/real dim
dims = np.array(lodft.shape[1:3])
# Both are tuples of size 2
low_ind_start = (
np.ceil((dims + 0.5) / 2)
- np.ceil((np.ceil((dims - 0.5) / 2) + 0.5) / 2)
).astype(int)
low_ind_end = (low_ind_start + np.ceil((dims - 0.5) / 2)).astype(int)
# Subsampling indices
log_rad = log_rad[
low_ind_start[0] : low_ind_end[0], low_ind_start[1] : low_ind_end[1]
]
angle = angle[
low_ind_start[0] : low_ind_end[0], low_ind_start[1] : low_ind_end[1]
]
# Actual subsampling
lodft = lodft[
:,
low_ind_start[0] : low_ind_end[0],
low_ind_start[1] : low_ind_end[1],
:,
]
# Filtering
YIrcos = np.abs(np.sqrt(1 - Yrcos ** 2))
lomask = pointOp(log_rad, YIrcos, Xrcos)
lomask = torch.from_numpy(lomask[None, :, :, None]).float()
lomask = lomask.to(self.device)
# Convolution in spatial domain
lodft = lomask * lodft
####################################################################
####################### Recursion next level #######################
####################################################################
coeff = self._build_levels(lodft, log_rad, angle, Xrcos, Yrcos, height - 1)
coeff.insert(0, orientations)
return coeff
| 8,552 | 36.678414 | 121 | py |
BVQI | BVQI-master/pyiqa/matlab_utils/__init__.py | """This folder contains pytorch implementations of matlab functions.
And should produce the same results as matlab.
Note: to enable GPU acceleration, all functions take batched tensors as inputs,
and return batched results.
"""
from .functions import *
from .resize import imresize
from .scfpyr_util import SCFpyr_PyTorch
__all__ = [
"imresize",
"fspecial",
"SCFpyr_PyTorch",
"imfilter",
"dct2d",
"conv2d",
"filter2",
"fitweibull",
"nancov",
"nanmean",
"im2col",
"blockproc",
]
| 529 | 19.384615 | 79 | py |
BVQI | BVQI-master/pyiqa/matlab_utils/resize.py | """
A standalone PyTorch implementation for fast and efficient bicubic resampling.
The resulting values are the same as those of the MATLAB function imresize('bicubic').
## Author: Sanghyun Son
## Email: sonsang35@gmail.com (primary), thstkdgus35@snu.ac.kr (secondary)
## Version: 1.2.0
## Last update: July 9th, 2020 (KST)
Dependency: torch
Example::
>>> import torch
>>> import core
>>> x = torch.arange(16).float().view(1, 1, 4, 4)
>>> y = core.imresize(x, sizes=(3, 3))
>>> print(y)
tensor([[[[ 0.7506, 2.1004, 3.4503],
[ 6.1505, 7.5000, 8.8499],
[11.5497, 12.8996, 14.2494]]]])
"""
import math
import typing
import torch
from torch.nn import functional as F
__all__ = ["imresize"]
_I = typing.Optional[int]
_D = typing.Optional[torch.dtype]
def nearest_contribution(x: torch.Tensor) -> torch.Tensor:
range_around_0 = torch.logical_and(x.gt(-0.5), x.le(0.5))
cont = range_around_0.to(dtype=x.dtype)
return cont
def linear_contribution(x: torch.Tensor) -> torch.Tensor:
ax = x.abs()
range_01 = ax.le(1)
cont = (1 - ax) * range_01.to(dtype=x.dtype)
return cont
def cubic_contribution(x: torch.Tensor, a: float = -0.5) -> torch.Tensor:
ax = x.abs()
ax2 = ax * ax
ax3 = ax * ax2
range_01 = ax.le(1)
range_12 = torch.logical_and(ax.gt(1), ax.le(2))
cont_01 = (a + 2) * ax3 - (a + 3) * ax2 + 1
cont_01 = cont_01 * range_01.to(dtype=x.dtype)
cont_12 = (a * ax3) - (5 * a * ax2) + (8 * a * ax) - (4 * a)
cont_12 = cont_12 * range_12.to(dtype=x.dtype)
cont = cont_01 + cont_12
return cont
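# The function above evaluates the Keys bicubic kernel (here with a = -0.5):
#   w(x) = (a + 2)|x|^3 - (a + 3)|x|^2 + 1        for |x| <= 1
#   w(x) = a|x|^3 - 5a|x|^2 + 8a|x| - 4a          for 1 < |x| <= 2
#   w(x) = 0                                      otherwise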
def gaussian_contribution(x: torch.Tensor, sigma: float = 2.0) -> torch.Tensor:
range_3sigma = x.abs() <= 3 * sigma + 1
# Normalization will be done after
cont = torch.exp(-x.pow(2) / (2 * sigma ** 2))
cont = cont * range_3sigma.to(dtype=x.dtype)
return cont
def discrete_kernel(
kernel: str, scale: float, antialiasing: bool = True
) -> torch.Tensor:
"""
For downsampling with integer scale only.
"""
downsampling_factor = int(1 / scale)
if kernel == "cubic":
kernel_size_orig = 4
else:
raise ValueError("Pass!")
if antialiasing:
kernel_size = kernel_size_orig * downsampling_factor
else:
kernel_size = kernel_size_orig
if downsampling_factor % 2 == 0:
a = kernel_size_orig * (0.5 - 1 / (2 * kernel_size))
else:
kernel_size -= 1
a = kernel_size_orig * (0.5 - 1 / (kernel_size + 1))
with torch.no_grad():
r = torch.linspace(-a, a, steps=kernel_size)
k = cubic_contribution(r).view(-1, 1)
k = torch.matmul(k, k.t())
k /= k.sum()
return k
def reflect_padding(
x: torch.Tensor, dim: int, pad_pre: int, pad_post: int
) -> torch.Tensor:
"""
Apply reflect padding to the given Tensor.
Note that it is slightly different from the PyTorch functional.pad,
where boundary elements are used only once.
Instead, we follow the MATLAB implementation
which uses boundary elements twice.
For example,
[a, b, c, d] would become [b, a, b, c, d, c] with the PyTorch implementation,
while our implementation yields [a, a, b, c, d, d].
"""
b, c, h, w = x.size()
if dim == 2 or dim == -2:
padding_buffer = x.new_zeros(b, c, h + pad_pre + pad_post, w)
padding_buffer[..., pad_pre : (h + pad_pre), :].copy_(x)
for p in range(pad_pre):
padding_buffer[..., pad_pre - p - 1, :].copy_(x[..., p, :])
for p in range(pad_post):
padding_buffer[..., h + pad_pre + p, :].copy_(x[..., -(p + 1), :])
else:
padding_buffer = x.new_zeros(b, c, h, w + pad_pre + pad_post)
padding_buffer[..., pad_pre : (w + pad_pre)].copy_(x)
for p in range(pad_pre):
padding_buffer[..., pad_pre - p - 1].copy_(x[..., p])
for p in range(pad_post):
padding_buffer[..., w + pad_pre + p].copy_(x[..., -(p + 1)])
return padding_buffer
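# Tiny sketch of the MATLAB-style padding described in the docstring above:
# padding a 1x4 row by one pixel on each side repeats the boundary samples.
def _demo_reflect_padding():
    x = torch.tensor([[[[1.0, 2.0, 3.0, 4.0]]]])  # (b=1, c=1, h=1, w=4)
    print(reflect_padding(x, dim=-1, pad_pre=1, pad_post=1))
    # tensor([[[[1., 1., 2., 3., 4., 4.]]]])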
def padding(
x: torch.Tensor,
dim: int,
pad_pre: int,
pad_post: int,
padding_type: typing.Optional[str] = "reflect",
) -> torch.Tensor:
if padding_type is None:
return x
elif padding_type == "reflect":
x_pad = reflect_padding(x, dim, pad_pre, pad_post)
else:
raise ValueError("{} padding is not supported!".format(padding_type))
return x_pad
def get_padding(
base: torch.Tensor, kernel_size: int, x_size: int
) -> typing.Tuple[int, int, torch.Tensor]:
base = base.long()
r_min = base.min()
r_max = base.max() + kernel_size - 1
if r_min <= 0:
pad_pre = -r_min
pad_pre = pad_pre.item()
base += pad_pre
else:
pad_pre = 0
if r_max >= x_size:
pad_post = r_max - x_size + 1
pad_post = pad_post.item()
else:
pad_post = 0
return pad_pre, pad_post, base
def get_weight(
dist: torch.Tensor,
kernel_size: int,
kernel: str = "cubic",
sigma: float = 2.0,
antialiasing_factor: float = 1,
) -> torch.Tensor:
buffer_pos = dist.new_zeros(kernel_size, len(dist))
for idx, buffer_sub in enumerate(buffer_pos):
buffer_sub.copy_(dist - idx)
# Expand (downsampling) / Shrink (upsampling) the receptive field.
buffer_pos *= antialiasing_factor
if kernel == "cubic":
weight = cubic_contribution(buffer_pos)
elif kernel == "gaussian":
weight = gaussian_contribution(buffer_pos, sigma=sigma)
else:
raise ValueError("{} kernel is not supported!".format(kernel))
weight /= weight.sum(dim=0, keepdim=True)
return weight
def reshape_tensor(x: torch.Tensor, dim: int, kernel_size: int) -> torch.Tensor:
# Resize height
if dim == 2 or dim == -2:
k = (kernel_size, 1)
h_out = x.size(-2) - kernel_size + 1
w_out = x.size(-1)
# Resize width
else:
k = (1, kernel_size)
h_out = x.size(-2)
w_out = x.size(-1) - kernel_size + 1
unfold = F.unfold(x, k)
unfold = unfold.view(unfold.size(0), -1, h_out, w_out)
return unfold
def reshape_input(x: torch.Tensor) -> typing.Tuple[torch.Tensor, _I, _I, int, int]:
if x.dim() == 4:
b, c, h, w = x.size()
elif x.dim() == 3:
c, h, w = x.size()
b = None
elif x.dim() == 2:
h, w = x.size()
b = c = None
else:
raise ValueError("{}-dim Tensor is not supported!".format(x.dim()))
x = x.view(-1, 1, h, w)
return x, b, c, h, w
def reshape_output(x: torch.Tensor, b: _I, c: _I) -> torch.Tensor:
rh = x.size(-2)
rw = x.size(-1)
# Back to the original dimension
if b is not None:
x = x.view(b, c, rh, rw) # 4-dim
else:
if c is not None:
x = x.view(c, rh, rw) # 3-dim
else:
x = x.view(rh, rw) # 2-dim
return x
def cast_input(x: torch.Tensor) -> typing.Tuple[torch.Tensor, _D]:
    # only cast when the input is neither float32 nor float64
    if x.dtype != torch.float32 and x.dtype != torch.float64:
        dtype = x.dtype
        x = x.float()
else:
dtype = None
return x, dtype
def cast_output(x: torch.Tensor, dtype: _D) -> torch.Tensor:
if dtype is not None:
if not dtype.is_floating_point:
x = x - x.detach() + x.round()
# To prevent over/underflow when converting types
if dtype is torch.uint8:
x = x.clamp(0, 255)
x = x.to(dtype=dtype)
return x
def resize_1d(
x: torch.Tensor,
dim: int,
size: int,
scale: float,
kernel: str = "cubic",
sigma: float = 2.0,
padding_type: str = "reflect",
antialiasing: bool = True,
) -> torch.Tensor:
"""
    Args:
        x (torch.Tensor): A torch.Tensor of dimension (B x C, 1, H, W).
        dim (int): the dimension to resize (-2 for height, -1 for width).
        scale (float): resizing scale along ``dim``.
        size (int): target size along ``dim``.
    Return:
        torch.Tensor: the tensor resized along ``dim``.
    """
# Identity case
if scale == 1:
return x
# Default bicubic kernel with antialiasing (only when downsampling)
if kernel == "cubic":
kernel_size = 4
else:
kernel_size = math.floor(6 * sigma)
if antialiasing and (scale < 1):
antialiasing_factor = scale
kernel_size = math.ceil(kernel_size / antialiasing_factor)
else:
antialiasing_factor = 1
    # We allow a margin on both sides
kernel_size += 2
# Weights only depend on the shape of input and output,
# so we do not calculate gradients here.
with torch.no_grad():
pos = torch.linspace(
0,
size - 1,
steps=size,
dtype=x.dtype,
device=x.device,
)
pos = (pos + 0.5) / scale - 0.5
base = pos.floor() - (kernel_size // 2) + 1
dist = pos - base
weight = get_weight(
dist,
kernel_size,
kernel=kernel,
sigma=sigma,
antialiasing_factor=antialiasing_factor,
)
pad_pre, pad_post, base = get_padding(base, kernel_size, x.size(dim))
# To backpropagate through x
x_pad = padding(x, dim, pad_pre, pad_post, padding_type=padding_type)
unfold = reshape_tensor(x_pad, dim, kernel_size)
# Subsampling first
if dim == 2 or dim == -2:
sample = unfold[..., base, :]
weight = weight.view(1, kernel_size, sample.size(2), 1)
else:
sample = unfold[..., base]
weight = weight.view(1, kernel_size, 1, sample.size(3))
# Apply the kernel
x = sample * weight
x = x.sum(dim=1, keepdim=True)
return x
def downsampling_2d(
x: torch.Tensor, k: torch.Tensor, scale: int, padding_type: str = "reflect"
) -> torch.Tensor:
c = x.size(1)
k_h = k.size(-2)
k_w = k.size(-1)
k = k.to(dtype=x.dtype, device=x.device)
k = k.view(1, 1, k_h, k_w)
k = k.repeat(c, c, 1, 1)
e = torch.eye(c, dtype=k.dtype, device=k.device, requires_grad=False)
e = e.view(c, c, 1, 1)
k = k * e
pad_h = (k_h - scale) // 2
pad_w = (k_w - scale) // 2
x = padding(x, -2, pad_h, pad_h, padding_type=padding_type)
x = padding(x, -1, pad_w, pad_w, padding_type=padding_type)
y = F.conv2d(x, k, padding=0, stride=scale)
return y
def imresize(
x: torch.Tensor,
scale: typing.Optional[float] = None,
sizes: typing.Optional[typing.Tuple[int, int]] = None,
kernel: typing.Union[str, torch.Tensor] = "cubic",
sigma: float = 2,
rotation_degree: float = 0,
padding_type: str = "reflect",
antialiasing: bool = True,
) -> torch.Tensor:
"""
Args:
x (torch.Tensor):
scale (float):
sizes (tuple(int, int)):
kernel (str, default='cubic'):
sigma (float, default=2):
rotation_degree (float, default=0):
padding_type (str, default='reflect'):
antialiasing (bool, default=True):
Return:
torch.Tensor:
"""
if scale is None and sizes is None:
raise ValueError("One of scale or sizes must be specified!")
if scale is not None and sizes is not None:
raise ValueError("Please specify scale or sizes to avoid conflict!")
x, b, c, h, w = reshape_input(x)
if sizes is None and scale is not None:
"""
# Check if we can apply the convolution algorithm
scale_inv = 1 / scale
if isinstance(kernel, str) and scale_inv.is_integer():
kernel = discrete_kernel(kernel, scale, antialiasing=antialiasing)
elif isinstance(kernel, torch.Tensor) and not scale_inv.is_integer():
raise ValueError(
'An integer downsampling factor '
'should be used with a predefined kernel!'
)
"""
# Determine output size
sizes = (math.ceil(h * scale), math.ceil(w * scale))
scales = (scale, scale)
if scale is None and sizes is not None:
scales = (sizes[0] / h, sizes[1] / w)
x, dtype = cast_input(x)
if isinstance(kernel, str) and sizes is not None:
# Core resizing module
x = resize_1d(
x,
-2,
size=sizes[0],
scale=scales[0],
kernel=kernel,
sigma=sigma,
padding_type=padding_type,
antialiasing=antialiasing,
)
x = resize_1d(
x,
-1,
size=sizes[1],
scale=scales[1],
kernel=kernel,
sigma=sigma,
padding_type=padding_type,
antialiasing=antialiasing,
)
elif isinstance(kernel, torch.Tensor) and scale is not None:
x = downsampling_2d(x, kernel, scale=int(1 / scale))
x = reshape_output(x, b, c)
x = cast_output(x, dtype)
return x
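# Illustrative usage sketch of `imresize` (not part of the original BVQI source);
# the tensor shapes and scale factors below are arbitrary example values.
def _demo_imresize():
    img = torch.rand(1, 3, 64, 48)                      # fake RGB image batch
    half = imresize(img, scale=0.5)                     # antialiased bicubic downscale -> (1, 3, 32, 24)
    fixed = imresize(img, sizes=(128, 96))              # upscale to an explicit (H, W) -> (1, 3, 128, 96)
    as_uint8 = imresize((img * 255).byte(), scale=0.5)  # non-float inputs are cast back afterwards
    return half.shape, fixed.shape, as_uint8.dtype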
| 12,696 | 27.404922 | 83 | py |
BVQI | BVQI-master/pyiqa/matlab_utils/.ipynb_checkpoints/functions-checkpoint.py | import math
import numpy as np
import torch
import torch.nn.functional as F
from pyiqa.archs.arch_util import ExactPadding2d, symm_pad, to_2tuple
def fspecial(size=None, sigma=None, channels=1, filter_type="gaussian"):
r"""Function same as 'fspecial' in MATLAB, only support gaussian now.
Args:
size (int or tuple): size of window
sigma (float): sigma of gaussian
channels (int): channels of output
"""
if filter_type == "gaussian":
shape = to_2tuple(size)
m, n = [(ss - 1.0) / 2.0 for ss in shape]
y, x = np.ogrid[-m : m + 1, -n : n + 1]
h = np.exp(-(x * x + y * y) / (2.0 * sigma * sigma))
h[h < np.finfo(h.dtype).eps * h.max()] = 0
sumh = h.sum()
if sumh != 0:
h /= sumh
h = torch.from_numpy(h).float().repeat(channels, 1, 1, 1)
return h
else:
raise NotImplementedError(
f"Only support gaussian filter now, got {filter_type}"
)
def conv2d(input, weight, bias=None, stride=1, padding="same", dilation=1, groups=1):
"""Matlab like conv2, weights needs to be flipped.
Args:
input (tensor): (b, c, h, w)
weight (tensor): (out_ch, in_ch, kh, kw), conv weight
bias (bool or None): bias
stride (int or tuple): conv stride
padding (str): padding mode
dilation (int): conv dilation
"""
kernel_size = weight.shape[2:]
pad_func = ExactPadding2d(kernel_size, stride, dilation, mode=padding)
weight = torch.flip(weight, dims=(-1, -2))
return F.conv2d(
pad_func(input), weight, bias, stride, dilation=dilation, groups=groups
)
def imfilter(input, weight, bias=None, stride=1, padding="same", dilation=1, groups=1):
"""imfilter same as matlab.
Args:
input (tensor): (b, c, h, w) tensor to be filtered
weight (tensor): (out_ch, in_ch, kh, kw) filter kernel
padding (str): padding mode
dilation (int): dilation of conv
groups (int): groups of conv
"""
kernel_size = weight.shape[2:]
pad_func = ExactPadding2d(kernel_size, stride, dilation, mode=padding)
return F.conv2d(
pad_func(input), weight, bias, stride, dilation=dilation, groups=groups
)
def filter2(input, weight, shape="same"):
if shape == "same":
return imfilter(input, weight, groups=input.shape[1])
elif shape == "valid":
return F.conv2d(input, weight, stride=1, padding=0, groups=input.shape[1])
else:
raise NotImplementedError(f"Shape type {shape} is not implemented.")
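# Illustrative usage sketch (not part of the original pyiqa module); the 7x7 Gaussian
# size/sigma and the fake image batch below are arbitrary example values.
def _demo_gaussian_filtering():
    img = torch.rand(2, 3, 32, 32)                     # fake image batch
    kernel = fspecial(7, 1.5, channels=3)              # one 7x7 Gaussian per channel
    same = filter2(img, kernel, shape="same")          # MATLAB-like 'same' filtering -> (2, 3, 32, 32)
    valid = filter2(img, kernel, shape="valid")        # no padding: 32 - 7 + 1 = 26 -> (2, 3, 26, 26)
    return same.shape, valid.shape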
def dct(x, norm=None):
"""
Discrete Cosine Transform, Type II (a.k.a. the DCT)
For the meaning of the parameter `norm`, see:
https://docs.scipy.org/doc/scipy-0.14.0/reference/generated/scipy.fftpack.dct.html
Args:
x: the input signal
norm: the normalization, None or 'ortho'
Return:
the DCT-II of the signal over the last dimension
"""
x_shape = x.shape
N = x_shape[-1]
x = x.contiguous().view(-1, N)
v = torch.cat([x[:, ::2], x[:, 1::2].flip([1])], dim=-1)
Vc = torch.view_as_real(torch.fft.fft(v, dim=-1))
k = -torch.arange(N, dtype=x.dtype, device=x.device)[None, :] * np.pi / (2 * N)
W_r = torch.cos(k)
W_i = torch.sin(k)
V = Vc[:, :, 0] * W_r - Vc[:, :, 1] * W_i
if norm == "ortho":
V[:, 0] /= np.sqrt(N) * 2
V[:, 1:] /= np.sqrt(N / 2) * 2
V = 2 * V.view(*x_shape)
return V
def dct2d(x, norm="ortho"):
"""
    2-dimensional Discrete Cosine Transform, Type II (a.k.a. the DCT)
For the meaning of the parameter `norm`, see:
https://docs.scipy.org/doc/scipy-0.14.0/reference/generated/scipy.fftpack.dct.html
:param x: the input signal
:param norm: the normalization, None or 'ortho'
:return: the DCT-II of the signal over the last 2 dimensions
"""
X1 = dct(x, norm=norm)
X2 = dct(X1.transpose(-1, -2), norm=norm)
return X2.transpose(-1, -2)
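# Illustrative usage sketch (not part of the original module): apply the 2D DCT to a
# small random block; the 8x8 size is an arbitrary example value.
def _demo_dct2d():
    block = torch.rand(8, 8)
    coeffs = dct2d(block)            # (8, 8); low frequencies gather at the top-left
    dc = coeffs[0, 0]                # DC term, proportional to the block mean
    return coeffs.shape, dc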
def fitweibull(x, iters=50, eps=1e-2):
"""Simulate wblfit function in matlab.
ref: https://github.com/mlosch/python-weibullfit/blob/master/weibull/backend_pytorch.py
Fits a 2-parameter Weibull distribution to the given data using maximum-likelihood estimation.
:param x (tensor): (B, N), batch of samples from an (unknown) distribution. Each value must satisfy x > 0.
:param iters: Maximum number of iterations
    :param eps: Stopping criterion. Fit is stopped if the change within two iterations is smaller than eps.
:return: Tuple (Shape, Scale) which can be (NaN, NaN) if a fit is impossible.
Impossible fits may be due to 0-values in x.
"""
ln_x = torch.log(x)
k = 1.2 / torch.std(ln_x, dim=1, keepdim=True)
k_t_1 = k
for t in range(iters):
# Partial derivative df/dk
x_k = x ** k.repeat(1, x.shape[1])
x_k_ln_x = x_k * ln_x
ff = torch.sum(x_k_ln_x, dim=-1, keepdim=True)
fg = torch.sum(x_k, dim=-1, keepdim=True)
f1 = torch.mean(ln_x, dim=-1, keepdim=True)
f = ff / fg - f1 - (1.0 / k)
ff_prime = torch.sum(x_k_ln_x * ln_x, dim=-1, keepdim=True)
fg_prime = ff
f_prime = (ff_prime / fg - (ff / fg * fg_prime / fg)) + (1.0 / (k * k))
# Newton-Raphson method k = k - f(k;x)/f'(k;x)
k = k - f / f_prime
error = torch.abs(k - k_t_1).max().item()
if error < eps:
break
k_t_1 = k
# Lambda (scale) can be calculated directly
lam = torch.mean(x ** k.repeat(1, x.shape[1]), dim=-1, keepdim=True) ** (1.0 / k)
    return torch.cat((k, lam), dim=1)  # column 0: shape (k), column 1: scale (lambda)
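# Illustrative sketch of `fitweibull` (not part of the original module): recover known
# Weibull parameters from inverse-CDF samples; the parameter values are arbitrary examples.
def _demo_fitweibull():
    true_shape, true_scale = 1.5, 2.0
    u = torch.rand(4, 2000).clamp(1e-6, 1 - 1e-6)
    samples = true_scale * (-torch.log(1 - u)) ** (1.0 / true_shape)
    params = fitweibull(samples)     # (4, 2): estimates should be close to (1.5, 2.0)
    return params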
def cov(tensor, rowvar=True, bias=False):
r"""Estimate a covariance matrix (np.cov)
Ref: https://gist.github.com/ModarTensai/5ab449acba9df1a26c12060240773110
"""
tensor = tensor if rowvar else tensor.transpose(-1, -2)
tensor = tensor - tensor.mean(dim=-1, keepdim=True)
if tensor.shape[-1] > 1:
factor = 1 / (tensor.shape[-1] - int(not bool(bias)))
else:
factor = 1
return factor * tensor @ tensor.transpose(-1, -2)
def nancov(x):
r"""Calculate nancov for batched tensor, rows that contains nan value
will be removed.
Args:
x (tensor): (B, row_num, feat_dim)
Return:
cov (tensor): (B, feat_dim, feat_dim)
"""
assert (
len(x.shape) == 3
), f"Shape of input should be (batch_size, row_num, feat_dim), but got {x.shape}"
b, rownum, feat_dim = x.shape
nan_mask = torch.isnan(x).any(dim=2, keepdim=True)
cov_x = []
for i in range(b):
x_no_nan = x[i].masked_select(~nan_mask[i]).reshape(-1, feat_dim)
cov_x.append(cov(x_no_nan, rowvar=False))
return torch.stack(cov_x)
def nanmean(v, *args, inplace=False, **kwargs):
r"""nanmean same as matlab function: calculate mean values by removing all nan."""
if not inplace:
v = v.clone()
is_nan = torch.isnan(v)
v[is_nan] = 0
return v.sum(*args, **kwargs) / (~is_nan).float().sum(*args, **kwargs)
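# Illustrative sketch of the NaN-aware helpers above (not part of the original module);
# shapes are arbitrary example values.
def _demo_nan_helpers():
    feats = torch.randn(2, 100, 5)
    feats[0, :3, 0] = float("nan")          # corrupt a few rows of the first batch item
    c = nancov(feats)                       # (2, 5, 5); NaN-containing rows are dropped per item
    m = nanmean(feats, dim=1)               # (2, 5); mean over rows ignoring NaNs
    return c.shape, m.shape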
def im2col(x, kernel, mode="sliding"):
r"""simple im2col as matlab
Args:
x (Tensor): shape (b, c, h, w)
kernel (int): kernel size
mode (string):
- sliding (default): rearranges sliding image neighborhoods of kernel size into columns with no zero-padding
- distinct: rearranges discrete image blocks of kernel size into columns, zero pad right and bottom if necessary
Return:
        flattened patches (Tensor): (b, num_patches, c * kernel[0] * kernel[1])
"""
b, c, h, w = x.shape
kernel = to_2tuple(kernel)
if mode == "sliding":
stride = 1
elif mode == "distinct":
stride = kernel
h2 = math.ceil(h / stride[0])
w2 = math.ceil(w / stride[1])
pad_row = (h2 - 1) * stride[0] + kernel[0] - h
pad_col = (w2 - 1) * stride[1] + kernel[1] - w
x = F.pad(x, (0, pad_col, 0, pad_row))
else:
raise NotImplementedError(f"Type {mode} is not implemented yet.")
patches = F.unfold(x, kernel, dilation=1, stride=stride)
b, _, pnum = patches.shape
patches = patches.transpose(1, 2).reshape(b, pnum, -1)
return patches
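# Illustrative sketch of `im2col` (not part of the original module); the 4x4 toy image
# and 2x2 kernel are arbitrary example values.
def _demo_im2col():
    x = torch.arange(16.0).reshape(1, 1, 4, 4)
    sliding = im2col(x, 2, mode="sliding")    # (1, 9, 4): every overlapping 2x2 patch
    distinct = im2col(x, 2, mode="distinct")  # (1, 4, 4): non-overlapping 2x2 blocks
    return sliding.shape, distinct.shape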
def blockproc(
x, kernel, fun, border_size=None, pad_partial=False, pad_method="zero", **func_args
):
r"""blockproc function like matlab
Difference:
- Partial blocks is discarded (if exist) for fast GPU process.
Args:
x (tensor): shape (b, c, h, w)
kernel (int or tuple): block size
func (function): function to process each block
border_size (int or tuple): border pixels to each block
pad_partial: pad partial blocks to make them full-sized, default False
pad_method: [zero, replicate, symmetric] how to pad partial block when pad_partial is set True
Return:
results (tensor): concatenated results of each block
"""
assert len(x.shape) == 4, f"Shape of input has to be (b, c, h, w) but got {x.shape}"
kernel = to_2tuple(kernel)
if pad_partial:
b, c, h, w = x.shape
stride = kernel
h2 = math.ceil(h / stride[0])
w2 = math.ceil(w / stride[1])
pad_row = (h2 - 1) * stride[0] + kernel[0] - h
pad_col = (w2 - 1) * stride[1] + kernel[1] - w
padding = (0, pad_col, 0, pad_row)
if pad_method == "zero":
x = F.pad(x, padding, mode="constant")
elif pad_method == "symmetric":
x = symm_pad(x, padding)
else:
x = F.pad(x, padding, mode=pad_method)
if border_size is not None:
raise NotImplementedError("Blockproc with border is not implemented yet")
else:
b, c, h, w = x.shape
block_size_h, block_size_w = kernel
num_block_h = math.floor(h / block_size_h)
num_block_w = math.floor(w / block_size_w)
# extract blocks in (row, column) manner, i.e., stored with column first
blocks = F.unfold(x, kernel, stride=kernel)
blocks = blocks.reshape(b, c, *kernel, num_block_h, num_block_w)
blocks = blocks.permute(5, 4, 0, 1, 2, 3).reshape(
num_block_h * num_block_w * b, c, *kernel
)
results = fun(blocks, func_args)
results = results.reshape(
num_block_h * num_block_w, b, *results.shape[1:]
).transpose(0, 1)
return results
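# Illustrative sketch of `blockproc` (not part of the original module): reduce each
# non-overlapping 16x16 block to its mean; sizes are arbitrary example values. Note the
# processing function receives (blocks, func_args) as defined above.
def _demo_blockproc():
    x = torch.rand(2, 1, 64, 64)
    block_means = blockproc(x, 16, fun=lambda blocks, _unused: blocks.mean(dim=(1, 2, 3)))
    return block_means.shape                  # (2, 16): 4 x 4 blocks per image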
| 10,439 | 33.569536 | 124 | py |
BVQI | BVQI-master/pyiqa/models/lr_scheduler.py | import math
from collections import Counter
from torch.optim.lr_scheduler import _LRScheduler
class MultiStepRestartLR(_LRScheduler):
"""MultiStep with restarts learning rate scheme.
Args:
optimizer (torch.nn.optimizer): Torch optimizer.
milestones (list): Iterations that will decrease learning rate.
gamma (float): Decrease ratio. Default: 0.1.
restarts (list): Restart iterations. Default: [0].
restart_weights (list): Restart weights at each restart iteration.
Default: [1].
last_epoch (int): Used in _LRScheduler. Default: -1.
"""
def __init__(
self,
optimizer,
milestones,
gamma=0.1,
restarts=(0,),
restart_weights=(1,),
last_epoch=-1,
):
self.milestones = Counter(milestones)
self.gamma = gamma
self.restarts = restarts
self.restart_weights = restart_weights
assert len(self.restarts) == len(
self.restart_weights
), "restarts and their weights do not match."
super(MultiStepRestartLR, self).__init__(optimizer, last_epoch)
def get_lr(self):
if self.last_epoch in self.restarts:
weight = self.restart_weights[self.restarts.index(self.last_epoch)]
return [
group["initial_lr"] * weight for group in self.optimizer.param_groups
]
if self.last_epoch not in self.milestones:
return [group["lr"] for group in self.optimizer.param_groups]
return [
group["lr"] * self.gamma ** self.milestones[self.last_epoch]
for group in self.optimizer.param_groups
]
def get_position_from_periods(iteration, cumulative_period):
"""Get the position from a period list.
It will return the index of the right-closest number in the period list.
For example, the cumulative_period = [100, 200, 300, 400],
if iteration == 50, return 0;
if iteration == 210, return 2;
if iteration == 300, return 2.
Args:
iteration (int): Current iteration.
cumulative_period (list[int]): Cumulative period list.
Returns:
int: The position of the right-closest number in the period list.
"""
for i, period in enumerate(cumulative_period):
if iteration <= period:
return i
class CosineAnnealingRestartLR(_LRScheduler):
"""Cosine annealing with restarts learning rate scheme.
An example of config:
periods = [10, 10, 10, 10]
restart_weights = [1, 0.5, 0.5, 0.5]
eta_min=1e-7
It has four cycles, each has 10 iterations. At 10th, 20th, 30th, the
scheduler will restart with the weights in restart_weights.
Args:
optimizer (torch.nn.optimizer): Torch optimizer.
        periods (list): Period for each cosine annealing cycle.
restart_weights (list): Restart weights at each restart iteration.
Default: [1].
eta_min (float): The minimum lr. Default: 0.
last_epoch (int): Used in _LRScheduler. Default: -1.
"""
def __init__(
self, optimizer, periods, restart_weights=(1,), eta_min=0, last_epoch=-1
):
self.periods = periods
self.restart_weights = restart_weights
self.eta_min = eta_min
assert len(self.periods) == len(
self.restart_weights
), "periods and restart_weights should have the same length."
self.cumulative_period = [
sum(self.periods[0 : i + 1]) for i in range(0, len(self.periods))
]
super(CosineAnnealingRestartLR, self).__init__(optimizer, last_epoch)
def get_lr(self):
idx = get_position_from_periods(self.last_epoch, self.cumulative_period)
current_weight = self.restart_weights[idx]
nearest_restart = 0 if idx == 0 else self.cumulative_period[idx - 1]
current_period = self.periods[idx]
return [
self.eta_min
+ current_weight
* 0.5
* (base_lr - self.eta_min)
* (
1
+ math.cos(
math.pi * ((self.last_epoch - nearest_restart) / current_period)
)
)
for base_lr in self.base_lrs
]
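# Illustrative sketch (not part of the original module): wire CosineAnnealingRestartLR
# to a throw-away optimizer; the periods, weights and learning rate are arbitrary examples.
def _demo_cosine_restart():
    import torch
    params = [torch.nn.Parameter(torch.zeros(1))]
    optim = torch.optim.SGD(params, lr=1e-3)
    sched = CosineAnnealingRestartLR(
        optim, periods=[10, 10], restart_weights=[1, 0.5], eta_min=1e-7
    )
    lrs = []
    for _ in range(20):
        optim.step()
        sched.step()
        lrs.append(sched.get_last_lr()[0])
    return lrs    # decays over the first period, then restarts at half the base lr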
| 4,268 | 32.880952 | 85 | py |
BVQI | BVQI-master/pyiqa/models/base_model.py | import os
import time
from collections import OrderedDict
from copy import deepcopy
import torch
from torch.nn.parallel import DataParallel, DistributedDataParallel
from pyiqa.models import lr_scheduler as lr_scheduler
from pyiqa.utils import get_root_logger
from pyiqa.utils.dist_util import master_only
class BaseModel:
"""Base model."""
def __init__(self, opt):
self.opt = opt
self.device = torch.device("cuda" if opt["num_gpu"] != 0 else "cpu")
self.is_train = opt["is_train"]
self.schedulers = []
self.optimizers = []
def feed_data(self, data):
pass
def optimize_parameters(self):
pass
def get_current_visuals(self):
pass
def save(self, epoch, current_iter):
"""Save networks and training state."""
pass
def validation(self, dataloader, current_iter, tb_logger, save_img=False):
"""Validation function.
Args:
dataloader (torch.utils.data.DataLoader): Validation dataloader.
current_iter (int): Current iteration.
tb_logger (tensorboard logger): Tensorboard logger.
save_img (bool): Whether to save images. Default: False.
"""
if self.opt["dist"]:
self.dist_validation(dataloader, current_iter, tb_logger, save_img)
else:
self.nondist_validation(dataloader, current_iter, tb_logger, save_img)
def _initialize_best_metric_results(self, dataset_name):
"""Initialize the best metric results dict for recording the best metric value and iteration."""
if (
hasattr(self, "best_metric_results")
and dataset_name in self.best_metric_results
):
return
elif not hasattr(self, "best_metric_results"):
self.best_metric_results = dict()
# add a dataset record
record = dict()
for metric, content in self.opt["val"]["metrics"].items():
better = content.get("better", "higher")
init_val = float("-inf") if better == "higher" else float("inf")
record[metric] = dict(better=better, val=init_val, iter=-1)
self.best_metric_results[dataset_name] = record
self.key_metric = self.opt["val"].get("key_metric", None)
def _update_metric_result(self, dataset_name, metric, val, current_iter):
self.best_metric_results[dataset_name][metric]["val"] = val
self.best_metric_results[dataset_name][metric]["iter"] = current_iter
def _update_best_metric_result(self, dataset_name, metric, val, current_iter):
if self.best_metric_results[dataset_name][metric]["better"] == "higher":
if val >= self.best_metric_results[dataset_name][metric]["val"]:
self.best_metric_results[dataset_name][metric]["val"] = val
self.best_metric_results[dataset_name][metric]["iter"] = current_iter
return True
else:
return False
else:
if val <= self.best_metric_results[dataset_name][metric]["val"]:
self.best_metric_results[dataset_name][metric]["val"] = val
self.best_metric_results[dataset_name][metric]["iter"] = current_iter
return True
else:
return False
def model_ema(self, decay=0.999):
net_g = self.get_bare_model(self.net_g)
net_g_params = dict(net_g.named_parameters())
net_g_ema_params = dict(self.net_g_ema.named_parameters())
for k in net_g_ema_params.keys():
net_g_ema_params[k].data.mul_(decay).add_(
net_g_params[k].data, alpha=1 - decay
)
def copy_model(self, net_a, net_b):
"""copy model from net_a to net_b"""
tmp_net_a = self.get_bare_model(net_a)
tmp_net_b = self.get_bare_model(net_b)
tmp_net_b.load_state_dict(tmp_net_a.state_dict())
def get_current_log(self):
return self.log_dict
def model_to_device(self, net):
"""Model to device. It also warps models with DistributedDataParallel
or DataParallel.
Args:
net (nn.Module)
"""
net = net.to(self.device)
if self.opt["dist"]:
find_unused_parameters = self.opt.get("find_unused_parameters", False)
net = DistributedDataParallel(
net,
device_ids=[torch.cuda.current_device()],
find_unused_parameters=find_unused_parameters,
)
elif self.opt["num_gpu"] > 1:
net = DataParallel(net)
return net
def get_optimizer(self, optim_type, params, lr, **kwargs):
optim_class = getattr(torch.optim, optim_type)
optimizer = optim_class(params, lr, **kwargs)
return optimizer
def setup_schedulers(self, scheduler_name="scheduler"):
"""Set up schedulers."""
train_opt = self.opt["train"]
scheduler_type = train_opt[scheduler_name].pop("type")
if scheduler_type in ["MultiStepLR", "MultiStepRestartLR"]:
for optimizer in self.optimizers:
self.schedulers.append(
lr_scheduler.MultiStepRestartLR(
optimizer, **train_opt[scheduler_name]
)
)
elif scheduler_type == "CosineAnnealingRestartLR":
for optimizer in self.optimizers:
self.schedulers.append(
lr_scheduler.CosineAnnealingRestartLR(
optimizer, **train_opt[scheduler_name]
)
)
else:
scheduler = getattr(torch.optim.lr_scheduler, scheduler_type)
for optimizer in self.optimizers:
self.schedulers.append(
scheduler(optimizer, **train_opt[scheduler_name])
)
def get_bare_model(self, net):
"""Get bare model, especially under wrapping with
DistributedDataParallel or DataParallel.
"""
if isinstance(net, (DataParallel, DistributedDataParallel)):
net = net.module
return net
@master_only
def print_network(self, net):
"""print the str and parameter number of a network.
Args:
net (nn.Module)
"""
if isinstance(net, (DataParallel, DistributedDataParallel)):
net_cls_str = f"{net.__class__.__name__} - {net.module.__class__.__name__}"
else:
net_cls_str = f"{net.__class__.__name__}"
net = self.get_bare_model(net)
net_str = str(net)
net_params = sum(map(lambda x: x.numel(), net.parameters()))
logger = get_root_logger()
logger.info(f"Network: {net_cls_str}, with parameters: {net_params:,d}")
logger.info(net_str)
def _set_lr(self, lr_groups_l):
"""Set learning rate for warmup.
Args:
lr_groups_l (list): List for lr_groups, each for an optimizer.
"""
for optimizer, lr_groups in zip(self.optimizers, lr_groups_l):
for param_group, lr in zip(optimizer.param_groups, lr_groups):
param_group["lr"] = lr
def _get_init_lr(self):
"""Get the initial lr, which is set by the scheduler."""
init_lr_groups_l = []
for optimizer in self.optimizers:
init_lr_groups_l.append([v["initial_lr"] for v in optimizer.param_groups])
return init_lr_groups_l
def update_learning_rate(self, current_iter, warmup_iter=-1):
"""Update learning rate.
Args:
current_iter (int): Current iteration.
warmup_iter (int): Warmup iter numbers. -1 for no warmup.
Default: -1.
"""
if current_iter > 1:
for scheduler in self.schedulers:
scheduler.step()
# set up warm-up learning rate
if current_iter < warmup_iter:
# get initial lr for each group
init_lr_g_l = self._get_init_lr()
# modify warming-up learning rates
# currently only support linearly warm up
warm_up_lr_l = []
for init_lr_g in init_lr_g_l:
warm_up_lr_l.append([v / warmup_iter * current_iter for v in init_lr_g])
# set learning rate
self._set_lr(warm_up_lr_l)
def get_current_learning_rate(self):
return [param_group["lr"] for param_group in self.optimizers[0].param_groups]
@master_only
def save_network(self, net, net_label, current_iter=None, param_key="params"):
"""Save networks.
Args:
net (nn.Module | list[nn.Module]): Network(s) to be saved.
net_label (str): Network label.
current_iter (int): Current iter number.
param_key (str | list[str]): The parameter key(s) to save network.
Default: 'params'.
"""
if current_iter == -1:
current_iter = "latest"
if current_iter is not None:
save_filename = f"{net_label}_{current_iter}.pth"
else:
save_filename = f"{net_label}.pth"
save_path = os.path.join(self.opt["path"]["models"], save_filename)
net = net if isinstance(net, list) else [net]
param_key = param_key if isinstance(param_key, list) else [param_key]
assert len(net) == len(
param_key
), "The lengths of net and param_key should be the same."
save_dict = {}
for net_, param_key_ in zip(net, param_key):
net_ = self.get_bare_model(net_)
state_dict = net_.state_dict()
for key, param in state_dict.items():
if key.startswith("module."): # remove unnecessary 'module.'
key = key[7:]
state_dict[key] = param.cpu()
save_dict[param_key_] = state_dict
# avoid occasional writing errors
retry = 3
while retry > 0:
try:
torch.save(save_dict, save_path)
except Exception as e:
logger = get_root_logger()
logger.warning(
f"Save model error: {e}, remaining retry times: {retry - 1}"
)
time.sleep(1)
else:
break
finally:
retry -= 1
if retry == 0:
logger.warning(f"Still cannot save {save_path}. Just ignore it.")
# raise IOError(f'Cannot save {save_path}.')
def _print_different_keys_loading(self, crt_net, load_net, strict=True):
"""print keys with different name or different size when loading models.
1. print keys with different names.
2. If strict=False, print the same key but with different tensor size.
It also ignore these keys with different sizes (not load).
Args:
crt_net (torch model): Current network.
load_net (dict): Loaded network.
strict (bool): Whether strictly loaded. Default: True.
"""
crt_net = self.get_bare_model(crt_net)
crt_net = crt_net.state_dict()
crt_net_keys = set(crt_net.keys())
load_net_keys = set(load_net.keys())
logger = get_root_logger()
if crt_net_keys != load_net_keys:
logger.warning("Current net - loaded net:")
for v in sorted(list(crt_net_keys - load_net_keys)):
logger.warning(f" {v}")
logger.warning("Loaded net - current net:")
for v in sorted(list(load_net_keys - crt_net_keys)):
logger.warning(f" {v}")
# check the size for the same keys
if not strict:
common_keys = crt_net_keys & load_net_keys
for k in common_keys:
if crt_net[k].size() != load_net[k].size():
logger.warning(
f"Size different, ignore [{k}]: crt_net: "
f"{crt_net[k].shape}; load_net: {load_net[k].shape}"
)
load_net[k + ".ignore"] = load_net.pop(k)
def load_network(self, net, load_path, strict=True, param_key="params"):
"""Load network.
Args:
load_path (str): The path of networks to be loaded.
net (nn.Module): Network.
strict (bool): Whether strictly loaded.
param_key (str): The parameter key of loaded network. If set to
None, use the root 'path'.
Default: 'params'.
"""
logger = get_root_logger()
net = self.get_bare_model(net)
load_net = torch.load(load_path, map_location=lambda storage, loc: storage)
if param_key is not None:
if param_key not in load_net and "params" in load_net:
param_key = "params"
logger.info("Loading: params_ema does not exist, use params.")
load_net = load_net[param_key]
logger.info(
f"Loading {net.__class__.__name__} model from {load_path}, with param key: [{param_key}]."
)
# remove unnecessary 'module.'
for k, v in deepcopy(load_net).items():
if k.startswith("module."):
load_net[k[7:]] = v
load_net.pop(k)
self._print_different_keys_loading(net, load_net, strict)
net.load_state_dict(load_net, strict=strict)
@master_only
def save_training_state(self, epoch, current_iter):
"""Save training states during training, which will be used for
resuming.
Args:
epoch (int): Current epoch.
current_iter (int): Current iteration.
"""
if current_iter != -1:
state = {
"epoch": epoch,
"iter": current_iter,
"optimizers": [],
"schedulers": [],
}
for o in self.optimizers:
state["optimizers"].append(o.state_dict())
for s in self.schedulers:
state["schedulers"].append(s.state_dict())
save_filename = f"{current_iter}.state"
save_path = os.path.join(self.opt["path"]["training_states"], save_filename)
# avoid occasional writing errors
retry = 3
while retry > 0:
try:
torch.save(state, save_path)
except Exception as e:
logger = get_root_logger()
logger.warning(
f"Save training state error: {e}, remaining retry times: {retry - 1}"
)
time.sleep(1)
else:
break
finally:
retry -= 1
if retry == 0:
logger.warning(f"Still cannot save {save_path}. Just ignore it.")
# raise IOError(f'Cannot save {save_path}.')
def resume_training(self, resume_state):
"""Reload the optimizers and schedulers for resumed training.
Args:
resume_state (dict): Resume state.
"""
resume_optimizers = resume_state["optimizers"]
resume_schedulers = resume_state["schedulers"]
assert len(resume_optimizers) == len(
self.optimizers
), "Wrong lengths of optimizers"
assert len(resume_schedulers) == len(
self.schedulers
), "Wrong lengths of schedulers"
for i, o in enumerate(resume_optimizers):
self.optimizers[i].load_state_dict(o)
for i, s in enumerate(resume_schedulers):
self.schedulers[i].load_state_dict(s)
def reduce_loss_dict(self, loss_dict):
"""reduce loss dict.
        In distributed training, it averages the losses among different GPUs.
Args:
loss_dict (OrderedDict): Loss dict.
"""
with torch.no_grad():
if self.opt["dist"]:
keys = []
losses = []
for name, value in loss_dict.items():
keys.append(name)
losses.append(value)
losses = torch.stack(losses, 0)
torch.distributed.reduce(losses, dst=0)
if self.opt["rank"] == 0:
losses /= self.opt["world_size"]
loss_dict = {key: loss for key, loss in zip(keys, losses)}
log_dict = OrderedDict()
for name, value in loss_dict.items():
log_dict[name] = value.mean().item()
return log_dict
| 16,657 | 36.859091 | 104 | py |
BVQI | BVQI-master/pyiqa/models/hypernet_model.py | from collections import OrderedDict
import torch
from pyiqa.metrics import calculate_metric
from pyiqa.utils.registry import MODEL_REGISTRY
from .general_iqa_model import GeneralIQAModel
@MODEL_REGISTRY.register()
class HyperNetModel(GeneralIQAModel):
"""General module to train an IQA network."""
def test(self):
self.net.eval()
with torch.no_grad():
self.output_score = self.get_bare_model(self.net).random_crop_test(
self.img_input
)
self.net.train()
def setup_optimizers(self):
train_opt = self.opt["train"]
optim_opt = train_opt["optim"]
bare_net = self.get_bare_model(self.net)
optim_params = [
{
"params": bare_net.base_model.parameters(),
"lr": optim_opt.pop("lr_basemodel"),
},
{
"params": [
p for k, p in bare_net.named_parameters() if "base_model" not in k
],
"lr": optim_opt.pop("lr_hypermodule"),
},
]
optim_type = optim_opt.pop("type")
self.optimizer = self.get_optimizer(optim_type, optim_params, **optim_opt)
self.optimizers.append(self.optimizer)
| 1,260 | 28.325581 | 86 | py |
BVQI | BVQI-master/pyiqa/models/dbcnn_model.py | from collections import OrderedDict
from os import path as osp
import torch
from tqdm import tqdm
from pyiqa.archs import build_network
from pyiqa.losses import build_loss
from pyiqa.metrics import calculate_metric
from pyiqa.models import lr_scheduler as lr_scheduler
from pyiqa.utils import get_root_logger, imwrite, logger, tensor2img
from pyiqa.utils.registry import MODEL_REGISTRY
from .general_iqa_model import GeneralIQAModel
@MODEL_REGISTRY.register()
class DBCNNModel(GeneralIQAModel):
"""General module to train an IQA network."""
def __init__(self, opt):
super(DBCNNModel, self).__init__(opt)
self.train_stage = "train"
def reset_optimizers_finetune(self):
logger = get_root_logger()
logger.info(f"\n Start finetune stage. Set all parameters trainable\n")
train_opt = self.opt["train"]
optim_params = []
for k, v in self.net.named_parameters():
v.requires_grad = True
optim_params.append(v)
optim_type = train_opt["optim_finetune"].pop("type")
self.optimizer = self.get_optimizer(
optim_type, optim_params, **train_opt["optim_finetune"]
)
self.optimizers = [self.optimizer]
# reset schedulers
self.schedulers = []
self.setup_schedulers("scheduler_finetune")
def optimize_parameters(self, current_iter):
if (
current_iter >= self.opt["train"]["finetune_start_iter"]
and self.train_stage != "finetune"
):
# copy best model from coarse training stage and reset optimizers
self.copy_model(self.net_best, self.net)
self.reset_optimizers_finetune()
self.train_stage = "finetune"
super().optimize_parameters(current_iter)
| 1,797 | 31.690909 | 79 | py |
BVQI | BVQI-master/pyiqa/models/sr_model.py | from collections import OrderedDict
from os import path as osp
import torch
from tqdm import tqdm
from pyiqa.archs import build_network
from pyiqa.losses import build_loss
from pyiqa.metrics import calculate_metric
from pyiqa.utils import get_root_logger, imwrite, tensor2img
from pyiqa.utils.registry import MODEL_REGISTRY
from .base_model import BaseModel
@MODEL_REGISTRY.register()
class SRModel(BaseModel):
"""Base SR model for single image super-resolution."""
def __init__(self, opt):
super(SRModel, self).__init__(opt)
# define network
self.net_g = build_network(opt["network_g"])
self.net_g = self.model_to_device(self.net_g)
self.print_network(self.net_g)
# load pretrained models
load_path = self.opt["path"].get("pretrain_network_g", None)
if load_path is not None:
param_key = self.opt["path"].get("param_key_g", "params")
self.load_network(
self.net_g,
load_path,
self.opt["path"].get("strict_load_g", True),
param_key,
)
if self.is_train:
self.init_training_settings()
def init_training_settings(self):
self.net_g.train()
train_opt = self.opt["train"]
self.ema_decay = train_opt.get("ema_decay", 0)
if self.ema_decay > 0:
logger = get_root_logger()
logger.info(f"Use Exponential Moving Average with decay: {self.ema_decay}")
# define network net_g with Exponential Moving Average (EMA)
# net_g_ema is used only for testing on one GPU and saving
# There is no need to wrap with DistributedDataParallel
self.net_g_ema = build_network(self.opt["network_g"]).to(self.device)
# load pretrained model
load_path = self.opt["path"].get("pretrain_network_g", None)
if load_path is not None:
self.load_network(
self.net_g_ema,
load_path,
self.opt["path"].get("strict_load_g", True),
"params_ema",
)
else:
self.model_ema(0) # copy net_g weight
self.net_g_ema.eval()
# define losses
if train_opt.get("pixel_opt"):
self.cri_pix = build_loss(train_opt["pixel_opt"]).to(self.device)
else:
self.cri_pix = None
if train_opt.get("perceptual_opt"):
self.cri_perceptual = build_loss(train_opt["perceptual_opt"]).to(
self.device
)
else:
self.cri_perceptual = None
if self.cri_pix is None and self.cri_perceptual is None:
raise ValueError("Both pixel and perceptual losses are None.")
# set up optimizers and schedulers
self.setup_optimizers()
self.setup_schedulers()
def setup_optimizers(self):
train_opt = self.opt["train"]
optim_params = []
for k, v in self.net_g.named_parameters():
if v.requires_grad:
optim_params.append(v)
else:
logger = get_root_logger()
logger.warning(f"Params {k} will not be optimized.")
optim_type = train_opt["optim_g"].pop("type")
self.optimizer_g = self.get_optimizer(
optim_type, optim_params, **train_opt["optim_g"]
)
self.optimizers.append(self.optimizer_g)
def feed_data(self, data):
self.lq = data["lq"].to(self.device)
if "gt" in data:
self.gt = data["gt"].to(self.device)
def optimize_parameters(self, current_iter):
self.optimizer_g.zero_grad()
self.output = self.net_g(self.lq)
l_total = 0
loss_dict = OrderedDict()
# pixel loss
if self.cri_pix:
l_pix = self.cri_pix(self.output, self.gt)
l_total += l_pix
loss_dict["l_pix"] = l_pix
# perceptual loss
if self.cri_perceptual:
l_percep, l_style = self.cri_perceptual(self.output, self.gt)
if l_percep is not None:
l_total += l_percep
loss_dict["l_percep"] = l_percep
if l_style is not None:
l_total += l_style
loss_dict["l_style"] = l_style
l_total.backward()
self.optimizer_g.step()
self.log_dict = self.reduce_loss_dict(loss_dict)
if self.ema_decay > 0:
self.model_ema(decay=self.ema_decay)
def test(self):
if hasattr(self, "net_g_ema"):
self.net_g_ema.eval()
with torch.no_grad():
self.output = self.net_g_ema(self.lq)
else:
self.net_g.eval()
with torch.no_grad():
self.output = self.net_g(self.lq)
self.net_g.train()
def dist_validation(self, dataloader, current_iter, tb_logger, save_img):
if self.opt["rank"] == 0:
self.nondist_validation(dataloader, current_iter, tb_logger, save_img)
def nondist_validation(self, dataloader, current_iter, tb_logger, save_img):
dataset_name = dataloader.dataset.opt["name"]
with_metrics = self.opt["val"].get("metrics") is not None
use_pbar = self.opt["val"].get("pbar", False)
if with_metrics:
if not hasattr(self, "metric_results"): # only execute in the first run
self.metric_results = {
metric: 0 for metric in self.opt["val"]["metrics"].keys()
}
# initialize the best metric results for each dataset_name (supporting multiple validation datasets)
self._initialize_best_metric_results(dataset_name)
# zero self.metric_results
if with_metrics:
self.metric_results = {metric: 0 for metric in self.metric_results}
metric_data = dict()
if use_pbar:
pbar = tqdm(total=len(dataloader), unit="image")
for idx, val_data in enumerate(dataloader):
img_name = osp.splitext(osp.basename(val_data["lq_path"][0]))[0]
self.feed_data(val_data)
self.test()
visuals = self.get_current_visuals()
sr_img = tensor2img([visuals["result"]])
metric_data["img"] = sr_img
if "gt" in visuals:
gt_img = tensor2img([visuals["gt"]])
metric_data["img2"] = gt_img
del self.gt
            # free tensors to avoid running out of GPU memory
del self.lq
del self.output
torch.cuda.empty_cache()
if save_img:
if self.opt["is_train"]:
save_img_path = osp.join(
self.opt["path"]["visualization"],
img_name,
f"{img_name}_{current_iter}.png",
)
else:
if self.opt["val"]["suffix"]:
save_img_path = osp.join(
self.opt["path"]["visualization"],
dataset_name,
f'{img_name}_{self.opt["val"]["suffix"]}.png',
)
else:
save_img_path = osp.join(
self.opt["path"]["visualization"],
dataset_name,
f'{img_name}_{self.opt["name"]}.png',
)
imwrite(sr_img, save_img_path)
if with_metrics:
# calculate metrics
for name, opt_ in self.opt["val"]["metrics"].items():
self.metric_results[name] += calculate_metric(metric_data, opt_)
if use_pbar:
pbar.update(1)
pbar.set_description(f"Test {img_name}")
if use_pbar:
pbar.close()
if with_metrics:
for metric in self.metric_results.keys():
self.metric_results[metric] /= idx + 1
# update the best metric result
self._update_best_metric_result(
dataset_name, metric, self.metric_results[metric], current_iter
)
self._log_validation_metric_values(current_iter, dataset_name, tb_logger)
def _log_validation_metric_values(self, current_iter, dataset_name, tb_logger):
log_str = f"Validation {dataset_name}\n"
for metric, value in self.metric_results.items():
log_str += f"\t # {metric}: {value:.4f}"
if hasattr(self, "best_metric_results"):
log_str += (
f'\tBest: {self.best_metric_results[dataset_name][metric]["val"]:.4f} @ '
f'{self.best_metric_results[dataset_name][metric]["iter"]} iter'
)
log_str += "\n"
logger = get_root_logger()
logger.info(log_str)
if tb_logger:
for metric, value in self.metric_results.items():
tb_logger.add_scalar(
f"metrics/{dataset_name}/{metric}", value, current_iter
)
def get_current_visuals(self):
out_dict = OrderedDict()
out_dict["lq"] = self.lq.detach().cpu()
out_dict["result"] = self.output.detach().cpu()
if hasattr(self, "gt"):
out_dict["gt"] = self.gt.detach().cpu()
return out_dict
def save(self, epoch, current_iter):
if hasattr(self, "net_g_ema"):
self.save_network(
[self.net_g, self.net_g_ema],
"net_g",
current_iter,
param_key=["params", "params_ema"],
)
else:
self.save_network(self.net_g, "net_g", current_iter)
self.save_training_state(epoch, current_iter)
| 9,927 | 35.77037 | 112 | py |
BVQI | BVQI-master/pyiqa/models/pieapp_model.py | from collections import OrderedDict
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from tqdm import tqdm
from pyiqa.metrics.correlation_coefficient import calculate_rmse
from pyiqa.utils.registry import MODEL_REGISTRY
from .general_iqa_model import GeneralIQAModel
@MODEL_REGISTRY.register()
class PieAPPModel(GeneralIQAModel):
"""General module to train an IQA network."""
def feed_data(self, data):
is_test = "img" in data.keys()
if "use_ref" in self.opt["train"]:
self.use_ref = self.opt["train"]["use_ref"]
if is_test:
self.img_input = data["img"].to(self.device)
self.gt_mos = data["mos_label"].to(self.device)
self.ref_input = data["ref_img"].to(self.device)
self.ref_img_path = data["ref_img_path"]
self.img_path = data["img_path"]
else:
self.img_A_input = data["distA_img"].to(self.device)
self.img_B_input = data["distB_img"].to(self.device)
self.img_ref_input = data["ref_img"].to(self.device)
self.gt_prob = data["mos_label"].to(self.device)
# from torchvision.utils import save_image
# save_image(torch.cat([self.img_A_input, self.img_B_input, self.img_ref_input], dim=0), 'tmp_test_pieappdataset.jpg')
# exit()
def optimize_parameters(self, current_iter):
self.optimizer.zero_grad()
score_A = self.net(self.img_A_input, self.img_ref_input)
score_B = self.net(self.img_B_input, self.img_ref_input)
train_output_score = 1 / (1 + torch.exp(score_A - score_B))
l_total = 0
loss_dict = OrderedDict()
# pixel loss
if self.cri_mos:
l_mos = self.cri_mos(train_output_score, self.gt_prob)
l_total += l_mos
loss_dict["l_mos"] = l_mos
l_total.backward()
self.optimizer.step()
self.log_dict = self.reduce_loss_dict(loss_dict)
# log metrics in training batch
pred_score = train_output_score.squeeze(-1).cpu().detach().numpy()
gt_prob = self.gt_prob.squeeze(-1).cpu().detach().numpy()
self.log_dict[f"train_metrics/rmse"] = calculate_rmse(pred_score, gt_prob)
| 2,271 | 32.910448 | 130 | py |
BVQI | BVQI-master/pyiqa/models/general_iqa_model.py | from collections import OrderedDict
from os import path as osp
import torch
from tqdm import tqdm
from pyiqa.archs import build_network
from pyiqa.losses import build_loss
from pyiqa.metrics import calculate_metric
from pyiqa.utils import get_root_logger, imwrite, tensor2img
from pyiqa.utils.registry import MODEL_REGISTRY
from .base_model import BaseModel
@MODEL_REGISTRY.register()
class GeneralIQAModel(BaseModel):
"""General module to train an IQA network."""
def __init__(self, opt):
super(GeneralIQAModel, self).__init__(opt)
# define network
self.net = build_network(opt["network"])
self.net = self.model_to_device(self.net)
self.print_network(self.net)
# load pretrained models
load_path = self.opt["path"].get("pretrain_network", None)
if load_path is not None:
param_key = self.opt["path"].get("param_key_g", "params")
self.load_network(
self.net,
load_path,
self.opt["path"].get("strict_load", True),
param_key,
)
if self.is_train:
self.init_training_settings()
def init_training_settings(self):
self.net.train()
train_opt = self.opt["train"]
self.net_best = build_network(self.opt["network"]).to(self.device)
# define losses
if train_opt.get("mos_loss_opt"):
self.cri_mos = build_loss(train_opt["mos_loss_opt"]).to(self.device)
else:
self.cri_mos = None
# define metric related loss, such as plcc loss
if train_opt.get("metric_loss_opt"):
self.cri_metric = build_loss(train_opt["metric_loss_opt"]).to(self.device)
else:
self.cri_metric = None
# set up optimizers and schedulers
self.setup_optimizers()
self.setup_schedulers()
def setup_optimizers(self):
train_opt = self.opt["train"]
optim_params = []
for k, v in self.net.named_parameters():
if v.requires_grad:
optim_params.append(v)
else:
logger = get_root_logger()
logger.warning(f"Params {k} will not be optimized.")
optim_type = train_opt["optim"].pop("type")
self.optimizer = self.get_optimizer(
optim_type, optim_params, **train_opt["optim"]
)
self.optimizers.append(self.optimizer)
def feed_data(self, data):
self.img_input = data["img"].to(self.device)
if "mos_label" in data:
self.gt_mos = data["mos_label"].to(self.device)
self.use_ref = self.opt["train"].get("use_ref", False)
def net_forward(self, net):
if self.use_ref:
return net(self.img_input, self.ref_input)
else:
return net(self.img_input)
def optimize_parameters(self, current_iter):
self.optimizer.zero_grad()
self.output_score = self.net_forward(self.net)
l_total = 0
loss_dict = OrderedDict()
# pixel loss
if self.cri_mos:
l_mos = self.cri_mos(self.output_score, self.gt_mos)
l_total += l_mos
loss_dict["l_mos"] = l_mos
if self.cri_metric:
l_metric = self.cri_metric(self.output_score, self.gt_mos)
l_total += l_metric
loss_dict["l_metric"] = l_metric
l_total.backward()
self.optimizer.step()
self.log_dict = self.reduce_loss_dict(loss_dict)
# log metrics in training batch
pred_score = self.output_score.squeeze(1).cpu().detach().numpy()
gt_mos = self.gt_mos.squeeze(1).cpu().detach().numpy()
for name, opt_ in self.opt["val"]["metrics"].items():
self.log_dict[f"train_metrics/{name}"] = calculate_metric(
[pred_score, gt_mos], opt_
)
def test(self):
self.net.eval()
with torch.no_grad():
self.output_score = self.net_forward(self.net)
self.net.train()
def dist_validation(self, dataloader, current_iter, tb_logger, save_img):
if self.opt["rank"] == 0:
self.nondist_validation(dataloader, current_iter, tb_logger, save_img)
def nondist_validation(self, dataloader, current_iter, tb_logger, save_img):
dataset_name = dataloader.dataset.opt["name"]
with_metrics = self.opt["val"].get("metrics") is not None
use_pbar = self.opt["val"].get("pbar", False)
if with_metrics:
if not hasattr(self, "metric_results"): # only execute in the first run
self.metric_results = {
metric: 0 for metric in self.opt["val"]["metrics"].keys()
}
# initialize the best metric results for each dataset_name (supporting multiple validation datasets)
self._initialize_best_metric_results(dataset_name)
# zero self.metric_results
if with_metrics:
self.metric_results = {metric: 0 for metric in self.metric_results}
if use_pbar:
pbar = tqdm(total=len(dataloader), unit="image")
pred_score = []
gt_mos = []
for idx, val_data in enumerate(dataloader):
img_name = osp.basename(val_data["img_path"][0])
self.feed_data(val_data)
self.test()
pred_score.append(self.output_score)
gt_mos.append(self.gt_mos)
if use_pbar:
pbar.update(1)
pbar.set_description(f"Test {img_name:>20}")
if use_pbar:
pbar.close()
pred_score = torch.cat(pred_score, dim=0).squeeze(1).cpu().numpy()
gt_mos = torch.cat(gt_mos, dim=0).squeeze(1).cpu().numpy()
if with_metrics:
# calculate all metrics
for name, opt_ in self.opt["val"]["metrics"].items():
self.metric_results[name] = calculate_metric([pred_score, gt_mos], opt_)
if self.key_metric is not None:
# If the best metric is updated, update and save best model
to_update = self._update_best_metric_result(
dataset_name,
self.key_metric,
self.metric_results[self.key_metric],
current_iter,
)
if to_update:
for name, opt_ in self.opt["val"]["metrics"].items():
self._update_metric_result(
dataset_name, name, self.metric_results[name], current_iter
)
self.copy_model(self.net, self.net_best)
self.save_network(self.net_best, "net_best")
else:
# update each metric separately
updated = []
for name, opt_ in self.opt["val"]["metrics"].items():
tmp_updated = self._update_best_metric_result(
dataset_name, name, self.metric_results[name], current_iter
)
updated.append(tmp_updated)
# save best model if any metric is updated
if sum(updated):
self.copy_model(self.net, self.net_best)
self.save_network(self.net_best, "net_best")
self._log_validation_metric_values(current_iter, dataset_name, tb_logger)
def _log_validation_metric_values(self, current_iter, dataset_name, tb_logger):
log_str = f"Validation {dataset_name}\n"
for metric, value in self.metric_results.items():
log_str += f"\t # {metric}: {value:.4f}"
if hasattr(self, "best_metric_results"):
log_str += (
f'\tBest: {self.best_metric_results[dataset_name][metric]["val"]:.4f} @ '
f'{self.best_metric_results[dataset_name][metric]["iter"]} iter'
)
log_str += "\n"
logger = get_root_logger()
logger.info(log_str)
if tb_logger:
for metric, value in self.metric_results.items():
tb_logger.add_scalar(
f"val_metrics/{dataset_name}/{metric}", value, current_iter
)
def save(self, epoch, current_iter, save_net_label="net"):
self.save_network(self.net, save_net_label, current_iter)
self.save_training_state(epoch, current_iter)
| 8,455 | 36.087719 | 112 | py |
BVQI | BVQI-master/pyiqa/models/bapps_model.py | import os.path as osp
from collections import OrderedDict
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from tqdm import tqdm
from pyiqa.metrics import calculate_metric
from pyiqa.utils.registry import MODEL_REGISTRY
from .general_iqa_model import GeneralIQAModel
@MODEL_REGISTRY.register()
class BAPPSModel(GeneralIQAModel):
"""General module to train an IQA network."""
def feed_data(self, data):
if "use_ref" in self.opt["train"]:
self.use_ref = self.opt["train"]["use_ref"]
self.img_A_input = data["distA_img"].to(self.device)
self.img_B_input = data["distB_img"].to(self.device)
self.img_ref_input = data["ref_img"].to(self.device)
self.gt_mos = data["mos_label"].to(self.device)
self.img_path = data["img_path"]
# from torchvision.utils import save_image
# print(self.img_ref_input.shape)
# save_image(torch.cat([self.img_ref_input, self.img_A_input, self.img_B_input], dim=0), 'tmp_test_bappsdataset.jpg')
# exit()
def compute_accuracy(self, d0, d1, judge):
d1_lt_d0 = (d1 < d0).cpu().data.numpy().flatten()
judge_per = judge.cpu().numpy().flatten()
acc = d1_lt_d0 * judge_per + (1 - d1_lt_d0) * (1 - judge_per)
return acc.mean()
def optimize_parameters(self, current_iter):
self.optimizer.zero_grad()
score_A = self.net(self.img_A_input, self.img_ref_input)
score_B = self.net(self.img_B_input, self.img_ref_input)
        # For BAPPS, map the pair of distances to a preference probability via a sigmoid
train_output_score = 1 / (1 + torch.exp(score_B - score_A))
l_total = 0
loss_dict = OrderedDict()
# pixel loss
if self.cri_mos:
l_mos = self.cri_mos(train_output_score, self.gt_mos)
l_total += l_mos
loss_dict["l_mos"] = l_mos
l_total.backward()
self.optimizer.step()
self.log_dict = self.reduce_loss_dict(loss_dict)
# log metrics in training batch
self.log_dict[f"train_metrics/acc"] = self.compute_accuracy(
score_A, score_B, self.gt_mos
)
@torch.no_grad()
def test(self):
self.net.eval()
with torch.no_grad():
self.score_A = self.net(self.img_A_input, self.img_ref_input)
self.score_B = self.net(self.img_B_input, self.img_ref_input)
self.net.train()
@torch.no_grad()
def nondist_validation(self, dataloader, current_iter, tb_logger, save_img):
dataset_name = dataloader.dataset.opt["name"]
with_metrics = self.opt["val"].get("metrics") is not None
use_pbar = self.opt["val"].get("pbar", False)
if with_metrics:
if not hasattr(self, "metric_results"): # only execute in the first run
self.metric_results = {
metric: 0 for metric in self.opt["val"]["metrics"].keys()
}
# initialize the best metric results for each dataset_name (supporting multiple validation datasets)
self._initialize_best_metric_results(dataset_name)
# zero self.metric_results
if with_metrics:
self.metric_results = {metric: 0 for metric in self.metric_results}
if use_pbar:
pbar = tqdm(total=len(dataloader), unit="image")
pred_score_A = []
pred_score_B = []
gt_mos = []
for idx, val_data in enumerate(dataloader):
img_name = osp.basename(val_data["img_path"][0])
self.feed_data(val_data)
self.test()
if len(self.score_A.shape) <= 1:
self.score_A = self.score_A.reshape(-1, 1)
self.score_B = self.score_B.reshape(-1, 1)
pred_score_A.append(self.score_A)
pred_score_B.append(self.score_B)
gt_mos.append(self.gt_mos)
if use_pbar:
pbar.update(1)
pbar.set_description(f"Test {img_name:>20}")
if use_pbar:
pbar.close()
pred_score_A = torch.cat(pred_score_A, dim=0).squeeze(1).cpu().numpy()
pred_score_B = torch.cat(pred_score_B, dim=0).squeeze(1).cpu().numpy()
gt_mos = torch.cat(gt_mos, dim=0).squeeze(1).cpu().numpy()
if with_metrics:
# calculate all metrics
for name, opt_ in self.opt["val"]["metrics"].items():
self.metric_results[name] = calculate_metric(
[pred_score_A, pred_score_B, gt_mos], opt_
)
if self.key_metric is not None:
# If the best metric is updated, update and save best model
to_update = self._update_best_metric_result(
dataset_name,
self.key_metric,
self.metric_results[self.key_metric],
current_iter,
)
if to_update:
for name, opt_ in self.opt["val"]["metrics"].items():
self._update_metric_result(
dataset_name, name, self.metric_results[name], current_iter
)
self.copy_model(self.net, self.net_best)
self.save_network(self.net_best, "net_best")
else:
# update each metric separately
updated = []
for name, opt_ in self.opt["val"]["metrics"].items():
tmp_updated = self._update_best_metric_result(
dataset_name, name, self.metric_results[name], current_iter
)
updated.append(tmp_updated)
# save best model if any metric is updated
if sum(updated):
self.copy_model(self.net, self.net_best)
self.save_network(self.net_best, "net_best")
self._log_validation_metric_values(current_iter, dataset_name, tb_logger)
| 5,987 | 36.898734 | 125 | py |
BVQI | BVQI-master/pyiqa/models/inference_model.py | from collections import OrderedDict
import torch
import torchvision as tv
from pyiqa.default_model_configs import DEFAULT_CONFIGS
from pyiqa.utils.img_util import imread2tensor
from pyiqa.utils.registry import ARCH_REGISTRY
class InferenceModel(torch.nn.Module):
"""Common interface for quality inference of images with default setting of each metric."""
def __init__(
self, metric_name, as_loss=False, device=None, **kwargs # Other metric options
):
super(InferenceModel, self).__init__()
self.metric_name = metric_name
# ============ set metric properties ===========
self.lower_better = DEFAULT_CONFIGS[metric_name].get("lower_better", False)
self.metric_mode = DEFAULT_CONFIGS[metric_name].get("metric_mode", None)
if self.metric_mode is None:
self.metric_mode = kwargs.pop("metric_mode")
elif "metric_mode" in kwargs:
kwargs.pop("metric_mode")
if device is None:
self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
else:
self.device = device
self.as_loss = as_loss
# =========== define metric model ===============
net_opts = OrderedDict()
# load default setting first
if metric_name in DEFAULT_CONFIGS.keys():
default_opt = DEFAULT_CONFIGS[metric_name]["metric_opts"]
net_opts.update(default_opt)
# then update with custom setting
net_opts.update(kwargs)
network_type = net_opts.pop("type")
self.net = ARCH_REGISTRY.get(network_type)(**net_opts)
self.net = self.net.to(self.device)
self.net.eval()
def forward(self, target, ref=None, **kwargs):
torch.set_grad_enabled(self.as_loss)
if "fid" in self.metric_name:
output = self.net(target, ref, device=self.device, **kwargs)
else:
if not torch.is_tensor(target):
print("\nfound it\n")
target = imread2tensor(target)
target = target.unsqueeze(0)
if self.metric_mode == "FR":
assert (
ref is not None
), "Please specify reference image for Full Reference metric"
ref = imread2tensor(ref)
ref = ref.unsqueeze(0)
if self.metric_mode == "FR":
output = self.net(target.to(self.device), ref.to(self.device))
elif self.metric_mode == "NR":
output = self.net(target.to(self.device))
return output
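# Illustrative usage sketch (not part of the original module). 'niqe' is assumed here to
# be a registered no-reference metric in DEFAULT_CONFIGS; any other registered name works.
def _demo_inference_model():
    metric = InferenceModel("niqe", as_loss=False, device=torch.device("cpu"))
    fake_img = torch.rand(1, 3, 128, 128)     # a random stand-in for a real image tensor
    score = metric(fake_img)
    return score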
| 2,613 | 36.884058 | 95 | py |
BVQI | BVQI-master/pyiqa/models/nima_model.py | from collections import OrderedDict
import torch
from pyiqa.metrics import calculate_metric
from pyiqa.utils.registry import MODEL_REGISTRY
from .general_iqa_model import GeneralIQAModel
@MODEL_REGISTRY.register()
class NIMAModel(GeneralIQAModel):
"""General module to train an IQA network."""
def feed_data(self, data):
self.img_input = data["img"].to(self.device)
self.gt_mos = data["mos_label"].to(self.device)
self.gt_mos_dist = data["mos_dist"].to(self.device)
self.use_ref = False
def setup_optimizers(self):
train_opt = self.opt["train"]
optim_opt = train_opt["optim"]
optim_params = [
{
"params": self.get_bare_model(self.net).base_model.parameters(),
"lr": optim_opt.pop("lr_basemodel"),
},
{
"params": self.get_bare_model(self.net).classifier.parameters(),
"lr": optim_opt.pop("lr_classifier"),
},
]
optim_type = optim_opt.pop("type")
self.optimizer = self.get_optimizer(optim_type, optim_params, **optim_opt)
self.optimizers.append(self.optimizer)
def test(self):
self.net.eval()
with torch.no_grad():
self.output_score = self.net(
self.img_input, return_mos=True, return_dist=False
)
self.net.train()
def optimize_parameters(self, current_iter):
self.optimizer.zero_grad()
self.output_mos, self.output_dist = self.net(
self.img_input, return_mos=True, return_dist=True
)
l_total = 0
loss_dict = OrderedDict()
if self.cri_mos:
l_mos = self.cri_mos(self.output_dist, self.gt_mos_dist)
l_total += l_mos
loss_dict["l_mos"] = l_mos
l_total.backward()
self.optimizer.step()
self.log_dict = self.reduce_loss_dict(loss_dict)
# log metrics in training batch
pred_score = self.output_mos.squeeze(1).cpu().detach().numpy()
gt_mos = self.gt_mos.squeeze(1).cpu().detach().numpy()
for name, opt_ in self.opt["val"]["metrics"].items():
self.log_dict[f"train_metrics/{name}"] = calculate_metric(
[pred_score, gt_mos], opt_
)
| 2,311 | 31.111111 | 82 | py |
BVQI | BVQI-master/pyiqa/utils/download_util.py | import math
import os
from urllib.parse import urlparse
import requests
from torch.hub import download_url_to_file, get_dir
from tqdm import tqdm
from .misc import sizeof_fmt
def download_file_from_google_drive(file_id, save_path):
"""Download files from google drive.
Ref:
https://stackoverflow.com/questions/25010369/wget-curl-large-file-from-google-drive # noqa E501
Args:
file_id (str): File id.
save_path (str): Save path.
"""
session = requests.Session()
URL = "https://docs.google.com/uc?export=download"
params = {"id": file_id}
response = session.get(URL, params=params, stream=True)
token = get_confirm_token(response)
if token:
params["confirm"] = token
response = session.get(URL, params=params, stream=True)
# get file size
response_file_size = session.get(
URL, params=params, stream=True, headers={"Range": "bytes=0-2"}
)
if "Content-Range" in response_file_size.headers:
file_size = int(response_file_size.headers["Content-Range"].split("/")[1])
else:
file_size = None
save_response_content(response, save_path, file_size)
def get_confirm_token(response):
for key, value in response.cookies.items():
if key.startswith("download_warning"):
return value
return None
def save_response_content(response, destination, file_size=None, chunk_size=32768):
if file_size is not None:
pbar = tqdm(total=math.ceil(file_size / chunk_size), unit="chunk")
readable_file_size = sizeof_fmt(file_size)
else:
pbar = None
with open(destination, "wb") as f:
downloaded_size = 0
for chunk in response.iter_content(chunk_size):
downloaded_size += chunk_size
if pbar is not None:
pbar.update(1)
pbar.set_description(
f"Download {sizeof_fmt(downloaded_size)} / {readable_file_size}"
)
if chunk: # filter out keep-alive new chunks
f.write(chunk)
if pbar is not None:
pbar.close()
def load_file_from_url(url, model_dir=None, progress=True, file_name=None):
"""Load file form http url, will download models if necessary.
Ref:https://github.com/1adrianb/face-alignment/blob/master/face_alignment/utils.py
Args:
url (str): URL to be downloaded.
model_dir (str): The path to save the downloaded model. Should be a full path. If None, use pytorch hub_dir.
Default: None.
progress (bool): Whether to show the download progress. Default: True.
file_name (str): The downloaded file name. If None, use the file name in the url. Default: None.
Returns:
str: The path to the downloaded file.
"""
if model_dir is None: # use the pytorch hub_dir
hub_dir = get_dir()
model_dir = os.path.join(hub_dir, "checkpoints")
os.makedirs(model_dir, exist_ok=True)
parts = urlparse(url)
filename = os.path.basename(parts.path)
if file_name is not None:
filename = file_name
cached_file = os.path.abspath(os.path.join(model_dir, filename))
if not os.path.exists(cached_file):
print(f'Downloading: "{url}" to {cached_file}\n')
download_url_to_file(url, cached_file, hash_prefix=None, progress=progress)
return cached_file
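# Example usage (illustrative sketch): fetch a checkpoint with `load_file_from_url`
# and load its state dict. The URL and target directory below are placeholders,
# not real pyiqa/BVQI assets; replace them before running.
if __name__ == "__main__":
    import torch

    example_url = "https://example.com/path/to/model_weights.pth" # placeholder URL
    ckpt_path = load_file_from_url(example_url, model_dir="./weights", progress=True)
    state_dict = torch.load(ckpt_path, map_location="cpu")
    print(f"loaded {len(state_dict)} entries from {ckpt_path}")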
| 3,398 | 31.371429 | 116 | py |
BVQI | BVQI-master/pyiqa/utils/misc.py | import os
import random
import shutil
import time
from os import path as osp
import numpy as np
import torch
from .dist_util import master_only
def set_random_seed(seed):
"""Set random seeds."""
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
def get_time_str():
return time.strftime("%Y%m%d_%H%M%S", time.localtime())
def mkdir_and_rename(path):
"""mkdirs. If path exists, rename it with timestamp, create a new one, and move it to archive folder.
Args:
path (str): Folder path.
"""
if osp.exists(path):
new_name = path + "_archived_" + get_time_str()
new_name = new_name.replace("tb_logger", "tb_logger_archived")
print(f"Path already exists. Rename it to {new_name}", flush=True)
os.rename(path, new_name)
os.makedirs(path, exist_ok=True)
@master_only
def make_exp_dirs(opt):
"""Make dirs for experiments."""
path_opt = opt["path"].copy()
if opt["is_train"]:
mkdir_and_rename(path_opt.pop("experiments_root"))
else:
mkdir_and_rename(path_opt.pop("results_root"))
for key, path in path_opt.items():
if (
("strict_load" in key)
or ("pretrain_network" in key)
or ("resume" in key)
or ("param_key" in key)
):
continue
else:
os.makedirs(path, exist_ok=True)
def scandir(dir_path, suffix=None, recursive=False, full_path=False):
"""Scan a directory to find the interested files.
Args:
dir_path (str): Path of the directory.
suffix (str | tuple(str), optional): File suffix that we are
interested in. Default: None.
recursive (bool, optional): If set to True, recursively scan the
directory. Default: False.
full_path (bool, optional): If set to True, include the dir_path.
Default: False.
Returns:
A generator for all the interested files with relative paths.
"""
if (suffix is not None) and not isinstance(suffix, (str, tuple)):
raise TypeError('"suffix" must be a string or tuple of strings')
root = dir_path
def _scandir(dir_path, suffix, recursive):
for entry in os.scandir(dir_path):
if not entry.name.startswith(".") and entry.is_file():
if full_path:
return_path = entry.path
else:
return_path = osp.relpath(entry.path, root)
if suffix is None:
yield return_path
elif return_path.endswith(suffix):
yield return_path
else:
if recursive:
yield from _scandir(entry.path, suffix=suffix, recursive=recursive)
else:
continue
return _scandir(dir_path, suffix=suffix, recursive=recursive)
def check_resume(opt, resume_iter):
"""Check resume states and pretrain_network paths.
Args:
opt (dict): Options.
resume_iter (int): Resume iteration.
"""
if opt["path"]["resume_state"]:
# get all the networks
networks = [key for key in opt.keys() if key.startswith("network_")]
flag_pretrain = False
for network in networks:
if opt["path"].get(f"pretrain_{network}") is not None:
flag_pretrain = True
if flag_pretrain:
print("pretrain_network path will be ignored during resuming.")
# set pretrained model paths
for network in networks:
name = f"pretrain_{network}"
basename = network.replace("network_", "")
if opt["path"].get("ignore_resume_networks") is None or (
network not in opt["path"]["ignore_resume_networks"]
):
opt["path"][name] = osp.join(
opt["path"]["models"], f"net_{basename}_{resume_iter}.pth"
)
print(f"Set {name} to {opt['path'][name]}")
# change param_key to params in resume
param_keys = [key for key in opt["path"].keys() if key.startswith("param_key")]
for param_key in param_keys:
if opt["path"][param_key] == "params_ema":
opt["path"][param_key] = "params"
print(f"Set {param_key} to params")
def sizeof_fmt(size, suffix="B"):
"""Get human readable file size.
Args:
size (int): File size.
suffix (str): Suffix. Default: 'B'.
Return:
        str: Formatted file size.
"""
for unit in ["", "K", "M", "G", "T", "P", "E", "Z"]:
if abs(size) < 1024.0:
return f"{size:3.1f} {unit}{suffix}"
size /= 1024.0
return f"{size:3.1f} Y{suffix}"
| 4,827 | 30.555556 | 105 | py |
BVQI | BVQI-master/pyiqa/utils/logger.py | import datetime
import logging
import time
from .dist_util import get_dist_info, master_only
initialized_logger = {}
class AvgTimer:
def __init__(self, window=200):
self.window = window # average window
self.current_time = 0
self.total_time = 0
self.count = 0
self.avg_time = 0
self.start()
def start(self):
self.start_time = self.tic = time.time()
def record(self):
self.count += 1
self.toc = time.time()
self.current_time = self.toc - self.tic
self.total_time += self.current_time
# calculate average time
self.avg_time = self.total_time / self.count
# reset
if self.count > self.window:
self.count = 0
self.total_time = 0
self.tic = time.time()
def get_current_time(self):
return self.current_time
def get_avg_time(self):
return self.avg_time
class MessageLogger:
"""Message logger for printing.
Args:
opt (dict): Config. It contains the following keys:
name (str): Exp name.
logger (dict): Contains 'print_freq' (str) for logger interval.
train (dict): Contains 'total_iter' (int) for total iters.
use_tb_logger (bool): Use tensorboard logger.
start_iter (int): Start iter. Default: 1.
tb_logger (obj:`tb_logger`): Tensorboard logger. Default: None.
"""
def __init__(self, opt, start_iter=1, tb_logger=None):
self.exp_name = opt["name"]
self.interval = opt["logger"]["print_freq"]
self.start_iter = start_iter
self.max_iters = opt["train"]["total_iter"]
self.use_tb_logger = opt["logger"]["use_tb_logger"]
self.tb_logger = tb_logger
self.start_time = time.time()
self.logger = get_root_logger()
def reset_start_time(self):
self.start_time = time.time()
@master_only
def __call__(self, log_vars):
"""Format logging message.
Args:
log_vars (dict): It contains the following keys:
epoch (int): Epoch number.
iter (int): Current iter.
lrs (list): List for learning rates.
time (float): Iter time.
data_time (float): Data time for each iter.
"""
# epoch, iter, learning rates
epoch = log_vars.pop("epoch")
current_iter = log_vars.pop("iter")
lrs = log_vars.pop("lrs")
message = (
f"[{self.exp_name[:5]}..][epoch:{epoch:3d}, iter:{current_iter:8,d}, lr:("
)
for v in lrs:
message += f"{v:.3e},"
message += ")] "
# time and estimated time
if "time" in log_vars.keys():
iter_time = log_vars.pop("time")
data_time = log_vars.pop("data_time")
total_time = time.time() - self.start_time
time_sec_avg = total_time / (current_iter - self.start_iter + 1)
eta_sec = time_sec_avg * (self.max_iters - current_iter - 1)
eta_str = str(datetime.timedelta(seconds=int(eta_sec)))
message += f"[eta: {eta_str}, "
message += f"time (data): {iter_time:.3f} ({data_time:.3f})] "
# other items, especially losses
for k, v in log_vars.items():
message += f"{k}: {v:.4e} "
# tensorboard logger
if self.use_tb_logger and "debug" not in self.exp_name:
if k.startswith("l_"):
self.tb_logger.add_scalar(f"losses/{k}", v, current_iter)
else:
self.tb_logger.add_scalar(k, v, current_iter)
self.logger.info(message)
@master_only
def init_tb_logger(log_dir):
from torch.utils.tensorboard import SummaryWriter
tb_logger = SummaryWriter(log_dir=log_dir)
return tb_logger
@master_only
def init_wandb_logger(opt):
"""We now only use wandb to sync tensorboard log."""
import wandb
logger = get_root_logger()
project = opt["logger"]["wandb"]["project"]
resume_id = opt["logger"]["wandb"].get("resume_id")
if resume_id:
wandb_id = resume_id
resume = "allow"
logger.warning(f"Resume wandb logger with id={wandb_id}.")
else:
wandb_id = wandb.util.generate_id()
resume = "never"
wandb.init(
id=wandb_id,
resume=resume,
name=opt["name"],
config=opt,
project=project,
sync_tensorboard=True,
)
logger.info(f"Use wandb logger with id={wandb_id}; project={project}.")
def get_root_logger(logger_name="pyiqa", log_level=logging.INFO, log_file=None):
"""Get the root logger.
The logger will be initialized if it has not been initialized. By default a
StreamHandler will be added. If `log_file` is specified, a FileHandler will
also be added.
Args:
        logger_name (str): root logger name. Default: 'pyiqa'.
log_file (str | None): The log filename. If specified, a FileHandler
will be added to the root logger.
log_level (int): The root logger level. Note that only the process of
rank 0 is affected, while other processes will set the level to
"Error" and be silent most of the time.
Returns:
logging.Logger: The root logger.
"""
logger = logging.getLogger(logger_name)
# if the logger has been initialized, just return it
if logger_name in initialized_logger:
return logger
format_str = "%(asctime)s %(levelname)s: %(message)s"
stream_handler = logging.StreamHandler()
stream_handler.setFormatter(logging.Formatter(format_str))
logger.addHandler(stream_handler)
logger.propagate = False
rank, _ = get_dist_info()
if rank != 0:
logger.setLevel("ERROR")
elif log_file is not None:
logger.setLevel(log_level)
# add file handler
file_handler = logging.FileHandler(log_file, "w")
file_handler.setFormatter(logging.Formatter(format_str))
file_handler.setLevel(log_level)
logger.addHandler(file_handler)
initialized_logger[logger_name] = True
return logger
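# Example usage (illustrative sketch): obtain the shared root logger and write to
# both stdout and a log file. The file name below is an arbitrary example.
if __name__ == "__main__":
    example_logger = get_root_logger(log_level=logging.INFO, log_file="example_run.log")
    example_logger.info("root logger initialized")
    example_logger.info(get_env_info())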
def get_env_info():
"""Get environment information.
    Currently, it only logs the software versions.
"""
import torch
import torchvision
# from basicsr.version import __version__
# msg = r"""
# ____ _ _____ ____
# / __ ) ____ _ _____ (_)_____/ ___/ / __ \
# / __ |/ __ `// ___// // ___/\__ \ / /_/ /
# / /_/ // /_/ /(__ )/ // /__ ___/ // _, _/
# /_____/ \__,_//____//_/ \___//____//_/ |_|
# ______ __ __ __ __
# / ____/____ ____ ____/ / / / __ __ _____ / /__ / /
# / / __ / __ \ / __ \ / __ / / / / / / // ___// //_/ / /
# / /_/ // /_/ // /_/ // /_/ / / /___/ /_/ // /__ / /< /_/
# \____/ \____/ \____/ \____/ /_____/\____/ \___//_/|_| (_)
# """
msg = (
"\nVersion Information: "
# f'\n\tBasicSR: {__version__}'
f"\n\tPyTorch: {torch.__version__}"
f"\n\tTorchVision: {torchvision.__version__}"
)
return msg
| 7,216 | 30.933628 | 86 | py |
BVQI | BVQI-master/pyiqa/utils/img_util.py | import io
import math
import os
import cv2
import numpy as np
import torch
import torchvision.transforms.functional as TF
from PIL import Image
from torchvision.utils import make_grid
IMG_EXTENSIONS = [
".jpg",
".JPG",
".jpeg",
".JPEG",
".png",
".PNG",
".ppm",
".PPM",
".bmp",
".BMP",
".tif",
".TIF",
".tiff",
".TIFF",
]
def is_image_file(filename):
return any(filename.endswith(extension) for extension in IMG_EXTENSIONS)
def imread2tensor(img_source, rgb=False):
"""Read image to tensor.
Args:
img_source (str, bytes, or PIL.Image): image filepath string, image contents as a bytearray or a PIL Image instance
rgb: convert input to RGB if true
"""
print("This is also the one")
if type(img_source) == bytes:
img = Image.open(io.BytesIO(img_source))
elif type(img_source) == str:
assert is_image_file(img_source), f"{img_source} is not a valid image file."
img = Image.open(img_source)
elif type(img_source) == Image.Image:
img = img_source
else:
raise Exception("Unsupported source type")
if rgb:
img = img.convert("RGB")
img_tensor = TF.to_tensor(img)
# print(img_tensor.size())
# torch.save(img_tensor, "./myTensor.pt")
return img_tensor
def img2tensor(imgs, bgr2rgb=True, float32=True):
"""Numpy array to tensor.
Args:
imgs (list[ndarray] | ndarray): Input images.
bgr2rgb (bool): Whether to change bgr to rgb.
float32 (bool): Whether to change to float32.
Returns:
list[tensor] | tensor: Tensor images. If returned results only have
one element, just return tensor.
"""
def _totensor(img, bgr2rgb, float32):
if img.shape[2] == 3 and bgr2rgb:
if img.dtype == "float64":
img = img.astype("float32")
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
img = torch.from_numpy(img.transpose(2, 0, 1))
if float32:
img = img.float()
return img
if isinstance(imgs, list):
return [_totensor(img, bgr2rgb, float32) for img in imgs]
else:
return _totensor(imgs, bgr2rgb, float32)
def tensor2img(tensor, rgb2bgr=True, out_type=np.uint8, min_max=(0, 1)):
"""Convert torch Tensors into image numpy arrays.
After clamping to [min, max], values will be normalized to [0, 1].
Args:
tensor (Tensor or list[Tensor]): Accept shapes:
1) 4D mini-batch Tensor of shape (B x 3/1 x H x W);
2) 3D Tensor of shape (3/1 x H x W);
3) 2D Tensor of shape (H x W).
Tensor channel should be in RGB order.
rgb2bgr (bool): Whether to change rgb to bgr.
out_type (numpy type): output types. If ``np.uint8``, transform outputs
to uint8 type with range [0, 255]; otherwise, float type with
range [0, 1]. Default: ``np.uint8``.
min_max (tuple[int]): min and max values for clamp.
Returns:
(Tensor or list): 3D ndarray of shape (H x W x C) OR 2D ndarray of
shape (H x W). The channel order is BGR.
"""
if not (
torch.is_tensor(tensor)
or (isinstance(tensor, list) and all(torch.is_tensor(t) for t in tensor))
):
raise TypeError(f"tensor or list of tensors expected, got {type(tensor)}")
if torch.is_tensor(tensor):
tensor = [tensor]
result = []
for _tensor in tensor:
_tensor = _tensor.squeeze(0).float().detach().cpu().clamp_(*min_max)
_tensor = (_tensor - min_max[0]) / (min_max[1] - min_max[0])
n_dim = _tensor.dim()
if n_dim == 4:
img_np = make_grid(
_tensor, nrow=int(math.sqrt(_tensor.size(0))), normalize=False
).numpy()
img_np = img_np.transpose(1, 2, 0)
if rgb2bgr:
img_np = cv2.cvtColor(img_np, cv2.COLOR_RGB2BGR)
elif n_dim == 3:
img_np = _tensor.numpy()
img_np = img_np.transpose(1, 2, 0)
if img_np.shape[2] == 1: # gray image
img_np = np.squeeze(img_np, axis=2)
else:
if rgb2bgr:
img_np = cv2.cvtColor(img_np, cv2.COLOR_RGB2BGR)
elif n_dim == 2:
img_np = _tensor.numpy()
else:
raise TypeError(
f"Only support 4D, 3D or 2D tensor. But received with dimension: {n_dim}"
)
if out_type == np.uint8:
            # Unlike MATLAB, numpy.uint8() WILL NOT round by default.
img_np = (img_np * 255.0).round()
img_np = img_np.astype(out_type)
result.append(img_np)
if len(result) == 1:
result = result[0]
return result
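# Round-trip sketch (illustrative): a random RGB tensor is converted to a uint8
# BGR numpy image with `tensor2img` and back with `img2tensor`; the reconstruction
# should match the original up to uint8 quantization (~1/255).
if __name__ == "__main__":
    t = torch.rand(1, 3, 16, 16)
    img_bgr = tensor2img(t)  # (16, 16, 3) uint8, BGR
    t_back = img2tensor(img_bgr, bgr2rgb=True, float32=True) / 255.0
    print(img_bgr.shape, img_bgr.dtype)
    print("max round-trip error:", (t.squeeze(0) - t_back).abs().max().item())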
def tensor2img_fast(tensor, rgb2bgr=True, min_max=(0, 1)):
"""This implementation is slightly faster than tensor2img.
It now only supports torch tensor with shape (1, c, h, w).
Args:
tensor (Tensor): Now only support torch tensor with (1, c, h, w).
rgb2bgr (bool): Whether to change rgb to bgr. Default: True.
min_max (tuple[int]): min and max values for clamp.
"""
output = tensor.squeeze(0).detach().clamp_(*min_max).permute(1, 2, 0)
output = (output - min_max[0]) / (min_max[1] - min_max[0]) * 255
output = output.type(torch.uint8).cpu().numpy()
if rgb2bgr:
output = cv2.cvtColor(output, cv2.COLOR_RGB2BGR)
return output
def imfrombytes(content, flag="color", float32=False):
"""Read an image from bytes.
Args:
content (bytes): Image bytes got from files or other streams.
flag (str): Flags specifying the color type of a loaded image,
candidates are `color`, `grayscale` and `unchanged`.
float32 (bool): Whether to change to float32., If True, will also norm
to [0, 1]. Default: False.
Returns:
ndarray: Loaded image array.
"""
img_np = np.frombuffer(content, np.uint8)
imread_flags = {
"color": cv2.IMREAD_COLOR,
"grayscale": cv2.IMREAD_GRAYSCALE,
"unchanged": cv2.IMREAD_UNCHANGED,
}
img = cv2.imdecode(img_np, imread_flags[flag])
if float32:
img = img.astype(np.float32) / 255.0
return img
def imwrite(img, file_path, params=None, auto_mkdir=True):
"""Write image to file.
Args:
img (ndarray): Image array to be written.
file_path (str): Image file path.
params (None or list): Same as opencv's :func:`imwrite` interface.
auto_mkdir (bool): If the parent folder of `file_path` does not exist,
whether to create it automatically.
Returns:
bool: Successful or not.
"""
if auto_mkdir:
dir_name = os.path.abspath(os.path.dirname(file_path))
os.makedirs(dir_name, exist_ok=True)
ok = cv2.imwrite(file_path, img, params)
if not ok:
raise IOError("Failed in writing images.")
def crop_border(imgs, crop_border):
"""Crop borders of images.
Args:
imgs (list[ndarray] | ndarray): Images with shape (h, w, c).
        crop_border (int): Crop border for each end of height and width.
Returns:
list[ndarray]: Cropped images.
"""
if crop_border == 0:
return imgs
else:
if isinstance(imgs, list):
return [
v[crop_border:-crop_border, crop_border:-crop_border, ...] for v in imgs
]
else:
return imgs[crop_border:-crop_border, crop_border:-crop_border, ...]
| 7,559 | 31.033898 | 123 | py |
BVQI | BVQI-master/pyiqa/utils/options.py | import argparse
import random
from collections import OrderedDict
from os import path as osp
import torch
import yaml
from pyiqa.utils import set_random_seed
from pyiqa.utils.dist_util import get_dist_info, init_dist, master_only
def ordered_yaml():
"""Support OrderedDict for yaml.
Returns:
yaml Loader and Dumper.
"""
try:
from yaml import CDumper as Dumper
from yaml import CLoader as Loader
except ImportError:
from yaml import Dumper, Loader
_mapping_tag = yaml.resolver.BaseResolver.DEFAULT_MAPPING_TAG
def dict_representer(dumper, data):
return dumper.represent_dict(data.items())
def dict_constructor(loader, node):
return OrderedDict(loader.construct_pairs(node))
Dumper.add_representer(OrderedDict, dict_representer)
Loader.add_constructor(_mapping_tag, dict_constructor)
return Loader, Dumper
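# Small self-contained demo (illustrative): the Loader/Dumper pair returned by
# `ordered_yaml` preserves key order when parsing and dumping option files.
if __name__ == "__main__":
    OrderedLoader, OrderedDumper = ordered_yaml()
    text = "name: demo\ntrain:\n lr: 0.0001\n total_iter: 100\n"
    opt_example = yaml.load(text, Loader=OrderedLoader)
    print(type(opt_example).__name__) # OrderedDict
    print(yaml.dump(opt_example, Dumper=OrderedDumper))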
def dict2str(opt, indent_level=1):
"""dict to string for printing options.
Args:
opt (dict): Option dict.
indent_level (int): Indent level. Default: 1.
Return:
(str): Option string for printing.
"""
msg = "\n"
for k, v in opt.items():
if isinstance(v, dict):
msg += " " * (indent_level * 2) + k + ":["
msg += dict2str(v, indent_level + 1)
msg += " " * (indent_level * 2) + "]\n"
else:
msg += " " * (indent_level * 2) + k + ": " + str(v) + "\n"
return msg
def _postprocess_yml_value(value):
# None
if value == "~" or value.lower() == "none":
return None
# bool
if value.lower() == "true":
return True
elif value.lower() == "false":
return False
# !!float number
if value.startswith("!!float"):
return float(value.replace("!!float", ""))
# number
if value.isdigit():
return int(value)
elif value.replace(".", "", 1).isdigit() and value.count(".") < 2:
return float(value)
# list
if value.startswith("["):
return eval(value)
# str
return value
def make_paths(opt, root_path):
if opt["is_train"]:
experiments_root = osp.join(root_path, "experiments", opt["name"])
opt["path"]["experiments_root"] = experiments_root
opt["path"]["models"] = osp.join(experiments_root, "models")
opt["path"]["training_states"] = osp.join(experiments_root, "training_states")
opt["path"]["log"] = experiments_root
opt["path"]["visualization"] = osp.join(experiments_root, "visualization")
# change some options for debug mode
if "debug" in opt["name"]:
if "val" in opt:
opt["val"]["val_freq"] = 7
opt["logger"]["print_freq"] = 1
opt["logger"]["save_checkpoint_freq"] = 7
else: # test
results_root = osp.join(root_path, "results", opt["name"])
opt["path"]["results_root"] = results_root
opt["path"]["log"] = results_root
opt["path"]["visualization"] = osp.join(results_root, "visualization")
def parse_options(root_path, is_train=True):
parser = argparse.ArgumentParser()
parser.add_argument(
"-opt", type=str, required=True, help="Path to option YAML file."
)
parser.add_argument(
"--launcher",
choices=["none", "pytorch", "slurm"],
default="none",
help="job launcher",
)
parser.add_argument("--auto_resume", action="store_true")
parser.add_argument("--debug", action="store_true")
parser.add_argument("--local_rank", type=int, default=0)
parser.add_argument(
"--force_yml",
nargs="+",
default=None,
help="Force to update yml files. Examples: train:ema_decay=0.999",
)
args = parser.parse_args()
# parse yml to dict
with open(args.opt, mode="r") as f:
opt = yaml.load(f, Loader=ordered_yaml()[0])
# distributed settings
if args.launcher == "none":
opt["dist"] = False
print("Disable distributed.", flush=True)
else:
opt["dist"] = True
if args.launcher == "slurm" and "dist_params" in opt:
init_dist(args.launcher, **opt["dist_params"])
else:
init_dist(args.launcher)
opt["rank"], opt["world_size"] = get_dist_info()
# random seed
seed = opt.get("manual_seed")
if seed is None:
seed = random.randint(1, 10000)
opt["manual_seed"] = seed
set_random_seed(seed + opt["rank"])
# force to update yml options
if args.force_yml is not None:
for entry in args.force_yml:
# now do not support creating new keys
keys, value = entry.split("=")
keys, value = keys.strip(), value.strip()
value = _postprocess_yml_value(value)
eval_str = "opt"
for key in keys.split(":"):
eval_str += f'["{key}"]'
eval_str += "=value"
# using exec function
exec(eval_str)
opt["auto_resume"] = args.auto_resume
opt["is_train"] = is_train
# debug setting
if args.debug and not opt["name"].startswith("debug"):
opt["name"] = "debug_" + opt["name"]
if opt["num_gpu"] == "auto":
opt["num_gpu"] = torch.cuda.device_count()
# datasets
for phase, dataset in opt["datasets"].items():
# for multiple datasets, e.g., val_1, val_2; test_1, test_2
phase = phase.split("_")[0]
dataset["phase"] = phase
if "scale" in opt:
dataset["scale"] = opt["scale"]
if dataset.get("dataroot_gt") is not None:
dataset["dataroot_gt"] = osp.expanduser(dataset["dataroot_gt"])
if dataset.get("dataroot_lq") is not None:
dataset["dataroot_lq"] = osp.expanduser(dataset["dataroot_lq"])
# paths
for key, val in opt["path"].items():
if (val is not None) and ("resume_state" in key or "pretrain_network" in key):
opt["path"][key] = osp.expanduser(val)
make_paths(opt, root_path)
return opt, args
@master_only
def copy_opt_file(opt_file, experiments_root):
# copy the yml file to the experiment root
import sys
import time
from shutil import copyfile
cmd = " ".join(sys.argv)
filename = osp.join(experiments_root, osp.basename(opt_file))
copyfile(opt_file, filename)
with open(filename, "r+") as f:
lines = f.readlines()
lines.insert(0, f"# GENERATE TIME: {time.asctime()}\n# CMD:\n# {cmd}\n\n")
f.seek(0)
f.writelines(lines)
| 6,559 | 29.943396 | 86 | py |
BVQI | BVQI-master/pyiqa/utils/color_util.py | r"""Color space conversion functions
Created by: https://github.com/photosynthesis-team/piq/blob/master/piq/functional/colour_conversion.py
Modified by: Chaofeng Chen (https://github.com/chaofengc)
"""
from typing import Dict, Union
import torch
def safe_frac_pow(x: torch.Tensor, p) -> torch.Tensor:
EPS = torch.finfo(x.dtype).eps
return torch.sign(x) * torch.abs(x + EPS).pow(p)
def to_y_channel(
img: torch.Tensor, out_data_range: float = 1.0, color_space: str = "yiq"
) -> torch.Tensor:
r"""Change to Y channel
Args:
image tensor: tensor with shape (N, 3, H, W) in range [0, 1].
Returns:
image tensor: Y channel of the input tensor
"""
assert (
img.ndim == 4 and img.shape[1] == 3
), "input image tensor should be RGB image batches with shape (N, 3, H, W)"
color_space = color_space.lower()
if color_space == "yiq":
img = rgb2yiq(img)
elif color_space == "ycbcr":
img = rgb2ycbcr(img)
elif color_space == "lhm":
img = rgb2lhm(img)
out_img = img[:, [0], :, :] * out_data_range
if out_data_range >= 255:
# differentiable round with pytorch
out_img = out_img - out_img.detach() + out_img.round()
return out_img
def rgb2ycbcr(x: torch.Tensor) -> torch.Tensor:
r"""Convert a batch of RGB images to a batch of YCbCr images
It implements the ITU-R BT.601 conversion for standard-definition
television. See more details in
https://en.wikipedia.org/wiki/YCbCr#ITU-R_BT.601_conversion.
Args:
x: Batch of images with shape (N, 3, H, W). RGB color space, range [0, 1].
Returns:
Batch of images with shape (N, 3, H, W). YCbCr color space.
"""
weights_rgb_to_ycbcr = torch.tensor(
[
[65.481, -37.797, 112.0],
[128.553, -74.203, -93.786],
[24.966, 112.0, -18.214],
]
).to(x)
bias_rgb_to_ycbcr = torch.tensor([16, 128, 128]).view(1, 3, 1, 1).to(x)
x_ycbcr = (
torch.matmul(x.permute(0, 2, 3, 1), weights_rgb_to_ycbcr).permute(0, 3, 1, 2)
+ bias_rgb_to_ycbcr
)
x_ycbcr = x_ycbcr / 255.0
return x_ycbcr
def ycbcr2rgb(x: torch.Tensor) -> torch.Tensor:
r"""Convert a batch of YCbCr images to a batch of RGB images
It implements the inversion of the above rgb2ycbcr function.
Args:
x: Batch of images with shape (N, 3, H, W). YCbCr color space, range [0, 1].
Returns:
Batch of images with shape (N, 3, H, W). RGB color space.
"""
x = x * 255.0
weights_ycbcr_to_rgb = (
255.0
* torch.tensor(
[
[0.00456621, 0.00456621, 0.00456621],
[0, -0.00153632, 0.00791071],
[0.00625893, -0.00318811, 0],
]
).to(x)
)
bias_ycbcr_to_rgb = (
torch.tensor([-222.921, 135.576, -276.836]).view(1, 3, 1, 1).to(x)
)
x_rgb = (
torch.matmul(x.permute(0, 2, 3, 1), weights_ycbcr_to_rgb).permute(0, 3, 1, 2)
+ bias_ycbcr_to_rgb
)
x_rgb = x_rgb / 255.0
return x_rgb
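# Round-trip sanity sketch (illustrative): converting a random RGB batch to YCbCr
# and back should approximately recover the input, and the Y channel stays inside
# the nominal BT.601 range [16/255, 235/255].
if __name__ == "__main__":
    x_demo = torch.rand(2, 3, 8, 8)
    ycbcr = rgb2ycbcr(x_demo)
    print("Y range:", ycbcr[:, 0].min().item(), ycbcr[:, 0].max().item())
    x_rec = ycbcr2rgb(ycbcr)
    print("max round-trip error:", (x_demo - x_rec).abs().max().item())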
def rgb2lmn(x: torch.Tensor) -> torch.Tensor:
r"""Convert a batch of RGB images to a batch of LMN images
Args:
x: Batch of images with shape (N, 3, H, W). RGB colour space.
Returns:
Batch of images with shape (N, 3, H, W). LMN colour space.
"""
weights_rgb_to_lmn = (
torch.tensor([[0.06, 0.63, 0.27], [0.30, 0.04, -0.35], [0.34, -0.6, 0.17]])
.t()
.to(x)
)
x_lmn = torch.matmul(x.permute(0, 2, 3, 1), weights_rgb_to_lmn).permute(0, 3, 1, 2)
return x_lmn
def rgb2xyz(x: torch.Tensor) -> torch.Tensor:
r"""Convert a batch of RGB images to a batch of XYZ images
Args:
x: Batch of images with shape (N, 3, H, W). RGB colour space.
Returns:
Batch of images with shape (N, 3, H, W). XYZ colour space.
"""
mask_below = (x <= 0.04045).to(x)
mask_above = (x > 0.04045).to(x)
tmp = x / 12.92 * mask_below + torch.pow((x + 0.055) / 1.055, 2.4) * mask_above
weights_rgb_to_xyz = torch.tensor(
[
[0.4124564, 0.3575761, 0.1804375],
[0.2126729, 0.7151522, 0.0721750],
[0.0193339, 0.1191920, 0.9503041],
]
).to(x)
x_xyz = torch.matmul(tmp.permute(0, 2, 3, 1), weights_rgb_to_xyz.t()).permute(
0, 3, 1, 2
)
return x_xyz
def xyz2lab(
x: torch.Tensor, illuminant: str = "D50", observer: str = "2"
) -> torch.Tensor:
r"""Convert a batch of XYZ images to a batch of LAB images
Args:
x: Batch of images with shape (N, 3, H, W). XYZ colour space.
illuminant: {“A”, “D50”, “D55”, “D65”, “D75”, “E”}, optional. The name of the illuminant.
observer: {“2”, “10”}, optional. The aperture angle of the observer.
Returns:
Batch of images with shape (N, 3, H, W). LAB colour space.
"""
epsilon = 0.008856
kappa = 903.3
illuminants: Dict[str, Dict] = {
"A": {
"2": (1.098466069456375, 1, 0.3558228003436005),
"10": (1.111420406956693, 1, 0.3519978321919493),
},
"D50": {
"2": (0.9642119944211994, 1, 0.8251882845188288),
"10": (0.9672062750333777, 1, 0.8142801513128616),
},
"D55": {
"2": (0.956797052643698, 1, 0.9214805860173273),
"10": (0.9579665682254781, 1, 0.9092525159847462),
},
"D65": {
"2": (0.95047, 1.0, 1.08883), # This was: `lab_ref_white`
"10": (0.94809667673716, 1, 1.0730513595166162),
},
"D75": {
"2": (0.9497220898840717, 1, 1.226393520724154),
"10": (0.9441713925645873, 1, 1.2064272211720228),
},
"E": {"2": (1.0, 1.0, 1.0), "10": (1.0, 1.0, 1.0)},
}
illuminants_to_use = (
torch.tensor(illuminants[illuminant][observer]).to(x).view(1, 3, 1, 1)
)
tmp = x / illuminants_to_use
mask_below = tmp <= epsilon
mask_above = tmp > epsilon
tmp = (
safe_frac_pow(tmp, 1.0 / 3.0) * mask_above
+ (kappa * tmp + 16.0) / 116.0 * mask_below
)
weights_xyz_to_lab = torch.tensor(
[[0, 116.0, 0], [500.0, -500.0, 0], [0, 200.0, -200.0]]
).to(x)
bias_xyz_to_lab = torch.tensor([-16.0, 0.0, 0.0]).to(x).view(1, 3, 1, 1)
x_lab = (
torch.matmul(tmp.permute(0, 2, 3, 1), weights_xyz_to_lab.t()).permute(
0, 3, 1, 2
)
+ bias_xyz_to_lab
)
return x_lab
def rgb2lab(x: torch.Tensor, data_range: Union[int, float] = 255) -> torch.Tensor:
r"""Convert a batch of RGB images to a batch of LAB images
Args:
x: Batch of images with shape (N, 3, H, W). RGB colour space.
data_range: dynamic range of the input image.
Returns:
Batch of images with shape (N, 3, H, W). LAB colour space.
"""
return xyz2lab(rgb2xyz(x / float(data_range)))
def rgb2yiq(x: torch.Tensor) -> torch.Tensor:
r"""Convert a batch of RGB images to a batch of YIQ images
Args:
x: Batch of images with shape (N, 3, H, W). RGB colour space.
Returns:
Batch of images with shape (N, 3, H, W). YIQ colour space.
"""
yiq_weights = (
torch.tensor(
[
[0.299, 0.587, 0.114],
[0.5959, -0.2746, -0.3213],
[0.2115, -0.5227, 0.3112],
]
)
.t()
.to(x)
)
x_yiq = torch.matmul(x.permute(0, 2, 3, 1), yiq_weights).permute(0, 3, 1, 2)
return x_yiq
def rgb2lhm(x: torch.Tensor) -> torch.Tensor:
r"""Convert a batch of RGB images to a batch of LHM images
Args:
x: Batch of images with shape (N, 3, H, W). RGB colour space.
Returns:
Batch of images with shape (N, 3, H, W). LHM colour space.
Reference:
https://arxiv.org/pdf/1608.07433.pdf
"""
lhm_weights = (
torch.tensor([[0.2989, 0.587, 0.114], [0.3, 0.04, -0.35], [0.34, -0.6, 0.17]])
.t()
.to(x)
)
x_lhm = torch.matmul(x.permute(0, 2, 3, 1), lhm_weights).permute(0, 3, 1, 2)
return x_lhm
| 8,153 | 28.759124 | 102 | py |
BVQI | BVQI-master/pyiqa/utils/dist_util.py | # Modified from https://github.com/open-mmlab/mmcv/blob/master/mmcv/runner/dist_utils.py # noqa: E501
import functools
import os
import subprocess
import torch
import torch.distributed as dist
import torch.multiprocessing as mp
def init_dist(launcher, backend="nccl", **kwargs):
if mp.get_start_method(allow_none=True) is None:
mp.set_start_method("spawn")
if launcher == "pytorch":
_init_dist_pytorch(backend, **kwargs)
elif launcher == "slurm":
_init_dist_slurm(backend, **kwargs)
else:
raise ValueError(f"Invalid launcher type: {launcher}")
def _init_dist_pytorch(backend, **kwargs):
rank = int(os.environ["RANK"])
num_gpus = torch.cuda.device_count()
torch.cuda.set_device(rank % num_gpus)
dist.init_process_group(backend=backend, **kwargs)
def _init_dist_slurm(backend, port=None):
"""Initialize slurm distributed training environment.
If argument ``port`` is not specified, then the master port will be system
environment variable ``MASTER_PORT``. If ``MASTER_PORT`` is not in system
environment variable, then a default port ``29500`` will be used.
Args:
backend (str): Backend of torch.distributed.
port (int, optional): Master port. Defaults to None.
"""
proc_id = int(os.environ["SLURM_PROCID"])
ntasks = int(os.environ["SLURM_NTASKS"])
node_list = os.environ["SLURM_NODELIST"]
num_gpus = torch.cuda.device_count()
torch.cuda.set_device(proc_id % num_gpus)
addr = subprocess.getoutput(f"scontrol show hostname {node_list} | head -n1")
# specify master port
if port is not None:
os.environ["MASTER_PORT"] = str(port)
elif "MASTER_PORT" in os.environ:
pass # use MASTER_PORT in the environment variable
else:
# 29500 is torch.distributed default port
os.environ["MASTER_PORT"] = "29500"
os.environ["MASTER_ADDR"] = addr
os.environ["WORLD_SIZE"] = str(ntasks)
os.environ["LOCAL_RANK"] = str(proc_id % num_gpus)
os.environ["RANK"] = str(proc_id)
dist.init_process_group(backend=backend)
def get_dist_info():
if dist.is_available():
initialized = dist.is_initialized()
else:
initialized = False
if initialized:
rank = dist.get_rank()
world_size = dist.get_world_size()
else:
rank = 0
world_size = 1
return rank, world_size
def master_only(func):
@functools.wraps(func)
def wrapper(*args, **kwargs):
rank, _ = get_dist_info()
if rank == 0:
return func(*args, **kwargs)
return wrapper
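# Minimal demo (illustrative): without calling `init_dist`, `get_dist_info` reports
# rank 0 / world size 1, so `master_only`-decorated functions behave as in
# single-process training.
if __name__ == "__main__":
    @master_only
    def _print_from_master(msg):
        print(f"[rank 0] {msg}")
    print("dist info:", get_dist_info())  # (0, 1) when not initialized
    _print_from_master("hello from the master process")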
| 2,608 | 30.433735 | 102 | py |
BVQI | BVQI-master/pyiqa/data/livechallenge_dataset.py | import os
import pickle
import numpy as np
import torch
import torchvision.transforms as tf
from PIL import Image
from torch.utils import data as data
from torchvision.transforms.functional import normalize
from pyiqa.data.data_util import read_meta_info_file
from pyiqa.data.transforms import augment, transform_mapping
from pyiqa.utils import FileClient, imfrombytes, img2tensor
from pyiqa.utils.registry import DATASET_REGISTRY
@DATASET_REGISTRY.register()
class LIVEChallengeDataset(data.Dataset):
"""The LIVE Challenge Dataset introduced by
D. Ghadiyaram and A.C. Bovik,
"Massive Online Crowdsourced Study of Subjective and Objective Picture Quality,"
IEEE Transactions on Image Processing, 2016
url: https://live.ece.utexas.edu/research/ChallengeDB/index.html
Args:
opt (dict): Config for train datasets with the following keys:
phase (str): 'train' or 'val'.
"""
def __init__(self, opt):
super(LIVEChallengeDataset, self).__init__()
self.opt = opt
target_img_folder = os.path.join(opt["dataroot_target"], "Images")
self.paths_mos = read_meta_info_file(target_img_folder, opt["meta_info_file"])
        # remove the first 7 images, following previous works
self.paths_mos = self.paths_mos[7:]
# read train/val/test splits
split_file_path = opt.get("split_file", None)
if split_file_path:
split_index = opt.get("split_index", 1)
with open(opt["split_file"], "rb") as f:
split_dict = pickle.load(f)
splits = split_dict[split_index][opt["phase"]]
self.paths_mos = [self.paths_mos[i] for i in splits]
transform_list = []
augment_dict = opt.get("augment", None)
if augment_dict is not None:
for k, v in augment_dict.items():
transform_list += transform_mapping(k, v)
img_range = opt.get("img_range", 1.0)
transform_list += [
tf.ToTensor(),
tf.Lambda(lambda x: x * img_range),
]
self.trans = tf.Compose(transform_list)
def __getitem__(self, index):
img_path = self.paths_mos[index][0]
mos_label = self.paths_mos[index][1]
img_pil = Image.open(img_path)
img_tensor = self.trans(img_pil)
mos_label_tensor = torch.Tensor([mos_label])
return {"img": img_tensor, "mos_label": mos_label_tensor, "img_path": img_path}
def __len__(self):
return len(self.paths_mos)
| 2,524 | 33.589041 | 87 | py |
BVQI | BVQI-master/pyiqa/data/general_nr_dataset.py | import pickle
import cv2
import numpy as np
import torch
import torchvision.transforms as tf
from PIL import Image
from torch.utils import data as data
from torchvision.transforms.functional import normalize
from pyiqa.data.data_util import read_meta_info_file
from pyiqa.data.transforms import PairedToTensor, augment, transform_mapping
from pyiqa.utils import FileClient, imfrombytes, img2tensor
from pyiqa.utils.registry import DATASET_REGISTRY
@DATASET_REGISTRY.register()
class GeneralNRDataset(data.Dataset):
"""General No Reference dataset with meta info file.
Args:
opt (dict): Config for train datasets with the following keys:
phase (str): 'train' or 'val'.
"""
def __init__(self, opt):
super(GeneralNRDataset, self).__init__()
self.opt = opt
if opt.get("override_phase", None) is None:
self.phase = opt["phase"]
else:
self.phase = opt["override_phase"]
target_img_folder = opt["dataroot_target"]
self.paths_mos = read_meta_info_file(target_img_folder, opt["meta_info_file"])
# read train/val/test splits
split_file_path = opt.get("split_file", None)
if split_file_path:
split_index = opt.get("split_index", 1)
with open(opt["split_file"], "rb") as f:
split_dict = pickle.load(f)
splits = split_dict[split_index][self.phase]
self.paths_mos = [self.paths_mos[i] for i in splits]
dmos_max = opt.get("dmos_max", 0.0)
if dmos_max:
self.use_dmos = True
self.dmos_max = opt.get("dmos_max")
else:
self.use_dmos = False
self.mos_max = opt.get("mos_max", 1.0)
transform_list = []
augment_dict = opt.get("augment", None)
if augment_dict is not None:
for k, v in augment_dict.items():
transform_list += transform_mapping(k, v)
self.img_range = opt.get("img_range", 1.0)
transform_list += [
PairedToTensor(),
]
self.trans = tf.Compose(transform_list)
def __getitem__(self, index):
img_path = self.paths_mos[index][0]
mos_label = self.paths_mos[index][1]
img_pil = Image.open(img_path).convert("RGB")
img_tensor = self.trans(img_pil) * self.img_range
if self.use_dmos:
mos_label = self.dmos_max - mos_label
else:
mos_label /= self.mos_max
mos_label_tensor = torch.Tensor([mos_label])
return {"img": img_tensor, "mos_label": mos_label_tensor, "img_path": img_path}
def __len__(self):
return len(self.paths_mos)
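# Configuration sketch (illustrative): the minimal keys expected in `opt` by
# GeneralNRDataset. The folder and CSV paths below are placeholders; this demo
# only makes sense when the corresponding data actually exists on disk.
if __name__ == "__main__":
    example_opt = {
        "phase": "train",
        "dataroot_target": "./datasets/some_iqa_dataset/images",       # placeholder path
        "meta_info_file": "./datasets/some_iqa_dataset/meta_info.csv", # placeholder path
        "mos_max": 100.0,
    }
    dataset = GeneralNRDataset(example_opt)
    sample = dataset[0]
    print(sample["img"].shape, sample["mos_label"], sample["img_path"])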
| 2,691 | 31.433735 | 87 | py |
BVQI | BVQI-master/pyiqa/data/multiscale_trans_util.py | r"""Preprocessing utils for Multiscale Transformer
Reference: https://github.com/google-research/google-research/blob/5c622d523c/musiq/model/preprocessing.py
Modified: Chaofeng Chen (https://github.com/chaofengc)
"""
import math
from os import path as osp
from unittest.mock import patch
import numpy as np
import torch
from torch.nn import functional as F
def extract_image_patches(x, kernel, stride=1, dilation=1):
"""
Ref: https://stackoverflow.com/a/65886666
"""
# Do TF 'SAME' Padding
b, c, h, w = x.shape
h2 = math.ceil(h / stride)
w2 = math.ceil(w / stride)
pad_row = (h2 - 1) * stride + (kernel - 1) * dilation + 1 - h
pad_col = (w2 - 1) * stride + (kernel - 1) * dilation + 1 - w
x = F.pad(
x, (pad_col // 2, pad_col - pad_col // 2, pad_row // 2, pad_row - pad_row // 2)
)
# Extract patches
patches = F.unfold(x, kernel, dilation, stride=stride)
return patches
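# Shape-check sketch (illustrative): with kernel = stride = 16, a 64x64 image is
# cut into a 4x4 grid, i.e. 16 patches of 3*16*16 = 768 values each.
if __name__ == "__main__":
    x_demo = torch.rand(1, 3, 64, 64)
    patches_demo = extract_image_patches(x_demo, kernel=16, stride=16)
    print(patches_demo.shape)  # torch.Size([1, 768, 16])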
def _ceil_divide_int(x, y):
"""Returns ceil(x / y) as int"""
return int(math.ceil(x / y))
def resize_preserve_aspect_ratio(image, h, w, longer_side_length):
"""Aspect-ratio-preserving resizing with tf.image.ResizeMethod.GAUSSIAN.
Args:
image: The image tensor (n_crops, c, h, w).
h: Height of the input image.
w: Width of the input image.
longer_side_length: The length of the longer side after resizing.
Returns:
A tuple of [Image after resizing, Resized height, Resized width].
"""
# Computes the height and width after aspect-ratio-preserving resizing.
ratio = longer_side_length / max(h, w)
rh = round(h * ratio)
rw = round(w * ratio)
resized = F.interpolate(image, (rh, rw), mode="bicubic", align_corners=False)
return resized, rh, rw
def _pad_or_cut_to_max_seq_len(x, max_seq_len):
"""Pads (or cuts) patch tensor `max_seq_len`.
Args:
x: input tensor of shape (n_crops, c, num_patches).
max_seq_len: max sequence length.
Returns:
The padded or cropped tensor of shape (n_crops, c, max_seq_len).
"""
# Shape of x (n_crops, c, num_patches)
# Padding makes sure that # patches > max_seq_length. Note that it also
# makes the input mask zero for shorter input.
n_crops, c, num_patches = x.shape
paddings = torch.zeros((n_crops, c, max_seq_len)).to(x)
x = torch.cat([x, paddings], dim=-1)
x = x[:, :, :max_seq_len]
return x
def get_hashed_spatial_pos_emb_index(grid_size, count_h, count_w):
"""Get hased spatial pos embedding index for each patch.
The size H x W is hashed to grid_size x grid_size.
Args:
grid_size: grid size G for the hashed-based spatial positional embedding.
count_h: number of patches in each row for the image.
count_w: number of patches in each column for the image.
Returns:
hashed position of shape (1, HxW). Each value corresponded to the hashed
position index in [0, grid_size x grid_size).
"""
pos_emb_grid = torch.arange(grid_size).float()
pos_emb_hash_w = pos_emb_grid.reshape(1, 1, grid_size)
pos_emb_hash_w = F.interpolate(pos_emb_hash_w, (count_w), mode="nearest")
pos_emb_hash_w = pos_emb_hash_w.repeat(1, count_h, 1)
pos_emb_hash_h = pos_emb_grid.reshape(1, 1, grid_size)
pos_emb_hash_h = F.interpolate(pos_emb_hash_h, (count_h), mode="nearest")
pos_emb_hash_h = pos_emb_hash_h.transpose(1, 2)
pos_emb_hash_h = pos_emb_hash_h.repeat(1, 1, count_w)
pos_emb_hash = pos_emb_hash_h * grid_size + pos_emb_hash_w
pos_emb_hash = pos_emb_hash.reshape(1, -1)
return pos_emb_hash
def _extract_patches_and_positions_from_image(
image,
patch_size,
patch_stride,
hse_grid_size,
n_crops,
h,
w,
c,
scale_id,
max_seq_len,
):
"""Extracts patches and positional embedding lookup indexes for a given image.
Args:
image: the input image of shape [n_crops, c, h, w]
patch_size: the extracted patch size.
patch_stride: stride for extracting patches.
hse_grid_size: grid size for hash-based spatial positional embedding.
n_crops: number of crops from the input image.
h: height of the image.
w: width of the image.
c: number of channels for the image.
scale_id: the scale id for the image in the multi-scale representation.
max_seq_len: maximum sequence length for the number of patches. If
max_seq_len = 0, no patch is returned. If max_seq_len < 0 then we return
all the patches.
Returns:
A concatenating vector of (patches, HSE, SCE, input mask). The tensor shape
is (n_crops, num_patches, patch_size * patch_size * c + 3).
"""
n_crops, c, h, w = image.shape
p = extract_image_patches(image, patch_size, patch_stride)
assert p.shape[1] == c * patch_size ** 2
count_h = _ceil_divide_int(h, patch_stride)
count_w = _ceil_divide_int(w, patch_stride)
# Shape (1, num_patches)
spatial_p = get_hashed_spatial_pos_emb_index(hse_grid_size, count_h, count_w)
# Shape (n_crops, 1, num_patches)
spatial_p = spatial_p.unsqueeze(1).repeat(n_crops, 1, 1)
scale_p = torch.ones_like(spatial_p) * scale_id
mask_p = torch.ones_like(spatial_p)
# Concatenating is a hacky way to pass both patches, positions and input
# mask to the model.
# Shape (n_crops, c * patch_size * patch_size + 3, num_patches)
out = torch.cat([p, spatial_p.to(p), scale_p.to(p), mask_p.to(p)], dim=1)
if max_seq_len >= 0:
out = _pad_or_cut_to_max_seq_len(out, max_seq_len)
return out
def get_multiscale_patches(
image,
patch_size=32,
patch_stride=32,
hse_grid_size=10,
longer_side_lengths=[224, 384],
max_seq_len_from_original_res=None,
):
"""Extracts image patches from multi-scale representation.
Args:
image: input image tensor with shape [n_crops, 3, h, w]
patch_size: patch size.
patch_stride: patch stride.
hse_grid_size: Hash-based positional embedding grid size.
longer_side_lengths: List of longer-side lengths for each scale in the
multi-scale representation.
max_seq_len_from_original_res: Maximum number of patches extracted from
original resolution. <0 means use all the patches from the original
resolution. None means we don't use original resolution input.
Returns:
A concatenating vector of (patches, HSE, SCE, input mask). The tensor shape
is (n_crops, num_patches, patch_size * patch_size * c + 3).
"""
# Sorting the list to ensure a deterministic encoding of the scale position.
longer_side_lengths = sorted(longer_side_lengths)
if len(image.shape) == 3:
image = image.unsqueeze(0)
n_crops, c, h, w = image.shape
outputs = []
for scale_id, longer_size in enumerate(longer_side_lengths):
resized_image, rh, rw = resize_preserve_aspect_ratio(image, h, w, longer_size)
max_seq_len = int(np.ceil(longer_size / patch_stride) ** 2)
out = _extract_patches_and_positions_from_image(
resized_image,
patch_size,
patch_stride,
hse_grid_size,
n_crops,
rh,
rw,
c,
scale_id,
max_seq_len,
)
outputs.append(out)
if max_seq_len_from_original_res is not None:
out = _extract_patches_and_positions_from_image(
image,
patch_size,
patch_stride,
hse_grid_size,
n_crops,
h,
w,
c,
len(longer_side_lengths),
max_seq_len_from_original_res,
)
outputs.append(out)
outputs = torch.cat(outputs, dim=-1)
return outputs.transpose(1, 2)
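# End-to-end sketch (illustrative): with the default settings the two scales
# (224 and 384) contribute ceil(224/32)^2 = 49 and ceil(384/32)^2 = 144 patch
# slots, so a single 3x300x400 image yields 193 tokens of size 32*32*3 + 3
# (patch values + HSE + SCE + input mask).
if __name__ == "__main__":
    img_demo = torch.rand(3, 300, 400)
    tokens = get_multiscale_patches(img_demo)
    print(tokens.shape)  # torch.Size([1, 193, 3075])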
| 7,785 | 33.451327 | 106 | py |
BVQI | BVQI-master/pyiqa/data/general_fr_dataset.py | import pickle
import numpy as np
import torch
import torchvision.transforms as tf
from PIL import Image
from torch.utils import data as data
from torchvision.transforms.functional import normalize
from pyiqa.data.data_util import read_meta_info_file
from pyiqa.data.transforms import PairedToTensor, augment, transform_mapping
from pyiqa.utils import FileClient, imfrombytes, img2tensor
from pyiqa.utils.registry import DATASET_REGISTRY
@DATASET_REGISTRY.register()
class GeneralFRDataset(data.Dataset):
"""General Full Reference dataset with meta info file.
Args:
opt (dict): Config for train datasets with the following keys:
phase (str): 'train' or 'val'.
"""
def __init__(self, opt):
super(GeneralFRDataset, self).__init__()
self.opt = opt
if opt.get("override_phase", None) is None:
self.phase = opt["phase"]
else:
self.phase = opt["override_phase"]
target_img_folder = opt["dataroot_target"]
ref_img_folder = opt.get("dataroot_ref", None)
self.paths_mos = read_meta_info_file(
target_img_folder, opt["meta_info_file"], mode="fr", ref_dir=ref_img_folder
)
# read train/val/test splits
split_file_path = opt.get("split_file", None)
if split_file_path:
split_index = opt.get("split_index", 1)
with open(opt["split_file"], "rb") as f:
split_dict = pickle.load(f)
splits = split_dict[split_index][self.phase]
self.paths_mos = [self.paths_mos[i] for i in splits]
dmos_max = opt.get("dmos_max", 0.0)
if dmos_max:
self.use_dmos = True
self.dmos_max = opt.get("dmos_max")
else:
self.use_dmos = False
self.mos_max = opt.get("mos_max", 1.0)
# do paired transform first and then do common transform
paired_transform_list = []
augment_dict = opt.get("augment", None)
if augment_dict is not None:
for k, v in augment_dict.items():
paired_transform_list += transform_mapping(k, v)
self.paired_trans = tf.Compose(paired_transform_list)
common_transform_list = []
self.img_range = opt.get("img_range", 1.0)
common_transform_list += [
PairedToTensor(),
]
self.common_trans = tf.Compose(common_transform_list)
def __getitem__(self, index):
ref_path = self.paths_mos[index][0]
img_path = self.paths_mos[index][1]
mos_label = self.paths_mos[index][2]
img_pil = Image.open(img_path).convert("RGB")
ref_pil = Image.open(ref_path).convert("RGB")
img_pil, ref_pil = self.paired_trans([img_pil, ref_pil])
img_tensor = self.common_trans(img_pil) * self.img_range
ref_tensor = self.common_trans(ref_pil) * self.img_range
if self.use_dmos:
mos_label = (self.dmos_max - mos_label) / self.dmos_max
else:
mos_label /= self.mos_max
mos_label_tensor = torch.Tensor([mos_label])
return {
"img": img_tensor,
"ref_img": ref_tensor,
"mos_label": mos_label_tensor,
"img_path": img_path,
"ref_img_path": ref_path,
}
def __len__(self):
return len(self.paths_mos)
| 3,366 | 32.67 | 87 | py |
BVQI | BVQI-master/pyiqa/data/prefetch_dataloader.py | import queue as Queue
import threading
import torch
from torch.utils.data import DataLoader
class PrefetchGenerator(threading.Thread):
"""A general prefetch generator.
Ref:
https://stackoverflow.com/questions/7323664/python-generator-pre-fetch
Args:
generator: Python generator.
num_prefetch_queue (int): Number of prefetch queue.
"""
def __init__(self, generator, num_prefetch_queue):
threading.Thread.__init__(self)
self.queue = Queue.Queue(num_prefetch_queue)
self.generator = generator
self.daemon = True
self.start()
def run(self):
for item in self.generator:
self.queue.put(item)
self.queue.put(None)
def __next__(self):
next_item = self.queue.get()
if next_item is None:
raise StopIteration
return next_item
def __iter__(self):
return self
class PrefetchDataLoader(DataLoader):
"""Prefetch version of dataloader.
Ref:
https://github.com/IgorSusmelj/pytorch-styleguide/issues/5#
TODO:
Need to test on single gpu and ddp (multi-gpu). There is a known issue in
ddp.
Args:
num_prefetch_queue (int): Number of prefetch queue.
kwargs (dict): Other arguments for dataloader.
"""
def __init__(self, num_prefetch_queue, **kwargs):
self.num_prefetch_queue = num_prefetch_queue
super(PrefetchDataLoader, self).__init__(**kwargs)
def __iter__(self):
return PrefetchGenerator(super().__iter__(), self.num_prefetch_queue)
class CPUPrefetcher:
"""CPU prefetcher.
Args:
loader: Dataloader.
"""
def __init__(self, loader):
self.ori_loader = loader
self.loader = iter(loader)
def next(self):
try:
return next(self.loader)
except StopIteration:
return None
def reset(self):
self.loader = iter(self.ori_loader)
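# Minimal usage sketch (illustrative): CPUPrefetcher simply wraps a DataLoader
# iterator and returns None once the epoch is exhausted.
if __name__ == "__main__":
    from torch.utils.data import TensorDataset
    demo_loader = DataLoader(TensorDataset(torch.arange(8).float()), batch_size=4)
    prefetcher = CPUPrefetcher(demo_loader)
    batch = prefetcher.next()
    while batch is not None:
        print(batch)
        batch = prefetcher.next()
    prefetcher.reset()  # ready for the next epoch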
class CUDAPrefetcher:
"""CUDA prefetcher.
Ref:
https://github.com/NVIDIA/apex/issues/304#
    It may consume more GPU memory.
Args:
loader: Dataloader.
opt (dict): Options.
"""
def __init__(self, loader, opt):
self.ori_loader = loader
self.loader = iter(loader)
self.opt = opt
self.stream = torch.cuda.Stream()
self.device = torch.device("cuda" if opt["num_gpu"] != 0 else "cpu")
self.preload()
def preload(self):
try:
self.batch = next(self.loader) # self.batch is a dict
except StopIteration:
self.batch = None
return None
# put tensors to gpu
with torch.cuda.stream(self.stream):
for k, v in self.batch.items():
if torch.is_tensor(v):
self.batch[k] = self.batch[k].to(
device=self.device, non_blocking=True
)
def next(self):
torch.cuda.current_stream().wait_stream(self.stream)
batch = self.batch
self.preload()
return batch
def reset(self):
self.loader = iter(self.ori_loader)
self.preload()
| 3,174 | 23.612403 | 77 | py |
BVQI | BVQI-master/pyiqa/data/data_sampler.py | import math
import torch
from torch.utils.data.sampler import Sampler
class EnlargedSampler(Sampler):
"""Sampler that restricts data loading to a subset of the dataset.
Modified from torch.utils.data.distributed.DistributedSampler
    Support enlarging the dataset for iteration-based training, which saves
    the time of restarting the dataloader after each epoch.
Args:
dataset (torch.utils.data.Dataset): Dataset used for sampling.
num_replicas (int | None): Number of processes participating in
the training. It is usually the world_size.
rank (int | None): Rank of the current process within num_replicas.
ratio (int): Enlarging ratio. Default: 1.
"""
def __init__(self, dataset, num_replicas, rank, ratio=1, use_shuffle=True):
self.dataset = dataset
self.num_replicas = num_replicas
self.rank = rank
self.epoch = 0
self.num_samples = math.ceil(len(self.dataset) * ratio / self.num_replicas)
self.total_size = self.num_samples * self.num_replicas
self.use_shuffle = use_shuffle
def __iter__(self):
# deterministically shuffle based on epoch
if self.use_shuffle:
g = torch.Generator()
g.manual_seed(self.epoch)
indices = torch.randperm(self.total_size, generator=g).tolist()
else:
indices = torch.arange(self.total_size).tolist()
dataset_size = len(self.dataset)
indices = [v % dataset_size for v in indices]
# subsample
indices = indices[self.rank : self.total_size : self.num_replicas]
assert len(indices) == self.num_samples
return iter(indices)
def __len__(self):
return self.num_samples
def set_epoch(self, epoch):
self.epoch = epoch
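# Minimal demo (illustrative): with a 10-sample dataset split across 2 replicas,
# each rank draws ceil(10 / 2) = 5 shuffled indices per epoch.
if __name__ == "__main__":
    dummy_dataset = list(range(10))
    sampler = EnlargedSampler(dummy_dataset, num_replicas=2, rank=0, ratio=1)
    sampler.set_epoch(0)
    print(len(sampler), list(sampler))  # 5 indices in [0, 10)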
| 1,817 | 32.666667 | 83 | py |
BVQI | BVQI-master/pyiqa/data/ava_dataset.py | import itertools
import os
import pickle
import random
import cv2
import numpy as np
import pandas as pd
import torch
import torchvision.transforms as tf
# avoid possible image read error in AVA dataset
from PIL import Image, ImageFile
from torch.utils import data as data
from pyiqa.data.transforms import transform_mapping
from pyiqa.utils.registry import DATASET_REGISTRY
ImageFile.LOAD_TRUNCATED_IMAGES = True
@DATASET_REGISTRY.register()
class AVADataset(data.Dataset):
"""AVA dataset, proposed by
Murray, Naila, Luca Marchesotti, and Florent Perronnin.
"AVA: A large-scale database for aesthetic visual analysis."
In 2012 IEEE conference on computer vision and pattern recognition (CVPR), pp. 2408-2415. IEEE, 2012.
Args:
opt (dict): Config for train datasets with the following keys:
phase (str): 'train' or 'val'.
"""
def __init__(self, opt):
super(AVADataset, self).__init__()
self.opt = opt
target_img_folder = opt["dataroot_target"]
self.dataroot = target_img_folder
self.paths_mos = pd.read_csv(opt["meta_info_file"]).values.tolist()
# read train/val/test splits
split_file_path = opt.get("split_file", None)
if split_file_path:
split_index = opt.get("split_index", 1)
with open(opt["split_file"], "rb") as f:
split_dict = pickle.load(f)
# use val_num for validation
val_num = 2000
train_split = split_dict[split_index]["train"]
val_split = split_dict[split_index]["val"]
train_split = train_split + val_split[:-val_num]
val_split = val_split[-val_num:]
split_dict[split_index]["train"] = train_split
split_dict[split_index]["val"] = val_split
if opt.get("override_phase", None) is None:
splits = split_dict[split_index][opt["phase"]]
else:
splits = split_dict[split_index][opt["override_phase"]]
self.paths_mos = [self.paths_mos[i] for i in splits]
self.mean_mos = np.array([item[1] for item in self.paths_mos]).mean()
# self.paths_mos.sort(key=lambda x: x[1])
# n = 32
# n = 4
# tmp_list = [self.paths_mos[i: i + n] for i in range(0, len(self.paths_mos), n)]
# random.shuffle(tmp_list)
# self.paths_mos = list(itertools.chain.from_iterable(tmp_list))
transform_list = []
augment_dict = opt.get("augment", None)
if augment_dict is not None:
for k, v in augment_dict.items():
transform_list += transform_mapping(k, v)
img_range = opt.get("img_range", 1.0)
transform_list += [
tf.ToTensor(),
tf.Lambda(lambda x: x * img_range),
]
self.trans = tf.Compose(transform_list)
def __getitem__(self, index):
img_path = os.path.join(self.dataroot, self.paths_mos[index][0])
mos_label = self.paths_mos[index][1]
mos_dist = self.paths_mos[index][2:12]
img_pil = Image.open(img_path).convert("RGB")
width, height = img_pil.size
img_tensor = self.trans(img_pil)
img_tensor2 = self.trans(img_pil)
mos_label_tensor = torch.Tensor([mos_label])
mos_dist_tensor = torch.Tensor(mos_dist) / sum(mos_dist)
if self.opt.get("list_imgs", False):
tmp_tensor = torch.zeros((img_tensor.shape[0], 800, 800))
h, w = img_tensor.shape[1:]
tmp_tensor[..., :h, :w] = img_tensor
return {
"img": tmp_tensor,
"mos_label": mos_label_tensor,
"mos_dist": mos_dist_tensor,
"org_size": torch.tensor([height, width]),
"img_path": img_path,
"mean_mos": torch.tensor(self.mean_mos),
}
else:
return {
"img": img_tensor,
"img2": img_tensor2,
"mos_label": mos_label_tensor,
"mos_dist": mos_dist_tensor,
"org_size": torch.tensor([height, width]),
"img_path": img_path,
"mean_mos": torch.tensor(self.mean_mos),
}
def __len__(self):
return len(self.paths_mos)
| 4,319 | 33.56 | 105 | py |
BVQI | BVQI-master/pyiqa/data/data_util.py | import csv
import os
from os import path as osp
import cv2
import numpy as np
import torch
from torch.nn import functional as F
from pyiqa.data.transforms import mod_crop
from pyiqa.utils import img2tensor, scandir
def read_meta_info_file(img_dir, meta_info_file, mode="nr", ref_dir=None):
"""Generate paths and mos labels from an meta information file.
Each line in the meta information file contains the image names and
mos label, separated by a white space.
Example of an meta information file:
- For NR datasets: name, mos(mean), std
```
100.bmp 32.56107532210109 19.12472638223644
```
- For FR datasets: ref_name, dist_name, mos(mean), std
```
I01.bmp I01_01_1.bmp 5.51429 0.13013
```
Args:
        img_dir (str): directory path containing (distorted) images.
        meta_info_file (str): Path to the meta information file.
        mode (str): 'nr' for no-reference or 'fr' for full-reference meta files.
            Default: 'nr'.
        ref_dir (str): directory of reference images for 'fr' mode. Defaults to
            `img_dir` when not given.
Returns:
list[str, float]: image paths, mos label
"""
with open(meta_info_file, "r") as fin:
csvreader = csv.reader(fin)
name_mos = list(csvreader)[1:]
paths_mos = []
for item in name_mos:
if mode == "fr":
if ref_dir is None:
ref_dir = img_dir
ref_name, img_name, mos = item[:3]
ref_path = osp.join(ref_dir, ref_name)
img_path = osp.join(img_dir, img_name)
paths_mos.append([ref_path, img_path, float(mos)])
elif mode == "nr":
img_name, mos = item[:2]
img_path = osp.join(img_dir, img_name)
paths_mos.append([img_path, float(mos)])
return paths_mos
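# Self-contained demo (illustrative): build a tiny NR-style meta info CSV in a
# temporary directory and parse it. The image file itself does not need to exist,
# since only paths and MOS values are assembled here.
if __name__ == "__main__":
    import tempfile
    with tempfile.TemporaryDirectory() as tmp_dir:
        meta_path = osp.join(tmp_dir, "meta_info.csv")
        with open(meta_path, "w") as f:
            f.write("img_name,mos,std\n100.bmp,32.56,19.12\n")
        print(read_meta_info_file(tmp_dir, meta_path, mode="nr"))
        # -> [['<tmp_dir>/100.bmp', 32.56]]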
def read_img_seq(path, require_mod_crop=False, scale=1, return_imgname=False):
"""Read a sequence of images from a given folder path.
Args:
path (list[str] | str): List of image paths or image folder path.
require_mod_crop (bool): Require mod crop for each image.
Default: False.
scale (int): Scale factor for mod_crop. Default: 1.
return_imgname(bool): Whether return image names. Default False.
Returns:
Tensor: size (t, c, h, w), RGB, [0, 1].
list[str]: Returned image name list.
"""
if isinstance(path, list):
img_paths = path
else:
img_paths = sorted(list(scandir(path, full_path=True)))
imgs = [cv2.imread(v).astype(np.float32) / 255.0 for v in img_paths]
if require_mod_crop:
imgs = [mod_crop(img, scale) for img in imgs]
imgs = img2tensor(imgs, bgr2rgb=True, float32=True)
imgs = torch.stack(imgs, dim=0)
if return_imgname:
imgnames = [osp.splitext(osp.basename(path))[0] for path in img_paths]
return imgs, imgnames
else:
return imgs
def generate_frame_indices(crt_idx, max_frame_num, num_frames, padding="reflection"):
"""Generate an index list for reading `num_frames` frames from a sequence
of images.
Args:
crt_idx (int): Current center index.
max_frame_num (int): Max number of the sequence of images (from 1).
num_frames (int): Reading num_frames frames.
padding (str): Padding mode, one of
'replicate' | 'reflection' | 'reflection_circle' | 'circle'
Examples: current_idx = 0, num_frames = 5
The generated frame indices under different padding mode:
replicate: [0, 0, 0, 1, 2]
reflection: [2, 1, 0, 1, 2]
reflection_circle: [4, 3, 0, 1, 2]
circle: [3, 4, 0, 1, 2]
Returns:
list[int]: A list of indices.
"""
assert num_frames % 2 == 1, "num_frames should be an odd number."
assert padding in (
"replicate",
"reflection",
"reflection_circle",
"circle",
), f"Wrong padding mode: {padding}."
max_frame_num = max_frame_num - 1 # start from 0
num_pad = num_frames // 2
indices = []
for i in range(crt_idx - num_pad, crt_idx + num_pad + 1):
if i < 0:
if padding == "replicate":
pad_idx = 0
elif padding == "reflection":
pad_idx = -i
elif padding == "reflection_circle":
pad_idx = crt_idx + num_pad - i
else:
pad_idx = num_frames + i
elif i > max_frame_num:
if padding == "replicate":
pad_idx = max_frame_num
elif padding == "reflection":
pad_idx = max_frame_num * 2 - i
elif padding == "reflection_circle":
pad_idx = (crt_idx - num_pad) - (i - max_frame_num)
else:
pad_idx = i - num_frames
else:
pad_idx = i
indices.append(pad_idx)
return indices
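# Quick check (illustrative) reproducing the docstring example: reading 5 frames
# centred on index 0 under the different padding modes.
if __name__ == "__main__":
    for pad_mode in ("replicate", "reflection", "reflection_circle", "circle"):
        print(pad_mode, generate_frame_indices(0, 100, 5, padding=pad_mode))
    # replicate         [0, 0, 0, 1, 2]
    # reflection        [2, 1, 0, 1, 2]
    # reflection_circle [4, 3, 0, 1, 2]
    # circle            [3, 4, 0, 1, 2]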
def paired_paths_from_lmdb(folders, keys):
"""Generate paired paths from lmdb files.
Contents of lmdb. Taking the `lq.lmdb` for example, the file structure is:
lq.lmdb
├── data.mdb
├── lock.mdb
├── meta_info.txt
The data.mdb and lock.mdb are standard lmdb files and you can refer to
https://lmdb.readthedocs.io/en/release/ for more details.
The meta_info.txt is a specified txt file to record the meta information
of our datasets. It will be automatically created when preparing
datasets by our provided dataset tools.
Each line in the txt file records
1)image name (with extension),
2)image shape,
3)compression level, separated by a white space.
Example: `baboon.png (120,125,3) 1`
We use the image name without extension as the lmdb key.
Note that we use the same key for the corresponding lq and gt images.
Args:
folders (list[str]): A list of folder path. The order of list should
be [input_folder, gt_folder].
keys (list[str]): A list of keys identifying folders. The order should
            be consistent with folders, e.g., ['lq', 'gt'].
Note that this key is different from lmdb keys.
Returns:
list[str]: Returned path list.
"""
assert len(folders) == 2, (
"The len of folders should be 2 with [input_folder, gt_folder]. "
f"But got {len(folders)}"
)
assert (
len(keys) == 2
), f"The len of keys should be 2 with [input_key, gt_key]. But got {len(keys)}"
input_folder, gt_folder = folders
input_key, gt_key = keys
if not (input_folder.endswith(".lmdb") and gt_folder.endswith(".lmdb")):
raise ValueError(
f"{input_key} folder and {gt_key} folder should both in lmdb "
f"formats. But received {input_key}: {input_folder}; "
f"{gt_key}: {gt_folder}"
)
# ensure that the two meta_info files are the same
with open(osp.join(input_folder, "meta_info.txt")) as fin:
input_lmdb_keys = [line.split(".")[0] for line in fin]
with open(osp.join(gt_folder, "meta_info.txt")) as fin:
gt_lmdb_keys = [line.split(".")[0] for line in fin]
if set(input_lmdb_keys) != set(gt_lmdb_keys):
raise ValueError(
f"Keys in {input_key}_folder and {gt_key}_folder are different."
)
else:
paths = []
for lmdb_key in sorted(input_lmdb_keys):
paths.append(
dict([(f"{input_key}_path", lmdb_key), (f"{gt_key}_path", lmdb_key)])
)
return paths
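# Example usage (illustrative sketch; the lmdb folders are hypothetical).
# Returned entries hold lmdb keys rather than file paths:
#
#   paths = paired_paths_from_lmdb(
#       ["datasets/train/lq.lmdb", "datasets/train/gt.lmdb"], ["lq", "gt"])
#   # e.g. {"lq_path": "baboon", "gt_path": "baboon"}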
def paired_paths_from_meta_info_file(folders, keys, meta_info_file, filename_tmpl):
"""Generate paired paths from an meta information file.
Each line in the meta information file contains the image names and
image shape (usually for gt), separated by a white space.
    Example of a meta information file:
```
0001_s001.png (480,480,3)
0001_s002.png (480,480,3)
```
Args:
folders (list[str]): A list of folder path. The order of list should
be [input_folder, gt_folder].
keys (list[str]): A list of keys identifying folders. The order should
            be consistent with folders, e.g., ['lq', 'gt'].
meta_info_file (str): Path to the meta information file.
filename_tmpl (str): Template for each filename. Note that the
template excludes the file extension. Usually the filename_tmpl is
for files in the input folder.
Returns:
list[str]: Returned path list.
"""
assert len(folders) == 2, (
"The len of folders should be 2 with [input_folder, gt_folder]. "
f"But got {len(folders)}"
)
assert (
len(keys) == 2
), f"The len of keys should be 2 with [input_key, gt_key]. But got {len(keys)}"
input_folder, gt_folder = folders
input_key, gt_key = keys
with open(meta_info_file, "r") as fin:
gt_names = [line.strip().split(" ")[0] for line in fin]
paths = []
for gt_name in gt_names:
basename, ext = osp.splitext(osp.basename(gt_name))
input_name = f"{filename_tmpl.format(basename)}{ext}"
input_path = osp.join(input_folder, input_name)
gt_path = osp.join(gt_folder, gt_name)
paths.append(
dict([(f"{input_key}_path", input_path), (f"{gt_key}_path", gt_path)])
)
return paths
def paired_paths_from_folder(folders, keys, filename_tmpl):
"""Generate paired paths from folders.
Args:
folders (list[str]): A list of folder path. The order of list should
be [input_folder, gt_folder].
keys (list[str]): A list of keys identifying folders. The order should
            be consistent with folders, e.g., ['lq', 'gt'].
filename_tmpl (str): Template for each filename. Note that the
template excludes the file extension. Usually the filename_tmpl is
for files in the input folder.
Returns:
list[str]: Returned path list.
"""
assert len(folders) == 2, (
"The len of folders should be 2 with [input_folder, gt_folder]. "
f"But got {len(folders)}"
)
assert (
len(keys) == 2
), f"The len of keys should be 2 with [input_key, gt_key]. But got {len(keys)}"
input_folder, gt_folder = folders
input_key, gt_key = keys
input_paths = list(scandir(input_folder))
gt_paths = list(scandir(gt_folder))
assert len(input_paths) == len(gt_paths), (
f"{input_key} and {gt_key} datasets have different number of images: "
f"{len(input_paths)}, {len(gt_paths)}."
)
paths = []
for gt_path in gt_paths:
basename, ext = osp.splitext(osp.basename(gt_path))
input_name = f"{filename_tmpl.format(basename)}{ext}"
input_path = osp.join(input_folder, input_name)
assert input_name in input_paths, f"{input_name} is not in {input_key}_paths."
gt_path = osp.join(gt_folder, gt_path)
paths.append(
dict([(f"{input_key}_path", input_path), (f"{gt_key}_path", gt_path)])
)
return paths
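# Example usage (illustrative sketch; folders and the "{}_x4" naming are
# hypothetical). A gt image "0001.png" is paired with an lq image "0001_x4.png":
#
#   paths = paired_paths_from_folder(
#       ["datasets/train/lq", "datasets/train/gt"], ["lq", "gt"], "{}_x4")
#   # e.g. {"lq_path": "datasets/train/lq/0001_x4.png",
#   #       "gt_path": "datasets/train/gt/0001.png"}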
def paths_from_folder(folder):
"""Generate paths from folder.
Args:
folder (str): Folder path.
Returns:
list[str]: Returned path list.
"""
paths = list(scandir(folder))
paths = [osp.join(folder, path) for path in paths]
return paths
def paths_from_lmdb(folder):
"""Generate paths from lmdb.
Args:
folder (str): Folder path.
Returns:
list[str]: Returned path list.
"""
if not folder.endswith(".lmdb"):
raise ValueError(f"Folder {folder}folder should in lmdb format.")
with open(osp.join(folder, "meta_info.txt")) as fin:
paths = [line.split(".")[0] for line in fin]
return paths
def generate_gaussian_kernel(kernel_size=13, sigma=1.6):
"""Generate Gaussian kernel used in `duf_downsample`.
Args:
kernel_size (int): Kernel size. Default: 13.
sigma (float): Sigma of the Gaussian kernel. Default: 1.6.
Returns:
np.array: The Gaussian kernel.
"""
from scipy.ndimage import filters as filters
kernel = np.zeros((kernel_size, kernel_size))
# set element at the middle to one, a dirac delta
kernel[kernel_size // 2, kernel_size // 2] = 1
# gaussian-smooth the dirac, resulting in a gaussian filter
return filters.gaussian_filter(kernel, sigma)
def duf_downsample(x, kernel_size=13, scale=4):
"""Downsamping with Gaussian kernel used in the DUF official code.
Args:
x (Tensor): Frames to be downsampled, with shape (b, t, c, h, w).
kernel_size (int): Kernel size. Default: 13.
scale (int): Downsampling factor. Supported scale: (2, 3, 4).
Default: 4.
Returns:
Tensor: DUF downsampled frames.
"""
assert scale in (2, 3, 4), f"Only support scale (2, 3, 4), but got {scale}."
squeeze_flag = False
if x.ndim == 4:
squeeze_flag = True
x = x.unsqueeze(0)
b, t, c, h, w = x.size()
x = x.view(-1, 1, h, w)
pad_w, pad_h = kernel_size // 2 + scale * 2, kernel_size // 2 + scale * 2
x = F.pad(x, (pad_w, pad_w, pad_h, pad_h), "reflect")
gaussian_filter = generate_gaussian_kernel(kernel_size, 0.4 * scale)
gaussian_filter = (
torch.from_numpy(gaussian_filter).type_as(x).unsqueeze(0).unsqueeze(0)
)
x = F.conv2d(x, gaussian_filter, stride=scale)
x = x[:, :, 2:-2, 2:-2]
x = x.view(b, t, c, x.size(2), x.size(3))
if squeeze_flag:
x = x.squeeze(0)
return x
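# Example (illustrative sketch). When the spatial size is divisible by the
# scale, the output spatial size is (h // scale, w // scale):
#
#   x = torch.rand(1, 7, 3, 64, 64)
#   y = duf_downsample(x, kernel_size=13, scale=4)   # -> (1, 7, 3, 16, 16)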
| 13,379 | 32.959391 | 86 | py |
BVQI | BVQI-master/pyiqa/data/flive_dataset.py | import pickle
import cv2
import numpy as np
import torch
import torchvision.transforms as tf
from PIL import Image
from torch.utils import data as data
from torchvision.transforms.functional import normalize
from pyiqa.data.data_util import read_meta_info_file
from pyiqa.data.transforms import transform_mapping
from pyiqa.utils import FileClient, imfrombytes, img2tensor
from pyiqa.utils.registry import DATASET_REGISTRY
@DATASET_REGISTRY.register()
class FLIVEDataset(data.Dataset):
"""General No Reference dataset with meta info file.
Args:
opt (dict): Config for train datasets with the following keys:
phase (str): 'train' or 'val'.
"""
def __init__(self, opt):
super(FLIVEDataset, self).__init__()
self.opt = opt
target_img_folder = opt["dataroot_target"]
self.paths_mos = read_meta_info_file(target_img_folder, opt["meta_info_file"])
# read train/val/test splits
split_file_path = opt.get("split_file", None)
if split_file_path:
split_index = opt.get("split_index", 1)
with open(opt["split_file"], "rb") as f:
split_dict = pickle.load(f)
if opt.get("override_phase", None) is None:
splits = split_dict[split_index][opt["phase"]]
else:
splits = split_dict[split_index][opt["override_phase"]]
if opt["phase"] == "train":
self.paths_mos = [self.paths_mos[i] for i in splits]
else:
# remove patches during validation and test
self.paths_mos = [self.paths_mos[i] for i in splits]
self.paths_mos = [
                    [p, m] for p, m in self.paths_mos if "patches/" not in p
]
dmos_max = opt.get("dmos_max", 0.0)
if dmos_max:
self.use_dmos = True
self.dmos_max = opt.get("dmos_max")
else:
self.use_dmos = False
self.mos_max = opt.get("mos_max", 1.0)
transform_list = []
augment_dict = opt.get("augment", None)
if augment_dict is not None:
for k, v in augment_dict.items():
transform_list += transform_mapping(k, v)
self.img_range = opt.get("img_range", 1.0)
transform_list += [
tf.ToTensor(),
]
self.trans = tf.Compose(transform_list)
def __getitem__(self, index):
img_path = self.paths_mos[index][0]
mos_label = self.paths_mos[index][1]
img_pil = Image.open(img_path).convert("RGB")
img_tensor = self.trans(img_pil) * self.img_range
if self.use_dmos:
mos_label = self.dmos_max - mos_label
else:
mos_label = mos_label / self.mos_max
mos_label_tensor = torch.Tensor([mos_label])
return {"img": img_tensor, "mos_label": mos_label_tensor, "img_path": img_path}
def __len__(self):
return len(self.paths_mos)
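# Example usage (illustrative sketch; all paths and the split file below are
# hypothetical, and only keys actually read in __init__ are shown):
#
#   opt = {
#       "name": "FLIVE",
#       "phase": "train",
#       "dataroot_target": "datasets/FLIVE/database",
#       "meta_info_file": "datasets/meta_info/meta_info_FLIVEDataset.csv",
#       "split_file": "datasets/meta_info/flive_seed123.pkl",
#       "mos_max": 100.0,
#       "augment": {"hflip": True, "random_crop": 320},
#   }
#   dataset = FLIVEDataset(opt)
#   sample = dataset[0]   # {"img": ..., "mos_label": ..., "img_path": ...}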
| 2,999 | 33.090909 | 87 | py |
BVQI | BVQI-master/pyiqa/data/bapps_dataset.py | import os
import pickle
import numpy as np
import pandas as pd
import torch
import torchvision.transforms as tf
from PIL import Image
from torch.utils import data as data
from torchvision.transforms.functional import normalize
from pyiqa.data.data_util import read_meta_info_file
from pyiqa.data.transforms import PairedToTensor, augment, transform_mapping
from pyiqa.utils import FileClient, imfrombytes, img2tensor
from pyiqa.utils.registry import DATASET_REGISTRY
@DATASET_REGISTRY.register()
class BAPPSDataset(data.Dataset):
"""The BAPPS Dataset introduced by:
Zhang, Richard and Isola, Phillip and Efros, Alexei A and Shechtman, Eli and Wang, Oliver
The Unreasonable Effectiveness of Deep Features as a Perceptual Metric.
CVPR2018
url: https://github.com/richzhang/PerceptualSimilarity
Args:
opt (dict): Config for train datasets with the following keys:
phase (str): 'train' or 'val'.
mode (str):
- 2afc: load 2afc triplet data
- jnd: load jnd pair data
"""
def __init__(self, opt):
super(BAPPSDataset, self).__init__()
self.opt = opt
if opt.get("override_phase", None) is None:
self.phase = opt["phase"]
else:
self.phase = opt["override_phase"]
self.dataset_mode = opt.get("mode", "2afc")
val_types = opt.get("val_types", None)
target_img_folder = opt["dataroot_target"]
self.dataroot = target_img_folder
ref_img_folder = opt.get("dataroot_ref", None)
self.paths_mos = pd.read_csv(opt["meta_info_file"]).values.tolist()
# read train/val/test splits
split_file_path = opt.get("split_file", None)
if split_file_path:
split_index = opt.get("split_index", 1)
with open(opt["split_file"], "rb") as f:
split_dict = pickle.load(f)
splits = split_dict[split_index][self.phase]
self.paths_mos = [self.paths_mos[i] for i in splits]
if self.dataset_mode == "2afc":
self.paths_mos = [x for x in self.paths_mos if x[0] != "jnd"]
elif self.dataset_mode == "jnd":
self.paths_mos = [x for x in self.paths_mos if x[0] == "jnd"]
if val_types is not None:
tmp_paths_mos = []
for item in self.paths_mos:
for vt in val_types:
if vt in item[1]:
tmp_paths_mos.append(item)
self.paths_mos = tmp_paths_mos
# TODO: paired transform
transform_list = []
augment_dict = opt.get("augment", None)
if augment_dict is not None:
for k, v in augment_dict.items():
transform_list += transform_mapping(k, v)
img_range = opt.get("img_range", 1.0)
transform_list += [
PairedToTensor(),
]
self.trans = tf.Compose(transform_list)
def __getitem__(self, index):
is_jnd_data = self.paths_mos[index][0] == "jnd"
distA_path = os.path.join(self.dataroot, self.paths_mos[index][1])
distB_path = os.path.join(self.dataroot, self.paths_mos[index][2])
distA_pil = Image.open(distA_path).convert("RGB")
distB_pil = Image.open(distB_path).convert("RGB")
score = self.paths_mos[index][3]
# original 0 means prefer p0, transfer to probability of p0
mos_label_tensor = torch.Tensor([score])
if not is_jnd_data:
ref_path = os.path.join(self.dataroot, self.paths_mos[index][0])
ref_img_pil = Image.open(ref_path).convert("RGB")
distA_tensor, distB_tensor, ref_tensor = self.trans(
[distA_pil, distB_pil, ref_img_pil]
)
else:
distA_tensor, distB_tensor = self.trans([distA_pil, distB_pil])
if not is_jnd_data:
return {
"ref_img": ref_tensor,
"distB_img": distB_tensor,
"distA_img": distA_tensor,
"mos_label": mos_label_tensor,
"img_path": ref_path,
"distB_path": distB_path,
"distA_path": distA_path,
}
else:
return {
"distB_img": distB_tensor,
"distA_img": distA_tensor,
"mos_label": mos_label_tensor,
"distB_path": distB_path,
"distA_path": distA_path,
}
def __len__(self):
return len(self.paths_mos)
| 4,533 | 33.090226 | 93 | py |
BVQI | BVQI-master/pyiqa/data/pipal_dataset.py | import pickle
import numpy as np
import torch
import torchvision.transforms as tf
from PIL import Image
from torch.utils import data as data
from torchvision.transforms.functional import normalize
from pyiqa.data.data_util import read_meta_info_file
from pyiqa.data.transforms import PairedToTensor, augment, transform_mapping
from pyiqa.utils import FileClient, imfrombytes, img2tensor
from pyiqa.utils.registry import DATASET_REGISTRY
@DATASET_REGISTRY.register()
class PIPALDataset(data.Dataset):
"""General Full Reference dataset with meta info file.
Args:
opt (dict): Config for train datasets with the following keys:
phase (str): 'train' or 'val'.
"""
def __init__(self, opt):
super(PIPALDataset, self).__init__()
self.opt = opt
if opt.get("override_phase", None) is None:
self.phase = opt["phase"]
else:
self.phase = opt["override_phase"]
target_img_folder = opt["dataroot_target"]
ref_img_folder = opt.get("dataroot_ref", None)
self.paths_mos = read_meta_info_file(
target_img_folder, opt["meta_info_file"], mode="fr", ref_dir=ref_img_folder
)
# read train/val/test splits
split_file_path = opt.get("split_file", None)
if split_file_path:
split_index = opt.get("split_index", 1)
with open(opt["split_file"], "rb") as f:
split_dict = pickle.load(f)
splits = split_dict[split_index][self.phase]
self.paths_mos = [self.paths_mos[i] for i in splits]
dmos_max = opt.get("dmos_max", 0.0)
if dmos_max:
self.use_dmos = True
self.dmos_max = opt.get("dmos_max")
else:
self.use_dmos = False
# do paired transform first and then do common transform
paired_transform_list = []
augment_dict = opt.get("augment", None)
if augment_dict is not None:
for k, v in augment_dict.items():
paired_transform_list += transform_mapping(k, v)
self.paired_trans = tf.Compose(paired_transform_list)
common_transform_list = []
self.img_range = opt.get("img_range", 1.0)
common_transform_list += [
PairedToTensor(),
]
self.common_trans = tf.Compose(common_transform_list)
def __getitem__(self, index):
ref_path = self.paths_mos[index][0]
img_path = self.paths_mos[index][1]
mos_label = self.paths_mos[index][2]
img_pil = Image.open(img_path).convert("RGB")
ref_pil = Image.open(ref_path).convert("RGB")
img_pil, ref_pil = self.paired_trans([img_pil, ref_pil])
img_tensor = self.common_trans(img_pil) * self.img_range
ref_tensor = self.common_trans(ref_pil) * self.img_range
if self.use_dmos:
mos_label = self.dmos_max - mos_label
mos_label_tensor = torch.Tensor([mos_label])
return {
"img": img_tensor,
"ref_img": ref_tensor,
"mos_label": mos_label_tensor,
"img_path": img_path,
"ref_img_path": ref_path,
}
def __len__(self):
return len(self.paths_mos)
| 3,241 | 32.42268 | 87 | py |
BVQI | BVQI-master/pyiqa/data/pieapp_dataset.py | import os
import pickle
import numpy as np
import pandas as pd
import torch
import torchvision.transforms as tf
from PIL import Image
from torch.utils import data as data
from torchvision.transforms.functional import normalize
from pyiqa.data.data_util import read_meta_info_file
from pyiqa.data.transforms import PairedToTensor, augment, transform_mapping
from pyiqa.utils import FileClient, imfrombytes, img2tensor
from pyiqa.utils.registry import DATASET_REGISTRY
@DATASET_REGISTRY.register()
class PieAPPDataset(data.Dataset):
"""The PieAPP Dataset introduced by:
Prashnani, Ekta and Cai, Hong and Mostofi, Yasamin and Sen, Pradeep
PieAPP: Perceptual Image-Error Assessment Through Pairwise Preference
CVPR2018
url: http://civc.ucsb.edu/graphics/Papers/CVPR2018_PieAPP/
Args:
opt (dict): Config for train datasets with the following keys:
phase (str): 'train' or 'val'.
"""
def __init__(self, opt):
super(PieAPPDataset, self).__init__()
self.opt = opt
target_img_folder = opt["dataroot_target"]
self.dataroot = target_img_folder
if opt.get("override_phase", None) is None:
self.phase = opt["phase"]
else:
self.phase = opt["override_phase"]
if self.phase == "test":
metadata = pd.read_csv(
opt["meta_info_file"],
usecols=[
"ref_img_path",
"dist_imgB_path",
"per_img score for dist_imgB",
],
)
else:
metadata = pd.read_csv(opt["meta_info_file"])
self.paths_mos = metadata.values.tolist()
# read train/val/test splits
split_file_path = opt.get("split_file", None)
if split_file_path:
split_index = opt.get("split_index", 1)
with open(opt["split_file"], "rb") as f:
split_dict = pickle.load(f)
splits = split_dict[split_index][self.phase]
self.paths_mos = [self.paths_mos[i] for i in splits]
# remove duplicates
if self.phase == "test":
            temp = []
            for item in self.paths_mos:
                if item not in temp:
                    temp.append(item)
            self.paths_mos = temp
# do paired transform first and then do common transform
paired_transform_list = []
augment_dict = opt.get("augment", None)
if augment_dict is not None:
for k, v in augment_dict.items():
paired_transform_list += transform_mapping(k, v)
self.paired_trans = tf.Compose(paired_transform_list)
common_transform_list = []
self.img_range = opt.get("img_range", 1.0)
common_transform_list += [
PairedToTensor(),
]
self.common_trans = tf.Compose(common_transform_list)
def __getitem__(self, index):
ref_path = os.path.join(self.dataroot, self.paths_mos[index][0])
if self.phase == "test":
distB_path = os.path.join(self.dataroot, self.paths_mos[index][1])
else:
distA_path = os.path.join(self.dataroot, self.paths_mos[index][1])
distB_path = os.path.join(self.dataroot, self.paths_mos[index][2])
distB_pil = Image.open(distB_path).convert("RGB")
ref_img_pil = Image.open(ref_path).convert("RGB")
if self.phase != "test":
distA_pil = Image.open(distA_path).convert("RGB")
distA_pil, distB_pil, ref_img_pil = self.paired_trans(
[distA_pil, distB_pil, ref_img_pil]
)
distA_tensor, distB_tensor, ref_tensor = self.common_trans(
[distA_pil, distB_pil, ref_img_pil]
)
else:
distB_pil, ref_img_pil = self.paired_trans([distB_pil, ref_img_pil])
distB_tensor, ref_tensor = self.common_trans([distB_pil, ref_img_pil])
if self.phase == "train":
score = self.paths_mos[index][4]
mos_label_tensor = torch.Tensor([score])
distB_score = torch.Tensor([-1])
elif self.phase == "val":
score = self.paths_mos[index][4]
mos_label_tensor = torch.Tensor([score])
distB_score = torch.Tensor([-1])
elif self.phase == "test":
per_img_score = self.paths_mos[index][2]
distB_score = torch.Tensor([per_img_score])
if self.phase == "test":
return {
"img": distB_tensor,
"ref_img": ref_tensor,
"mos_label": distB_score,
"img_path": distB_path,
"ref_img_path": ref_path,
}
else:
return {
"distB_img": distB_tensor,
"ref_img": ref_tensor,
"distA_img": distA_tensor,
"mos_label": mos_label_tensor,
"distB_per_img_score": distB_score,
"distB_path": distB_path,
"ref_img_path": ref_path,
"distA_path": distA_path,
}
def __len__(self):
return len(self.paths_mos)
| 5,149 | 34.273973 | 82 | py |
BVQI | BVQI-master/pyiqa/data/__init__.py | import importlib
import random
from copy import deepcopy
from functools import partial
from os import path as osp
import numpy as np
import torch
import torch.utils.data
from pyiqa.data.prefetch_dataloader import PrefetchDataLoader
from pyiqa.utils import get_root_logger, scandir
from pyiqa.utils.dist_util import get_dist_info
from pyiqa.utils.registry import DATASET_REGISTRY
__all__ = ["build_dataset", "build_dataloader"]
# automatically scan and import dataset modules for registry
# scan all the files under the data folder with '_dataset' in file names
data_folder = osp.dirname(osp.abspath(__file__))
dataset_filenames = [
osp.splitext(osp.basename(v))[0]
for v in scandir(data_folder)
if v.endswith("_dataset.py")
]
# import all the dataset modules
_dataset_modules = [
importlib.import_module(f"pyiqa.data.{file_name}")
for file_name in dataset_filenames
]
def build_dataset(dataset_opt):
"""Build dataset from options.
Args:
dataset_opt (dict): Configuration for dataset. It must contain:
name (str): Dataset name.
type (str): Dataset type.
"""
dataset_opt = deepcopy(dataset_opt)
dataset = DATASET_REGISTRY.get(dataset_opt["type"])(dataset_opt)
logger = get_root_logger()
logger.info(
f'Dataset [{dataset.__class__.__name__}] - {dataset_opt["name"]} ' "is built."
)
return dataset
def build_dataloader(
dataset, dataset_opt, num_gpu=1, dist=False, sampler=None, seed=None
):
"""Build dataloader.
Args:
dataset (torch.utils.data.Dataset): Dataset.
dataset_opt (dict): Dataset options. It contains the following keys:
phase (str): 'train' or 'val'.
num_worker_per_gpu (int): Number of workers for each GPU.
batch_size_per_gpu (int): Training batch size for each GPU.
num_gpu (int): Number of GPUs. Used only in the train phase.
Default: 1.
dist (bool): Whether in distributed training. Used only in the train
phase. Default: False.
sampler (torch.utils.data.sampler): Data sampler. Default: None.
seed (int | None): Seed. Default: None
"""
phase = dataset_opt["phase"]
rank, _ = get_dist_info()
if phase == "train":
if dist: # distributed training
batch_size = dataset_opt["batch_size_per_gpu"]
num_workers = dataset_opt["num_worker_per_gpu"]
else: # non-distributed training
multiplier = 1 if num_gpu == 0 else num_gpu
batch_size = dataset_opt["batch_size_per_gpu"] * multiplier
num_workers = dataset_opt["num_worker_per_gpu"] * multiplier
dataloader_args = dict(
dataset=dataset,
batch_size=batch_size,
shuffle=False,
num_workers=num_workers,
sampler=sampler,
drop_last=True,
)
if sampler is None:
dataloader_args["shuffle"] = True
dataloader_args["worker_init_fn"] = (
partial(worker_init_fn, num_workers=num_workers, rank=rank, seed=seed)
if seed is not None
else None
)
elif phase in ["val", "test"]: # validation
batch_size = dataset_opt.get("batch_size_per_gpu", 1)
num_workers = dataset_opt.get("num_worker_per_gpu", 0)
dataloader_args = dict(
dataset=dataset,
batch_size=batch_size,
shuffle=False,
num_workers=num_workers,
)
else:
raise ValueError(
f"Wrong dataset phase: {phase}. "
"Supported ones are 'train', 'val' and 'test'."
)
dataloader_args["pin_memory"] = dataset_opt.get("pin_memory", False)
dataloader_args["persistent_workers"] = dataset_opt.get("persistent_workers", False)
prefetch_mode = dataset_opt.get("prefetch_mode")
if prefetch_mode == "cpu": # CPUPrefetcher
num_prefetch_queue = dataset_opt.get("num_prefetch_queue", 1)
logger = get_root_logger()
logger.info(
f"Use {prefetch_mode} prefetch dataloader: num_prefetch_queue = {num_prefetch_queue}"
)
return PrefetchDataLoader(
num_prefetch_queue=num_prefetch_queue, **dataloader_args
)
else:
# prefetch_mode=None: Normal dataloader
# prefetch_mode='cuda': dataloader for CUDAPrefetcher
return torch.utils.data.DataLoader(**dataloader_args)
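# Example usage (illustrative sketch; the option values are hypothetical).
# `type` must be a class registered in DATASET_REGISTRY, e.g. "FLIVEDataset":
#
#   dataset_opt = {
#       "name": "flive",
#       "type": "FLIVEDataset",
#       "phase": "train",
#       "batch_size_per_gpu": 16,
#       "num_worker_per_gpu": 4,
#       # ...plus the dataset-specific keys (dataroot_target, meta_info_file, ...)
#   }
#   dataset = build_dataset(dataset_opt)
#   loader = build_dataloader(dataset, dataset_opt, num_gpu=1, seed=123)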
def worker_init_fn(worker_id, num_workers, rank, seed):
# Set the worker seed to num_workers * rank + worker_id + seed
worker_seed = num_workers * rank + worker_id + seed
np.random.seed(worker_seed)
random.seed(worker_seed)
| 4,699 | 34.606061 | 97 | py |
BVQI | BVQI-master/pyiqa/data/transforms.py | import functools
import random
from collections.abc import Sequence
from typing import Union
import cv2
import numpy as np
import torch
import torchvision.transforms as tf
import torchvision.transforms.functional as F
from imgaug import augmenters as iaa
from PIL import Image
from pyiqa.archs.arch_util import to_2tuple
def transform_mapping(key, args):
if key == "hflip" and args:
return [PairedRandomHorizontalFlip()]
if key == "vflip" and args:
return [PairedRandomHorizontalFlip()]
elif key == "random_crop":
return [PairedRandomCrop(args)]
elif key == "center_crop":
return [PairedCenterCrop(args)]
elif key == "resize":
return [PairedResize(args)]
elif key == "adaptive_resize":
return [PairedAdaptiveResize(args)]
elif key == "random_square_resize":
return [PairedRandomSquareResize(args)]
elif key == "random_arp_resize":
return [PairedRandomARPResize(args)]
elif key == "ada_pad":
return [PairedAdaptivePadding(args)]
elif key == "rot90" and args:
return [PairedRandomRot90(args)]
elif key == "randomerase":
return [PairedRandomErasing(**args)]
elif key == "changecolor":
return [ChangeColorSpace(args)]
elif key == "totensor" and args:
return [PairedToTensor()]
else:
return []
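# Example usage (illustrative sketch). The dataset classes build their
# pipelines from an "augment" dict in the options like this:
#
#   augment_dict = {"hflip": True, "random_crop": 224}
#   transform_list = []
#   for k, v in augment_dict.items():
#       transform_list += transform_mapping(k, v)
#   trans = tf.Compose(transform_list + [PairedToTensor()])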
def _check_pair(x):
if isinstance(x, (tuple, list)) and len(x) >= 2:
return True
class PairedToTensor(tf.ToTensor):
"""Pair version of center crop"""
def to_tensor(self, x):
if isinstance(x, torch.Tensor):
return x
else:
return F.to_tensor(x)
def __call__(self, imgs):
if _check_pair(imgs):
for i in range(len(imgs)):
imgs[i] = self.to_tensor(imgs[i])
return imgs
else:
return self.to_tensor(imgs)
class ChangeColorSpace:
"""Pair version of center crop"""
def __init__(self, to_colorspace):
self.aug_op = iaa.color.ChangeColorspace(to_colorspace)
def __call__(self, imgs):
if _check_pair(imgs):
for i in range(len(imgs)):
tmpimg = self.aug_op.augment_image(np.array(imgs[i]))
imgs[i] = Image.fromarray(tmpimg)
return imgs
else:
imgs = self.aug_op.augment_image(np.array(imgs))
return Image.fromarray(imgs)
class PairedCenterCrop(tf.CenterCrop):
"""Pair version of center crop"""
def forward(self, imgs):
if _check_pair(imgs):
for i in range(len(imgs)):
imgs[i] = super().forward(imgs[i])
return imgs
elif isinstance(imgs, Image.Image):
return super().forward(imgs)
class PairedRandomCrop(tf.RandomCrop):
"""Pair version of random crop"""
def _pad(self, img):
if self.padding is not None:
img = F.pad(img, self.padding, self.fill, self.padding_mode)
width, height = img.size
# pad the width if needed
if self.pad_if_needed and width < self.size[1]:
padding = [self.size[1] - width, 0]
img = F.pad(img, padding, self.fill, self.padding_mode)
# pad the height if needed
if self.pad_if_needed and height < self.size[0]:
padding = [0, self.size[0] - height]
img = F.pad(img, padding, self.fill, self.padding_mode)
return img
    def forward(self, imgs):
        if _check_pair(imgs):
            # use the same crop parameters for every image in the pair
            top, left, h, w = self.get_params(imgs[0], self.size)
            for idx in range(len(imgs)):
                img = self._pad(imgs[idx])
                img = F.crop(img, top, left, h, w)
                imgs[idx] = img
            return imgs
elif isinstance(imgs, Image.Image):
return super().forward(imgs)
class PairedRandomErasing(tf.RandomErasing):
"""Pair version of random erasing"""
def forward(self, imgs):
if _check_pair(imgs):
if torch.rand(1) < self.p:
# cast self.value to script acceptable type
if isinstance(self.value, (int, float)):
value = [self.value]
elif isinstance(self.value, str):
value = None
elif isinstance(self.value, tuple):
value = list(self.value)
else:
value = self.value
if value is not None and not (len(value) in (1, imgs[0].shape[-3])):
raise ValueError(
"If value is a sequence, it should have either a single value or "
f"{imgs[0].shape[-3]} (number of input channels)"
)
x, y, h, w, v = self.get_params(
imgs[0], scale=self.scale, ratio=self.ratio, value=value
)
for i in range(len(imgs)):
imgs[i] = F.erase(imgs[i], x, y, h, w, v, self.inplace)
return imgs
elif isinstance(imgs, Image.Image):
return super().forward(imgs)
class PairedRandomHorizontalFlip(tf.RandomHorizontalFlip):
"""Pair version of random hflip"""
def forward(self, imgs):
if _check_pair(imgs):
if torch.rand(1) < self.p:
for i in range(len(imgs)):
imgs[i] = F.hflip(imgs[i])
return imgs
elif isinstance(imgs, Image.Image):
return super().forward(imgs)
class PairedRandomVerticalFlip(tf.RandomVerticalFlip):
"""Pair version of random hflip"""
def forward(self, imgs):
if _check_pair(imgs):
if torch.rand(1) < self.p:
for i in range(len(imgs)):
imgs[i] = F.vflip(imgs[i])
return imgs
elif isinstance(imgs, Image.Image):
return super().forward(imgs)
class PairedRandomRot90(torch.nn.Module):
"""Pair version of random hflip"""
def __init__(self, p=0.5):
super().__init__()
self.p = p
def forward(self, imgs):
if _check_pair(imgs):
if torch.rand(1) < self.p:
for i in range(len(imgs)):
imgs[i] = F.rotate(imgs[i], 90)
return imgs
elif isinstance(imgs, Image.Image):
if torch.rand(1) < self.p:
imgs = F.rotate(imgs, 90)
return imgs
class PairedResize(tf.Resize):
"""Pair version of resize"""
def forward(self, imgs):
if _check_pair(imgs):
for i in range(len(imgs)):
imgs[i] = super().forward(imgs[i])
return imgs
elif isinstance(imgs, Image.Image):
return super().forward(imgs)
class PairedAdaptiveResize(tf.Resize):
"""ARP preserved resize when necessary"""
def forward(self, imgs):
if _check_pair(imgs):
for i in range(len(imgs)):
tmpimg = imgs[i]
min_size = min(tmpimg.size)
if min_size < self.size:
tmpimg = super().forward(tmpimg)
imgs[i] = tmpimg
return imgs
elif isinstance(imgs, Image.Image):
tmpimg = imgs
min_size = min(tmpimg.size)
if min_size < self.size:
tmpimg = super().forward(tmpimg)
return tmpimg
class PairedRandomARPResize(torch.nn.Module):
"""Pair version of resize"""
def __init__(
self, size_range, interpolation=tf.InterpolationMode.BILINEAR, antialias=None
):
super().__init__()
self.interpolation = interpolation
self.antialias = antialias
self.size_range = size_range
if not (isinstance(size_range, Sequence) and len(size_range) == 2):
raise TypeError(
f"size_range should be sequence with 2 int. Got {size_range} with {type(size_range)}"
)
def forward(self, imgs):
min_size, max_size = sorted(self.size_range)
target_size = random.randint(min_size, max_size)
if _check_pair(imgs):
for i in range(len(imgs)):
imgs[i] = F.resize(imgs[i], target_size, self.interpolation)
return imgs
elif isinstance(imgs, Image.Image):
return F.resize(imgs, target_size, self.interpolation)
class PairedRandomSquareResize(torch.nn.Module):
"""Pair version of resize"""
def __init__(
self, size_range, interpolation=tf.InterpolationMode.BILINEAR, antialias=None
):
super().__init__()
self.interpolation = interpolation
self.antialias = antialias
self.size_range = size_range
if not (isinstance(size_range, Sequence) and len(size_range) == 2):
raise TypeError(
f"size_range should be sequence with 2 int. Got {size_range} with {type(size_range)}"
)
def forward(self, imgs):
min_size, max_size = sorted(self.size_range)
target_size = random.randint(min_size, max_size)
target_size = (target_size, target_size)
if _check_pair(imgs):
for i in range(len(imgs)):
imgs[i] = F.resize(imgs[i], target_size, self.interpolation)
return imgs
elif isinstance(imgs, Image.Image):
return F.resize(imgs, target_size, self.interpolation)
class PairedAdaptivePadding(torch.nn.Module):
"""Pair version of resize"""
def __init__(self, target_size, fill=0, padding_mode="constant"):
super().__init__()
self.target_size = to_2tuple(target_size)
self.fill = fill
self.padding_mode = padding_mode
def get_padding(self, x):
w, h = x.size
th, tw = self.target_size
assert (
th >= h and tw >= w
), f"Target size {self.target_size} should be larger than image size ({h}, {w})"
pad_row = th - h
pad_col = tw - w
pad_l, pad_r, pad_t, pad_b = (
pad_col // 2,
pad_col - pad_col // 2,
pad_row // 2,
pad_row - pad_row // 2,
)
return (pad_l, pad_t, pad_r, pad_b)
def forward(self, imgs):
if _check_pair(imgs):
for i in range(len(imgs)):
padding = self.get_padding(imgs[i])
imgs[i] = F.pad(imgs[i], padding, self.fill, self.padding_mode)
return imgs
elif isinstance(imgs, Image.Image):
padding = self.get_padding(imgs)
imgs = F.pad(imgs, padding, self.fill, self.padding_mode)
return imgs
def mod_crop(img, scale):
"""Mod crop images, used during testing.
Args:
img (ndarray): Input image.
scale (int): Scale factor.
Returns:
ndarray: Result image.
"""
img = img.copy()
if img.ndim in (2, 3):
h, w = img.shape[0], img.shape[1]
h_remainder, w_remainder = h % scale, w % scale
img = img[: h - h_remainder, : w - w_remainder, ...]
else:
raise ValueError(f"Wrong img ndim: {img.ndim}.")
return img
def augment(imgs, hflip=True, rotation=True, flows=None, return_status=False):
"""Augment: horizontal flips OR rotate (0, 90, 180, 270 degrees).
We use vertical flip and transpose for rotation implementation.
All the images in the list use the same augmentation.
Args:
imgs (list[ndarray] | ndarray): Images to be augmented. If the input
is an ndarray, it will be transformed to a list.
hflip (bool): Horizontal flip. Default: True.
        rotation (bool): Rotation. Default: True.
        flows (list[ndarray]): Flows to be augmented. If the input is an
ndarray, it will be transformed to a list.
Dimension is (h, w, 2). Default: None.
return_status (bool): Return the status of flip and rotation.
Default: False.
Returns:
list[ndarray] | ndarray: Augmented images and flows. If returned
results only have one element, just return ndarray.
"""
hflip = hflip and random.random() < 0.5
vflip = rotation and random.random() < 0.5
rot90 = rotation and random.random() < 0.5
def _augment(img):
if hflip: # horizontal
cv2.flip(img, 1, img)
if vflip: # vertical
cv2.flip(img, 0, img)
if rot90:
img = img.transpose(1, 0, 2)
return img
def _augment_flow(flow):
if hflip: # horizontal
cv2.flip(flow, 1, flow)
flow[:, :, 0] *= -1
if vflip: # vertical
cv2.flip(flow, 0, flow)
flow[:, :, 1] *= -1
if rot90:
flow = flow.transpose(1, 0, 2)
flow = flow[:, :, [1, 0]]
return flow
if not isinstance(imgs, list):
imgs = [imgs]
imgs = [_augment(img) for img in imgs]
if len(imgs) == 1:
imgs = imgs[0]
if flows is not None:
if not isinstance(flows, list):
flows = [flows]
flows = [_augment_flow(flow) for flow in flows]
if len(flows) == 1:
flows = flows[0]
return imgs, flows
else:
if return_status:
return imgs, (hflip, vflip, rot90)
else:
return imgs
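# Example usage (illustrative sketch):
#
#   img = np.random.rand(64, 64, 3).astype(np.float32)
#   out, status = augment([img], hflip=True, rotation=True, return_status=True)
#   # `out` is the augmented ndarray (a single input is returned unwrapped),
#   # `status` is the applied (hflip, vflip, rot90) tuple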
def img_rotate(img, angle, center=None, scale=1.0):
"""Rotate image.
Args:
img (ndarray): Image to be rotated.
angle (float): Rotation angle in degrees. Positive values mean
counter-clockwise rotation.
center (tuple[int]): Rotation center. If the center is None,
initialize it as the center of the image. Default: None.
scale (float): Isotropic scale factor. Default: 1.0.
"""
(h, w) = img.shape[:2]
if center is None:
center = (w // 2, h // 2)
matrix = cv2.getRotationMatrix2D(center, angle, scale)
rotated_img = cv2.warpAffine(img, matrix, (w, h))
return rotated_img
| 13,970 | 31.117241 | 101 | py |
BVQI | BVQI-master/pyiqa/archs/maniqa_arch.py | r"""MANIQA proposed by
MANIQA: Multi-dimension Attention Network for No-Reference Image Quality Assessment
Sidi Yang, Tianhe Wu, Shuwei Shi, Shanshan Lao, Yuan Gong, Mingdeng Cao, Jiahao Wang and Yujiu Yang.
CVPR Workshop 2022, winner of NTIRE2022 NRIQA challenge
Reference:
- Official github: https://github.com/IIGROUP/MANIQA
"""
import timm
import torch
import torch.nn as nn
import torch.nn.functional as F
from einops import rearrange
from timm.data import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
IMAGENET_INCEPTION_MEAN,
IMAGENET_INCEPTION_STD,
)
from timm.models.vision_transformer import Block
from torch import nn
from pyiqa.archs.arch_util import load_pretrained_network
from pyiqa.utils.registry import ARCH_REGISTRY
from .func_util import extract_2d_patches
from .maniqa_swin import SwinTransformer
default_model_urls = {
"pipal": "https://github.com/chaofengc/IQA-PyTorch/releases/download/v0.1-weights/MANIQA_PIPAL-ae6d356b.pth"
}
def random_crop(x, sample_size=224, sample_num=8):
b, c, h, w = x.shape
th = tw = sample_size
cropped_x = []
for s in range(sample_num):
i = torch.randint(0, h - th + 1, size=(1,)).item()
j = torch.randint(0, w - tw + 1, size=(1,)).item()
cropped_x.append(x[:, :, i : i + th, j : j + tw])
cropped_x = torch.stack(cropped_x, dim=1)
return cropped_x
class TABlock(nn.Module):
def __init__(self, dim, drop=0.1):
super().__init__()
self.c_q = nn.Linear(dim, dim)
self.c_k = nn.Linear(dim, dim)
self.c_v = nn.Linear(dim, dim)
self.norm_fact = dim ** -0.5
self.softmax = nn.Softmax(dim=-1)
self.proj_drop = nn.Dropout(drop)
def forward(self, x):
_x = x
B, C, N = x.shape
q = self.c_q(x)
k = self.c_k(x)
v = self.c_v(x)
attn = q @ k.transpose(-2, -1) * self.norm_fact
attn = self.softmax(attn)
x = (attn @ v).transpose(1, 2).reshape(B, C, N)
x = self.proj_drop(x)
x = x + _x
return x
class SaveOutput:
def __init__(self):
self.outputs = []
def __call__(self, module, module_in, module_out):
self.outputs.append(module_out)
def clear(self):
self.outputs = []
@ARCH_REGISTRY.register()
class MANIQA(nn.Module):
def __init__(
self,
embed_dim=768,
num_outputs=1,
patch_size=8,
drop=0.1,
depths=[2, 2],
window_size=4,
dim_mlp=768,
num_heads=[4, 4],
img_size=224,
num_tab=2,
scale=0.13,
test_sample=20,
pretrained=True,
pretrained_model_path=None,
default_mean=None,
default_std=None,
**kwargs,
):
super().__init__()
self.img_size = img_size
self.patch_size = patch_size
self.input_size = img_size // patch_size
self.test_sample = test_sample
self.patches_resolution = (img_size // patch_size, img_size // patch_size)
self.vit = timm.create_model("vit_base_patch8_224", pretrained=True)
self.save_output = SaveOutput()
hook_handles = []
for layer in self.vit.modules():
if isinstance(layer, Block):
handle = layer.register_forward_hook(self.save_output)
hook_handles.append(handle)
self.tablock1 = nn.ModuleList()
for i in range(num_tab):
tab = TABlock(self.input_size ** 2)
self.tablock1.append(tab)
self.conv1 = nn.Conv2d(embed_dim * 4, embed_dim, 1, 1, 0)
self.swintransformer1 = SwinTransformer(
patches_resolution=self.patches_resolution,
depths=depths,
num_heads=num_heads,
embed_dim=embed_dim,
window_size=window_size,
dim_mlp=dim_mlp,
scale=scale,
)
self.tablock2 = nn.ModuleList()
for i in range(num_tab):
tab = TABlock(self.input_size ** 2)
self.tablock2.append(tab)
self.conv2 = nn.Conv2d(embed_dim, embed_dim // 2, 1, 1, 0)
self.swintransformer2 = SwinTransformer(
patches_resolution=self.patches_resolution,
depths=depths,
num_heads=num_heads,
embed_dim=embed_dim // 2,
window_size=window_size,
dim_mlp=dim_mlp,
scale=scale,
)
self.fc_score = nn.Sequential(
nn.Linear(embed_dim // 2, embed_dim // 2),
nn.ReLU(),
nn.Dropout(drop),
nn.Linear(embed_dim // 2, num_outputs),
nn.ReLU(),
)
self.fc_weight = nn.Sequential(
nn.Linear(embed_dim // 2, embed_dim // 2),
nn.ReLU(),
nn.Dropout(drop),
nn.Linear(embed_dim // 2, num_outputs),
nn.Sigmoid(),
)
self.default_mean = torch.Tensor(IMAGENET_INCEPTION_MEAN).view(1, 3, 1, 1)
self.default_std = torch.Tensor(IMAGENET_INCEPTION_STD).view(1, 3, 1, 1)
if pretrained_model_path is not None:
load_pretrained_network(
self, pretrained_model_path, True, weight_keys="params"
)
# load_pretrained_network(self, pretrained_model_path, True, )
elif pretrained:
load_pretrained_network(self, default_model_urls["pipal"], True)
def extract_feature(self, save_output):
x6 = save_output.outputs[6][:, 1:]
x7 = save_output.outputs[7][:, 1:]
x8 = save_output.outputs[8][:, 1:]
x9 = save_output.outputs[9][:, 1:]
x = torch.cat((x6, x7, x8, x9), dim=2)
return x
def forward(self, x):
x = (x - self.default_mean.to(x)) / self.default_std.to(x)
if self.training:
x_patches = random_crop(x, sample_size=224, sample_num=1)
else:
x_patches = random_crop(x, sample_size=224, sample_num=self.test_sample)
bsz, num_patches, c, psz, psz = x_patches.shape
x = x_patches.reshape(bsz * num_patches, c, psz, psz)
_x = self.vit(x)
x = self.extract_feature(self.save_output)
self.save_output.outputs.clear()
# stage 1
x = rearrange(x, "b (h w) c -> b c (h w)", h=self.input_size, w=self.input_size)
for tab in self.tablock1:
x = tab(x)
x = rearrange(x, "b c (h w) -> b c h w", h=self.input_size, w=self.input_size)
x = self.conv1(x)
x = self.swintransformer1(x)
# stage2
x = rearrange(x, "b c h w -> b c (h w)", h=self.input_size, w=self.input_size)
for tab in self.tablock2:
x = tab(x)
x = rearrange(x, "b c (h w) -> b c h w", h=self.input_size, w=self.input_size)
x = self.conv2(x)
x = self.swintransformer2(x)
x = rearrange(x, "b c h w -> b (h w) c", h=self.input_size, w=self.input_size)
per_patch_score = self.fc_score(x)
per_patch_score = per_patch_score.reshape(bsz, -1)
per_patch_weight = self.fc_weight(x)
per_patch_weight = per_patch_weight.reshape(bsz, -1)
score = (per_patch_weight * per_patch_score).sum(dim=-1) / (
per_patch_weight.sum(dim=-1) + 1e-8
)
return score.unsqueeze(1)
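# Example usage (illustrative sketch; building the model downloads the timm
# ViT backbone, and pretrained=True additionally fetches the PIPAL weights):
#
#   model = MANIQA(pretrained=True, test_sample=5).eval()
#   x = torch.rand(1, 3, 384, 384)      # RGB in [0, 1], at least 224x224
#   with torch.no_grad():
#       score = model(x)                # shape (1, 1), higher means better quality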
| 7,322 | 30.83913 | 112 | py |
BVQI | BVQI-master/pyiqa/archs/dbcnn_arch.py | r"""DBCNN Metric
Created by: https://github.com/zwx8981/DBCNN-PyTorch/blob/master/DBCNN.py
Modified by: Chaofeng Chen (https://github.com/chaofengc)
"""
import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision
from pyiqa.archs.arch_util import load_pretrained_network
from pyiqa.utils.registry import ARCH_REGISTRY
default_model_urls = {
"csiq": "https://github.com/chaofengc/IQA-PyTorch/releases/download/v0.1-weights/DBCNN_CSIQ-8677d071.pth",
"tid2008": "https://github.com/chaofengc/IQA-PyTorch/releases/download/v0.1-weights/DBCNN_TID2008-4b47c5d1.pth",
"tid2013": "https://github.com/chaofengc/IQA-PyTorch/releases/download/v0.1-weights/DBCNN_TID2013-485d021d.pth",
"live": "https://github.com/chaofengc/IQA-PyTorch/releases/download/v0.1-weights/DBCNN_LIVE-97262bf4.pth",
"livec": "https://github.com/chaofengc/IQA-PyTorch/releases/download/v0.1-weights/DBCNN_LIVEC-83f6dad3.pth",
"livem": "https://github.com/chaofengc/IQA-PyTorch/releases/download/v0.1-weights/DBCNN_LIVEM-698474e3.pth",
"koniq": "https://github.com/chaofengc/IQA-PyTorch/releases/download/v0.1-weights/DBCNN_KonIQ10k-254e8241.pth",
}
class SCNN(nn.Module):
"""Network branch for synthetic distortions.
Args:
use_bn (Boolean): Whether to use batch normalization.
Modified from https://github.com/zwx8981/DBCNN-PyTorch/blob/master/SCNN.py
"""
def __init__(self, use_bn=True):
super(SCNN, self).__init__()
self.num_class = 39
self.use_bn = use_bn
self.features = nn.Sequential(
*self._make_layers(3, 48, 3, 1, 1),
*self._make_layers(48, 48, 3, 2, 1),
*self._make_layers(48, 64, 3, 1, 1),
*self._make_layers(64, 64, 3, 2, 1),
*self._make_layers(64, 64, 3, 1, 1),
*self._make_layers(64, 64, 3, 2, 1),
*self._make_layers(64, 128, 3, 1, 1),
*self._make_layers(128, 128, 3, 1, 1),
*self._make_layers(128, 128, 3, 2, 1),
)
self.pooling = nn.AdaptiveAvgPool2d(1)
self.projection = nn.Sequential(
*self._make_layers(128, 256, 1, 1, 0),
*self._make_layers(256, 256, 1, 1, 0),
)
self.classifier = nn.Linear(256, self.num_class)
def _make_layers(self, in_ch, out_ch, ksz, stride, pad):
if self.use_bn:
layers = [
nn.Conv2d(in_ch, out_ch, ksz, stride, pad),
nn.BatchNorm2d(out_ch),
nn.ReLU(True),
]
else:
layers = [
nn.Conv2d(in_ch, out_ch, ksz, stride, pad),
nn.ReLU(True),
]
return layers
def forward(self, X):
X = self.features(X)
X = self.pooling(X)
X = self.projection(X)
X = X.view(X.shape[0], -1)
X = self.classifier(X)
return X
@ARCH_REGISTRY.register()
class DBCNN(nn.Module):
"""Full DBCNN network.
Args:
        fc (Boolean): Whether to initialize the fc layers.
        use_bn (Boolean): Whether to use batch normalization.
pretrained_scnn_path (String): Pretrained scnn path.
default_mean (list): Default mean value.
default_std (list): Default std value.
Reference:
Zhang, Weixia, et al. "Blind image quality assessment using
a deep bilinear convolutional neural network." IEEE Transactions
on Circuits and Systems for Video Technology 30.1 (2018): 36-47.
"""
def __init__(
self,
fc=True,
use_bn=True,
pretrained_scnn_path=None,
pretrained=True,
pretrained_model_path=None,
default_mean=[0.485, 0.456, 0.406],
default_std=[0.229, 0.224, 0.225],
):
super(DBCNN, self).__init__()
# Convolution and pooling layers of VGG-16.
self.features1 = torchvision.models.vgg16(pretrained=True).features
self.features1 = nn.Sequential(*list(self.features1.children())[:-1])
scnn = SCNN(use_bn=use_bn)
if pretrained_scnn_path is not None:
load_pretrained_network(scnn, pretrained_scnn_path)
self.features2 = scnn.features
# Linear classifier.
self.fc = torch.nn.Linear(512 * 128, 1)
self.default_mean = torch.Tensor(default_mean).view(1, 3, 1, 1)
self.default_std = torch.Tensor(default_std).view(1, 3, 1, 1)
if fc:
# Freeze all previous layers.
for param in self.features1.parameters():
param.requires_grad = False
for param in scnn.parameters():
param.requires_grad = False
# Initialize the fc layers.
nn.init.kaiming_normal_(self.fc.weight.data)
if self.fc.bias is not None:
nn.init.constant_(self.fc.bias.data, val=0)
if pretrained_model_path is None and pretrained:
url_key = "koniq" if isinstance(pretrained, bool) else pretrained
pretrained_model_path = default_model_urls[url_key]
if pretrained_model_path is not None:
load_pretrained_network(self, pretrained_model_path, True, "params")
def preprocess(self, x):
x = (x - self.default_mean.to(x)) / self.default_std.to(x)
return x
def forward(self, X):
r"""Compute IQA using DBCNN model.
Args:
X: An input tensor with (N, C, H, W) shape. RGB channel order for colour images.
Returns:
Value of DBCNN model.
"""
X = self.preprocess(X)
X1 = self.features1(X)
X2 = self.features2(X)
N, _, H, W = X1.shape
N, _, H2, W2 = X2.shape
if (H != H2) or (W != W2):
X2 = F.interpolate(X2, (H, W), mode="bilinear", align_corners=True)
X1 = X1.view(N, 512, H * W)
X2 = X2.view(N, 128, H * W)
X = torch.bmm(X1, torch.transpose(X2, 1, 2)) / (H * W) # Bilinear
X = X.view(N, 512 * 128)
X = torch.sqrt(X + 1e-8)
X = torch.nn.functional.normalize(X)
X = self.fc(X)
return X
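# Example usage (illustrative sketch; the VGG-16 backbone weights come from
# torchvision and pretrained=True loads the KonIQ-10k head by default):
#
#   model = DBCNN(pretrained=True).eval()
#   x = torch.rand(1, 3, 384, 384)      # RGB in [0, 1]
#   with torch.no_grad():
#       score = model(x)                # shape (1, 1)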
| 6,123 | 32.464481 | 116 | py |
BVQI | BVQI-master/pyiqa/archs/pieapp_arch.py | r"""PieAPP metric, proposed by
Prashnani, Ekta, Hong Cai, Yasamin Mostofi, and Pradeep Sen.
"Pieapp: Perceptual image-error assessment through pairwise preference."
In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp. 1808-1817. 2018.
Ref url: https://github.com/prashnani/PerceptualImageError/blob/master/model/PieAPPv0pt1_PT.py
Modified by: Chaofeng Chen (https://github.com/chaofengc)
!!! Important Note: to keep simple test process and fair comparison with other methods,
we use zero padding and extract subpatches only once
rather than from multiple subimages as the original codes.
"""
import torch
import torch.nn as nn
import torch.nn.functional as F
from pyiqa.archs.arch_util import load_pretrained_network
from pyiqa.utils.registry import ARCH_REGISTRY
from .func_util import extract_2d_patches
default_model_urls = {
"url": "https://github.com/chaofengc/IQA-PyTorch/releases/download/v0.1-weights/PieAPPv0.1-0937b014.pth"
}
class CompactLinear(nn.Module):
def __init__(self):
super().__init__()
self.weight = nn.parameter.Parameter(torch.randn(1))
self.bias = nn.parameter.Parameter(torch.randn(1))
def forward(self, x):
return x * self.weight + self.bias
@ARCH_REGISTRY.register()
class PieAPP(nn.Module):
def __init__(
self, patch_size=64, stride=27, pretrained=True, pretrained_model_path=None
):
super(PieAPP, self).__init__()
self.conv1 = nn.Conv2d(3, 64, 3, padding=1)
self.conv2 = nn.Conv2d(64, 64, 3, padding=1)
self.pool2 = nn.MaxPool2d(2, 2)
self.conv3 = nn.Conv2d(64, 64, 3, padding=1)
self.conv4 = nn.Conv2d(64, 128, 3, padding=1)
self.pool4 = nn.MaxPool2d(2, 2)
self.conv5 = nn.Conv2d(128, 128, 3, padding=1)
self.conv6 = nn.Conv2d(128, 128, 3, padding=1)
self.pool6 = nn.MaxPool2d(2, 2)
self.conv7 = nn.Conv2d(128, 256, 3, padding=1)
self.conv8 = nn.Conv2d(256, 256, 3, padding=1)
self.pool8 = nn.MaxPool2d(2, 2)
self.conv9 = nn.Conv2d(256, 256, 3, padding=1)
self.conv10 = nn.Conv2d(256, 512, 3, padding=1)
self.pool10 = nn.MaxPool2d(2, 2)
self.conv11 = nn.Conv2d(512, 512, 3, padding=1)
self.fc1_score = nn.Linear(120832, 512)
self.fc2_score = nn.Linear(512, 1)
self.fc1_weight = nn.Linear(2048, 512)
self.fc2_weight = nn.Linear(512, 1)
self.ref_score_subtract = CompactLinear()
self.patch_size = patch_size
self.stride = stride
if pretrained_model_path is not None:
load_pretrained_network(self, pretrained_model_path)
elif pretrained:
load_pretrained_network(self, default_model_urls["url"])
self.pretrained = pretrained
def flatten(self, matrix): # takes NxCxHxW input and outputs NxHWC
return torch.flatten(matrix, 1)
def compute_features(self, input):
# conv1 -> relu -> conv2 -> relu -> pool2 -> conv3 -> relu
x3 = F.relu(
self.conv3(self.pool2(F.relu(self.conv2(F.relu(self.conv1(input))))))
)
# conv4 -> relu -> pool4 -> conv5 -> relu
x5 = F.relu(self.conv5(self.pool4(F.relu(self.conv4(x3)))))
# conv6 -> relu -> pool6 -> conv7 -> relu
x7 = F.relu(self.conv7(self.pool6(F.relu(self.conv6(x5)))))
# conv8 -> relu -> pool8 -> conv9 -> relu
x9 = F.relu(self.conv9(self.pool8(F.relu(self.conv8(x7)))))
        # conv10 -> relu -> pool10 -> conv11 -> relu
x11 = self.flatten(F.relu(self.conv11(self.pool10(F.relu(self.conv10(x9))))))
# flatten and concatenate
feature_ms = torch.cat(
(
self.flatten(x3),
self.flatten(x5),
self.flatten(x7),
self.flatten(x9),
x11,
),
1,
)
return feature_ms, x11
def preprocess(self, x):
"""Default BGR in [0, 255] in original codes"""
x = x[:, [2, 1, 0]] * 255.0
return x
def forward(self, dist, ref):
assert (
dist.shape == ref.shape
), f"Input and reference images should have the same shape, but got {dist.shape}"
f" and {ref.shape}"
if self.pretrained:
dist = self.preprocess(dist)
ref = self.preprocess(ref)
image_A_patches = extract_2d_patches(
dist, self.patch_size, self.stride, padding="none"
)
image_ref_patches = extract_2d_patches(
ref, self.patch_size, self.stride, padding="none"
)
bsz, num_patches, c, psz, psz = image_A_patches.shape
image_A_patches = image_A_patches.reshape(bsz * num_patches, c, psz, psz)
image_ref_patches = image_ref_patches.reshape(bsz * num_patches, c, psz, psz)
A_multi_scale, A_coarse = self.compute_features(image_A_patches)
ref_multi_scale, ref_coarse = self.compute_features(image_ref_patches)
diff_ms = ref_multi_scale - A_multi_scale
diff_coarse = ref_coarse - A_coarse
# per patch score: fc1_score -> relu -> fc2_score
per_patch_score = self.ref_score_subtract(
0.01 * self.fc2_score(F.relu(self.fc1_score(diff_ms)))
)
per_patch_score = per_patch_score.view((-1, num_patches))
# per patch weight: fc1_weight -> relu -> fc2_weight
per_patch_weight = self.fc2_weight(F.relu(self.fc1_weight(diff_coarse))) + 1e-6
per_patch_weight = per_patch_weight.view((-1, num_patches))
score = (per_patch_weight * per_patch_score).sum(dim=-1) / per_patch_weight.sum(
dim=-1
)
return score.squeeze()
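# Example usage (illustrative sketch). Inputs are RGB tensors in [0, 1] with
# identical shapes; the output behaves as a perceptual error, so lower means
# closer to the reference:
#
#   model = PieAPP(pretrained=True).eval()
#   dist = torch.rand(1, 3, 256, 256)
#   ref = torch.rand(1, 3, 256, 256)
#   with torch.no_grad():
#       score = model(dist, ref)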
| 5,788 | 37.852349 | 108 | py |
BVQI | BVQI-master/pyiqa/archs/lpips_arch.py | r"""LPIPS Model.
Created by: https://github.com/richzhang/PerceptualSimilarity.
Modified by: Jiadi Mo (https://github.com/JiadiMo)
"""
from collections import namedtuple
import torch
import torch.nn as nn
from torchvision import models
from pyiqa.archs.arch_util import load_pretrained_network
from pyiqa.utils.registry import ARCH_REGISTRY
default_model_urls = {
# key "url" is the default
"0.0_alex": "https://github.com/chaofengc/IQA-Toolbox-Python/releases/download/v0.1-weights/LPIPS_v0.0_alex-18720f55.pth",
"0.0_vgg": "https://github.com/chaofengc/IQA-Toolbox-Python/releases/download/v0.1-weights/LPIPS_v0.0_vgg-b9e42362.pth",
"0.0_squeeze": "https://github.com/chaofengc/IQA-Toolbox-Python/releases/download/v0.1-weights/LPIPS_v0.0_squeeze-c27abd3a.pth",
"0.1_alex": "https://github.com/chaofengc/IQA-Toolbox-Python/releases/download/v0.1-weights/LPIPS_v0.1_alex-df73285e.pth",
"0.1_vgg": "https://github.com/chaofengc/IQA-Toolbox-Python/releases/download/v0.1-weights/LPIPS_v0.1_vgg-a78928a0.pth",
"0.1_squeeze": "https://github.com/chaofengc/IQA-PyTorch/releases/download/v0.1-weights/LPIPS_v0.1_squeeze-4a5350f2.pth",
}
def upsample(in_tens, out_HW=(64, 64)): # assumes scale factor is same for H and W
return nn.Upsample(size=out_HW, mode="bilinear", align_corners=False)(in_tens)
def spatial_average(in_tens, keepdim=True):
return in_tens.mean([2, 3], keepdim=keepdim)
def normalize_tensor(in_feat, eps=1e-10):
norm_factor = torch.sqrt(torch.sum(in_feat ** 2, dim=1, keepdim=True))
return in_feat / (norm_factor + eps)
@ARCH_REGISTRY.register()
class LPIPS(nn.Module):
"""LPIPS model.
Args:
        lpips (Boolean): Whether to use linear layers on top of the base/trunk network.
        pretrained (Boolean): Whether the linear layers are calibrated with human
            perceptual judgments.
        pnet_rand (Boolean): Whether to use a randomly initialized trunk.
        net (String): ['alex','vgg','squeeze'] are the base/trunk networks available.
        version (String): choose the version; ['v0.1'] is the default and latest,
            ['v0.0'] contained a normalization bug.
        pretrained_model_path (String): Pretrained model path.
The following parameters should only be changed if training the network:
eval_mode (Boolean): choose the mode; True is for test mode (default).
pnet_tune (Boolean): Whether to tune the base/trunk network.
use_dropout (Boolean): Whether to use dropout when training linear layers.
Reference:
Zhang, Richard, et al. "The unreasonable effectiveness of deep features as
a perceptual metric." Proceedings of the IEEE conference on computer vision
and pattern recognition. 2018.
"""
def __init__(
self,
pretrained=True,
net="alex",
version="0.1",
lpips=True,
spatial=False,
pnet_rand=False,
pnet_tune=False,
use_dropout=True,
pretrained_model_path=None,
eval_mode=True,
**kwargs,
):
super(LPIPS, self).__init__()
self.pnet_type = net
self.pnet_tune = pnet_tune
self.pnet_rand = pnet_rand
self.spatial = spatial
self.lpips = lpips # false means baseline of just averaging all layers
self.version = version
self.scaling_layer = ScalingLayer()
if self.pnet_type in ["vgg", "vgg16"]:
net_type = vgg16
self.chns = [64, 128, 256, 512, 512]
elif self.pnet_type == "alex":
net_type = alexnet
self.chns = [64, 192, 384, 256, 256]
elif self.pnet_type == "squeeze":
net_type = squeezenet
self.chns = [64, 128, 256, 384, 384, 512, 512]
self.L = len(self.chns)
self.net = net_type(pretrained=not self.pnet_rand, requires_grad=self.pnet_tune)
if lpips:
self.lin0 = NetLinLayer(self.chns[0], use_dropout=use_dropout)
self.lin1 = NetLinLayer(self.chns[1], use_dropout=use_dropout)
self.lin2 = NetLinLayer(self.chns[2], use_dropout=use_dropout)
self.lin3 = NetLinLayer(self.chns[3], use_dropout=use_dropout)
self.lin4 = NetLinLayer(self.chns[4], use_dropout=use_dropout)
self.lins = [self.lin0, self.lin1, self.lin2, self.lin3, self.lin4]
if self.pnet_type == "squeeze": # 7 layers for squeezenet
self.lin5 = NetLinLayer(self.chns[5], use_dropout=use_dropout)
self.lin6 = NetLinLayer(self.chns[6], use_dropout=use_dropout)
self.lins += [self.lin5, self.lin6]
self.lins = nn.ModuleList(self.lins)
if pretrained_model_path is not None:
load_pretrained_network(self, pretrained_model_path, False)
elif pretrained:
load_pretrained_network(
self, default_model_urls[f"{version}_{net}"], False
)
if eval_mode:
self.eval()
def forward(self, in1, in0, retPerLayer=False, normalize=True):
r"""Computation IQA using LPIPS.
Args:
in1: An input tensor. Shape :math:`(N, C, H, W)`.
in0: A reference tensor. Shape :math:`(N, C, H, W)`.
            retPerLayer (Boolean): Whether the returned result contains the
                result of each layer. Default: False.
normalize (Boolean): Whether to normalize image data range
in [0,1] to [-1,1]. Default: True.
Returns:
Quality score.
"""
if (
normalize
): # turn on this flag if input is [0,1] so it can be adjusted to [-1, +1]
in0 = 2 * in0 - 1
in1 = 2 * in1 - 1
# v0.0 - original release had a bug, where input was not scaled
in0_input, in1_input = (
(self.scaling_layer(in0), self.scaling_layer(in1))
if self.version == "0.1"
else (in0, in1)
)
outs0, outs1 = self.net.forward(in0_input), self.net.forward(in1_input)
feats0, feats1, diffs = {}, {}, {}
for kk in range(self.L):
feats0[kk], feats1[kk] = normalize_tensor(outs0[kk]), normalize_tensor(
outs1[kk]
)
diffs[kk] = (feats0[kk] - feats1[kk]) ** 2
if self.lpips:
if self.spatial:
res = [
upsample(self.lins[kk](diffs[kk]), out_HW=in0.shape[2:])
for kk in range(self.L)
]
else:
res = [
spatial_average(self.lins[kk](diffs[kk]), keepdim=True)
for kk in range(self.L)
]
else:
if self.spatial:
res = [
upsample(diffs[kk].sum(dim=1, keepdim=True), out_HW=in0.shape[2:])
for kk in range(self.L)
]
else:
res = [
spatial_average(diffs[kk].sum(dim=1, keepdim=True), keepdim=True)
for kk in range(self.L)
]
val = 0
for i in range(self.L):
val += res[i]
if retPerLayer:
return (val, res)
else:
return val.squeeze()
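# Example usage (illustrative sketch). Note the argument order in forward()
# is (test image, reference image), and [0, 1] inputs are rescaled internally
# when normalize=True:
#
#   metric = LPIPS(net="alex", version="0.1", pretrained=True)
#   img1 = torch.rand(1, 3, 224, 224)   # distorted
#   img0 = torch.rand(1, 3, 224, 224)   # reference
#   with torch.no_grad():
#       d = metric(img1, img0)          # lower means more perceptually similar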
class ScalingLayer(nn.Module):
def __init__(self):
super(ScalingLayer, self).__init__()
self.register_buffer(
"shift", torch.Tensor([-0.030, -0.088, -0.188])[None, :, None, None]
)
self.register_buffer(
"scale", torch.Tensor([0.458, 0.448, 0.450])[None, :, None, None]
)
def forward(self, inp):
return (inp - self.shift) / self.scale
class NetLinLayer(nn.Module):
"""A single linear layer which does a 1x1 conv"""
def __init__(self, chn_in, chn_out=1, use_dropout=False):
super(NetLinLayer, self).__init__()
layers = (
[
nn.Dropout(),
]
if (use_dropout)
else []
)
layers += [
nn.Conv2d(chn_in, chn_out, 1, stride=1, padding=0, bias=False),
]
self.model = nn.Sequential(*layers)
def forward(self, x):
return self.model(x)
class squeezenet(torch.nn.Module):
def __init__(self, requires_grad=False, pretrained=True):
super(squeezenet, self).__init__()
pretrained_features = models.squeezenet1_1(pretrained=pretrained).features
self.slice1 = torch.nn.Sequential()
self.slice2 = torch.nn.Sequential()
self.slice3 = torch.nn.Sequential()
self.slice4 = torch.nn.Sequential()
self.slice5 = torch.nn.Sequential()
self.slice6 = torch.nn.Sequential()
self.slice7 = torch.nn.Sequential()
self.N_slices = 7
for x in range(2):
self.slice1.add_module(str(x), pretrained_features[x])
for x in range(2, 5):
self.slice2.add_module(str(x), pretrained_features[x])
for x in range(5, 8):
self.slice3.add_module(str(x), pretrained_features[x])
for x in range(8, 10):
self.slice4.add_module(str(x), pretrained_features[x])
for x in range(10, 11):
self.slice5.add_module(str(x), pretrained_features[x])
for x in range(11, 12):
self.slice6.add_module(str(x), pretrained_features[x])
for x in range(12, 13):
self.slice7.add_module(str(x), pretrained_features[x])
if not requires_grad:
for param in self.parameters():
param.requires_grad = False
def forward(self, X):
h = self.slice1(X)
h_relu1 = h
h = self.slice2(h)
h_relu2 = h
h = self.slice3(h)
h_relu3 = h
h = self.slice4(h)
h_relu4 = h
h = self.slice5(h)
h_relu5 = h
h = self.slice6(h)
h_relu6 = h
h = self.slice7(h)
h_relu7 = h
vgg_outputs = namedtuple(
"SqueezeOutputs",
["relu1", "relu2", "relu3", "relu4", "relu5", "relu6", "relu7"],
)
out = vgg_outputs(h_relu1, h_relu2, h_relu3, h_relu4, h_relu5, h_relu6, h_relu7)
return out
class alexnet(torch.nn.Module):
def __init__(self, requires_grad=False, pretrained=True):
super(alexnet, self).__init__()
alexnet_pretrained_features = models.alexnet(pretrained=pretrained).features
self.slice1 = torch.nn.Sequential()
self.slice2 = torch.nn.Sequential()
self.slice3 = torch.nn.Sequential()
self.slice4 = torch.nn.Sequential()
self.slice5 = torch.nn.Sequential()
self.N_slices = 5
for x in range(2):
self.slice1.add_module(str(x), alexnet_pretrained_features[x])
for x in range(2, 5):
self.slice2.add_module(str(x), alexnet_pretrained_features[x])
for x in range(5, 8):
self.slice3.add_module(str(x), alexnet_pretrained_features[x])
for x in range(8, 10):
self.slice4.add_module(str(x), alexnet_pretrained_features[x])
for x in range(10, 12):
self.slice5.add_module(str(x), alexnet_pretrained_features[x])
if not requires_grad:
for param in self.parameters():
param.requires_grad = False
def forward(self, X):
h = self.slice1(X)
h_relu1 = h
h = self.slice2(h)
h_relu2 = h
h = self.slice3(h)
h_relu3 = h
h = self.slice4(h)
h_relu4 = h
h = self.slice5(h)
h_relu5 = h
alexnet_outputs = namedtuple(
"AlexnetOutputs", ["relu1", "relu2", "relu3", "relu4", "relu5"]
)
out = alexnet_outputs(h_relu1, h_relu2, h_relu3, h_relu4, h_relu5)
return out
class vgg16(torch.nn.Module):
def __init__(self, requires_grad=False, pretrained=True):
super(vgg16, self).__init__()
vgg_pretrained_features = models.vgg16(pretrained=pretrained).features
self.slice1 = torch.nn.Sequential()
self.slice2 = torch.nn.Sequential()
self.slice3 = torch.nn.Sequential()
self.slice4 = torch.nn.Sequential()
self.slice5 = torch.nn.Sequential()
self.N_slices = 5
for x in range(4):
self.slice1.add_module(str(x), vgg_pretrained_features[x])
for x in range(4, 9):
self.slice2.add_module(str(x), vgg_pretrained_features[x])
for x in range(9, 16):
self.slice3.add_module(str(x), vgg_pretrained_features[x])
for x in range(16, 23):
self.slice4.add_module(str(x), vgg_pretrained_features[x])
for x in range(23, 30):
self.slice5.add_module(str(x), vgg_pretrained_features[x])
if not requires_grad:
for param in self.parameters():
param.requires_grad = False
def forward(self, X):
h = self.slice1(X)
h_relu1_2 = h
h = self.slice2(h)
h_relu2_2 = h
h = self.slice3(h)
h_relu3_3 = h
h = self.slice4(h)
h_relu4_3 = h
h = self.slice5(h)
h_relu5_3 = h
vgg_outputs = namedtuple(
"VggOutputs", ["relu1_2", "relu2_2", "relu3_3", "relu4_3", "relu5_3"]
)
out = vgg_outputs(h_relu1_2, h_relu2_2, h_relu3_3, h_relu4_3, h_relu5_3)
return out
class resnet(torch.nn.Module):
def __init__(self, requires_grad=False, pretrained=True, num=18):
super(resnet, self).__init__()
if num == 18:
self.net = models.resnet18(pretrained=pretrained)
elif num == 34:
self.net = models.resnet34(pretrained=pretrained)
elif num == 50:
self.net = models.resnet50(pretrained=pretrained)
elif num == 101:
self.net = models.resnet101(pretrained=pretrained)
elif num == 152:
self.net = models.resnet152(pretrained=pretrained)
self.N_slices = 5
self.conv1 = self.net.conv1
self.bn1 = self.net.bn1
self.relu = self.net.relu
self.maxpool = self.net.maxpool
self.layer1 = self.net.layer1
self.layer2 = self.net.layer2
self.layer3 = self.net.layer3
self.layer4 = self.net.layer4
def forward(self, X):
h = self.conv1(X)
h = self.bn1(h)
h = self.relu(h)
h_relu1 = h
h = self.maxpool(h)
h = self.layer1(h)
h_conv2 = h
h = self.layer2(h)
h_conv3 = h
h = self.layer3(h)
h_conv4 = h
h = self.layer4(h)
h_conv5 = h
outputs = namedtuple("Outputs", ["relu1", "conv2", "conv3", "conv4", "conv5"])
out = outputs(h_relu1, h_conv2, h_conv3, h_conv4, h_conv5)
return out
| 14,838 | 34.670673 | 132 | py |
BVQI | BVQI-master/pyiqa/archs/hypernet_arch.py | r"""HyperNet Metric
Created by: https://github.com/SSL92/hyperIQA
Modified by: Chaofeng Chen (https://github.com/chaofengc)
"""
import timm
import torch
import torch.nn as nn
from pyiqa.utils.registry import ARCH_REGISTRY
@ARCH_REGISTRY.register()
class HyperNet(nn.Module):
"""HyperNet Model.
Args:
base_model_name (String): pretrained model to extract features,
            can be any model supported by timm. Default: resnet50.
pretrained_model_path (String): Pretrained model path.
default_mean (list): Default mean value.
default_std (list): Default std value.
Reference:
Su, Shaolin, Qingsen Yan, Yu Zhu, Cheng Zhang, Xin Ge,
Jinqiu Sun, and Yanning Zhang. "Blindly assess image
quality in the wild guided by a self-adaptive hyper network."
In Proceedings of the IEEE/CVF Conference on Computer Vision
and Pattern Recognition (CVPR), pp. 3667-3676. 2020.
"""
def __init__(
self,
base_model_name="resnet50",
pretrained_model_path=None,
default_mean=[0.485, 0.456, 0.406],
default_std=[0.229, 0.224, 0.225],
):
super(HyperNet, self).__init__()
self.base_model = timm.create_model(
base_model_name, pretrained=True, features_only=True
)
lda_out_channels = 16
hyper_in_channels = 112
target_in_size = 224
hyper_fc_channels = [112, 56, 28, 14, 1]
feature_size = 7 # spatial size of the last features from base model
self.hyper_fc_channels = hyper_fc_channels
# local distortion aware module
self.lda_modules = nn.ModuleList(
[
nn.Sequential(
nn.Conv2d(256, 16, kernel_size=1, stride=1, padding=0, bias=False),
nn.AvgPool2d(7, stride=7),
nn.Flatten(),
nn.Linear(16 * 64, lda_out_channels),
),
nn.Sequential(
nn.Conv2d(512, 32, kernel_size=1, stride=1, padding=0, bias=False),
nn.AvgPool2d(7, stride=7),
nn.Flatten(),
nn.Linear(32 * 16, lda_out_channels),
),
nn.Sequential(
nn.Conv2d(1024, 64, kernel_size=1, stride=1, padding=0, bias=False),
nn.AvgPool2d(7, stride=7),
nn.Flatten(),
nn.Linear(64 * 4, lda_out_channels),
),
nn.Sequential(
nn.AvgPool2d(7, stride=7),
nn.Flatten(),
nn.Linear(2048, target_in_size - lda_out_channels * 3),
),
]
)
# Hyper network part, conv for generating target fc weights, fc for generating target fc biases
self.fc_w_modules = nn.ModuleList([])
for i in range(4):
if i == 0:
out_ch = int(target_in_size * hyper_fc_channels[i] / feature_size ** 2)
else:
out_ch = int(
hyper_fc_channels[i - 1] * hyper_fc_channels[i] / feature_size ** 2
)
self.fc_w_modules.append(
nn.Conv2d(hyper_in_channels, out_ch, 3, padding=(1, 1)),
)
self.fc_w_modules.append(
nn.Sequential(
nn.AdaptiveAvgPool2d(1),
nn.Flatten(),
nn.Linear(hyper_in_channels, hyper_fc_channels[3]),
)
)
self.fc_b_modules = nn.ModuleList([])
for i in range(5):
self.fc_b_modules.append(
nn.Sequential(
nn.AdaptiveAvgPool2d(1),
nn.Flatten(),
nn.Linear(hyper_in_channels, hyper_fc_channels[i]),
)
)
# Conv layers for resnet output features
self.conv1 = nn.Sequential(
nn.Conv2d(2048, 1024, 1, padding=(0, 0)),
nn.ReLU(inplace=True),
nn.Conv2d(1024, 512, 1, padding=(0, 0)),
nn.ReLU(inplace=True),
nn.Conv2d(512, hyper_in_channels, 1, padding=(0, 0)),
nn.ReLU(inplace=True),
)
self.global_pool = nn.Sequential()
        self.default_mean = torch.Tensor(default_mean).view(1, 3, 1, 1)
        self.default_std = torch.Tensor(default_std).view(1, 3, 1, 1)
        # `pretrained_model_path` was documented but never used; load the
        # checkpoint here when a path is given (assumes the checkpoint layout
        # expected by `load_pretrained_network` below).
        if pretrained_model_path is not None:
            self.load_pretrained_network(pretrained_model_path)
def load_pretrained_network(self, model_path):
state_dict = torch.load(model_path, map_location=torch.device("cpu"))[
"state_dict"
]
        # HyperNet has no `self.net` submodule; load the weights into the model itself.
        self.load_state_dict(state_dict, strict=True)
def preprocess(self, x):
# input must have shape of (224, 224) because of network design
if x.shape[2:] != torch.Size([224, 224]):
x = nn.functional.interpolate(x, (224, 224), mode="bicubic")
x = (x - self.default_mean.to(x)) / self.default_std.to(x)
return x
def random_crop_test(self, x, sample_num=25):
b, c, h, w = x.shape
th = tw = 224
cropped_x = []
for s in range(sample_num):
i = torch.randint(0, h - th + 1, size=(1,)).item()
j = torch.randint(0, w - tw + 1, size=(1,)).item()
cropped_x.append(x[:, :, i : i + th, j : j + tw])
cropped_x = torch.cat(cropped_x, dim=0)
results = self.forward_patch(cropped_x)
results = results.reshape(sample_num, b).mean(dim=0)
return results.unsqueeze(-1)
def forward_patch(self, x):
assert x.shape[2:] == torch.Size(
[224, 224]
), f"Input patch size must be (224, 224), but got {x.shape[2:]}"
x = self.preprocess(x)
base_feats = self.base_model(x)[1:]
# multi-scale local distortion aware features
lda_feat_list = []
for bf, ldam in zip(base_feats, self.lda_modules):
lda_feat_list.append(ldam(bf))
lda_feat = torch.cat(lda_feat_list, dim=1)
# calculate target net weights & bias
target_fc_w = []
target_fc_b = []
hyper_in_feat = self.conv1(base_feats[-1])
batch_size = hyper_in_feat.shape[0]
for i in range(len(self.fc_w_modules)):
tmp_fc_w = self.fc_w_modules[i](hyper_in_feat).reshape(
batch_size, self.hyper_fc_channels[i], -1
)
target_fc_w.append(tmp_fc_w)
target_fc_b.append(self.fc_b_modules[i](hyper_in_feat))
# get final IQA score
x = lda_feat.unsqueeze(1)
for i in range(len(target_fc_w)):
if i != 4:
x = torch.sigmoid(
torch.bmm(x, target_fc_w[i].transpose(1, 2))
+ target_fc_b[i].unsqueeze(1)
)
else:
x = torch.bmm(x, target_fc_w[i].transpose(1, 2)) + target_fc_b[
i
].unsqueeze(1)
return x.squeeze(-1)
def forward(self, x):
r"""HYPERNET model.
Args:
x: A distortion tensor. Shape :math:`(N, C, H, W)`.
"""
# imagenet normalization of input is hard coded
if self.training:
return self.forward_patch(x)
else:
return self.random_crop_test(x)
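# Minimal usage sketch (not part of the original file): HyperNet is a
# no-reference metric; in eval mode it averages scores over 25 random 224x224
# crops, so inputs must be larger than 224 on both sides. The helper name and
# shapes are illustrative; creating the model downloads the timm ResNet-50
# weights, and the hyper layers stay randomly initialized unless a checkpoint
# is loaded.
def _hypernet_usage_example():
    model = HyperNet(base_model_name="resnet50").eval()
    x = torch.rand(1, 3, 384, 384)  # RGB batch in [0, 1]
    with torch.no_grad():
        score = model(x)  # shape (1, 1)
    return score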
| 7,284 | 34.364078 | 103 | py |
BVQI | BVQI-master/pyiqa/archs/ssim_arch.py | r"""SSIM, MS-SSIM, CW-SSIM Metric
Created by:
- https://github.com/dingkeyan93/IQA-optimization/blob/master/IQA_pytorch/SSIM.py
- https://github.com/dingkeyan93/IQA-optimization/blob/master/IQA_pytorch/MS_SSIM.py
- https://github.com/dingkeyan93/IQA-optimization/blob/master/IQA_pytorch/CW_SSIM.py
Modified by: Jiadi Mo (https://github.com/JiadiMo)
Refer to:
- Offical SSIM matlab code from https://www.cns.nyu.edu/~lcv/ssim/;
- PIQ from https://github.com/photosynthesis-team/piq;
- BasicSR from https://github.com/xinntao/BasicSR/blob/master/basicsr/metrics/psnr_ssim.py;
- Offical MS-SSIM matlab code from https://ece.uwaterloo.ca/~z70wang/research/iwssim/msssim.zip;
- Offical CW-SSIM matlab code from
https://www.mathworks.com/matlabcentral/mlc-downloads/downloads/submissions/43017/versions/1/download/zip;
"""
import numpy as np
import torch
import torch.nn.functional as F
from pyiqa.matlab_utils import SCFpyr_PyTorch, filter2, fspecial, math_util
from pyiqa.utils.color_util import to_y_channel
from pyiqa.utils.registry import ARCH_REGISTRY
def ssim(
X,
Y,
win,
get_ssim_map=False,
get_cs=False,
get_weight=False,
downsample=False,
data_range=1.0,
test_y_channel=True,
color_space="yiq",
):
    # The official implementation always computes SSIM in the [0, 255] range,
    # so the incoming `data_range` argument is overridden here.
    data_range = 255
    # Whether to compute on the Y channel of YCbCr
if test_y_channel and X.shape[1] == 3:
X = to_y_channel(X, data_range, color_space)
Y = to_y_channel(Y, data_range, color_space)
else:
X = X * data_range
X = X - X.detach() + X.round()
Y = Y * data_range
Y = Y - Y.detach() + Y.round()
C1 = (0.01 * data_range) ** 2
C2 = (0.03 * data_range) ** 2
    # Average-pool the image if it is large enough
f = max(1, round(min(X.size()[-2:]) / 256))
# Downsample operation is used in official matlab code
if (f > 1) and downsample:
X = F.avg_pool2d(X, kernel_size=f)
Y = F.avg_pool2d(Y, kernel_size=f)
win = win.to(X.device)
mu1 = filter2(X, win, "valid")
mu2 = filter2(Y, win, "valid")
mu1_sq = mu1.pow(2)
mu2_sq = mu2.pow(2)
mu1_mu2 = mu1 * mu2
sigma1_sq = filter2(X * X, win, "valid") - mu1_sq
sigma2_sq = filter2(Y * Y, win, "valid") - mu2_sq
sigma12 = filter2(X * Y, win, "valid") - mu1_mu2
cs_map = (2 * sigma12 + C2) / (sigma1_sq + sigma2_sq + C2)
cs_map = F.relu(
cs_map
) # force the ssim response to be nonnegative to avoid negative results.
ssim_map = ((2 * mu1_mu2 + C1) / (mu1_sq + mu2_sq + C1)) * cs_map
ssim_val = ssim_map.mean([1, 2, 3])
if get_weight:
weights = torch.log((1 + sigma1_sq / C2) * (1 + sigma2_sq / C2))
return ssim_map, weights
if get_ssim_map:
return ssim_map
if get_cs:
return ssim_val, cs_map.mean([1, 2, 3])
return ssim_val
@ARCH_REGISTRY.register()
class SSIM(torch.nn.Module):
r"""Args:
        channels: number of channels.
        downsample: boolean, whether to downsample, as in the official matlab code.
        test_y_channel: boolean, whether to compute on the y channel of ycbcr, as in the official matlab code.
"""
def __init__(
self,
channels=3,
downsample=False,
test_y_channel=True,
color_space="yiq",
crop_border=0.0,
):
super(SSIM, self).__init__()
self.win = fspecial(11, 1.5, channels)
self.downsample = downsample
self.test_y_channel = test_y_channel
self.color_space = color_space
self.crop_border = crop_border
def forward(self, X, Y):
assert (
X.shape == Y.shape
), f"Input {X.shape} and reference images should have the same shape"
if self.crop_border != 0:
crop_border = self.crop_border
X = X[..., crop_border:-crop_border, crop_border:-crop_border]
Y = Y[..., crop_border:-crop_border, crop_border:-crop_border]
score = ssim(
X,
Y,
win=self.win,
downsample=self.downsample,
test_y_channel=self.test_y_channel,
color_space=self.color_space,
)
return score
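# Minimal usage sketch (not part of the original file): full-reference SSIM on
# RGB batches in [0, 1]; `downsample=True` and `test_y_channel=True` mimic the
# official matlab behaviour. The helper name and shapes are illustrative.
def _ssim_usage_example():
    metric = SSIM(channels=3, downsample=True, test_y_channel=True)
    x = torch.rand(1, 3, 256, 256)
    y = torch.rand(1, 3, 256, 256)
    with torch.no_grad():
        score = metric(x, y)  # one score per image in the batch
    return score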
def ms_ssim(
X,
Y,
win,
data_range=1.0,
downsample=False,
test_y_channel=True,
is_prod=True,
color_space="yiq",
):
r"""Compute Multiscale structural similarity for a batch of images.
Args:
x: An input tensor. Shape :math:`(N, C, H, W)`.
y: A target tensor. Shape :math:`(N, C, H, W)`.
win: Window setting.
downsample: Boolean, whether to downsample which mimics official SSIM matlab code.
test_y_channel: Boolean, whether to use y channel on ycbcr.
        is_prod: Boolean, whether to combine the per-level terms by weighted product (True) or weighted sum (False).
Returns:
        Index of similarity between two images. Usually in [0, 1] interval.
"""
if not X.shape == Y.shape:
raise ValueError("Input images must have the same dimensions.")
weights = torch.FloatTensor([0.0448, 0.2856, 0.3001, 0.2363, 0.1333]).to(X)
levels = weights.shape[0]
mcs = []
for _ in range(levels):
ssim_val, cs = ssim(
X,
Y,
win=win,
get_cs=True,
downsample=downsample,
data_range=data_range,
test_y_channel=test_y_channel,
color_space=color_space,
)
mcs.append(cs)
padding = (X.shape[2] % 2, X.shape[3] % 2)
X = F.avg_pool2d(X, kernel_size=2, padding=padding)
Y = F.avg_pool2d(Y, kernel_size=2, padding=padding)
mcs = torch.stack(mcs, dim=0)
if is_prod:
msssim_val = torch.prod((mcs[:-1] ** weights[:-1].unsqueeze(1)), dim=0) * (
ssim_val ** weights[-1]
)
else:
weights = weights / torch.sum(weights)
msssim_val = torch.sum((mcs[:-1] * weights[:-1].unsqueeze(1)), dim=0) + (
ssim_val * weights[-1]
)
return msssim_val
@ARCH_REGISTRY.register()
class MS_SSIM(torch.nn.Module):
r"""Multiscale structure similarity
References:
Wang, Zhou, Eero P. Simoncelli, and Alan C. Bovik. "Multiscale structural similarity for image
quality assessment." In The Thrity-Seventh Asilomar Conference on Signals, Systems & Computers,
2003, vol. 2, pp. 1398-1402. Ieee, 2003.
Args:
        channels: Number of channels.
downsample: Boolean, whether to downsample which mimics official SSIM matlab code.
test_y_channel: Boolean, whether to use y channel on ycbcr which mimics official matlab code.
"""
def __init__(
self,
channels=3,
downsample=False,
test_y_channel=True,
is_prod=True,
color_space="yiq",
):
super(MS_SSIM, self).__init__()
self.win = fspecial(11, 1.5, channels)
self.downsample = downsample
self.test_y_channel = test_y_channel
self.color_space = color_space
self.is_prod = is_prod
def forward(self, X, Y):
"""Computation of MS-SSIM metric.
Args:
x: An input tensor. Shape :math:`(N, C, H, W)`.
y: A target tensor. Shape :math:`(N, C, H, W)`.
Returns:
Value of MS-SSIM metric in [0, 1] range.
"""
        assert (
            X.shape == Y.shape
        ), f"Input and reference images should have the same shape, but got {X.shape} and {Y.shape}"
score = ms_ssim(
X,
Y,
win=self.win,
downsample=self.downsample,
test_y_channel=self.test_y_channel,
is_prod=self.is_prod,
color_space=self.color_space,
)
return score
@ARCH_REGISTRY.register()
class CW_SSIM(torch.nn.Module):
r"""Complex-Wavelet Structural SIMilarity (CW-SSIM) index.
References:
M. P. Sampat, Z. Wang, S. Gupta, A. C. Bovik, M. K. Markey.
"Complex Wavelet Structural Similarity: A New Image Similarity Index",
IEEE Transactions on Image Processing, 18(11), 2385-401, 2009.
Args:
        channels: Number of channels.
        test_y_channel: Boolean, whether to use y channel on ycbcr.
        level: The number of levels to be used in the complex steerable pyramid decomposition.
        ori: The number of orientations to be used in the complex steerable pyramid decomposition.
        guardb: How much is discarded from the four image boundaries.
        K: The constant in the CW-SSIM index formula (see the reference above); default: K=0.
"""
def __init__(
self,
channels=1,
level=4,
ori=8,
guardb=0,
K=0,
test_y_channel=True,
color_space="yiq",
):
super(CW_SSIM, self).__init__()
self.channels = channels
self.level = level
self.ori = ori
self.guardb = guardb
self.K = K
self.test_y_channel = test_y_channel
self.color_space = color_space
self.register_buffer("win7", torch.ones(channels, 1, 7, 7) / (7 * 7))
def conj(self, x, y):
a = x[..., 0]
b = x[..., 1]
c = y[..., 0]
d = -y[..., 1]
return torch.stack((a * c - b * d, b * c + a * d), dim=1)
def conv2d_complex(self, x, win, groups=1):
real = F.conv2d(x[:, 0, ...].unsqueeze(1), win, groups=groups)
imaginary = F.conv2d(x[:, 1, ...].unsqueeze(1), win, groups=groups)
return torch.stack((real, imaginary), dim=-1)
def cw_ssim(self, x, y, test_y_channel):
r"""Compute CW-SSIM for a batch of images.
Args:
x: An input tensor. Shape :math:`(N, C, H, W)`.
y: A target tensor. Shape :math:`(N, C, H, W)`.
test_y_channel: Boolean, whether to use y channel on ycbcr.
Returns:
            Index of similarity between two images. Usually in [0, 1] interval.
"""
        # Whether to compute on the Y channel of YCbCr
if test_y_channel and x.shape[1] == 3:
x = to_y_channel(x, 255, self.color_space)
y = to_y_channel(y, 255, self.color_space)
pyr = SCFpyr_PyTorch(
height=self.level, nbands=self.ori, scale_factor=2, device=x.device
)
cw_x = pyr.build(x)
cw_y = pyr.build(y)
bandind = self.level
band_cssim = []
s = np.array(cw_x[bandind][0].size()[1:3])
w = fspecial(s - 7 + 1, s[0] / 4, 1).to(x.device)
gb = int(self.guardb / (2 ** (self.level - 1)))
for i in range(self.ori):
band1 = cw_x[bandind][i]
band2 = cw_y[bandind][i]
band1 = band1[:, gb : s[0] - gb, gb : s[1] - gb, :]
band2 = band2[:, gb : s[0] - gb, gb : s[1] - gb, :]
corr = self.conj(band1, band2)
corr_band = self.conv2d_complex(corr, self.win7, groups=self.channels)
varr = (
(math_util.abs(band1)) ** 2 + (math_util.abs(band2)) ** 2
).unsqueeze(1)
varr_band = F.conv2d(
varr, self.win7, stride=1, padding=0, groups=self.channels
)
cssim_map = (2 * math_util.abs(corr_band) + self.K) / (varr_band + self.K)
band_cssim.append(
(cssim_map * w.repeat(cssim_map.shape[0], 1, 1, 1)).sum([2, 3]).mean(1)
)
return torch.stack(band_cssim, dim=1).mean(1)
def forward(self, X, Y):
r"""Computation of CW-SSIM metric.
Args:
X: An input tensor. Shape :math:`(N, C, H, W)`.
Y: A target tensor. Shape :math:`(N, C, H, W)`.
Returns:
Value of CW-SSIM metric in [0, 1] range.
"""
assert (
X.shape == Y.shape
), f"Input {X.shape} and reference images should have the same shape"
score = self.cw_ssim(X, Y, self.test_y_channel)
return score
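# Minimal usage sketch (not part of the original file): CW-SSIM compares two
# images through a complex steerable pyramid; with `test_y_channel=True` the
# comparison runs on the luminance channel, hence `channels=1`. The helper name
# and shapes are illustrative assumptions.
def _cw_ssim_usage_example():
    metric = CW_SSIM(channels=1, level=4, ori=8, test_y_channel=True)
    x = torch.rand(1, 3, 256, 256)
    y = torch.rand(1, 3, 256, 256)
    with torch.no_grad():
        score = metric(x, y)
    return score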
| 11,902 | 31.433243 | 110 | py |
BVQI | BVQI-master/pyiqa/archs/ahiq_arch.py | import numpy as np
import timm
import torch
import torch.nn as nn
import torch.nn.functional as F
from pyexpat import model
from timm.models.resnet import BasicBlock, Bottleneck
from timm.models.vision_transformer import Block
from torchvision.ops.deform_conv import DeformConv2d
from pyiqa.archs.arch_util import (
ExactPadding2d,
default_init_weights,
load_file_from_url,
load_pretrained_network,
to_2tuple,
)
from pyiqa.utils.registry import ARCH_REGISTRY
default_model_urls = {
"pipal": "https://github.com/chaofengc/IQA-PyTorch/releases/download/v0.1-weights/AHIQ_vit_p8_epoch33-da3ea303.pth"
}
def random_crop(x, y, crop_size, crop_num):
b, c, h, w = x.shape
ch, cw = to_2tuple(crop_size)
crops_x = []
crops_y = []
for i in range(crop_num):
sh = np.random.randint(0, h - ch)
sw = np.random.randint(0, w - cw)
crops_x.append(x[..., sh : sh + ch, sw : sw + cw])
crops_y.append(y[..., sh : sh + ch, sw : sw + cw])
crops_x = torch.stack(crops_x, dim=1)
crops_y = torch.stack(crops_y, dim=1)
return crops_x.reshape(b * crop_num, c, ch, cw), crops_y.reshape(
b * crop_num, c, ch, cw
)
class SaveOutput:
def __init__(self):
self.outputs = {}
def __call__(self, module, module_in, module_out):
if module_out.device in self.outputs.keys():
self.outputs[module_out.device].append(module_out)
else:
self.outputs[module_out.device] = [module_out]
def clear(self, device):
self.outputs[device] = []
class DeformFusion(nn.Module):
def __init__(
self,
patch_size=8,
in_channels=768 * 5,
cnn_channels=256 * 3,
out_channels=256 * 3,
):
super().__init__()
# in_channels, out_channels, kernel_size, stride, padding
self.d_hidn = 512
if patch_size == 8:
stride = 1
else:
stride = 2
self.conv_offset = nn.Conv2d(in_channels, 2 * 3 * 3, 3, 1, 1)
self.deform = DeformConv2d(cnn_channels, out_channels, 3, 1, 1)
self.conv1 = nn.Sequential(
nn.Conv2d(
in_channels=out_channels,
out_channels=self.d_hidn,
kernel_size=3,
padding=1,
stride=2,
),
nn.ReLU(),
nn.Conv2d(
in_channels=self.d_hidn,
out_channels=out_channels,
kernel_size=3,
padding=1,
stride=stride,
),
)
def forward(self, cnn_feat, vit_feat):
vit_feat = F.interpolate(vit_feat, size=cnn_feat.shape[-2:], mode="nearest")
offset = self.conv_offset(vit_feat)
deform_feat = self.deform(cnn_feat, offset)
deform_feat = self.conv1(deform_feat)
return deform_feat
class Pixel_Prediction(nn.Module):
def __init__(self, inchannels=768 * 5 + 256 * 3, outchannels=256, d_hidn=1024):
super().__init__()
self.d_hidn = d_hidn
self.down_channel = nn.Conv2d(inchannels, outchannels, kernel_size=1)
self.feat_smoothing = nn.Sequential(
nn.Conv2d(
in_channels=256 * 3, out_channels=self.d_hidn, kernel_size=3, padding=1
),
nn.ReLU(),
nn.Conv2d(
in_channels=self.d_hidn, out_channels=512, kernel_size=3, padding=1
),
)
self.conv1 = nn.Sequential(
nn.Conv2d(in_channels=512, out_channels=256, kernel_size=3, padding=1),
nn.ReLU(),
)
self.conv_attent = nn.Sequential(
nn.Conv2d(in_channels=256, out_channels=1, kernel_size=1), nn.Sigmoid()
)
self.conv = nn.Sequential(
nn.Conv2d(in_channels=256, out_channels=1, kernel_size=1),
)
def forward(self, f_dis, f_ref, cnn_dis, cnn_ref):
f_dis = torch.cat((f_dis, cnn_dis), 1)
f_ref = torch.cat((f_ref, cnn_ref), 1)
f_dis = self.down_channel(f_dis)
f_ref = self.down_channel(f_ref)
f_cat = torch.cat((f_dis - f_ref, f_dis, f_ref), 1)
feat_fused = self.feat_smoothing(f_cat)
feat = self.conv1(feat_fused)
f = self.conv(feat)
w = self.conv_attent(feat)
pred = (f * w).sum(dim=-1).sum(dim=-1) / w.sum(dim=-1).sum(dim=-1)
return pred
@ARCH_REGISTRY.register()
class AHIQ(nn.Module):
def __init__(
self,
num_crop=20,
crop_size=224,
default_mean=[0.485, 0.456, 0.406],
default_std=[0.229, 0.224, 0.225],
pretrained=True,
pretrained_model_path=None,
):
super().__init__()
self.resnet50 = timm.create_model("resnet50", pretrained=True)
self.vit = timm.create_model("vit_base_patch8_224", pretrained=True)
self.fix_network(self.resnet50)
self.fix_network(self.vit)
self.deform_net = DeformFusion()
self.regressor = Pixel_Prediction()
# register hook to get intermediate features
self.init_saveoutput()
self.default_mean = torch.Tensor(default_mean).view(1, 3, 1, 1)
self.default_std = torch.Tensor(default_std).view(1, 3, 1, 1)
if pretrained_model_path is not None:
load_pretrained_network(
self, pretrained_model_path, True, weight_keys="params"
)
elif pretrained:
weight_path = load_file_from_url(default_model_urls["pipal"])
checkpoint = torch.load(weight_path)
self.regressor.load_state_dict(checkpoint["regressor_model_state_dict"])
self.deform_net.load_state_dict(checkpoint["deform_net_model_state_dict"])
self.eps = 1e-12
self.crops = num_crop
self.crop_size = crop_size
def init_saveoutput(self):
self.save_output = SaveOutput()
hook_handles = []
for layer in self.resnet50.modules():
if isinstance(layer, Bottleneck):
handle = layer.register_forward_hook(self.save_output)
hook_handles.append(handle)
for layer in self.vit.modules():
if isinstance(layer, Block):
handle = layer.register_forward_hook(self.save_output)
hook_handles.append(handle)
def fix_network(self, model):
for p in model.parameters():
p.requires_grad = False
def preprocess(self, x):
x = (x - self.default_mean.to(x)) / self.default_std.to(x)
return x
@torch.no_grad()
def get_vit_feature(self, x):
self.vit(x)
feat = torch.cat(
(
self.save_output.outputs[x.device][0][:, 1:, :],
self.save_output.outputs[x.device][1][:, 1:, :],
self.save_output.outputs[x.device][2][:, 1:, :],
self.save_output.outputs[x.device][3][:, 1:, :],
self.save_output.outputs[x.device][4][:, 1:, :],
),
dim=2,
)
self.save_output.clear(x.device)
return feat
@torch.no_grad()
def get_resnet_feature(self, x):
self.resnet50(x)
feat = torch.cat(
(
self.save_output.outputs[x.device][0],
self.save_output.outputs[x.device][1],
self.save_output.outputs[x.device][2],
),
dim=1,
)
self.save_output.clear(x.device)
return feat
def regress_score(self, dis, ref):
self.resnet50.eval()
self.vit.eval()
dis = self.preprocess(dis)
ref = self.preprocess(ref)
vit_dis = self.get_vit_feature(dis)
vit_ref = self.get_vit_feature(ref)
B, N, C = vit_ref.shape
H, W = 28, 28
vit_ref = vit_ref.transpose(1, 2).view(B, C, H, W)
vit_dis = vit_dis.transpose(1, 2).view(B, C, H, W)
cnn_dis = self.get_resnet_feature(dis)
cnn_ref = self.get_resnet_feature(ref)
cnn_dis = self.deform_net(cnn_dis, vit_ref)
cnn_ref = self.deform_net(cnn_ref, vit_ref)
score = self.regressor(vit_dis, vit_ref, cnn_dis, cnn_ref)
return score
def forward(self, x, y):
bsz = x.shape[0]
if self.crops > 1 and not self.training:
x, y = random_crop(x, y, self.crop_size, self.crops)
score = self.regress_score(x, y)
score = score.reshape(bsz, self.crops, 1)
score = score.mean(dim=1)
else:
score = self.regress_score(x, y)
return score
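# Minimal usage sketch (not part of the original file): AHIQ is full-reference;
# in eval mode it scores `num_crop` random 224x224 crops and averages them, so
# inputs must be larger than 224x224. Running it downloads the timm backbones
# and the PIPAL checkpoint. The helper name and shapes are illustrative.
def _ahiq_usage_example():
    model = AHIQ(num_crop=20, crop_size=224, pretrained=True).eval()
    dist = torch.rand(1, 3, 256, 256)
    ref = torch.rand(1, 3, 256, 256)
    with torch.no_grad():
        score = model(dist, ref)  # shape (1, 1)
    return score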
| 8,605 | 30.992565 | 119 | py |
BVQI | BVQI-master/pyiqa/archs/paq2piq_arch.py | r"""Paq2piq metric, proposed by
Ying, Zhenqiang, Haoran Niu, Praful Gupta, Dhruv Mahajan, Deepti Ghadiyaram, and Alan Bovik.
"From patches to pictures (PaQ-2-PiQ): Mapping the perceptual space of picture quality."
In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 3575-3585. 2020.
Ref url: https://github.com/baidut/paq2piq/blob/master/paq2piq/model.py
Modified by: Chaofeng Chen (https://github.com/chaofengc)
"""
import torch
import torch.nn as nn
import torchvision as tv
from torchvision.ops import RoIPool
from pyiqa.archs.arch_util import load_pretrained_network
from pyiqa.utils.registry import ARCH_REGISTRY
default_model_urls = {
"url": "https://github.com/chaofengc/IQA-PyTorch/releases/download/v0.1-weights/"
"P2P_RoIPoolModel-fit.10.bs.120-ca69882e.pth",
}
class AdaptiveConcatPool2d(nn.Module):
def __init__(self, sz=None):
super().__init__()
sz = sz or (1, 1)
self.ap = nn.AdaptiveAvgPool2d(sz)
self.mp = nn.AdaptiveMaxPool2d(sz)
def forward(self, x):
return torch.cat([self.mp(x), self.ap(x)], 1)
@ARCH_REGISTRY.register()
class PAQ2PIQ(nn.Module):
def __init__(
self, backbone="resnet18", pretrained=True, pretrained_model_path=None
):
super(PAQ2PIQ, self).__init__()
if backbone == "resnet18":
model = tv.models.resnet18(pretrained=False)
cut = -2
spatial_scale = 1 / 32
            self.blk_size = 20, 20
        else:
            raise NotImplementedError(f"Only the resnet18 backbone is supported, got {backbone}.")
self.model_type = self.__class__.__name__
self.body = nn.Sequential(*list(model.children())[:cut])
self.head = nn.Sequential(
AdaptiveConcatPool2d(),
nn.Flatten(),
nn.BatchNorm1d(
1024, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True
),
nn.Dropout(p=0.25, inplace=False),
nn.Linear(in_features=1024, out_features=512, bias=True),
nn.ReLU(inplace=True),
nn.BatchNorm1d(
512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True
),
nn.Dropout(p=0.5, inplace=False),
nn.Linear(in_features=512, out_features=1, bias=True),
)
self.roi_pool = RoIPool((2, 2), spatial_scale)
if pretrained_model_path is not None:
load_pretrained_network(self, pretrained_model_path)
elif pretrained:
load_pretrained_network(self, default_model_urls["url"])
def forward(self, x):
im_data = x
batch_size = im_data.shape[0]
feats = self.body(im_data)
global_rois = torch.tensor([0, 0, x.shape[-1], x.shape[-2]]).reshape(1, 4).to(x)
feats = self.roi_pool(feats, [global_rois] * batch_size)
preds = self.head(feats)
return preds.view(batch_size, -1)
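# Minimal usage sketch (not part of the original file): PaQ-2-PiQ here returns
# one global quality score per image by RoI-pooling over the whole frame, and
# accepts arbitrary input sizes. The helper name and shapes are illustrative;
# running it downloads the pretrained checkpoint.
def _paq2piq_usage_example():
    model = PAQ2PIQ(backbone="resnet18", pretrained=True).eval()
    x = torch.rand(1, 3, 384, 512)  # RGB batch in [0, 1]
    with torch.no_grad():
        score = model(x)  # shape (1, 1)
    return score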
| 2,853 | 32.186047 | 106 | py |
BVQI | BVQI-master/pyiqa/archs/ckdn_arch.py | """CKDN model.
Created by: Chaofeng Chen (https://github.com/chaofengc)
Refer to:
https://github.com/researchmm/CKDN.
"""
import math
import torch
import torch.nn as nn
import torchvision as tv
from pyiqa.archs.arch_util import load_pretrained_network
from pyiqa.utils.registry import ARCH_REGISTRY
try:
from torch.hub import load_state_dict_from_url
except ImportError:
from torch.utils.model_zoo import load_url as load_state_dict_from_url
default_model_urls = {
"url": "https://github.com/chaofengc/IQA-PyTorch/releases/download/v0.1-weights/CKDN_model_best-38b27dc6.pth"
}
model_urls = {
"resnet50": "https://download.pytorch.org/models/resnet50-19c8e357.pth",
}
def conv3x3(in_planes, out_planes, stride=1, groups=1, dilation=1):
"""3x3 convolution with padding"""
return nn.Conv2d(
in_planes,
out_planes,
kernel_size=3,
stride=stride,
padding=dilation,
groups=groups,
bias=False,
dilation=dilation,
)
def conv1x1(in_planes, out_planes, stride=1):
"""1x1 convolution"""
return nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride, bias=False)
class BasicBlock(nn.Module):
expansion = 1
__constants__ = ["downsample"]
def __init__(
self,
inplanes,
planes,
stride=1,
downsample=None,
groups=1,
base_width=64,
dilation=1,
norm_layer=None,
):
super(BasicBlock, self).__init__()
if norm_layer is None:
norm_layer = nn.BatchNorm2d
if groups != 1 or base_width != 64:
raise ValueError("BasicBlock only supports groups=1 and base_width=64")
if dilation > 1:
raise NotImplementedError("Dilation > 1 not supported in BasicBlock")
# Both self.conv1 and self.downsample layers downsample the input when stride != 1
self.conv1 = conv3x3(inplanes, planes, stride)
self.bn1 = norm_layer(planes)
self.relu = nn.ReLU(inplace=True)
self.conv2 = conv3x3(planes, planes)
self.bn2 = norm_layer(planes)
self.downsample = downsample
self.stride = stride
def forward(self, x):
identity = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
if self.downsample is not None:
identity = self.downsample(x)
out += identity
out = self.relu(out)
return out
class Bottleneck(nn.Module):
expansion = 4
__constants__ = ["downsample"]
def __init__(
self,
inplanes,
planes,
stride=1,
downsample=None,
groups=1,
base_width=64,
dilation=1,
norm_layer=None,
):
super(Bottleneck, self).__init__()
if norm_layer is None:
norm_layer = nn.BatchNorm2d
width = int(planes * (base_width / 64.0)) * groups
# Both self.conv2 and self.downsample layers downsample the input when stride != 1
self.conv1 = conv1x1(inplanes, width)
self.bn1 = norm_layer(width)
self.conv2 = conv3x3(width, width, stride, groups, dilation)
self.bn2 = norm_layer(width)
self.conv3 = conv1x1(width, planes * self.expansion)
self.bn3 = norm_layer(planes * self.expansion)
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
self.stride = stride
def forward(self, x):
identity = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
if self.downsample is not None:
identity = self.downsample(x)
out = out + identity
out = self.relu(out)
return out
class ResNet(nn.Module):
def __init__(
self,
block,
layers,
num_classes=1000,
zero_init_residual=False,
groups=1,
width_per_group=64,
replace_stride_with_dilation=None,
norm_layer=None,
):
super(ResNet, self).__init__()
if norm_layer is None:
norm_layer = nn.BatchNorm2d
self._norm_layer = norm_layer
self.inplanes = 64
self.dilation = 1
self.k = 3
if replace_stride_with_dilation is None:
# each element in the tuple indicates if we should replace
# the 2x2 stride with a dilated convolution instead
replace_stride_with_dilation = [False, False, False]
if len(replace_stride_with_dilation) != 3:
raise ValueError(
"replace_stride_with_dilation should be None "
"or a 3-element tuple, got {}".format(replace_stride_with_dilation)
)
self.groups = groups
self.base_width = width_per_group
self.head = 8
self.qse_1 = nn.Conv2d(
3, self.inplanes, kernel_size=7, stride=2, padding=3, bias=False
)
self.qse_2 = self._make_layer(block, 64, layers[0])
self.csp = self._make_layer(block, 128, layers[1], stride=2, dilate=False)
self.inplanes = 64
self.dte_1 = nn.Conv2d(
3, self.inplanes, kernel_size=7, stride=2, padding=3, bias=False
)
self.dte_2 = self._make_layer(block, 64, layers[0])
self.aux_csp = self._make_layer(block, 128, layers[1], stride=2, dilate=False)
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
self.fc_ = nn.Sequential(
nn.Linear((512) * 1 * 1, 2048),
nn.ReLU(True),
nn.Dropout(),
nn.Linear(2048, 2048),
nn.ReLU(True),
nn.Dropout(),
nn.Linear(2048, 1),
)
self.fc1_ = nn.Sequential(
nn.Linear((512) * 1 * 1, 2048),
nn.ReLU(True),
nn.Dropout(),
nn.Linear(2048, 2048),
nn.ReLU(True),
nn.Dropout(),
nn.Linear(2048, 1),
)
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode="fan_out", nonlinearity="relu")
elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
# Zero-initialize the last BN in each residual branch,
# so that the residual branch starts with zeros, and each residual block behaves like an identity.
# This improves the model by 0.2~0.3% according to https://arxiv.org/abs/1706.02677
if zero_init_residual:
for m in self.modules():
if isinstance(m, Bottleneck):
nn.init.constant_(m.bn3.weight, 0)
elif isinstance(m, BasicBlock):
nn.init.constant_(m.bn2.weight, 0)
def _make_layer(self, block, planes, blocks, stride=1, dilate=False):
norm_layer = self._norm_layer
downsample = None
previous_dilation = self.dilation
if dilate:
self.dilation *= stride
stride = 1
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
conv1x1(self.inplanes, planes * block.expansion, stride),
norm_layer(planes * block.expansion),
)
layers = []
layers.append(
block(
self.inplanes,
planes,
stride,
downsample,
self.groups,
self.base_width,
previous_dilation,
norm_layer,
)
)
self.inplanes = planes * block.expansion
for _ in range(1, blocks):
layers.append(
block(
self.inplanes,
planes,
groups=self.groups,
base_width=self.base_width,
dilation=self.dilation,
norm_layer=norm_layer,
)
)
return nn.Sequential(*layers)
def forward(self, x, y):
rest1 = x
dist1 = y
rest1 = self.qse_2(self.maxpool(self.qse_1(rest1)))
dist1 = self.dte_2(self.maxpool(self.dte_1(dist1)))
x = rest1 - dist1
x = self.csp(x)
x = self.avgpool(x)
x = torch.flatten(x, 1)
dr = torch.sigmoid(self.fc_(x))
return dr
def _resnet(arch, block, layers, pretrained, progress, **kwargs):
model = ResNet(block, layers, **kwargs)
if pretrained:
state_dict = load_state_dict_from_url(model_urls[arch], progress=progress)
keys = state_dict.keys()
for key in list(keys):
if "conv1" in key:
state_dict[key.replace("conv1", "qse_1")] = state_dict[key]
state_dict[key.replace("conv1", "dte_1")] = state_dict[key]
if "layer1" in key:
state_dict[key.replace("layer1", "qse_2")] = state_dict[key]
state_dict[key.replace("layer1", "dte_2")] = state_dict[key]
if "layer2" in key:
state_dict[key.replace("layer2", "csp")] = state_dict[key]
state_dict[key.replace("layer2", "aux_csp")] = state_dict[key]
model.load_state_dict(state_dict, strict=False)
return model
@ARCH_REGISTRY.register()
class CKDN(nn.Module):
r"""CKDN metric.
Args:
pretrained_model_path (String): The model path.
use_default_preprocess (Boolean): Whether use default preprocess, default: True.
default_mean (tuple): The mean value.
default_std (tuple): The std value.
Reference:
Zheng, Heliang, Huan Yang, Jianlong Fu, Zheng-Jun Zha, and Jiebo Luo.
"Learning conditional knowledge distillation for degraded-reference image
quality assessment." In Proceedings of the IEEE/CVF International Conference
on Computer Vision (ICCV), pp. 10242-10251. 2021.
"""
def __init__(
self,
pretrained=True,
pretrained_model_path=None,
use_default_preprocess=True,
default_mean=(0.485, 0.456, 0.406),
default_std=(0.229, 0.224, 0.225),
**kwargs,
):
super().__init__()
self.net = _resnet("resnet50", Bottleneck, [3, 4, 6, 3], True, True, **kwargs)
self.use_default_preprocess = use_default_preprocess
self.default_mean = torch.Tensor(default_mean).view(1, 3, 1, 1)
self.default_std = torch.Tensor(default_std).view(1, 3, 1, 1)
if pretrained_model_path is not None:
load_pretrained_network(self, pretrained_model_path)
elif pretrained:
load_pretrained_network(self, default_model_urls["url"])
def _default_preprocess(self, x, y):
"""default preprocessing of CKDN: https://github.com/researchmm/CKDN
Useful when using this metric as losses.
Results are slightly different due to different resize behavior of PIL Image and pytorch interpolate function.
Args:
x, y:
shape, (N, C, H, W) in RGB format;
value range, 0 ~ 1
"""
scaled_size = int(math.floor(288 / 0.875))
x = tv.transforms.functional.resize(
x, scaled_size, tv.transforms.InterpolationMode.BICUBIC
)
y = tv.transforms.functional.resize(
y, scaled_size, tv.transforms.InterpolationMode.NEAREST
)
x = tv.transforms.functional.center_crop(x, 288)
y = tv.transforms.functional.center_crop(y, 288)
x = (x - self.default_mean.to(x)) / self.default_std.to(x)
y = (y - self.default_mean.to(y)) / self.default_std.to(y)
return x, y
def forward(self, x, y):
r"""Compute IQA using CKDN model.
Args:
x: An input tensor with (N, C, H, W) shape. RGB channel order for colour images.
y: An reference tensor with (N, C, H, W) shape. RGB channel order for colour images.
Returns:
Value of CKDN model.
"""
if self.use_default_preprocess:
x, y = self._default_preprocess(x, y)
return self.net(x, y)
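# Minimal usage sketch (not part of the original file): CKDN is a
# degraded-reference metric; `x` is the restored image and `y` its degraded
# reference, both RGB in [0, 1]. The default preprocessing resizes and
# center-crops to 288x288, and the sigmoid head yields a score in (0, 1).
# The helper name and shapes are illustrative; running it downloads weights.
def _ckdn_usage_example():
    model = CKDN(pretrained=True).eval()
    restored = torch.rand(1, 3, 512, 512)
    degraded = torch.rand(1, 3, 512, 512)
    with torch.no_grad():
        score = model(restored, degraded)
    return score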
| 12,466 | 30.803571 | 118 | py |
BVQI | BVQI-master/pyiqa/archs/dists_arch.py | r"""DISTS metric
Created by: https://github.com/dingkeyan93/DISTS/blob/master/DISTS_pytorch/DISTS_pt.py
Modified by: Jiadi Mo (https://github.com/JiadiMo)
"""
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torchvision import models
from pyiqa.archs.arch_util import load_pretrained_network
from pyiqa.utils.registry import ARCH_REGISTRY
default_model_urls = {
"url": "https://github.com/chaofengc/IQA-PyTorch/releases/download/v0.1-weights/DISTS_weights-f5e65c96.pth"
}
class L2pooling(nn.Module):
def __init__(self, filter_size=5, stride=2, channels=None, pad_off=0):
super(L2pooling, self).__init__()
self.padding = (filter_size - 2) // 2
self.stride = stride
self.channels = channels
a = np.hanning(filter_size)[1:-1]
g = torch.Tensor(a[:, None] * a[None, :])
g = g / torch.sum(g)
self.register_buffer(
"filter", g[None, None, :, :].repeat((self.channels, 1, 1, 1))
)
def forward(self, input):
input = input ** 2
out = F.conv2d(
input,
self.filter,
stride=self.stride,
padding=self.padding,
groups=input.shape[1],
)
return (out + 1e-12).sqrt()
@ARCH_REGISTRY.register()
class DISTS(torch.nn.Module):
r"""DISTS model.
Args:
pretrained_model_path (String): Pretrained model path.
"""
def __init__(self, pretrained=True, pretrained_model_path=None, **kwargs):
"""Refer to offical code https://github.com/dingkeyan93/DISTS"""
super(DISTS, self).__init__()
vgg_pretrained_features = models.vgg16(pretrained=True).features
self.stage1 = torch.nn.Sequential()
self.stage2 = torch.nn.Sequential()
self.stage3 = torch.nn.Sequential()
self.stage4 = torch.nn.Sequential()
self.stage5 = torch.nn.Sequential()
for x in range(0, 4):
self.stage1.add_module(str(x), vgg_pretrained_features[x])
self.stage2.add_module(str(4), L2pooling(channels=64))
for x in range(5, 9):
self.stage2.add_module(str(x), vgg_pretrained_features[x])
self.stage3.add_module(str(9), L2pooling(channels=128))
for x in range(10, 16):
self.stage3.add_module(str(x), vgg_pretrained_features[x])
self.stage4.add_module(str(16), L2pooling(channels=256))
for x in range(17, 23):
self.stage4.add_module(str(x), vgg_pretrained_features[x])
self.stage5.add_module(str(23), L2pooling(channels=512))
for x in range(24, 30):
self.stage5.add_module(str(x), vgg_pretrained_features[x])
for param in self.parameters():
param.requires_grad = False
self.register_buffer(
"mean", torch.tensor([0.485, 0.456, 0.406]).view(1, -1, 1, 1)
)
self.register_buffer(
"std", torch.tensor([0.229, 0.224, 0.225]).view(1, -1, 1, 1)
)
self.chns = [3, 64, 128, 256, 512, 512]
self.register_parameter(
"alpha", nn.Parameter(torch.randn(1, sum(self.chns), 1, 1))
)
self.register_parameter(
"beta", nn.Parameter(torch.randn(1, sum(self.chns), 1, 1))
)
self.alpha.data.normal_(0.1, 0.01)
self.beta.data.normal_(0.1, 0.01)
if pretrained_model_path is not None:
load_pretrained_network(self, pretrained_model_path, False)
elif pretrained:
load_pretrained_network(self, default_model_urls["url"], False)
def forward_once(self, x):
h = (x - self.mean) / self.std
h = self.stage1(h)
h_relu1_2 = h
h = self.stage2(h)
h_relu2_2 = h
h = self.stage3(h)
h_relu3_3 = h
h = self.stage4(h)
h_relu4_3 = h
h = self.stage5(h)
h_relu5_3 = h
return [x, h_relu1_2, h_relu2_2, h_relu3_3, h_relu4_3, h_relu5_3]
def forward(self, x, y):
r"""Compute IQA using DISTS model.
Args:
x: An input tensor with (N, C, H, W) shape. RGB channel order for colour images.
y: An reference tensor with (N, C, H, W) shape. RGB channel order for colour images.
Returns:
Value of DISTS model.
"""
feats0 = self.forward_once(x)
feats1 = self.forward_once(y)
dist1 = 0
dist2 = 0
c1 = 1e-6
c2 = 1e-6
w_sum = self.alpha.sum() + self.beta.sum()
alpha = torch.split(self.alpha / w_sum, self.chns, dim=1)
beta = torch.split(self.beta / w_sum, self.chns, dim=1)
for k in range(len(self.chns)):
x_mean = feats0[k].mean([2, 3], keepdim=True)
y_mean = feats1[k].mean([2, 3], keepdim=True)
S1 = (2 * x_mean * y_mean + c1) / (x_mean ** 2 + y_mean ** 2 + c1)
dist1 = dist1 + (alpha[k] * S1).sum(1, keepdim=True)
x_var = ((feats0[k] - x_mean) ** 2).mean([2, 3], keepdim=True)
y_var = ((feats1[k] - y_mean) ** 2).mean([2, 3], keepdim=True)
xy_cov = (feats0[k] * feats1[k]).mean(
[2, 3], keepdim=True
) - x_mean * y_mean
S2 = (2 * xy_cov + c2) / (x_var + y_var + c2)
dist2 = dist2 + (beta[k] * S2).sum(1, keepdim=True)
score = 1 - (dist1 + dist2).squeeze()
return score
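# Minimal usage sketch (not part of the original file): DISTS is full-reference;
# the forward pass returns 1 minus the weighted structure/texture similarity,
# so identical inputs give a score near 0 (it behaves like a distance). The
# helper name and shapes are illustrative; running it downloads weights.
def _dists_usage_example():
    model = DISTS(pretrained=True).eval()
    x = torch.rand(1, 3, 256, 256)  # distorted batch in [0, 1]
    y = torch.rand(1, 3, 256, 256)  # reference batch in [0, 1]
    with torch.no_grad():
        score = model(x, y)
    return score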
| 5,418 | 33.515924 | 111 | py |
BVQI | BVQI-master/pyiqa/archs/inception.py | """
File from: https://github.com/mseitzer/pytorch-fid
"""
import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision
from .arch_util import load_pretrained_network
# Inception weights ported to Pytorch from
FID_WEIGHTS_URL = "https://github.com/mseitzer/pytorch-fid/releases/download/fid_weights/pt_inception-2015-12-05-6726825d.pth" # noqa: E501
class InceptionV3(nn.Module):
"""Pretrained InceptionV3 network returning feature maps"""
# Index of default block of inception to return,
# corresponds to output of final average pooling
DEFAULT_BLOCK_INDEX = 3
# Maps feature dimensionality to their output blocks indices
BLOCK_INDEX_BY_DIM = {
64: 0, # First max pooling features
        192: 1,  # Second max pooling features
768: 2, # Pre-aux classifier features
2048: 3, # Final average pooling features
}
def __init__(
self,
output_blocks=(DEFAULT_BLOCK_INDEX,),
resize_input=True,
normalize_input=True,
requires_grad=False,
use_fid_inception=True,
):
"""Build pretrained InceptionV3
Parameters
----------
output_blocks : list of int
Indices of blocks to return features of. Possible values are:
- 0: corresponds to output of first max pooling
- 1: corresponds to output of second max pooling
- 2: corresponds to output which is fed to aux classifier
- 3: corresponds to output of final average pooling
resize_input : bool
If true, bilinearly resizes input to width and height 299 before
feeding input to model. As the network without fully connected
layers is fully convolutional, it should be able to handle inputs
of arbitrary size, so resizing might not be strictly needed
normalize_input : bool
If true, scales the input from range (0, 1) to the range the
pretrained Inception network expects, namely (-1, 1)
requires_grad : bool
If true, parameters of the model require gradients. Possibly useful
for finetuning the network
use_fid_inception : bool
If true, uses the pretrained Inception model used in Tensorflow's
FID implementation. If false, uses the pretrained Inception model
available in torchvision. The FID Inception model has different
weights and a slightly different structure from torchvision's
Inception model. If you want to compute FID scores, you are
strongly advised to set this parameter to true to get comparable
results.
"""
super(InceptionV3, self).__init__()
self.resize_input = resize_input
self.normalize_input = normalize_input
self.output_blocks = sorted(output_blocks)
self.last_needed_block = max(output_blocks)
assert self.last_needed_block <= 3, "Last possible output block index is 3"
self.blocks = nn.ModuleList()
if use_fid_inception:
inception = fid_inception_v3()
else:
inception = _inception_v3(pretrained=True)
# Block 0: input to maxpool1
block0 = [
inception.Conv2d_1a_3x3,
inception.Conv2d_2a_3x3,
inception.Conv2d_2b_3x3,
nn.MaxPool2d(kernel_size=3, stride=2),
]
self.blocks.append(nn.Sequential(*block0))
# Block 1: maxpool1 to maxpool2
if self.last_needed_block >= 1:
block1 = [
inception.Conv2d_3b_1x1,
inception.Conv2d_4a_3x3,
nn.MaxPool2d(kernel_size=3, stride=2),
]
self.blocks.append(nn.Sequential(*block1))
# Block 2: maxpool2 to aux classifier
if self.last_needed_block >= 2:
block2 = [
inception.Mixed_5b,
inception.Mixed_5c,
inception.Mixed_5d,
inception.Mixed_6a,
inception.Mixed_6b,
inception.Mixed_6c,
inception.Mixed_6d,
inception.Mixed_6e,
]
self.blocks.append(nn.Sequential(*block2))
# Block 3: aux classifier to final avgpool
if self.last_needed_block >= 3:
block3 = [
inception.Mixed_7a,
inception.Mixed_7b,
inception.Mixed_7c,
nn.AdaptiveAvgPool2d(output_size=(1, 1)),
]
self.blocks.append(nn.Sequential(*block3))
for param in self.parameters():
param.requires_grad = requires_grad
def forward(self, inp, resize_input=False, normalize_input=False):
"""Get Inception feature maps
Parameters
----------
inp : torch.autograd.Variable
Input tensor of shape Bx3xHxW. Values are expected to be in
range (0, 1)
Returns
-------
List of torch.autograd.Variable, corresponding to the selected output
block, sorted ascending by index
"""
outp = []
x = inp
if resize_input:
x = F.interpolate(x, size=(299, 299), mode="bilinear", align_corners=False)
if normalize_input:
x = 2 * x - 1 # Scale from range (0, 1) to range (-1, 1)
for idx, block in enumerate(self.blocks):
x = block(x)
if idx in self.output_blocks:
outp.append(x)
if idx == self.last_needed_block:
break
return outp
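# Minimal usage sketch (not part of the original file): extract the 2048-d
# final-average-pool features typically used for FID, selecting the block via
# BLOCK_INDEX_BY_DIM. The helper name and shapes are illustrative; running it
# downloads the FID Inception weights.
def _inception_feature_example():
    block_idx = InceptionV3.BLOCK_INDEX_BY_DIM[2048]  # -> 3
    model = InceptionV3(output_blocks=[block_idx]).eval()
    x = torch.rand(2, 3, 299, 299)  # RGB batch in [0, 1]
    with torch.no_grad():
        feats = model(x, resize_input=False, normalize_input=True)[0]
    return feats.squeeze(-1).squeeze(-1)  # shape (2, 2048)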
def _inception_v3(*args, **kwargs):
"""Wraps `torchvision.models.inception_v3`
Skips default weight inititialization if supported by torchvision version.
See https://github.com/mseitzer/pytorch-fid/issues/28.
"""
try:
version = tuple(map(int, torchvision.__version__.split(".")[:2]))
except ValueError:
# Just a caution against weird version strings
version = (0,)
if version >= (0, 6):
kwargs["init_weights"] = False
return torchvision.models.inception_v3(*args, **kwargs)
def fid_inception_v3():
"""Build pretrained Inception model for FID computation
The Inception model for FID computation uses a different set of weights
and has a slightly different structure than torchvision's Inception.
This method first constructs torchvision's Inception and then patches the
necessary parts that are different in the FID Inception model.
"""
inception = _inception_v3(num_classes=1008, aux_logits=False, pretrained=False)
inception.Mixed_5b = FIDInceptionA(192, pool_features=32)
inception.Mixed_5c = FIDInceptionA(256, pool_features=64)
inception.Mixed_5d = FIDInceptionA(288, pool_features=64)
inception.Mixed_6b = FIDInceptionC(768, channels_7x7=128)
inception.Mixed_6c = FIDInceptionC(768, channels_7x7=160)
inception.Mixed_6d = FIDInceptionC(768, channels_7x7=160)
inception.Mixed_6e = FIDInceptionC(768, channels_7x7=192)
inception.Mixed_7b = FIDInceptionE_1(1280)
inception.Mixed_7c = FIDInceptionE_2(2048)
load_pretrained_network(inception, FID_WEIGHTS_URL)
return inception
class FIDInceptionA(torchvision.models.inception.InceptionA):
"""InceptionA block patched for FID computation"""
def __init__(self, in_channels, pool_features):
super(FIDInceptionA, self).__init__(in_channels, pool_features)
def forward(self, x):
branch1x1 = self.branch1x1(x)
branch5x5 = self.branch5x5_1(x)
branch5x5 = self.branch5x5_2(branch5x5)
branch3x3dbl = self.branch3x3dbl_1(x)
branch3x3dbl = self.branch3x3dbl_2(branch3x3dbl)
branch3x3dbl = self.branch3x3dbl_3(branch3x3dbl)
# Patch: Tensorflow's average pool does not use the padded zero's in
# its average calculation
branch_pool = F.avg_pool2d(
x, kernel_size=3, stride=1, padding=1, count_include_pad=False
)
branch_pool = self.branch_pool(branch_pool)
outputs = [branch1x1, branch5x5, branch3x3dbl, branch_pool]
return torch.cat(outputs, 1)
class FIDInceptionC(torchvision.models.inception.InceptionC):
"""InceptionC block patched for FID computation"""
def __init__(self, in_channels, channels_7x7):
super(FIDInceptionC, self).__init__(in_channels, channels_7x7)
def forward(self, x):
branch1x1 = self.branch1x1(x)
branch7x7 = self.branch7x7_1(x)
branch7x7 = self.branch7x7_2(branch7x7)
branch7x7 = self.branch7x7_3(branch7x7)
branch7x7dbl = self.branch7x7dbl_1(x)
branch7x7dbl = self.branch7x7dbl_2(branch7x7dbl)
branch7x7dbl = self.branch7x7dbl_3(branch7x7dbl)
branch7x7dbl = self.branch7x7dbl_4(branch7x7dbl)
branch7x7dbl = self.branch7x7dbl_5(branch7x7dbl)
# Patch: Tensorflow's average pool does not use the padded zero's in
# its average calculation
branch_pool = F.avg_pool2d(
x, kernel_size=3, stride=1, padding=1, count_include_pad=False
)
branch_pool = self.branch_pool(branch_pool)
outputs = [branch1x1, branch7x7, branch7x7dbl, branch_pool]
return torch.cat(outputs, 1)
class FIDInceptionE_1(torchvision.models.inception.InceptionE):
"""First InceptionE block patched for FID computation"""
def __init__(self, in_channels):
super(FIDInceptionE_1, self).__init__(in_channels)
def forward(self, x):
branch1x1 = self.branch1x1(x)
branch3x3 = self.branch3x3_1(x)
branch3x3 = [
self.branch3x3_2a(branch3x3),
self.branch3x3_2b(branch3x3),
]
branch3x3 = torch.cat(branch3x3, 1)
branch3x3dbl = self.branch3x3dbl_1(x)
branch3x3dbl = self.branch3x3dbl_2(branch3x3dbl)
branch3x3dbl = [
self.branch3x3dbl_3a(branch3x3dbl),
self.branch3x3dbl_3b(branch3x3dbl),
]
branch3x3dbl = torch.cat(branch3x3dbl, 1)
# Patch: Tensorflow's average pool does not use the padded zero's in
# its average calculation
branch_pool = F.avg_pool2d(
x, kernel_size=3, stride=1, padding=1, count_include_pad=False
)
branch_pool = self.branch_pool(branch_pool)
outputs = [branch1x1, branch3x3, branch3x3dbl, branch_pool]
return torch.cat(outputs, 1)
class FIDInceptionE_2(torchvision.models.inception.InceptionE):
"""Second InceptionE block patched for FID computation"""
def __init__(self, in_channels):
super(FIDInceptionE_2, self).__init__(in_channels)
def forward(self, x):
branch1x1 = self.branch1x1(x)
branch3x3 = self.branch3x3_1(x)
branch3x3 = [
self.branch3x3_2a(branch3x3),
self.branch3x3_2b(branch3x3),
]
branch3x3 = torch.cat(branch3x3, 1)
branch3x3dbl = self.branch3x3dbl_1(x)
branch3x3dbl = self.branch3x3dbl_2(branch3x3dbl)
branch3x3dbl = [
self.branch3x3dbl_3a(branch3x3dbl),
self.branch3x3dbl_3b(branch3x3dbl),
]
branch3x3dbl = torch.cat(branch3x3dbl, 1)
# Patch: The FID Inception model uses max pooling instead of average
# pooling. This is likely an error in this specific Inception
# implementation, as other Inception models use average pooling here
# (which matches the description in the paper).
branch_pool = F.max_pool2d(x, kernel_size=3, stride=1, padding=1)
branch_pool = self.branch_pool(branch_pool)
outputs = [branch1x1, branch3x3, branch3x3dbl, branch_pool]
return torch.cat(outputs, 1)
| 11,796 | 35.187117 | 140 | py |
BVQI | BVQI-master/pyiqa/archs/maniqa_swin.py | import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.checkpoint as checkpoint
from einops import rearrange
from timm.models.layers import DropPath, to_2tuple, trunc_normal_
class Mlp(nn.Module):
def __init__(
self,
in_features,
hidden_features=None,
out_features=None,
act_layer=nn.GELU,
drop=0.0,
):
super().__init__()
out_features = out_features or in_features
hidden_features = hidden_features or in_features
self.fc1 = nn.Linear(in_features, hidden_features)
self.act = act_layer()
self.fc2 = nn.Linear(hidden_features, out_features)
self.drop = nn.Dropout(drop)
def forward(self, x):
x = self.fc1(x)
x = self.act(x)
x = self.drop(x)
x = self.fc2(x)
x = self.drop(x)
return x
def window_partition(x, window_size):
"""
Args:
x: (B, H, W, C)
window_size (int): window size
Returns:
windows: (num_windows*B, window_size, window_size, C)
"""
B, H, W, C = x.shape
x = x.view(B, H // window_size, window_size, W // window_size, window_size, C)
windows = (
x.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, window_size, window_size, C)
)
return windows
def window_reverse(windows, window_size, H, W):
"""
Args:
windows: (num_windows*B, window_size, window_size, C)
window_size (int): Window size
H (int): Height of image
W (int): Width of image
Returns:
x: (B, H, W, C)
"""
B = int(windows.shape[0] / (H * W / window_size / window_size))
x = windows.view(
B, H // window_size, W // window_size, window_size, window_size, -1
)
x = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(B, H, W, -1)
return x
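# Minimal round-trip sketch (not part of the original file): partitioning a
# feature map into 7x7 windows and reversing it recovers the original tensor.
# The shapes below are illustrative assumptions.
def _window_partition_example():
    x = torch.rand(2, 56, 56, 96)  # (B, H, W, C)
    windows = window_partition(x, 7)  # (2 * 8 * 8, 7, 7, 96)
    x_back = window_reverse(windows, 7, 56, 56)  # (2, 56, 56, 96)
    assert torch.allclose(x, x_back)
    return windows.shape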
class WindowAttention(nn.Module):
r"""Window based multi-head self attention (W-MSA) module with relative position bias.
It supports both of shifted and non-shifted window.
Args:
dim (int): Number of input channels.
window_size (tuple[int]): The height and width of the window.
num_heads (int): Number of attention heads.
qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True
qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set
attn_drop (float, optional): Dropout ratio of attention weight. Default: 0.0
proj_drop (float, optional): Dropout ratio of output. Default: 0.0
"""
def __init__(
self,
dim,
window_size,
num_heads,
qkv_bias=True,
qk_scale=None,
attn_drop=0.0,
proj_drop=0.0,
):
super().__init__()
self.dim = dim
self.window_size = window_size # Wh, Ww
self.num_heads = num_heads
head_dim = dim // num_heads
self.scale = qk_scale or head_dim ** -0.5
# define a parameter table of relative position bias
self.relative_position_bias_table = nn.Parameter(
torch.zeros((2 * window_size[0] - 1) * (2 * window_size[1] - 1), num_heads)
) # 2*Wh-1 * 2*Ww-1, nH
# get pair-wise relative position index for each token inside the window
coords_h = torch.arange(self.window_size[0])
coords_w = torch.arange(self.window_size[1])
coords = torch.stack(torch.meshgrid([coords_h, coords_w])) # 2, Wh, Ww
coords_flatten = torch.flatten(coords, 1) # 2, Wh*Ww
relative_coords = (
coords_flatten[:, :, None] - coords_flatten[:, None, :]
) # 2, Wh*Ww, Wh*Ww
relative_coords = relative_coords.permute(
1, 2, 0
).contiguous() # Wh*Ww, Wh*Ww, 2
relative_coords[:, :, 0] += self.window_size[0] - 1 # shift to start from 0
relative_coords[:, :, 1] += self.window_size[1] - 1
relative_coords[:, :, 0] *= 2 * self.window_size[1] - 1
relative_position_index = relative_coords.sum(-1) # Wh*Ww, Wh*Ww
self.register_buffer("relative_position_index", relative_position_index)
self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
self.attn_drop = nn.Dropout(attn_drop)
self.proj = nn.Linear(dim, dim)
self.proj_drop = nn.Dropout(proj_drop)
trunc_normal_(self.relative_position_bias_table, std=0.02)
self.softmax = nn.Softmax(dim=-1)
def forward(self, x, mask=None):
"""
Args:
x: input features with shape of (num_windows*B, N, C)
mask: (0/-inf) mask with shape of (num_windows, Wh*Ww, Wh*Ww) or None
"""
B_, N, C = x.shape
qkv = (
self.qkv(x)
.reshape(B_, N, 3, self.num_heads, C // self.num_heads)
.permute(2, 0, 3, 1, 4)
)
q, k, v = (
qkv[0],
qkv[1],
qkv[2],
) # make torchscript happy (cannot use tensor as tuple)
q = q * self.scale
attn = q @ k.transpose(-2, -1)
relative_position_bias = self.relative_position_bias_table[
self.relative_position_index.view(-1)
].view(
self.window_size[0] * self.window_size[1],
self.window_size[0] * self.window_size[1],
-1,
) # Wh*Ww,Wh*Ww,nH
relative_position_bias = relative_position_bias.permute(
2, 0, 1
).contiguous() # nH, Wh*Ww, Wh*Ww
attn = attn + relative_position_bias.unsqueeze(0)
if mask is not None:
nW = mask.shape[0]
attn = attn.view(B_ // nW, nW, self.num_heads, N, N) + mask.unsqueeze(
1
).unsqueeze(0)
attn = attn.view(-1, self.num_heads, N, N)
attn = self.softmax(attn)
else:
attn = self.softmax(attn)
attn = self.attn_drop(attn)
x = (attn @ v).transpose(1, 2).reshape(B_, N, C)
x = self.proj(x)
x = self.proj_drop(x)
return x
def extra_repr(self) -> str:
return f"dim={self.dim}, window_size={self.window_size}, num_heads={self.num_heads}"
def flops(self, N):
# calculate flops for 1 window with token length of N
flops = 0
# qkv = self.qkv(x)
flops += N * self.dim * 3 * self.dim
# attn = (q @ k.transpose(-2, -1))
flops += self.num_heads * N * (self.dim // self.num_heads) * N
# x = (attn @ v)
flops += self.num_heads * N * N * (self.dim // self.num_heads)
# x = self.proj(x)
flops += N * self.dim * self.dim
return flops
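# Editor's note: illustrative sketch, not part of the upstream file. It runs a
# single W-MSA module on already-windowed tokens; the shapes (32 windows of
# 7x7 = 49 tokens, dim 96, 3 heads) are assumptions chosen for demonstration.
def _demo_window_attention():
    attn = WindowAttention(dim=96, window_size=(7, 7), num_heads=3)
    tokens = torch.randn(32, 49, 96)   # (num_windows * B, N, C)
    out = attn(tokens)                 # no shift -> no attention mask needed
    assert out.shape == (32, 49, 96)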
class SwinBlock(nn.Module):
r"""Swin Transformer Block.
Args:
dim (int): Number of input channels.
        input_resolution (tuple[int]): Input resolution.
num_heads (int): Number of attention heads.
window_size (int): Window size.
shift_size (int): Shift size for SW-MSA.
        dim_mlp (float): Hidden dimension of the MLP (an absolute width, not a ratio to the embedding dim).
qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True
qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set.
drop (float, optional): Dropout rate. Default: 0.0
attn_drop (float, optional): Attention dropout rate. Default: 0.0
drop_path (float, optional): Stochastic depth rate. Default: 0.0
act_layer (nn.Module, optional): Activation layer. Default: nn.GELU
norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm
"""
def __init__(
self,
dim,
input_resolution,
num_heads,
window_size=7,
shift_size=0,
        dim_mlp=1024,
qkv_bias=True,
qk_scale=None,
drop=0.0,
attn_drop=0.0,
drop_path=0.0,
act_layer=nn.GELU,
norm_layer=nn.LayerNorm,
):
super().__init__()
self.dim = dim
self.input_resolution = input_resolution
self.num_heads = num_heads
self.window_size = window_size
self.shift_size = shift_size
self.dim_mlp = dim_mlp
if min(self.input_resolution) <= self.window_size:
# if window size is larger than input resolution, we don't partition windows
self.shift_size = 0
self.window_size = min(self.input_resolution)
assert (
0 <= self.shift_size < self.window_size
), "shift_size must in 0-window_size"
self.norm1 = norm_layer(dim)
self.attn = WindowAttention(
dim,
window_size=to_2tuple(self.window_size),
num_heads=num_heads,
qkv_bias=qkv_bias,
qk_scale=qk_scale,
attn_drop=attn_drop,
proj_drop=drop,
)
self.drop_path = DropPath(drop_path) if drop_path > 0.0 else nn.Identity()
self.norm2 = norm_layer(dim)
mlp_hidden_dim = self.dim_mlp
self.mlp = Mlp(
in_features=dim,
hidden_features=mlp_hidden_dim,
act_layer=act_layer,
drop=drop,
)
if self.shift_size > 0:
# calculate attention mask for SW-MSA
H, W = self.input_resolution
img_mask = torch.zeros((1, H, W, 1)) # 1 H W 1
h_slices = (
slice(0, -self.window_size),
slice(-self.window_size, -self.shift_size),
slice(-self.shift_size, None),
)
w_slices = (
slice(0, -self.window_size),
slice(-self.window_size, -self.shift_size),
slice(-self.shift_size, None),
)
cnt = 0
for h in h_slices:
for w in w_slices:
img_mask[:, h, w, :] = cnt
cnt += 1
mask_windows = window_partition(
img_mask, self.window_size
) # nW, window_size, window_size, 1
mask_windows = mask_windows.view(-1, self.window_size * self.window_size)
attn_mask = mask_windows.unsqueeze(1) - mask_windows.unsqueeze(2)
attn_mask = attn_mask.masked_fill(
attn_mask != 0, float(-100.0)
).masked_fill(attn_mask == 0, float(0.0))
else:
attn_mask = None
self.register_buffer("attn_mask", attn_mask)
def forward(self, x):
H, W = self.input_resolution
B, L, C = x.shape
assert L == H * W, "input feature has wrong size"
shortcut = x
x = self.norm1(x)
x = x.view(B, H, W, C)
# cyclic shift
if self.shift_size > 0:
shifted_x = torch.roll(
x, shifts=(-self.shift_size, -self.shift_size), dims=(1, 2)
)
else:
shifted_x = x
# partition windows
x_windows = window_partition(
shifted_x, self.window_size
) # nW*B, window_size, window_size, C
x_windows = x_windows.view(
-1, self.window_size * self.window_size, C
) # nW*B, window_size*window_size, C
# W-MSA/SW-MSA
attn_windows = self.attn(
x_windows, mask=self.attn_mask
) # nW*B, window_size*window_size, C
# merge windows
attn_windows = attn_windows.view(-1, self.window_size, self.window_size, C)
shifted_x = window_reverse(attn_windows, self.window_size, H, W) # B H' W' C
# reverse cyclic shift
if self.shift_size > 0:
x = torch.roll(
shifted_x, shifts=(self.shift_size, self.shift_size), dims=(1, 2)
)
else:
x = shifted_x
x = x.view(B, H * W, C)
# FFN
x = shortcut + self.drop_path(x)
x = x + self.drop_path(self.mlp(self.norm2(x)))
return x
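# Editor's note: illustrative sketch, not part of the upstream file. One shifted
# Swin block on a 14x14 token grid; the dim / head / MLP sizes are assumptions.
def _demo_swin_block():
    blk = SwinBlock(dim=96, input_resolution=(14, 14), num_heads=3,
                    window_size=7, shift_size=3, dim_mlp=384)
    x = torch.randn(2, 14 * 14, 96)    # (B, H*W, C)
    assert blk(x).shape == (2, 196, 96)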
class BasicLayer(nn.Module):
"""A basic Swin Transformer layer for one stage.
Args:
dim (int): Number of input channels.
input_resolution (tuple[int]): Input resolution.
depth (int): Number of blocks.
num_heads (int): Number of attention heads.
window_size (int): Local window size.
        dim_mlp (int): Hidden dimension of the MLP in each block (an absolute width, not a ratio).
qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True
qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set.
drop (float, optional): Dropout rate. Default: 0.0
attn_drop (float, optional): Attention dropout rate. Default: 0.0
drop_path (float | tuple[float], optional): Stochastic depth rate. Default: 0.0
norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm
downsample (nn.Module | None, optional): Downsample layer at the end of the layer. Default: None
use_checkpoint (bool): Whether to use checkpointing to save memory. Default: False.
"""
def __init__(
self,
dim,
input_resolution,
depth,
num_heads,
window_size=7,
dim_mlp=1024,
qkv_bias=True,
qk_scale=None,
drop=0.0,
attn_drop=0.0,
drop_path=0.0,
norm_layer=nn.LayerNorm,
downsample=None,
use_checkpoint=False,
):
super().__init__()
self.dim = dim
self.conv = nn.Conv2d(dim, dim, 3, 1, 1)
self.input_resolution = input_resolution
self.depth = depth
self.use_checkpoint = use_checkpoint
# build blocks
self.blocks = nn.ModuleList(
[
SwinBlock(
dim=dim,
input_resolution=input_resolution,
num_heads=num_heads,
window_size=window_size,
shift_size=0 if (i % 2 == 0) else window_size // 2,
dim_mlp=dim_mlp,
qkv_bias=qkv_bias,
qk_scale=qk_scale,
drop=drop,
attn_drop=attn_drop,
drop_path=drop_path[i]
if isinstance(drop_path, list)
else drop_path,
norm_layer=norm_layer,
)
for i in range(depth)
]
)
# patch merging layer
if downsample is not None:
self.downsample = downsample(
input_resolution, dim=dim, norm_layer=norm_layer
)
else:
self.downsample = None
def forward(self, x):
for blk in self.blocks:
if self.use_checkpoint:
x = checkpoint.checkpoint(blk, x)
else:
x = blk(x)
x = rearrange(
x,
"b (h w) c -> b c h w",
h=self.input_resolution[0],
w=self.input_resolution[1],
)
x = F.relu(self.conv(x))
x = rearrange(x, "b c h w -> b (h w) c")
return x
def extra_repr(self) -> str:
return f"dim={self.dim}, input_resolution={self.input_resolution}, depth={self.depth}"
def flops(self):
flops = 0
for blk in self.blocks:
flops += blk.flops()
if self.downsample is not None:
flops += self.downsample.flops()
return flops
class SwinTransformer(nn.Module):
def __init__(
self,
patches_resolution,
depths=[2, 2, 6, 2],
num_heads=[3, 6, 12, 24],
embed_dim=256,
drop=0.1,
drop_rate=0.0,
drop_path_rate=0.1,
dropout=0.0,
window_size=7,
dim_mlp=1024,
qkv_bias=True,
qk_scale=None,
attn_drop_rate=0.0,
norm_layer=nn.LayerNorm,
downsample=None,
use_checkpoint=False,
scale=0.8,
**kwargs,
):
super().__init__()
self.scale = scale
self.embed_dim = embed_dim
self.depths = depths
self.num_heads = num_heads
self.window_size = window_size
self.dropout = nn.Dropout(p=drop)
self.num_features = embed_dim
self.num_layers = len(depths)
self.patches_resolution = (patches_resolution[0], patches_resolution[1])
self.downsample = nn.Conv2d(
self.embed_dim, self.embed_dim, kernel_size=3, stride=2, padding=1
)
# stochastic depth
dpr = [x.item() for x in torch.linspace(0, drop_path_rate, sum(depths))]
self.layers = nn.ModuleList()
for i_layer in range(self.num_layers):
layer = BasicLayer(
dim=self.embed_dim,
input_resolution=patches_resolution,
depth=self.depths[i_layer],
num_heads=self.num_heads[i_layer],
window_size=self.window_size,
dim_mlp=dim_mlp,
qkv_bias=qkv_bias,
qk_scale=qk_scale,
drop=dropout,
attn_drop=attn_drop_rate,
drop_path=dpr[
sum(self.depths[:i_layer]) : sum(self.depths[: i_layer + 1])
],
norm_layer=norm_layer,
downsample=downsample,
use_checkpoint=use_checkpoint,
)
self.layers.append(layer)
def forward(self, x):
x = self.dropout(x)
x = rearrange(x, "b c h w -> b (h w) c")
for layer in self.layers:
_x = x
x = layer(x)
x = self.scale * x + _x
x = rearrange(
x,
"b (h w) c -> b c h w",
h=self.patches_resolution[0],
w=self.patches_resolution[1],
)
return x
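# Editor's note: illustrative sketch, not part of the upstream file. The module
# maps a (B, C, H, W) feature map to a same-sized output; all sizes below are
# assumptions (embed_dim must match the channel count of the input features).
def _demo_swin_transformer():
    net = SwinTransformer(patches_resolution=(7, 7), depths=[2, 2],
                          num_heads=[4, 4], embed_dim=64,
                          window_size=7, dim_mlp=128)
    feat = torch.randn(1, 64, 7, 7)
    assert net(feat).shape == (1, 64, 7, 7)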
| 17,834 | 32.461538 | 104 | py |
BVQI | BVQI-master/pyiqa/archs/musiq_arch.py | r"""MUSIQ model.
Implemented by: Chaofeng Chen (https://github.com/chaofengc)
Refer to:
Official code from: https://github.com/google-research/google-research/tree/master/musiq
"""
import torch
import torch.nn as nn
import torch.nn.functional as F
from pyiqa.data.multiscale_trans_util import get_multiscale_patches
from pyiqa.utils.registry import ARCH_REGISTRY
from .arch_util import (
ExactPadding2d,
dist_to_mos,
excact_padding_2d,
load_pretrained_network,
)
default_model_urls = {
"ava": "https://github.com/chaofengc/IQA-PyTorch/releases/download/v0.1-weights/musiq_ava_ckpt-e8d3f067.pth",
"koniq10k": "https://github.com/chaofengc/IQA-PyTorch/releases/download/v0.1-weights/musiq_koniq_ckpt-e95806b9.pth",
"spaq": "https://github.com/chaofengc/IQA-PyTorch/releases/download/v0.1-weights/musiq_spaq_ckpt-358bb6af.pth",
"paq2piq": "https://github.com/chaofengc/IQA-PyTorch/releases/download/v0.1-weights/musiq_paq2piq_ckpt-364c0c84.pth",
"imagenet_pretrain": "https://github.com/chaofengc/IQA-PyTorch/releases/download/v0.1-weights/musiq_imagenet_pretrain-51d9b0a5.pth",
}
class StdConv(nn.Conv2d):
"""
Reference: https://github.com/joe-siyuan-qiao/WeightStandardization
"""
def forward(self, x):
# implement same padding
x = excact_padding_2d(x, self.kernel_size, self.stride, mode="same")
weight = self.weight
weight = weight - weight.mean((1, 2, 3), keepdim=True)
weight = weight / (weight.std((1, 2, 3), keepdim=True) + 1e-5)
return F.conv2d(x, weight, self.bias, self.stride)
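# Editor's note: illustrative sketch, not part of the upstream file. StdConv
# standardizes its weights on the fly and applies TensorFlow-style "same"
# padding, so spatial size is preserved at stride 1; the sizes are assumptions.
def _demo_std_conv():
    conv = StdConv(3, 8, 3, 1, bias=False)
    x = torch.randn(1, 3, 32, 32)
    assert conv(x).shape == (1, 8, 32, 32)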
class Bottleneck(nn.Module):
def __init__(self, inplanes, outplanes, stride=1):
super().__init__()
width = inplanes
self.conv1 = StdConv(inplanes, width, 1, 1, bias=False)
self.gn1 = nn.GroupNorm(32, width, eps=1e-4)
self.conv2 = StdConv(width, width, 3, 1, bias=False)
self.gn2 = nn.GroupNorm(32, width, eps=1e-4)
self.conv3 = StdConv(width, outplanes, 1, 1, bias=False)
self.gn3 = nn.GroupNorm(32, outplanes, eps=1e-4)
self.relu = nn.ReLU(True)
self.needs_projection = inplanes != outplanes or stride != 1
if self.needs_projection:
self.conv_proj = StdConv(inplanes, outplanes, 1, stride, bias=False)
self.gn_proj = nn.GroupNorm(32, outplanes, eps=1e-4)
def forward(self, x):
identity = x
if self.needs_projection:
identity = self.gn_proj(self.conv_proj(identity))
x = self.relu(self.gn1(self.conv1(x)))
x = self.relu(self.gn2(self.conv2(x)))
x = self.gn3(self.conv3(x))
out = self.relu(x + identity)
return out
def drop_path(x, drop_prob: float = 0.0, training: bool = False):
if drop_prob == 0.0 or not training:
return x
keep_prob = 1 - drop_prob
shape = (x.shape[0],) + (1,) * (
x.ndim - 1
) # work with diff dim tensors, not just 2D ConvNets
random_tensor = keep_prob + torch.rand(shape, dtype=x.dtype, device=x.device)
random_tensor.floor_() # binarize
output = x.div(keep_prob) * random_tensor
return output
class DropPath(nn.Module):
"""Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks)."""
def __init__(self, drop_prob=None):
super(DropPath, self).__init__()
self.drop_prob = drop_prob
def forward(self, x):
return drop_path(x, self.drop_prob, self.training)
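# Editor's note: illustrative sketch, not part of the upstream file. Stochastic
# depth zeroes whole samples with probability drop_prob during training and
# rescales the survivors by 1 / keep_prob; at eval time it is the identity.
def _demo_drop_path():
    x = torch.ones(8, 16, 4, 4)
    assert torch.equal(drop_path(x, drop_prob=0.2, training=False), x)
    y = drop_path(x, drop_prob=0.2, training=True)
    assert y.shape == x.shape   # some samples are zeroed, the rest scaled by 1/0.8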
class Mlp(nn.Module):
def __init__(
self,
in_features,
hidden_features=None,
out_features=None,
act_layer=nn.GELU,
drop=0.0,
):
super().__init__()
out_features = out_features or in_features
hidden_features = hidden_features or in_features
self.fc1 = nn.Linear(in_features, hidden_features)
self.act = act_layer()
self.fc2 = nn.Linear(hidden_features, out_features)
self.drop = nn.Dropout(drop)
def forward(self, x):
x = self.fc1(x)
x = self.act(x)
x = self.drop(x)
x = self.fc2(x)
x = self.drop(x)
return x
class MultiHeadAttention(nn.Module):
def __init__(self, dim, num_heads=6, bias=False, attn_drop=0.0, out_drop=0.0):
super().__init__()
assert dim % num_heads == 0, "dim should be divisible by num_heads"
self.num_heads = num_heads
head_dim = dim // num_heads
self.scale = head_dim ** -0.5
self.query = nn.Linear(dim, dim, bias=bias)
self.key = nn.Linear(dim, dim, bias=bias)
self.value = nn.Linear(dim, dim, bias=bias)
self.attn_drop = nn.Dropout(attn_drop)
self.out = nn.Linear(dim, dim)
self.out_drop = nn.Dropout(out_drop)
def forward(self, x, mask=None):
B, N, C = x.shape
q = self.query(x)
k = self.key(x)
v = self.value(x)
q = q.reshape(B, N, self.num_heads, C // self.num_heads).permute(0, 2, 1, 3)
k = k.reshape(B, N, self.num_heads, C // self.num_heads).permute(0, 2, 1, 3)
v = v.reshape(B, N, self.num_heads, C // self.num_heads).permute(0, 2, 1, 3)
attn = (q @ k.transpose(-2, -1)) * self.scale
if mask is not None:
mask_h = mask.reshape(B, 1, N, 1)
mask_w = mask.reshape(B, 1, 1, N)
mask2d = mask_h * mask_w
attn = attn.masked_fill(mask2d == 0, -1e3)
attn = attn.softmax(dim=-1)
attn = self.attn_drop(attn)
x = (attn @ v).transpose(1, 2).reshape(B, N, C)
x = self.out(x)
x = self.out_drop(x)
return x
class TransformerBlock(nn.Module):
def __init__(
self,
dim,
mlp_dim,
num_heads,
drop=0.0,
attn_drop=0.0,
drop_path=0.0,
act_layer=nn.GELU,
norm_layer=nn.LayerNorm,
):
super().__init__()
self.norm1 = norm_layer(dim, eps=1e-6)
self.attention = MultiHeadAttention(
dim, num_heads, bias=True, attn_drop=attn_drop
)
self.drop_path = DropPath(drop_path) if drop_path > 0.0 else nn.Identity()
self.norm2 = norm_layer(dim, eps=1e-6)
self.mlp = Mlp(
in_features=dim, hidden_features=mlp_dim, act_layer=act_layer, drop=drop
)
def forward(self, x, inputs_masks):
y = self.norm1(x)
y = self.attention(y, inputs_masks)
x = x + self.drop_path(y)
x = x + self.drop_path(self.mlp(self.norm2(x)))
return x
class AddHashSpatialPositionEmbs(nn.Module):
"""Adds learnable hash-based spatial embeddings to the inputs."""
def __init__(self, spatial_pos_grid_size, dim):
super().__init__()
self.position_emb = nn.parameter.Parameter(
torch.randn(1, spatial_pos_grid_size * spatial_pos_grid_size, dim)
)
nn.init.normal_(self.position_emb, std=0.02)
def forward(self, inputs, inputs_positions):
return inputs + self.position_emb.squeeze(0)[inputs_positions.long()]
class AddScaleEmbs(nn.Module):
"""Adds learnable scale embeddings to the inputs."""
def __init__(self, num_scales, dim):
super().__init__()
self.scale_emb = nn.parameter.Parameter(torch.randn(num_scales, dim))
nn.init.normal_(self.scale_emb, std=0.02)
def forward(self, inputs, inputs_scale_positions):
return inputs + self.scale_emb[inputs_scale_positions.long()]
class TransformerEncoder(nn.Module):
def __init__(
self,
input_dim,
mlp_dim=1152,
attention_dropout_rate=0.0,
dropout_rate=0,
num_heads=6,
num_layers=14,
num_scales=3,
spatial_pos_grid_size=10,
use_scale_emb=True,
use_sinusoid_pos_emb=False,
):
super().__init__()
self.use_scale_emb = use_scale_emb
self.posembed_input = AddHashSpatialPositionEmbs(
spatial_pos_grid_size, input_dim
)
self.scaleembed_input = AddScaleEmbs(num_scales, input_dim)
self.cls = nn.parameter.Parameter(torch.zeros(1, 1, input_dim))
self.dropout = nn.Dropout(dropout_rate)
self.encoder_norm = nn.LayerNorm(input_dim, eps=1e-6)
self.transformer = nn.ModuleDict()
for i in range(num_layers):
self.transformer[f"encoderblock_{i}"] = TransformerBlock(
input_dim, mlp_dim, num_heads, dropout_rate, attention_dropout_rate
)
def forward(
self, x, inputs_spatial_positions, inputs_scale_positions, inputs_masks
):
n, _, c = x.shape
x = self.posembed_input(x, inputs_spatial_positions)
if self.use_scale_emb:
x = self.scaleembed_input(x, inputs_scale_positions)
cls_token = self.cls.repeat(n, 1, 1)
x = torch.cat([cls_token, x], dim=1)
cls_mask = torch.ones((n, 1)).to(inputs_masks)
inputs_mask = torch.cat([cls_mask, inputs_masks], dim=1)
x = self.dropout(x)
for k, m in self.transformer.items():
x = m(x, inputs_mask)
x = self.encoder_norm(x)
return x
@ARCH_REGISTRY.register()
class MUSIQ(nn.Module):
r"""
Evaluation:
        - n_crops: currently only tested with 1-crop evaluation
Reference:
Ke, Junjie, Qifei Wang, Yilin Wang, Peyman Milanfar, and Feng Yang.
"Musiq: Multi-scale image quality transformer." In Proceedings of the
IEEE/CVF International Conference on Computer Vision (ICCV), pp. 5148-5157. 2021.
"""
def __init__(
self,
patch_size=32,
num_class=1,
hidden_size=384,
mlp_dim=1152,
attention_dropout_rate=0.0,
dropout_rate=0,
num_heads=6,
num_layers=14,
num_scales=3,
spatial_pos_grid_size=10,
use_scale_emb=True,
use_sinusoid_pos_emb=False,
pretrained=True,
pretrained_model_path=None,
# data opts
longer_side_lengths=[224, 384],
max_seq_len_from_original_res=-1,
):
super(MUSIQ, self).__init__()
resnet_token_dim = 64
self.patch_size = patch_size
self.data_preprocess_opts = {
"patch_size": patch_size,
"patch_stride": patch_size,
"hse_grid_size": spatial_pos_grid_size,
"longer_side_lengths": longer_side_lengths,
"max_seq_len_from_original_res": max_seq_len_from_original_res,
}
# set num_class to 10 if pretrained model used AVA dataset
# if not specified pretrained dataset, use AVA for default
if pretrained_model_path is None and pretrained:
url_key = "ava" if isinstance(pretrained, bool) else pretrained
num_class = 10 if url_key == "ava" else num_class
pretrained_model_path = default_model_urls[url_key]
self.conv_root = StdConv(3, resnet_token_dim, 7, 2, bias=False)
self.gn_root = nn.GroupNorm(32, resnet_token_dim, eps=1e-6)
self.root_pool = nn.Sequential(
nn.ReLU(True),
ExactPadding2d(3, 2, mode="same"),
nn.MaxPool2d(3, 2),
)
token_patch_size = patch_size // 4
self.block1 = Bottleneck(resnet_token_dim, resnet_token_dim * 4)
self.embedding = nn.Linear(
resnet_token_dim * 4 * token_patch_size ** 2, hidden_size
)
self.transformer_encoder = TransformerEncoder(
hidden_size,
mlp_dim,
attention_dropout_rate,
dropout_rate,
num_heads,
num_layers,
num_scales,
spatial_pos_grid_size,
use_scale_emb,
use_sinusoid_pos_emb,
)
if num_class > 1:
self.head = nn.Sequential(
nn.Linear(hidden_size, num_class),
nn.Softmax(dim=-1),
)
else:
self.head = nn.Linear(hidden_size, num_class)
if pretrained_model_path is not None:
load_pretrained_network(self, pretrained_model_path, True)
def forward(self, x, return_mos=True, return_dist=False):
if not self.training:
# normalize inputs to [-1, 1] as the official code
x = (x - 0.5) * 2
x = get_multiscale_patches(x, **self.data_preprocess_opts)
assert len(x.shape) in [3, 4]
if len(x.shape) == 4:
b, num_crops, seq_len, dim = x.shape
x = x.reshape(b * num_crops, seq_len, dim)
else:
b, seq_len, dim = x.shape
num_crops = 1
inputs_spatial_positions = x[:, :, -3]
inputs_scale_positions = x[:, :, -2]
inputs_masks = x[:, :, -1].bool()
x = x[:, :, :-3]
x = x.reshape(-1, 3, self.patch_size, self.patch_size)
x = self.conv_root(x)
x = self.gn_root(x)
x = self.root_pool(x)
x = self.block1(x)
# to match tensorflow channel order
x = x.permute(0, 2, 3, 1)
x = x.reshape(b, seq_len, -1)
x = self.embedding(x)
x = self.transformer_encoder(
x, inputs_spatial_positions, inputs_scale_positions, inputs_masks
)
q = self.head(x[:, 0])
q = q.reshape(b, num_crops, -1)
q = q.mean(dim=1) # for multiple crops evaluation
mos = dist_to_mos(q)
return_list = []
if return_mos:
return_list.append(mos)
if return_dist:
return_list.append(q)
if len(return_list) > 1:
return return_list
else:
return return_list[0]
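# Editor's note: illustrative usage sketch, not part of the upstream file. In
# eval mode the model multi-scale-patchifies a raw RGB batch in [0, 1] itself;
# pretrained=False avoids any weight download. The input size is an assumption.
def _demo_musiq():
    model = MUSIQ(pretrained=False, num_class=1)
    model.eval()
    with torch.no_grad():
        score = model(torch.rand(1, 3, 224, 224))
    assert score.shape == (1, 1)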
| 13,679 | 31.494062 | 136 | py |
BVQI | BVQI-master/pyiqa/archs/nlpd_arch.py | r"""NLPD Metric
Created by: https://github.com/dingkeyan93/IQA-optimization/blob/master/IQA_pytorch/NLPD.py
Modified by: Jiadi Mo (https://github.com/JiadiMo)
Refer to:
Matlab code from https://www.cns.nyu.edu/~lcv/NLPyr/NLP_dist.m;
"""
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision.transforms.functional as tf
from pyiqa.archs.arch_util import ExactPadding2d
from pyiqa.archs.ssim_arch import to_y_channel
from pyiqa.utils.registry import ARCH_REGISTRY
LAPLACIAN_FILTER = np.array(
[
[0.0025, 0.0125, 0.0200, 0.0125, 0.0025],
[0.0125, 0.0625, 0.1000, 0.0625, 0.0125],
[0.0200, 0.1000, 0.1600, 0.1000, 0.0200],
[0.0125, 0.0625, 0.1000, 0.0625, 0.0125],
[0.0025, 0.0125, 0.0200, 0.0125, 0.0025],
],
dtype=np.float32,
)
@ARCH_REGISTRY.register()
class NLPD(nn.Module):
r"""Normalised lapalcian pyramid distance
Args:
        channels: Number of channels the metric operates on.
test_y_channel: Boolean, whether to use y channel on ycbcr which mimics official matlab code.
References:
Laparra, Valero, Johannes Ballé, Alexander Berardino, and Eero P. Simoncelli.
"Perceptual image quality assessment using a normalized Laplacian pyramid."
Electronic Imaging 2016, no. 16 (2016): 1-6.
"""
def __init__(self, channels=1, test_y_channel=True, k=6, filt=None):
super(NLPD, self).__init__()
if filt is None:
filt = np.reshape(
np.tile(LAPLACIAN_FILTER, (channels, 1, 1)), (channels, 1, 5, 5)
)
self.k = k
self.channels = channels
self.test_y_channel = test_y_channel
self.filt = nn.Parameter(torch.Tensor(filt), requires_grad=False)
self.dn_filts, self.sigmas = self.DN_filters()
self.pad_zero_one = nn.ZeroPad2d(1)
self.pad_zero_two = nn.ZeroPad2d(2)
self.pad_sym = ExactPadding2d(5, mode="symmetric")
self.rep_one = nn.ReplicationPad2d(1)
self.ps = nn.PixelShuffle(2)
def DN_filters(self):
r"""Define parameters for the divisive normalization"""
sigmas = [0.0248, 0.0185, 0.0179, 0.0191, 0.0220, 0.2782]
dn_filts = []
dn_filts.append(
torch.Tensor(
np.reshape(
[[0, 0.1011, 0], [0.1493, 0, 0.1460], [0, 0.1015, 0.0]]
* self.channels,
(self.channels, 1, 3, 3),
).astype(np.float32)
)
)
dn_filts.append(
torch.Tensor(
np.reshape(
[[0, 0.0757, 0], [0.1986, 0, 0.1846], [0, 0.0837, 0]]
* self.channels,
(self.channels, 1, 3, 3),
).astype(np.float32)
)
)
dn_filts.append(
torch.Tensor(
np.reshape(
[[0, 0.0477, 0], [0.2138, 0, 0.2243], [0, 0.0467, 0]]
* self.channels,
(self.channels, 1, 3, 3),
).astype(np.float32)
)
)
dn_filts.append(
torch.Tensor(
np.reshape(
[[0, 0, 0], [0.2503, 0, 0.2616], [0, 0, 0]] * self.channels,
(self.channels, 1, 3, 3),
).astype(np.float32)
)
)
dn_filts.append(
torch.Tensor(
np.reshape(
[[0, 0, 0], [0.2598, 0, 0.2552], [0, 0, 0]] * self.channels,
(self.channels, 1, 3, 3),
).astype(np.float32)
)
)
dn_filts.append(
torch.Tensor(
np.reshape(
[[0, 0, 0], [0.2215, 0, 0.0717], [0, 0, 0]] * self.channels,
(self.channels, 1, 3, 3),
).astype(np.float32)
)
)
dn_filts = nn.ParameterList(
[nn.Parameter(x, requires_grad=False) for x in dn_filts]
)
sigmas = nn.ParameterList(
[
nn.Parameter(torch.Tensor(np.array(x)), requires_grad=False)
for x in sigmas
]
)
return dn_filts, sigmas
def pyramid(self, im):
r"""Compute Laplacian Pyramid
Args:
im: An input tensor. Shape :math:`(N, C, H, W)`.
"""
out = []
J = im
pyr = []
for i in range(0, self.k - 1):
            # Downsample. Official matlab code uses 'symmetric' padding.
I = F.conv2d(
self.pad_sym(J), self.filt, stride=2, padding=0, groups=self.channels
)
# for each dimension, check if the upsampled version has to be odd.
odd_h, odd_w = 2 * I.size(2) - J.size(2), 2 * I.size(3) - J.size(3)
            # Upsample. Official matlab code interpolates with zeros to upsample.
I_pad = self.rep_one(I)
I_rep1, I_rep2, I_rep3 = (
torch.zeros_like(I_pad),
torch.zeros_like(I_pad),
torch.zeros_like(I_pad),
)
R = torch.cat([I_pad * 4, I_rep1, I_rep2, I_rep3], dim=1)
I_up = self.ps(R)
I_up_conv = F.conv2d(
self.pad_zero_two(I_up),
self.filt,
stride=1,
padding=0,
groups=self.channels,
)
I_up_conv = I_up_conv[
:, :, 2 : (I_up.shape[2] - 2 - odd_h), 2 : (I_up.shape[3] - 2 - odd_w)
]
out = J - I_up_conv
            # NLP Transformation; conv2 in matlab rotates filters by 180 degrees.
out_conv = F.conv2d(
self.pad_zero_one(torch.abs(out)),
tf.rotate(self.dn_filts[i], 180),
stride=1,
groups=self.channels,
)
out_norm = out / (self.sigmas[i] + out_conv)
pyr.append(out_norm)
J = I
        # NLP Transformation for the top layer; the coarsest level contains the residual low-pass image
out_conv = F.conv2d(
self.pad_zero_one(torch.abs(J)),
tf.rotate(self.dn_filts[-1], 180),
stride=1,
groups=self.channels,
)
out_norm = J / (self.sigmas[-1] + out_conv)
pyr.append(out_norm)
return pyr
def nlpd(self, x1, x2):
r"""Compute Normalised lapalcian pyramid distance for a batch of images.
Args:
x1: An input tensor. Shape :math:`(N, C, H, W)`.
x2: A target tensor. Shape :math:`(N, C, H, W)`.
Returns:
            Distance between the two images (lower means more similar). Usually in [0, 1] interval.
"""
assert (self.test_y_channel and self.channels == 1) or (
not self.test_y_channel and self.channels == 3
), "Number of channel and convert to YCBCR should be match"
if self.test_y_channel and self.channels == 1:
x1 = to_y_channel(x1)
x2 = to_y_channel(x2)
y1 = self.pyramid(x1)
y2 = self.pyramid(x2)
total = []
for z1, z2 in zip(y1, y2):
diff = (z1 - z2) ** 2
sqrt = torch.sqrt(torch.mean(diff, (1, 2, 3)))
total.append(sqrt)
score = torch.stack(total, dim=1).mean(1)
return score
def forward(self, X, Y):
"""Computation of NLPD metric.
Args:
X: An input tensor. Shape :math:`(N, C, H, W)`.
Y: A target tensor. Shape :math:`(N, C, H, W)`.
Returns:
Value of NLPD metric in [0, 1] range.
"""
assert (
X.shape == Y.shape
), f"Input {X.shape} and reference images should have the same shape"
score = self.nlpd(X, Y)
return score
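# Editor's note: illustrative usage sketch, not part of the upstream file. Both
# inputs are RGB batches in [0, 1]; with the default test_y_channel=True the
# distance is computed on the luminance channel. The sizes are assumptions.
def _demo_nlpd():
    metric = NLPD(channels=1, test_y_channel=True)
    x, y = torch.rand(1, 3, 128, 128), torch.rand(1, 3, 128, 128)
    score = metric(x, y)       # lower means the two images are more similar
    assert score.shape == (1,)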
| 7,884 | 31.854167 | 101 | py |
BVQI | BVQI-master/pyiqa/archs/mad_arch.py | r"""MAD Metric
Created by: https://github.com/dingkeyan93/IQA-optimization/blob/master/IQA_pytorch/MAD.py
Modified by: Jiadi Mo (https://github.com/JiadiMo)
Note:
Offical matlab code is not available;
Pytorch version >= 1.8.0;
"""
import math
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from numpy.fft import fftshift
from pyiqa.matlab_utils import math_util
from pyiqa.utils.color_util import to_y_channel
from pyiqa.utils.registry import ARCH_REGISTRY
MAX = nn.MaxPool2d((2, 2), stride=1, padding=1)
def extract_patches_2d(
img: torch.Tensor,
patch_shape: list = [64, 64],
step: list = [27, 27],
batch_first: bool = True,
keep_last_patch: bool = False,
) -> torch.Tensor:
patch_H, patch_W = patch_shape[0], patch_shape[1]
if img.size(2) < patch_H:
num_padded_H_Top = (patch_H - img.size(2)) // 2
num_padded_H_Bottom = patch_H - img.size(2) - num_padded_H_Top
padding_H = nn.ConstantPad2d((0, 0, num_padded_H_Top, num_padded_H_Bottom), 0)
img = padding_H(img)
if img.size(3) < patch_W:
num_padded_W_Left = (patch_W - img.size(3)) // 2
num_padded_W_Right = patch_W - img.size(3) - num_padded_W_Left
padding_W = nn.ConstantPad2d((num_padded_W_Left, num_padded_W_Right, 0, 0), 0)
img = padding_W(img)
step_int = [0, 0]
step_int[0] = int(patch_H * step[0]) if (isinstance(step[0], float)) else step[0]
step_int[1] = int(patch_W * step[1]) if (isinstance(step[1], float)) else step[1]
patches_fold_H = img.unfold(2, patch_H, step_int[0])
if ((img.size(2) - patch_H) % step_int[0] != 0) and keep_last_patch:
patches_fold_H = torch.cat(
(
patches_fold_H,
img[
:,
:,
-patch_H:,
]
.permute(0, 1, 3, 2)
.unsqueeze(2),
),
dim=2,
)
patches_fold_HW = patches_fold_H.unfold(3, patch_W, step_int[1])
if ((img.size(3) - patch_W) % step_int[1] != 0) and keep_last_patch:
patches_fold_HW = torch.cat(
(
patches_fold_HW,
patches_fold_H[:, :, :, -patch_W:, :]
.permute(0, 1, 2, 4, 3)
.unsqueeze(3),
),
dim=3,
)
patches = patches_fold_HW.permute(2, 3, 0, 1, 4, 5)
patches = patches.reshape(-1, img.size(0), img.size(1), patch_H, patch_W)
if batch_first:
patches = patches.permute(1, 0, 2, 3, 4)
return patches
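# Editor's note: illustrative sketch, not part of the upstream file. With a
# 64x64 image, 16x16 patches and stride 4 there are (64 - 16) / 4 + 1 = 13
# positions per axis, i.e. 169 patches; all sizes here are assumptions.
def _demo_extract_patches_2d():
    img = torch.randn(1, 1, 64, 64)
    patches = extract_patches_2d(img, patch_shape=[16, 16], step=[4, 4])
    assert patches.shape == (1, 169, 1, 16, 16)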
def make_csf(rows, cols, nfreq):
xvals = np.arange(-(cols - 1) / 2.0, (cols + 1) / 2.0)
yvals = np.arange(-(rows - 1) / 2.0, (rows + 1) / 2.0)
xplane, yplane = np.meshgrid(xvals, yvals) # generate mesh
plane = ((xplane + 1j * yplane) / cols) * 2 * nfreq
radfreq = np.abs(plane) # radial frequency
w = 0.7
s = (1 - w) / 2 * np.cos(4 * np.angle(plane)) + (1 + w) / 2
radfreq = radfreq / s
# Now generate the CSF
csf = 2.6 * (0.0192 + 0.114 * radfreq) * np.exp(-((0.114 * radfreq) ** 1.1))
csf[radfreq < 7.8909] = 0.9809
return np.transpose(csf)
def get_moments(d, sk=False):
# Return the first 4 moments of the data provided
mean = torch.mean(d, dim=[3, 4], keepdim=True)
diffs = d - mean
var = torch.mean(torch.pow(diffs, 2.0), dim=[3, 4], keepdim=True)
std = torch.pow(var + 1e-12, 0.5)
if sk:
zscores = diffs / std
skews = torch.mean(torch.pow(zscores, 3.0), dim=[3, 4], keepdim=True)
kurtoses = (
torch.mean(torch.pow(zscores, 4.0), dim=[3, 4], keepdim=True) - 3.0
) # excess kurtosis, should be 0 for Gaussian
return mean, std, skews, kurtoses
else:
return mean, std
def ical_stat(x, p=16, s=4):
B, C, H, W = x.shape
x1 = extract_patches_2d(x, patch_shape=[p, p], step=[s, s])
_, std, skews, kurt = get_moments(x1, sk=True)
STD = std.reshape(B, C, (H - (p - s)) // s, (W - (p - s)) // s)
SKEWS = skews.reshape(B, C, (H - (p - s)) // s, (W - (p - s)) // s)
KURT = kurt.reshape(B, C, (H - (p - s)) // s, (W - (p - s)) // s)
    return STD, SKEWS, KURT  # differs from the original version
def ical_std(x, p=16, s=4):
B, C, H, W = x.shape
x1 = extract_patches_2d(x, patch_shape=[p, p], step=[s, s])
mean, std = get_moments(x1)
mean = mean.reshape(B, C, (H - (p - s)) // s, (W - (p - s)) // s)
std = std.reshape(B, C, (H - (p - s)) // s, (W - (p - s)) // s)
return mean, std
def hi_index(ref_img, dst_img):
k = 0.02874
G = 0.5
C_slope = 1
Ci_thrsh = -5
Cd_thrsh = -5
ref = k * (ref_img + 1e-12) ** (2.2 / 3)
dst = k * (torch.abs(dst_img) + 1e-12) ** (2.2 / 3)
B, C, H, W = ref.shape
csf = make_csf(H, W, 32)
csf = (
torch.from_numpy(csf.reshape(1, 1, H, W, 1))
.float()
.repeat(1, C, 1, 1, 2)
.to(ref.device)
)
x = torch.fft.fft2(ref)
x1 = math_util.batch_fftshift2d(x)
x2 = math_util.batch_ifftshift2d(x1 * csf)
ref = torch.fft.ifft2(x2).real
x = torch.fft.fft2(dst)
x1 = math_util.batch_fftshift2d(x)
x2 = math_util.batch_ifftshift2d(x1 * csf)
dst = torch.fft.ifft2(x2).real
m1_1, std_1 = ical_std(ref)
B, C, H1, W1 = m1_1.shape
std_1 = (-MAX(-std_1) / 2)[:, :, :H1, :W1]
_, std_2 = ical_std(dst - ref)
BSIZE = 16
eps = 1e-12
Ci_ref = torch.log(torch.abs((std_1 + eps) / (m1_1 + eps)))
Ci_dst = torch.log(torch.abs((std_2 + eps) / (m1_1 + eps)))
Ci_dst = Ci_dst.masked_fill(m1_1 < G, -1000)
idx1 = (Ci_ref > Ci_thrsh) & (Ci_dst > (C_slope * (Ci_ref - Ci_thrsh) + Cd_thrsh))
idx2 = (Ci_ref <= Ci_thrsh) & (Ci_dst > Cd_thrsh)
msk = Ci_ref.clone()
msk = msk.masked_fill(~idx1, 0)
msk = msk.masked_fill(~idx2, 0)
msk[idx1] = Ci_dst[idx1] - (C_slope * (Ci_ref[idx1] - Ci_thrsh) + Cd_thrsh)
msk[idx2] = Ci_dst[idx2] - Cd_thrsh
win = (
torch.ones((1, 1, BSIZE, BSIZE)).repeat(C, 1, 1, 1).to(ref.device) / BSIZE ** 2
)
xx = (ref_img - dst_img) ** 2
lmse = F.conv2d(xx, win, stride=4, padding=0, groups=C)
mp = msk * lmse
B, C, H, W = mp.shape
return torch.norm(mp.reshape(B, C, -1), dim=2) / math.sqrt(H * W) * 200
def gaborconvolve(im):
nscale = 5 # Number of wavelet scales.
norient = 4 # Number of filter orientations.
minWaveLength = 3 # Wavelength of smallest scale filter.
mult = 3 # Scaling factor between successive filters.
    # Ratio of the standard deviation of the Gaussian describing the log Gabor
    # filter's transfer function in the frequency domain to the filter center frequency.
    sigmaOnf = 0.55
wavelength = [
minWaveLength,
minWaveLength * mult,
minWaveLength * mult ** 2,
minWaveLength * mult ** 3,
minWaveLength * mult ** 4,
]
# Ratio of angular interval between filter orientations
dThetaOnSigma = 1.5
# Fourier transform of image
B, C, rows, cols = im.shape
# imagefft = torch.rfft(im,2, onesided=False)
imagefft = torch.fft.fft2(im)
# Pre-compute to speed up filter construction
x = np.ones((rows, 1)) * np.arange(-cols / 2.0, (cols / 2.0)) / (cols / 2.0)
y = np.dot(
np.expand_dims(np.arange(-rows / 2.0, (rows / 2.0)), 1),
np.ones((1, cols)) / (rows / 2.0),
)
# Matrix values contain *normalised* radius from centre.
radius = np.sqrt(x ** 2 + y ** 2)
# Get rid of the 0 radius value in the middle
radius[int(np.round(rows / 2 + 1)), int(np.round(cols / 2 + 1))] = 1
radius = np.log(radius + 1e-12)
# Matrix values contain polar angle.
theta = np.arctan2(-y, x)
sintheta = np.sin(theta)
costheta = np.cos(theta)
# Calculate the standard deviation
thetaSigma = math.pi / norient / dThetaOnSigma
logGabors = []
for s in range(nscale):
# Construct the filter - first calculate the radial filter component.
fo = 1.0 / wavelength[s] # Centre frequency of filter.
rfo = fo / 0.5 # Normalised radius from centre of frequency plane
# corresponding to fo.
tmp = -(2 * np.log(sigmaOnf) ** 2)
tmp2 = np.log(rfo)
logGabors.append(np.exp((radius - tmp2) ** 2 / tmp))
logGabors[s][int(np.round(rows / 2)), int(np.round(cols / 2))] = 0
E0 = [[], [], [], []]
for o in range(norient):
# Calculate filter angle.
angl = o * math.pi / norient
ds = sintheta * np.cos(angl) - costheta * np.sin(angl) # Difference in sine.
dc = costheta * np.cos(angl) + sintheta * np.sin(angl) # Difference in cosine.
dtheta = np.abs(np.arctan2(ds, dc)) # Absolute angular distance.
spread = np.exp(
(-(dtheta ** 2)) / (2 * thetaSigma ** 2)
) # Calculate the angular filter component.
for s in range(nscale):
filter = fftshift(logGabors[s] * spread)
filter = torch.from_numpy(filter).reshape(1, 1, rows, cols).to(im.device)
e0 = torch.fft.ifft2(imagefft * filter)
E0[o].append(torch.stack((e0.real, e0.imag), -1))
return E0
def lo_index(ref, dst):
gabRef = gaborconvolve(ref)
gabDst = gaborconvolve(dst)
s = [0.5 / 13.25, 0.75 / 13.25, 1 / 13.25, 5 / 13.25, 6 / 13.25]
mp = 0
for gb_i in range(4):
for gb_j in range(5):
stdref, skwref, krtref = ical_stat(math_util.abs(gabRef[gb_i][gb_j]))
stddst, skwdst, krtdst = ical_stat(math_util.abs(gabDst[gb_i][gb_j]))
mp = mp + s[gb_i] * (
torch.abs(stdref - stddst)
+ 2 * torch.abs(skwref - skwdst)
+ torch.abs(krtref - krtdst)
)
B, C, rows, cols = mp.shape
return torch.norm(mp.reshape(B, C, -1), dim=2) / np.sqrt(rows * cols)
@ARCH_REGISTRY.register()
class MAD(torch.nn.Module):
r"""Args:
        channels: Number of input channels.
test_y_channel: bool, whether to use y channel on ycbcr which mimics official matlab code.
References:
Larson, Eric Cooper, and Damon Michael Chandler. "Most apparent distortion: full-reference
image quality assessment and the role of strategy." Journal of electronic imaging 19, no. 1
(2010): 011006.
"""
def __init__(self, channels=3, test_y_channel=True):
super(MAD, self).__init__()
self.channels = channels
self.test_y_channel = test_y_channel
def mad(self, ref, dst):
r"""Compute MAD for a batch of images.
Args:
ref: An reference tensor. Shape :math:`(N, C, H, W)`.
dst: A distortion tensor. Shape :math:`(N, C, H, W)`.
"""
if self.test_y_channel and ref.shape[1] == 3:
ref = to_y_channel(ref, 255.0)
dst = to_y_channel(dst, 255.0)
self.channels = 1
HI = hi_index(ref, dst)
LO = lo_index(ref, dst)
thresh1 = 2.55
thresh2 = 3.35
b1 = math.exp(-thresh1 / thresh2)
b2 = 1 / (math.log(10) * thresh2)
sig = 1 / (1 + b1 * HI ** b2)
MAD = LO ** (1 - sig) * HI ** (sig)
return MAD.mean(1)
def forward(self, X, Y):
r"""Computation of CW-SSIM metric.
Args:
X: An input tensor. Shape :math:`(N, C, H, W)`.
Y: A target tensor. Shape :math:`(N, C, H, W)`.
Returns:
Value of MAD metric in [0, 1] range.
"""
assert (
X.shape == Y.shape
), f"Input and reference images should have the same shape, but got {X.shape} and {Y.shape}"
score = self.mad(Y, X)
return score
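# Editor's note: illustrative usage sketch, not part of the upstream file. MAD
# expects RGB batches in [0, 1] (the distorted image first, then the reference);
# the spatial size is an assumption and must be large enough for 16x16 blocks.
def _demo_mad():
    metric = MAD()
    dist, ref = torch.rand(1, 3, 96, 96), torch.rand(1, 3, 96, 96)
    score = metric(dist, ref)   # higher means more apparent distortion
    assert score.shape == (1,)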
| 11,628 | 31.66573 | 100 | py |
BVQI | BVQI-master/pyiqa/archs/arch_util.py | import collections.abc
import math
from builtins import ValueError
from collections import OrderedDict
from itertools import repeat
from typing import Tuple
import numpy as np
import torch
from torch import nn as nn
from torch.nn import functional as F
from torch.nn import init as init
from torch.nn.modules.batchnorm import _BatchNorm
from pyiqa.utils.download_util import load_file_from_url
# --------------------------------------------
# IQA utils
# --------------------------------------------
def dist_to_mos(dist_score: torch.Tensor) -> torch.Tensor:
"""Convert distribution prediction to mos score.
For datasets with detailed score labels, such as AVA
Args:
dist_score (tensor): (*, C), C is the class number
Output:
mos_score (tensor): (*, 1)
"""
num_classes = dist_score.shape[-1]
mos_score = dist_score * torch.arange(1, num_classes + 1).to(dist_score)
mos_score = mos_score.sum(dim=-1, keepdim=True)
return mos_score
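# Editor's note: illustrative sketch, not part of the upstream file. A worked
# example for dist_to_mos: the 5-bin distribution below yields
# 0.1*1 + 0.2*2 + 0.4*3 + 0.2*4 + 0.1*5 = 3.0.
def _demo_dist_to_mos():
    dist = torch.tensor([[0.1, 0.2, 0.4, 0.2, 0.1]])
    assert torch.allclose(dist_to_mos(dist), torch.tensor([[3.0]]))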
# --------------------------------------------
# Common utils
# --------------------------------------------
def clean_state_dict(state_dict):
# 'clean' checkpoint by removing .module prefix from state dict if it exists from parallel training
cleaned_state_dict = OrderedDict()
for k, v in state_dict.items():
name = k[7:] if k.startswith("module.") else k
cleaned_state_dict[name] = v
return cleaned_state_dict
def load_pretrained_network(net, model_path, strict=True, weight_keys=None):
if model_path.startswith("https://") or model_path.startswith("http://"):
model_path = load_file_from_url(model_path)
state_dict = torch.load(model_path, map_location=torch.device("cpu"))
if weight_keys is not None:
state_dict = state_dict[weight_keys]
state_dict = clean_state_dict(state_dict)
net.load_state_dict(state_dict, strict=strict)
def _ntuple(n):
def parse(x):
if isinstance(x, collections.abc.Iterable):
return x
return tuple(repeat(x, n))
return parse
to_1tuple = _ntuple(1)
to_2tuple = _ntuple(2)
to_3tuple = _ntuple(3)
to_4tuple = _ntuple(4)
to_ntuple = _ntuple
@torch.no_grad()
def default_init_weights(module_list, scale=1, bias_fill=0, **kwargs):
r"""Initialize network weights.
Args:
module_list (list[nn.Module] | nn.Module): Modules to be initialized.
scale (float): Scale initialized weights, especially for residual
blocks. Default: 1.
bias_fill (float): The value to fill bias. Default: 0.
kwargs (dict): Other arguments for initialization function.
"""
if not isinstance(module_list, list):
module_list = [module_list]
for module in module_list:
for m in module.modules():
if isinstance(m, nn.Conv2d):
init.kaiming_normal_(m.weight, **kwargs)
m.weight.data *= scale
if m.bias is not None:
m.bias.data.fill_(bias_fill)
elif isinstance(m, nn.Linear):
init.kaiming_normal_(m.weight, **kwargs)
m.weight.data *= scale
if m.bias is not None:
m.bias.data.fill_(bias_fill)
elif isinstance(m, _BatchNorm):
init.constant_(m.weight, 1)
if m.bias is not None:
m.bias.data.fill_(bias_fill)
def symm_pad(im: torch.Tensor, padding: Tuple[int, int, int, int]):
"""Symmetric padding same as tensorflow.
Ref: https://discuss.pytorch.org/t/symmetric-padding/19866/3
"""
h, w = im.shape[-2:]
left, right, top, bottom = padding
x_idx = np.arange(-left, w + right)
y_idx = np.arange(-top, h + bottom)
def reflect(x, minx, maxx):
"""Reflects an array around two points making a triangular waveform that ramps up
and down, allowing for pad lengths greater than the input length"""
rng = maxx - minx
double_rng = 2 * rng
mod = np.fmod(x - minx, double_rng)
normed_mod = np.where(mod < 0, mod + double_rng, mod)
out = np.where(normed_mod >= rng, double_rng - normed_mod, normed_mod) + minx
return np.array(out, dtype=x.dtype)
x_pad = reflect(x_idx, -0.5, w - 0.5)
y_pad = reflect(y_idx, -0.5, h - 0.5)
xx, yy = np.meshgrid(x_pad, y_pad)
return im[..., yy, xx]
def excact_padding_2d(x, kernel, stride=1, dilation=1, mode="same"):
assert len(x.shape) == 4, f"Only support 4D tensor input, but got {x.shape}"
kernel = to_2tuple(kernel)
stride = to_2tuple(stride)
dilation = to_2tuple(dilation)
b, c, h, w = x.shape
h2 = math.ceil(h / stride[0])
w2 = math.ceil(w / stride[1])
pad_row = (h2 - 1) * stride[0] + (kernel[0] - 1) * dilation[0] + 1 - h
pad_col = (w2 - 1) * stride[1] + (kernel[1] - 1) * dilation[1] + 1 - w
pad_l, pad_r, pad_t, pad_b = (
pad_col // 2,
pad_col - pad_col // 2,
pad_row // 2,
pad_row - pad_row // 2,
)
mode = mode if mode != "same" else "constant"
if mode != "symmetric":
x = F.pad(x, (pad_l, pad_r, pad_t, pad_b), mode=mode)
elif mode == "symmetric":
x = symm_pad(x, (pad_l, pad_r, pad_t, pad_b))
return x
class ExactPadding2d(nn.Module):
r"""This function calculate exact padding values for 4D tensor inputs,
and support the same padding mode as tensorflow.
Args:
kernel (int or tuple): kernel size.
stride (int or tuple): stride size.
dilation (int or tuple): dilation size, default with 1.
mode (srt): padding mode can be ('same', 'symmetric', 'replicate', 'circular')
"""
def __init__(self, kernel, stride=1, dilation=1, mode="same"):
super().__init__()
self.kernel = to_2tuple(kernel)
self.stride = to_2tuple(stride)
self.dilation = to_2tuple(dilation)
self.mode = mode
def forward(self, x):
return excact_padding_2d(x, self.kernel, self.stride, self.dilation, self.mode)
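# Editor's note: illustrative sketch, not part of the upstream file. With
# TensorFlow-style "same" padding a 7x7 input convolved by a 3x3 kernel at
# stride 2 keeps ceil(7 / 2) = 4 positions per axis; the sizes are assumptions.
def _demo_exact_padding():
    pad = ExactPadding2d(kernel=3, stride=2, mode="same")
    x = torch.randn(1, 1, 7, 7)
    y = F.conv2d(pad(x), torch.randn(1, 1, 3, 3), stride=2)
    assert y.shape == (1, 1, 4, 4)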
| 6,039 | 32.005464 | 103 | py |
BVQI | BVQI-master/pyiqa/archs/fsim_arch.py | r"""FSIM Metric
Created by: https://github.com/photosynthesis-team/piq/blob/master/piq/fsim.py
Modified by: Jiadi Mo (https://github.com/JiadiMo)
Refer to:
Official matlab code from https://www4.comp.polyu.edu.hk/~cslzhang/IQA/FSIM/Files/FeatureSIM.m
PIQA from https://github.com/francois-rozet/piqa/blob/master/piqa/fsim.py
"""
import functools
import math
from typing import Tuple
import torch
import torch.nn as nn
from pyiqa.utils.color_util import rgb2yiq
from pyiqa.utils.registry import ARCH_REGISTRY
from .func_util import get_meshgrid, gradient_map, ifftshift, similarity_map
def fsim(
x: torch.Tensor,
y: torch.Tensor,
chromatic: bool = True,
scales: int = 4,
orientations: int = 4,
min_length: int = 6,
mult: int = 2,
sigma_f: float = 0.55,
delta_theta: float = 1.2,
k: float = 2.0,
) -> torch.Tensor:
r"""Compute Feature Similarity Index Measure for a batch of images.
Args:
x: An input tensor. Shape :math:`(N, C, H, W)`.
y: A target tensor. Shape :math:`(N, C, H, W)`.
chromatic: Flag to compute FSIMc, which also takes into account chromatic components
        scales: Number of wavelets used for computation of phase congruency maps
        orientations: Number of filter orientations used for computation of phase congruency maps
min_length: Wavelength of smallest scale filter
mult: Scaling factor between successive filters
sigma_f: Ratio of the standard deviation of the Gaussian describing the log Gabor filter's
transfer function in the frequency domain to the filter center frequency.
delta_theta: Ratio of angular interval between filter orientations and the standard deviation
of the angular Gaussian function used to construct filters in the frequency plane.
k: No of standard deviations of the noise energy beyond the mean at which we set the noise
threshold point, below which phase congruency values get penalized.
Returns:
        Index of similarity between two images. Usually in [0, 1] interval.
Can be bigger than 1 for predicted :math:`x` images with higher contrast than the original ones.
References:
L. Zhang, L. Zhang, X. Mou and D. Zhang, "FSIM: A Feature Similarity Index for Image Quality Assessment,"
IEEE Transactions on Image Processing, vol. 20, no. 8, pp. 2378-2386, Aug. 2011, doi: 10.1109/TIP.2011.2109730.
https://ieeexplore.ieee.org/document/5705575
"""
    # Rescale to [0, 255] range, because all constants are calculated for this factor
x = x / float(1.0) * 255
y = y / float(1.0) * 255
# Apply average pooling
kernel_size = max(1, round(min(x.shape[-2:]) / 256))
x = torch.nn.functional.avg_pool2d(x, kernel_size)
y = torch.nn.functional.avg_pool2d(y, kernel_size)
num_channels = x.size(1)
# Convert RGB to YIQ color space
if num_channels == 3:
x_yiq = rgb2yiq(x)
y_yiq = rgb2yiq(y)
x_lum = x_yiq[:, :1]
y_lum = y_yiq[:, :1]
x_i = x_yiq[:, 1:2]
y_i = y_yiq[:, 1:2]
x_q = x_yiq[:, 2:]
y_q = y_yiq[:, 2:]
else:
x_lum = x
y_lum = y
# Compute phase congruency maps
pc_x = _phase_congruency(
x_lum,
scales=scales,
orientations=orientations,
min_length=min_length,
mult=mult,
sigma_f=sigma_f,
delta_theta=delta_theta,
k=k,
)
pc_y = _phase_congruency(
y_lum,
scales=scales,
orientations=orientations,
min_length=min_length,
mult=mult,
sigma_f=sigma_f,
delta_theta=delta_theta,
k=k,
)
# Gradient maps
scharr_filter = (
torch.tensor([[[-3.0, 0.0, 3.0], [-10.0, 0.0, 10.0], [-3.0, 0.0, 3.0]]]) / 16
)
kernels = torch.stack([scharr_filter, scharr_filter.transpose(-1, -2)])
grad_map_x = gradient_map(x_lum, kernels)
grad_map_y = gradient_map(y_lum, kernels)
# Constants from the paper
T1, T2, T3, T4, lmbda = 0.85, 160, 200, 200, 0.03
# Compute FSIM
PC = similarity_map(pc_x, pc_y, T1)
GM = similarity_map(grad_map_x, grad_map_y, T2)
pc_max = torch.where(pc_x > pc_y, pc_x, pc_y)
score = GM * PC * pc_max # torch.sum(score)/torch.sum(pc_max)
if chromatic:
assert (
num_channels == 3
), "Chromatic component can be computed only for RGB images!"
S_I = similarity_map(x_i, y_i, T3)
S_Q = similarity_map(x_q, y_q, T4)
score = score * torch.abs(S_I * S_Q) ** lmbda
# Complex gradients will work in PyTorch 1.6.0
# score = score * torch.real((S_I * S_Q).to(torch.complex64) ** lmbda)
result = score.sum(dim=[1, 2, 3]) / pc_max.sum(dim=[1, 2, 3])
return result
def _construct_filters(
x: torch.Tensor,
scales: int = 4,
orientations: int = 4,
min_length: int = 6,
mult: int = 2,
sigma_f: float = 0.55,
delta_theta: float = 1.2,
k: float = 2.0,
use_lowpass_filter=True,
):
"""Creates a stack of filters used for computation of phase congruensy maps
Args:
x: Tensor. Shape :math:`(N, 1, H, W)`.
scales: Number of wavelets
orientations: Number of filter orientations
min_length: Wavelength of smallest scale filter
mult: Scaling factor between successive filters
sigma_f: Ratio of the standard deviation of the Gaussian
describing the log Gabor filter's transfer function
in the frequency domain to the filter center frequency.
delta_theta: Ratio of angular interval between filter orientations
and the standard deviation of the angular Gaussian function
used to construct filters in the freq. plane.
k: No of standard deviations of the noise energy beyond the mean
at which we set the noise threshold point, below which phase
congruency values get penalized.
"""
N, _, H, W = x.shape
# Calculate the standard deviation of the angular Gaussian function
# used to construct filters in the freq. plane.
theta_sigma = math.pi / (orientations * delta_theta)
# Pre-compute some stuff to speed up filter construction
grid_x, grid_y = get_meshgrid((H, W))
radius = torch.sqrt(grid_x ** 2 + grid_y ** 2)
theta = torch.atan2(-grid_y, grid_x)
# Quadrant shift radius and theta so that filters are constructed with 0 frequency at the corners.
# Get rid of the 0 radius value at the 0 frequency point (now at top-left corner)
# so that taking the log of the radius will not cause trouble.
radius = ifftshift(radius)
theta = ifftshift(theta)
radius[0, 0] = 1
sintheta = torch.sin(theta)
costheta = torch.cos(theta)
# Filters are constructed in terms of two components.
# 1) The radial component, which controls the frequency band that the filter responds to
# 2) The angular component, which controls the orientation that the filter responds to.
# The two components are multiplied together to construct the overall filter.
# First construct a low-pass filter that is as large as possible, yet falls
# away to zero at the boundaries. All log Gabor filters are multiplied by
# this to ensure no extra frequencies at the 'corners' of the FFT are
    # incorporated, as this seems to upset the normalisation process when calculating phase congruency.
lp = _lowpassfilter(size=(H, W), cutoff=0.45, n=15)
# Construct the radial filter components...
log_gabor = []
for s in range(scales):
wavelength = min_length * mult ** s
omega_0 = 1.0 / wavelength
gabor_filter = torch.exp(
(-torch.log(radius / omega_0) ** 2) / (2 * math.log(sigma_f) ** 2)
)
if use_lowpass_filter:
gabor_filter = gabor_filter * lp
gabor_filter[0, 0] = 0
log_gabor.append(gabor_filter)
# Then construct the angular filter components...
spread = []
for o in range(orientations):
angl = o * math.pi / orientations
# For each point in the filter matrix calculate the angular distance from
# the specified filter orientation. To overcome the angular wrap-around
# problem sine difference and cosine difference values are first computed
# and then the atan2 function is used to determine angular distance.
ds = sintheta * math.cos(angl) - costheta * math.sin(
angl
) # Difference in sine.
dc = costheta * math.cos(angl) + sintheta * math.sin(
angl
) # Difference in cosine.
dtheta = torch.abs(torch.atan2(ds, dc))
spread.append(torch.exp((-(dtheta ** 2)) / (2 * theta_sigma ** 2)))
spread = torch.stack(spread)
log_gabor = torch.stack(log_gabor)
# Multiply, add batch dimension and transfer to correct device.
filters = (
(spread.repeat_interleave(scales, dim=0) * log_gabor.repeat(orientations, 1, 1))
.unsqueeze(0)
.to(x)
)
return filters
def _phase_congruency(
x: torch.Tensor,
scales: int = 4,
orientations: int = 4,
min_length: int = 6,
mult: int = 2,
sigma_f: float = 0.55,
delta_theta: float = 1.2,
k: float = 2.0,
) -> torch.Tensor:
r"""Compute Phase Congruence for a batch of greyscale images
Args:
x: Tensor. Shape :math:`(N, 1, H, W)`.
scales: Number of wavelet scales
orientations: Number of filter orientations
min_length: Wavelength of smallest scale filter
mult: Scaling factor between successive filters
sigma_f: Ratio of the standard deviation of the Gaussian
describing the log Gabor filter's transfer function
in the frequency domain to the filter center frequency.
delta_theta: Ratio of angular interval between filter orientations
and the standard deviation of the angular Gaussian function
used to construct filters in the freq. plane.
k: No of standard deviations of the noise energy beyond the mean
at which we set the noise threshold point, below which phase
congruency values get penalized.
Returns:
Phase Congruency map with shape :math:`(N, H, W)`
"""
EPS = torch.finfo(x.dtype).eps
N, _, H, W = x.shape
# Fourier transform
filters = _construct_filters(
x, scales, orientations, min_length, mult, sigma_f, delta_theta, k
)
imagefft = torch.fft.fft2(x)
filters_ifft = torch.fft.ifft2(filters)
filters_ifft = filters_ifft.real * math.sqrt(H * W)
even_odd = torch.view_as_real(torch.fft.ifft2(imagefft * filters)).view(
N, orientations, scales, H, W, 2
)
# Amplitude of even & odd filter response. An = sqrt(real^2 + imag^2)
an = torch.sqrt(torch.sum(even_odd ** 2, dim=-1))
# Take filter at scale 0 and sum spatially
# Record mean squared filter value at smallest scale.
# This is used for noise estimation.
em_n = (filters.view(1, orientations, scales, H, W)[:, :, :1, ...] ** 2).sum(
dim=[-2, -1], keepdims=True
)
# Sum of even filter convolution results.
sum_e = even_odd[..., 0].sum(dim=2, keepdims=True)
# Sum of odd filter convolution results.
sum_o = even_odd[..., 1].sum(dim=2, keepdims=True)
# Get weighted mean filter response vector, this gives the weighted mean phase angle.
x_energy = torch.sqrt(sum_e ** 2 + sum_o ** 2) + EPS
mean_e = sum_e / x_energy
mean_o = sum_o / x_energy
# Now calculate An(cos(phase_deviation) - | sin(phase_deviation)) | by
# using dot and cross products between the weighted mean filter response
# vector and the individual filter response vectors at each scale.
# This quantity is phase congruency multiplied by An, which we call energy.
# Extract even and odd convolution results.
even = even_odd[..., 0]
odd = even_odd[..., 1]
energy = (
even * mean_e + odd * mean_o - torch.abs(even * mean_o - odd * mean_e)
).sum(dim=2, keepdim=True)
# Compensate for noise
# We estimate the noise power from the energy squared response at the
# smallest scale. If the noise is Gaussian the energy squared will have a
# Chi-squared 2DOF pdf. We calculate the median energy squared response
# as this is a robust statistic. From this we estimate the mean.
# The estimate of noise power is obtained by dividing the mean squared
# energy value by the mean squared filter value
abs_eo = torch.sqrt(torch.sum(even_odd[:, :, :1, ...] ** 2, dim=-1)).reshape(
N, orientations, 1, 1, H * W
)
median_e2n = torch.median(abs_eo ** 2, dim=-1, keepdim=True).values
mean_e2n = -median_e2n / math.log(0.5)
# Estimate of noise power.
noise_power = mean_e2n / em_n
# Now estimate the total energy^2 due to noise
# Estimate for sum(An^2) + sum(Ai.*Aj.*(cphi.*cphj + sphi.*sphj))
filters_ifft = filters_ifft.view(1, orientations, scales, H, W)
sum_an2 = torch.sum(filters_ifft ** 2, dim=-3, keepdim=True)
sum_ai_aj = torch.zeros(N, orientations, 1, H, W).to(x)
for s in range(scales - 1):
sum_ai_aj = sum_ai_aj + (
filters_ifft[:, :, s : s + 1] * filters_ifft[:, :, s + 1 :]
).sum(dim=-3, keepdim=True)
sum_an2 = torch.sum(sum_an2, dim=[-1, -2], keepdim=True)
sum_ai_aj = torch.sum(sum_ai_aj, dim=[-1, -2], keepdim=True)
noise_energy2 = 2 * noise_power * sum_an2 + 4 * noise_power * sum_ai_aj
# Rayleigh parameter
tau = torch.sqrt(noise_energy2 / 2)
# Expected value of noise energy
noise_energy = tau * math.sqrt(math.pi / 2)
    noise_energy_sigma = torch.sqrt((2 - math.pi / 2) * tau ** 2)
    # Noise threshold
    T = noise_energy + k * noise_energy_sigma
# The estimated noise effect calculated above is only valid for the PC_1 measure.
# The PC_2 measure does not lend itself readily to the same analysis. However
# empirically it seems that the noise effect is overestimated roughly by a factor
# of 1.7 for the filter parameters used here.
# Empirical rescaling of the estimated noise effect to suit the PC_2 phase congruency measure
T = T / 1.7
# Apply noise threshold
energy = torch.max(energy - T, torch.zeros_like(T))
eps = torch.finfo(energy.dtype).eps
energy_all = energy.sum(dim=[1, 2]) + eps
an_all = an.sum(dim=[1, 2]) + eps
result_pc = energy_all / an_all
return result_pc.unsqueeze(1)
def _lowpassfilter(size: Tuple[int, int], cutoff: float, n: int) -> torch.Tensor:
r"""
Constructs a low-pass Butterworth filter.
Args:
        size: Tuple with height and width of filter to construct
        cutoff: Cutoff frequency of the filter in (0, 0.5]
n: Filter order. Higher `n` means sharper transition.
Note that `n` is doubled so that it is always an even integer.
Returns:
f = 1 / (1 + w/cutoff) ^ 2n
"""
assert 0 < cutoff <= 0.5, "Cutoff frequency must be between 0 and 0.5"
assert n > 1 and int(n) == n, "n must be an integer >= 1"
grid_x, grid_y = get_meshgrid(size)
# A matrix with every pixel = radius relative to centre.
radius = torch.sqrt(grid_x ** 2 + grid_y ** 2)
return ifftshift(1.0 / (1.0 + (radius / cutoff) ** (2 * n)))
@ARCH_REGISTRY.register()
class FSIM(nn.Module):
r"""Args:
chromatic: Flag to compute FSIMc, which also takes into account chromatic components
        scales: Number of wavelets used for computation of phase congruency maps
        orientations: Number of filter orientations used for computation of phase congruency maps
min_length: Wavelength of smallest scale filter
mult: Scaling factor between successive filters
sigma_f: Ratio of the standard deviation of the Gaussian describing the log Gabor filter's
transfer function in the frequency domain to the filter center frequency.
delta_theta: Ratio of angular interval between filter orientations and the standard deviation
of the angular Gaussian function used to construct filters in the frequency plane.
        k: Number of standard deviations of the noise energy beyond the mean at which we set the noise
threshold point, below which phase congruency values get penalized.
References:
L. Zhang, L. Zhang, X. Mou and D. Zhang, "FSIM: A Feature Similarity Index for Image Quality Assessment,"
IEEE Transactions on Image Processing, vol. 20, no. 8, pp. 2378-2386, Aug. 2011, doi: 10.1109/TIP.2011.2109730.
https://ieeexplore.ieee.org/document/5705575
"""
def __init__(
self,
chromatic: bool = True,
scales: int = 4,
orientations: int = 4,
min_length: int = 6,
mult: int = 2,
sigma_f: float = 0.55,
delta_theta: float = 1.2,
k: float = 2.0,
) -> None:
super().__init__()
        # Save function with predefined parameters, rather than the parameters themselves
self.fsim = functools.partial(
fsim,
chromatic=chromatic,
scales=scales,
orientations=orientations,
min_length=min_length,
mult=mult,
sigma_f=sigma_f,
delta_theta=delta_theta,
k=k,
)
def forward(
self,
X: torch.Tensor,
Y: torch.Tensor,
) -> torch.Tensor:
r"""Computation of FSIM as a loss function.
Args:
x: An input tensor. Shape :math:`(N, C, H, W)`.
y: A target tensor. Shape :math:`(N, C, H, W)`.
Returns:
Value of FSIM loss to be minimized in [0, 1] range.
"""
assert (
X.shape == Y.shape
), f"Input and reference images should have the same shape, but got {X.shape} and {Y.shape}"
score = self.fsim(X, Y)
return score
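# Minimal usage sketch for the FSIM module above, assuming RGB inputs scaled to
# [0, 1]; the tensors below are random placeholders, not real images.
if __name__ == "__main__":
    fsim_metric = FSIM(chromatic=True)
    ref = torch.rand(1, 3, 256, 256)
    dist = (ref + 0.05 * torch.randn_like(ref)).clamp(0, 1)
    print(fsim_metric(dist, ref))  # one similarity score per batch item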
| 18,004 | 36.354772 | 119 | py |
BVQI | BVQI-master/pyiqa/archs/niqe_arch.py | r"""NIQE and ILNIQE Metrics
NIQE Metric
Created by: https://github.com/xinntao/BasicSR/blob/5668ba75eb8a77e8d2dd46746a36fee0fbb0fdcd/basicsr/metrics/niqe.py
Modified by: Jiadi Mo (https://github.com/JiadiMo)
Reference:
MATLAB codes: http://live.ece.utexas.edu/research/quality/niqe_release.zip
ILNIQE Metric
Created by: Chaofeng Chen (https://github.com/chaofengc)
Reference:
- Python codes: https://github.com/IceClear/IL-NIQE/blob/master/IL-NIQE.py
- Matlab codes: https://www4.comp.polyu.edu.hk/~cslzhang/IQA/ILNIQE/Files/ILNIQE.zip
"""
import math
import numpy as np
import scipy
import scipy.io
import torch
from pyiqa.archs.fsim_arch import _construct_filters
from pyiqa.matlab_utils import (
blockproc,
conv2d,
fitweibull,
fspecial,
imfilter,
imresize,
nancov,
nanmean,
)
from pyiqa.utils.color_util import to_y_channel
from pyiqa.utils.download_util import load_file_from_url
from pyiqa.utils.registry import ARCH_REGISTRY
from .func_util import diff_round, estimate_aggd_param, normalize_img_with_guass
default_model_urls = {
"url": "https://github.com/chaofengc/IQA-PyTorch/releases/download/v0.1-weights/niqe_modelparameters.mat",
"niqe": "https://github.com/chaofengc/IQA-PyTorch/releases/download/v0.1-weights/niqe_modelparameters.mat",
"ilniqe": "https://github.com/chaofengc/IQA-PyTorch/releases/download/v0.1-weights/ILNIQE_templateModel.mat",
}
def compute_feature(
block: torch.Tensor,
ilniqe: bool = False,
) -> torch.Tensor:
"""Compute features.
Args:
block (Tensor): Image block in shape (b, c, h, w).
    Returns:
        Tensor: Feature tensor of shape (b, 18) for NIQE, or a longer feature
            vector per block when ``ilniqe`` is True.
"""
bsz = block.shape[0]
aggd_block = block[:, [0]]
alpha, beta_l, beta_r = estimate_aggd_param(aggd_block)
feat = [alpha, (beta_l + beta_r) / 2]
# distortions disturb the fairly regular structure of natural images.
# This deviation can be captured by analyzing the sample distribution of
# the products of pairs of adjacent coefficients computed along
# horizontal, vertical and diagonal orientations.
shifts = [[0, 1], [1, 0], [1, 1], [1, -1]]
for i in range(len(shifts)):
shifted_block = torch.roll(aggd_block, shifts[i], dims=(2, 3))
alpha, beta_l, beta_r = estimate_aggd_param(aggd_block * shifted_block)
# Eq. 8
mean = (beta_r - beta_l) * (
torch.lgamma(2 / alpha) - torch.lgamma(1 / alpha)
).exp()
feat.extend((alpha, mean, beta_l, beta_r))
feat = [x.reshape(bsz, 1) for x in feat]
if ilniqe:
tmp_block = block[:, 1:4]
channels = 4 - 1
shape_scale = fitweibull(tmp_block.reshape(bsz * channels, -1))
scale_shape = shape_scale[:, [1, 0]].reshape(bsz, -1)
feat.append(scale_shape)
mu = torch.mean(block[:, 4:7], dim=(2, 3))
sigmaSquare = torch.var(block[:, 4:7], dim=(2, 3))
mu_sigma = torch.stack((mu, sigmaSquare), dim=-1).reshape(bsz, -1)
feat.append(mu_sigma)
channels = 85 - 7
tmp_block = block[:, 7:85].reshape(bsz * channels, 1, *block.shape[2:])
alpha_data, beta_l_data, beta_r_data = estimate_aggd_param(tmp_block)
alpha_data = alpha_data.reshape(bsz, channels)
beta_l_data = beta_l_data.reshape(bsz, channels)
beta_r_data = beta_r_data.reshape(bsz, channels)
alpha_beta = torch.stack(
[alpha_data, (beta_l_data + beta_r_data) / 2], dim=-1
).reshape(bsz, -1)
feat.append(alpha_beta)
tmp_block = block[:, 85:109]
channels = 109 - 85
shape_scale = fitweibull(tmp_block.reshape(bsz * channels, -1))
scale_shape = shape_scale[:, [1, 0]].reshape(bsz, -1)
feat.append(scale_shape)
feat = torch.cat(feat, dim=-1)
return feat
def niqe(
img: torch.Tensor,
mu_pris_param: torch.Tensor,
cov_pris_param: torch.Tensor,
block_size_h: int = 96,
block_size_w: int = 96,
) -> torch.Tensor:
"""Calculate NIQE (Natural Image Quality Evaluator) metric.
Args:
img (Tensor): Input image.
mu_pris_param (Tensor): Mean of a pre-defined multivariate Gaussian
model calculated on the pristine dataset.
cov_pris_param (Tensor): Covariance of a pre-defined multivariate
Gaussian model calculated on the pristine dataset.
        block_size_h (int): Height of the blocks into which the image is divided.
            Default: 96 (the official recommended value).
        block_size_w (int): Width of the blocks into which the image is divided.
            Default: 96 (the official recommended value).
"""
assert (
img.ndim == 4
), "Input image must be a gray or Y (of YCbCr) image with shape (b, c, h, w)."
# crop image
b, c, h, w = img.shape
num_block_h = math.floor(h / block_size_h)
num_block_w = math.floor(w / block_size_w)
img = img[..., 0 : num_block_h * block_size_h, 0 : num_block_w * block_size_w]
distparam = [] # dist param is actually the multiscale features
for scale in (1, 2): # perform on two scales (1, 2)
img_normalized = normalize_img_with_guass(img, padding="replicate")
distparam.append(
blockproc(
img_normalized,
[block_size_h // scale, block_size_w // scale],
fun=compute_feature,
)
)
if scale == 1:
img = imresize(img / 255.0, scale=0.5, antialiasing=True)
img = img * 255.0
distparam = torch.cat(distparam, -1)
# fit a MVG (multivariate Gaussian) model to distorted patch features
mu_distparam = nanmean(distparam, dim=1)
cov_distparam = nancov(distparam)
# compute niqe quality, Eq. 10 in the paper
invcov_param = torch.linalg.pinv((cov_pris_param + cov_distparam) / 2)
diff = (mu_pris_param - mu_distparam).unsqueeze(1)
quality = torch.bmm(torch.bmm(diff, invcov_param), diff.transpose(1, 2)).squeeze()
quality = torch.sqrt(quality)
return quality
def calculate_niqe(
img: torch.Tensor,
crop_border: int = 0,
test_y_channel: bool = True,
pretrained_model_path: str = None,
color_space: str = "yiq",
**kwargs,
) -> torch.Tensor:
"""Calculate NIQE (Natural Image Quality Evaluator) metric.
Args:
img (Tensor): Input image whose quality needs to be computed.
crop_border (int): Cropped pixels in each edge of an image. These
pixels are not involved in the metric calculation.
        test_y_channel (Bool): Whether to convert to the 'y' channel (of MATLAB YCbCr) instead of 'gray'.
pretrained_model_path (str): The pretrained model path.
Returns:
Tensor: NIQE result.
"""
params = scipy.io.loadmat(pretrained_model_path)
mu_pris_param = np.ravel(params["mu_prisparam"])
cov_pris_param = params["cov_prisparam"]
mu_pris_param = torch.from_numpy(mu_pris_param).to(img)
cov_pris_param = torch.from_numpy(cov_pris_param).to(img)
mu_pris_param = mu_pris_param.repeat(img.size(0), 1)
cov_pris_param = cov_pris_param.repeat(img.size(0), 1, 1)
if test_y_channel and img.shape[1] == 3:
print(img.shape)
img = to_y_channel(img, 255, color_space)
img = diff_round(img)
img = img.to(torch.float64)
if crop_border != 0:
img = img[..., crop_border:-crop_border, crop_border:-crop_border]
niqe_result = niqe(img, mu_pris_param, cov_pris_param)
return niqe_result
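# Hedged usage sketch for `calculate_niqe`. The path argument is a placeholder:
# it must point to the official `niqe_modelparameters.mat` file (the registered
# NIQE class further down resolves it automatically via `load_file_from_url`),
# and the random tensor stands in for a real RGB image batch in [0, 1].
def _niqe_example(pretrained_model_path: str) -> torch.Tensor:
    fake_img = torch.rand(1, 3, 384, 384)  # placeholder image batch
    return calculate_niqe(
        fake_img,
        crop_border=0,
        test_y_channel=True,
        pretrained_model_path=pretrained_model_path,
    )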
def gauDerivative(sigma, in_ch=1, out_ch=1, device=None):
halfLength = math.ceil(3 * sigma)
x, y = np.meshgrid(
np.linspace(-halfLength, halfLength, 2 * halfLength + 1),
np.linspace(-halfLength, halfLength, 2 * halfLength + 1),
)
gauDerX = x * np.exp(-(x ** 2 + y ** 2) / 2 / sigma / sigma)
gauDerY = y * np.exp(-(x ** 2 + y ** 2) / 2 / sigma / sigma)
dx = torch.from_numpy(gauDerX).to(device)
dy = torch.from_numpy(gauDerY).to(device)
dx = dx.repeat(out_ch, in_ch, 1, 1)
dy = dy.repeat(out_ch, in_ch, 1, 1)
return dx, dy
def ilniqe(
img: torch.Tensor,
mu_pris_param: torch.Tensor,
cov_pris_param: torch.Tensor,
principleVectors: torch.Tensor,
meanOfSampleData: torch.Tensor,
resize: bool = True,
block_size_h: int = 84,
block_size_w: int = 84,
) -> torch.Tensor:
"""Calculate IL-NIQE (Integrated Local Natural Image Quality Evaluator) metric.
Args:
img (Tensor): Input image.
mu_pris_param (Tensor): Mean of a pre-defined multivariate Gaussian
model calculated on the pristine dataset.
cov_pris_param (Tensor): Covariance of a pre-defined multivariate
Gaussian model calculated on the pristine dataset.
principleVectors (Tensor): Features from official .mat file.
meanOfSampleData (Tensor): Features from official .mat file.
        resize (Boolean): Whether to resize the input image. Default: True.
        block_size_h (int): Height of the blocks into which the image is divided.
            Default: 84 (the official recommended value).
        block_size_w (int): Width of the blocks into which the image is divided.
            Default: 84 (the official recommended value).
"""
assert (
img.ndim == 4
), "Input image must be a gray or Y (of YCbCr) image with shape (b, c, h, w)."
sigmaForGauDerivative = 1.66
KforLog = 0.00001
normalizedWidth = 524
minWaveLength = 2.4
sigmaOnf = 0.55
mult = 1.31
dThetaOnSigma = 1.10
scaleFactorForLoG = 0.87
scaleFactorForGaussianDer = 0.28
sigmaForDownsample = 0.9
EPS = 1e-8
scales = 3
orientations = 4
infConst = 10000
nanConst = 2000
if resize:
img = imresize(img, sizes=(normalizedWidth, normalizedWidth))
img = img.clamp(0.0, 255.0)
# crop image
b, c, h, w = img.shape
num_block_h = math.floor(h / block_size_h)
num_block_w = math.floor(w / block_size_w)
img = img[..., 0 : num_block_h * block_size_h, 0 : num_block_w * block_size_w]
ospace_weight = torch.tensor(
[
[0.3, 0.04, -0.35],
[0.34, -0.6, 0.17],
[0.06, 0.63, 0.27],
]
).to(img)
O_img = img.permute(0, 2, 3, 1) @ ospace_weight.T
O_img = O_img.permute(0, 3, 1, 2)
distparam = [] # dist param is actually the multiscale features
for scale in (1, 2): # perform on two scales (1, 2)
struct_dis = normalize_img_with_guass(
O_img[:, [2]], kernel_size=5, sigma=5.0 / 6, padding="replicate"
)
dx, dy = gauDerivative(
sigmaForGauDerivative / (scale ** scaleFactorForGaussianDer), device=img
)
Ix = conv2d(O_img, dx.repeat(3, 1, 1, 1), groups=3)
Iy = conv2d(O_img, dy.repeat(3, 1, 1, 1), groups=3)
GM = torch.sqrt(Ix ** 2 + Iy ** 2 + EPS)
Ixy = torch.stack((Ix, Iy), dim=2).reshape(
Ix.shape[0], Ix.shape[1] * 2, *Ix.shape[2:]
) # reshape to (IxO1, IxO1, IxO2, IyO2, IxO3, IyO3)
logRGB = torch.log(img + KforLog)
logRGBMS = logRGB - logRGB.mean(dim=(2, 3), keepdim=True)
Intensity = logRGBMS.sum(dim=1, keepdim=True) / np.sqrt(3)
BY = (logRGBMS[:, [0]] + logRGBMS[:, [1]] - 2 * logRGBMS[:, [2]]) / np.sqrt(6)
RG = (logRGBMS[:, [0]] - logRGBMS[:, [1]]) / np.sqrt(2)
compositeMat = torch.cat([struct_dis, GM, Intensity, BY, RG, Ixy], dim=1)
O3 = O_img[:, [2]]
# gabor filter in shape (b, ori * scale, h, w)
LGFilters = _construct_filters(
O3,
scales=scales,
orientations=orientations,
min_length=minWaveLength / (scale ** scaleFactorForLoG),
sigma_f=sigmaOnf,
mult=mult,
delta_theta=dThetaOnSigma,
use_lowpass_filter=False,
)
# reformat to scale * ori
b, _, h, w = LGFilters.shape
LGFilters = (
LGFilters.reshape(b, orientations, scales, h, w)
.transpose(1, 2)
.reshape(b, -1, h, w)
)
# TODO: current filters needs to be transposed to get same results as matlab, find the bug
LGFilters = LGFilters.transpose(-1, -2)
fftIm = torch.fft.fft2(O3)
logResponse = []
partialDer = []
GM = []
for index in range(LGFilters.shape[1]):
filter = LGFilters[:, [index]]
response = torch.fft.ifft2(filter * fftIm)
realRes = torch.real(response)
imagRes = torch.imag(response)
partialXReal = conv2d(realRes, dx)
partialYReal = conv2d(realRes, dy)
realGM = torch.sqrt(partialXReal ** 2 + partialYReal ** 2 + EPS)
partialXImag = conv2d(imagRes, dx)
partialYImag = conv2d(imagRes, dy)
imagGM = torch.sqrt(partialXImag ** 2 + partialYImag ** 2 + EPS)
logResponse.append(realRes)
logResponse.append(imagRes)
partialDer.append(partialXReal)
partialDer.append(partialYReal)
partialDer.append(partialXImag)
partialDer.append(partialYImag)
GM.append(realGM)
GM.append(imagGM)
logResponse = torch.cat(logResponse, dim=1)
partialDer = torch.cat(partialDer, dim=1)
GM = torch.cat(GM, dim=1)
compositeMat = torch.cat((compositeMat, logResponse, partialDer, GM), dim=1)
distparam.append(
blockproc(
compositeMat,
[block_size_h // scale, block_size_w // scale],
fun=compute_feature,
ilniqe=True,
)
)
gauForDS = fspecial(math.ceil(6 * sigmaForDownsample), sigmaForDownsample).to(
img
)
filterResult = imfilter(
O_img, gauForDS.repeat(3, 1, 1, 1), padding="replicate", groups=3
)
O_img = filterResult[..., ::2, ::2]
filterResult = imfilter(
img, gauForDS.repeat(3, 1, 1, 1), padding="replicate", groups=3
)
img = filterResult[..., ::2, ::2]
distparam = torch.cat(distparam, dim=-1) # b, block_num, feature_num
distparam[distparam > infConst] = infConst
# fit a MVG (multivariate Gaussian) model to distorted patch features
coefficientsViaPCA = torch.bmm(
principleVectors.transpose(1, 2),
(distparam - meanOfSampleData.unsqueeze(1)).transpose(1, 2),
)
final_features = coefficientsViaPCA.transpose(1, 2)
b, blk_num, feat_num = final_features.shape
# remove block features with nan and compute nonan cov
cov_distparam = nancov(final_features)
# replace nan in final features with mu
mu_final_features = nanmean(final_features, dim=1, keepdim=True)
final_features_withmu = torch.where(
torch.isnan(final_features), mu_final_features, final_features
)
# compute ilniqe quality
invcov_param = torch.linalg.pinv((cov_pris_param + cov_distparam) / 2)
diff = final_features_withmu - mu_pris_param.unsqueeze(1)
quality = (torch.bmm(diff, invcov_param) * diff).sum(dim=-1)
quality = torch.sqrt(quality).mean(dim=1)
return quality
def calculate_ilniqe(
img: torch.Tensor, crop_border: int = 0, pretrained_model_path: str = None, **kwargs
) -> torch.Tensor:
"""Calculate IL-NIQE metric.
Args:
img (Tensor): Input image whose quality needs to be computed.
crop_border (int): Cropped pixels in each edge of an image. These
pixels are not involved in the metric calculation.
pretrained_model_path (str): The pretrained model path.
Returns:
Tensor: IL-NIQE result.
"""
params = scipy.io.loadmat(pretrained_model_path)
img = img * 255.0
img = diff_round(img)
# float64 precision is critical to be consistent with matlab codes
img = img.to(torch.float64)
mu_pris_param = np.ravel(params["templateModel"][0][0])
cov_pris_param = params["templateModel"][0][1]
meanOfSampleData = np.ravel(params["templateModel"][0][2])
principleVectors = params["templateModel"][0][3]
mu_pris_param = torch.from_numpy(mu_pris_param).to(img)
cov_pris_param = torch.from_numpy(cov_pris_param).to(img)
meanOfSampleData = torch.from_numpy(meanOfSampleData).to(img)
principleVectors = torch.from_numpy(principleVectors).to(img)
mu_pris_param = mu_pris_param.repeat(img.size(0), 1)
cov_pris_param = cov_pris_param.repeat(img.size(0), 1, 1)
meanOfSampleData = meanOfSampleData.repeat(img.size(0), 1)
principleVectors = principleVectors.repeat(img.size(0), 1, 1)
if crop_border != 0:
img = img[..., crop_border:-crop_border, crop_border:-crop_border]
ilniqe_result = ilniqe(
img, mu_pris_param, cov_pris_param, principleVectors, meanOfSampleData
)
return ilniqe_result
@ARCH_REGISTRY.register()
class NIQE(torch.nn.Module):
r"""Args:
channels (int): Number of processed channel.
test_y_channel (bool): whether to use y channel on ycbcr.
crop_border (int): Cropped pixels in each edge of an image. These
pixels are not involved in the metric calculation.
pretrained_model_path (str): The pretrained model path.
References:
Mittal, Anish, Rajiv Soundararajan, and Alan C. Bovik.
"Making a “completely blind” image quality analyzer."
IEEE Signal Processing Letters (SPL) 20.3 (2012): 209-212.
"""
def __init__(
self,
channels: int = 1,
test_y_channel: bool = True,
color_space: str = "yiq",
crop_border: int = 0,
pretrained_model_path: str = None,
) -> None:
super(NIQE, self).__init__()
self.channels = channels
self.test_y_channel = test_y_channel
self.color_space = color_space
self.crop_border = crop_border
if pretrained_model_path is not None:
self.pretrained_model_path = pretrained_model_path
else:
self.pretrained_model_path = load_file_from_url(default_model_urls["url"])
def forward(self, X: torch.Tensor) -> torch.Tensor:
r"""Computation of NIQE metric.
Args:
X: An input tensor. Shape :math:`(N, C, H, W)`.
Returns:
            Value of the NIQE metric; lower scores indicate better perceptual quality.
"""
score = calculate_niqe(
X,
self.crop_border,
self.test_y_channel,
self.pretrained_model_path,
self.color_space,
)
return score
@ARCH_REGISTRY.register()
class ILNIQE(torch.nn.Module):
r"""Args:
channels (int): Number of processed channel.
test_y_channel (bool): whether to use y channel on ycbcr.
crop_border (int): Cropped pixels in each edge of an image. These
pixels are not involved in the metric calculation.
pretrained_model_path (str): The pretrained model path.
References:
Zhang, Lin, Lei Zhang, and Alan C. Bovik. "A feature-enriched
completely blind image quality evaluator." IEEE Transactions
on Image Processing 24.8 (2015): 2579-2591.
"""
def __init__(
self, channels: int = 3, crop_border: int = 0, pretrained_model_path: str = None
) -> None:
super(ILNIQE, self).__init__()
self.channels = channels
self.crop_border = crop_border
if pretrained_model_path is not None:
self.pretrained_model_path = pretrained_model_path
else:
self.pretrained_model_path = load_file_from_url(
default_model_urls["ilniqe"]
)
def forward(self, X: torch.Tensor) -> torch.Tensor:
r"""Computation of NIQE metric.
Args:
X: An input tensor. Shape :math:`(N, C, H, W)`.
Returns:
            Value of the IL-NIQE metric; lower scores indicate better perceptual quality.
"""
score = calculate_ilniqe(X, self.crop_border, self.pretrained_model_path)
return score
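# Minimal sketch of how the registered metrics above might be used; the default
# weights are fetched on first use via `load_file_from_url`, and the random
# tensor is a placeholder for an RGB image batch in [0, 1].
if __name__ == "__main__":
    img = torch.rand(1, 3, 384, 384)
    niqe_metric = NIQE()
    ilniqe_metric = ILNIQE()
    print(niqe_metric(img), ilniqe_metric(img))  # lower scores mean better quality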
| 20,124 | 35.196043 | 120 | py |
BVQI | BVQI-master/pyiqa/archs/wadiqam_arch.py | r"""WaDIQaM model.
Reference:
Bosse, Sebastian, Dominique Maniry, Klaus-Robert Müller, Thomas Wiegand,
and Wojciech Samek. "Deep neural networks for no-reference and full-reference
image quality assessment." IEEE Transactions on image processing 27, no. 1
(2017): 206-219.
Created by: https://github.com/lidq92/WaDIQaM
Modified by: Chaofeng Chen (https://github.com/chaofengc)
Refer to:
Official code from https://github.com/dmaniry/deepIQA
"""
from typing import List, Union, cast
import torch
import torch.nn as nn
from pyiqa.utils.registry import ARCH_REGISTRY
def make_layers(cfg: List[Union[str, int]]) -> nn.Sequential:
layers: List[nn.Module] = []
in_channels = 3
for v in cfg:
if v == "M":
layers += [nn.MaxPool2d(kernel_size=2, stride=2)]
else:
v = cast(int, v)
conv2d = nn.Conv2d(in_channels, v, kernel_size=3, padding=1)
layers += [conv2d, nn.ReLU(inplace=True)]
in_channels = v
return nn.Sequential(*layers)
@ARCH_REGISTRY.register()
class WaDIQaM(nn.Module):
"""WaDIQaM model.
Args:
        metric_mode (String): Metric mode, either "FR" (full-reference) or "NR" (no-reference).
        weighted_average (Boolean): Whether to pool patch scores with learned weights.
        train_patch_num (int): Number of random patches sampled per image during training. Default: 32.
        pretrained_model_path (String): The pretrained model path.
        load_feature_weight_only (Boolean): Only load the backbone feature weights.
        eps (float): Small constant added to the weights to avoid division by zero.
"""
def __init__(
self,
metric_mode="FR",
weighted_average=True,
train_patch_num=32,
pretrained_model_path=None,
load_feature_weight_only=False,
eps=1e-8,
):
super(WaDIQaM, self).__init__()
backbone_cfg = [
32,
32,
"M",
64,
64,
"M",
128,
128,
"M",
256,
256,
"M",
512,
512,
"M",
]
self.features = make_layers(backbone_cfg)
self.train_patch_num = train_patch_num
self.patch_size = 32 # This cannot be changed due to network design
self.metric_mode = metric_mode
fc_in_channel = 512 * 3 if metric_mode == "FR" else 512
self.eps = eps
self.fc_q = nn.Sequential(
nn.Linear(fc_in_channel, 512),
nn.ReLU(True),
nn.Dropout(),
nn.Linear(512, 1),
)
self.weighted_average = weighted_average
if weighted_average:
self.fc_w = nn.Sequential(
nn.Linear(fc_in_channel, 512),
nn.ReLU(True),
nn.Dropout(),
nn.Linear(512, 1),
nn.ReLU(True),
)
if pretrained_model_path is not None:
self.load_pretrained_network(
pretrained_model_path, load_feature_weight_only
)
def load_pretrained_network(self, model_path, load_feature_weight_only=False):
state_dict = torch.load(model_path, map_location=torch.device("cpu"))[
"state_dict"
]
if load_feature_weight_only:
print("Only load backbone feature net")
new_state_dict = {}
for k in state_dict.keys():
if "features" in k:
new_state_dict[k] = state_dict[k]
            self.load_state_dict(new_state_dict, strict=False)
        else:
            self.load_state_dict(state_dict, strict=True)
def _get_random_patches(self, x, y=None):
"""train with random crop patches"""
self.patch_num = self.train_patch_num
b, c, h, w = x.shape
th = tw = self.patch_size
cropped_x = []
cropped_y = []
for s in range(self.train_patch_num):
i = torch.randint(0, h - th + 1, size=(1,)).item()
j = torch.randint(0, w - tw + 1, size=(1,)).item()
cropped_x.append(x[:, :, i : i + th, j : j + tw])
if y is not None:
cropped_y.append(y[:, :, i : i + th, j : j + tw])
if y is not None:
cropped_x = torch.stack(cropped_x, dim=1).reshape(-1, c, th, tw)
cropped_y = torch.stack(cropped_y, dim=1).reshape(-1, c, th, tw)
return cropped_x, cropped_y
else:
cropped_x = torch.stack(cropped_x, dim=1).reshape(-1, c, th, tw)
return cropped_x
def _get_nonoverlap_patches(self, x, y=None):
"""test with non overlap patches"""
self.patch_num = 0
b, c, h, w = x.shape
th = tw = self.patch_size
cropped_x = []
cropped_y = []
for i in range(0, h - th, th):
for j in range(0, w - tw, tw):
cropped_x.append(x[:, :, i : i + th, j : j + tw])
if y is not None:
cropped_y.append(y[:, :, i : i + th, j : j + tw])
self.patch_num += 1
if y is not None:
cropped_x = torch.stack(cropped_x, dim=1).reshape(-1, c, th, tw)
cropped_y = torch.stack(cropped_y, dim=1).reshape(-1, c, th, tw)
return cropped_x, cropped_y
else:
cropped_x = torch.stack(cropped_x, dim=1).reshape(-1, c, th, tw)
return cropped_x
def get_patches(self, x, y=None):
if self.training:
return self._get_random_patches(x, y)
else:
return self._get_nonoverlap_patches(x, y)
def extract_features(self, patches):
h = self.features(patches)
h = h.reshape(-1, self.patch_num, 512)
return h
def forward(self, x, y=None):
r"""WaDIQaM model.
Args:
x: An input tensor. Shape :math:`(N, C, H, W)`.
y: A reference tensor. Shape :math:`(N, C, H, W)`.
"""
if self.metric_mode == "FR":
assert y is not None, "Full reference metric requires reference input"
x_patches, y_patches = self.get_patches(x, y)
feat_img = self.extract_features(x_patches)
feat_ref = self.extract_features(y_patches)
feat_q = torch.cat((feat_ref, feat_img, feat_img - feat_ref), dim=-1)
else:
x_patches = self.get_patches(x)
feat_q = self.extract_features(x_patches)
q_score = self.fc_q(feat_q)
weight = self.fc_w(feat_q) + self.eps # add eps to avoid training collapse
if self.weighted_average:
q_final = torch.sum(q_score * weight, dim=1) / torch.sum(weight, dim=1)
else:
q_final = q_score.mean(dim=1)
return q_final.reshape(-1, 1)
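# Usage sketch for the WaDIQaM model above. Inputs are assumed to be RGB tensors
# in [0, 1] larger than the fixed 32x32 patch size; without `pretrained_model_path`
# the scores come from randomly initialised weights, so they are only illustrative.
if __name__ == "__main__":
    fr_model = WaDIQaM(metric_mode="FR").eval()
    dist = torch.rand(1, 3, 256, 256)
    ref = torch.rand(1, 3, 256, 256)
    with torch.no_grad():
        print(fr_model(dist, ref))   # full-reference score
    nr_model = WaDIQaM(metric_mode="NR").eval()
    with torch.no_grad():
        print(nr_model(dist))        # no-reference score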
| 6,704 | 31.391304 | 83 | py |
BVQI | BVQI-master/pyiqa/archs/cnniqa_arch.py | r"""CNNIQA Model.
Created by: https://github.com/lidq92/CNNIQA
Modified by: Chaofeng Chen (https://github.com/chaofengc)
Modification:
- We use 3 channel RGB input.
"""
import torch
import torch.nn as nn
import torch.nn.functional as F
from pyiqa.archs.arch_util import load_pretrained_network
from pyiqa.utils.registry import ARCH_REGISTRY
default_model_urls = {
"koniq10k": "https://github.com/chaofengc/IQA-PyTorch/releases/download/v0.1-weights/CNNIQA_koniq10k-fd89516f.pth"
}
@ARCH_REGISTRY.register()
class CNNIQA(nn.Module):
r"""CNNIQA model.
Args:
ker_size (int): Kernel size.
        n_kers (int): Number of kernels.
n1_nodes (int): Number of n1 nodes.
n2_nodes (int): Number of n2 nodes.
pretrained_model_path (String): Pretrained model path.
Reference:
Kang, Le, Peng Ye, Yi Li, and David Doermann. "Convolutional
neural networks for no-reference image quality assessment."
In Proceedings of the IEEE conference on computer vision and
pattern recognition, pp. 1733-1740. 2014.
"""
def __init__(
self,
ker_size=7,
n_kers=50,
n1_nodes=800,
n2_nodes=800,
pretrained="koniq10k",
pretrained_model_path=None,
):
super(CNNIQA, self).__init__()
self.conv1 = nn.Conv2d(3, n_kers, ker_size)
self.fc1 = nn.Linear(2 * n_kers, n1_nodes)
self.fc2 = nn.Linear(n1_nodes, n2_nodes)
self.fc3 = nn.Linear(n2_nodes, 1)
self.dropout = nn.Dropout()
if pretrained_model_path is None and pretrained is not None:
pretrained_model_path = default_model_urls[pretrained]
if pretrained_model_path is not None:
load_pretrained_network(self, pretrained_model_path, True, "params")
def forward(self, x):
r"""Compute IQA using CNNIQA model.
Args:
x: An input tensor with (N, C, H, W) shape. RGB channel order for colour images.
Returns:
Value of CNNIQA model.
"""
h = self.conv1(x)
h1 = F.max_pool2d(h, (h.size(-2), h.size(-1)))
h2 = -F.max_pool2d(-h, (h.size(-2), h.size(-1)))
h = torch.cat((h1, h2), 1) # max-min pooling
h = h.squeeze(3).squeeze(2)
h = F.relu(self.fc1(h))
h = self.dropout(h)
h = F.relu(self.fc2(h))
q = self.fc3(h)
return q
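# Usage sketch for CNNIQA, assuming RGB input in [0, 1]; by default the
# constructor fetches KonIQ-10k weights from `default_model_urls`, so pass
# `pretrained=None` to run offline with random weights (scores then meaningless).
if __name__ == "__main__":
    model = CNNIQA(pretrained=None).eval()
    with torch.no_grad():
        print(model(torch.rand(1, 3, 224, 224)))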
| 2,425 | 26.568182 | 118 | py |
BVQI | BVQI-master/pyiqa/archs/iqt_arch.py | import numpy as np
import timm
import torch
import torch.nn as nn
import torch.nn.functional as F
from einops import rearrange, repeat
from einops.layers.torch import Rearrange
from timm.models.resnet import BasicBlock, Bottleneck
from timm.models.vision_transformer import Block
from torchvision.ops.deform_conv import DeformConv2d
from pyiqa.archs.arch_util import (
ExactPadding2d,
default_init_weights,
load_pretrained_network,
to_2tuple,
)
from pyiqa.utils.registry import ARCH_REGISTRY
class IQARegression(nn.Module):
def __init__(self, config):
super().__init__()
self.config = config
self.conv_enc = nn.Conv2d(
in_channels=320 * 6, out_channels=config.d_hidn, kernel_size=1
)
self.conv_dec = nn.Conv2d(
in_channels=320 * 6, out_channels=config.d_hidn, kernel_size=1
)
self.transformer = Transformer(self.config)
self.projection = nn.Sequential(
nn.Linear(self.config.d_hidn, self.config.d_MLP_head, bias=False),
nn.ReLU(),
nn.Linear(self.config.d_MLP_head, self.config.n_output, bias=False),
)
def forward(self, enc_inputs, enc_inputs_embed, dec_inputs, dec_inputs_embed):
# batch x (320*6) x 29 x 29 -> batch x 256 x 29 x 29
enc_inputs_embed = self.conv_enc(enc_inputs_embed)
dec_inputs_embed = self.conv_dec(dec_inputs_embed)
# batch x 256 x 29 x 29 -> batch x 256 x (29*29)
b, c, h, w = enc_inputs_embed.size()
enc_inputs_embed = torch.reshape(enc_inputs_embed, (b, c, h * w))
enc_inputs_embed = enc_inputs_embed.permute(0, 2, 1)
# batch x 256 x (29*29) -> batch x (29*29) x 256
dec_inputs_embed = torch.reshape(dec_inputs_embed, (b, c, h * w))
dec_inputs_embed = dec_inputs_embed.permute(0, 2, 1)
# (bs, n_dec_seq+1, d_hidn), [(bs, n_head, n_enc_seq+1, n_enc_seq+1)], [(bs, n_head, n_dec_seq+1, n_dec_seq+1)], [(bs, n_head, n_dec_seq+1, n_enc_seq+1)]
(
dec_outputs,
enc_self_attn_probs,
dec_self_attn_probs,
dec_enc_attn_probs,
) = self.transformer(enc_inputs, enc_inputs_embed, dec_inputs, dec_inputs_embed)
# (bs, n_dec_seq+1, d_hidn) -> (bs, d_hidn)
# dec_outputs, _ = torch.max(dec_outputs, dim=1) # original transformer
dec_outputs = dec_outputs[:, 0, :] # in the IQA paper
# dec_outputs = torch.mean(dec_outputs, dim=1) # general idea
# (bs, n_output)
pred = self.projection(dec_outputs)
return pred
""" transformer """
class Transformer(nn.Module):
def __init__(self, config):
super().__init__()
self.config = config
self.encoder = Encoder(self.config)
self.decoder = Decoder(self.config)
def forward(self, enc_inputs, enc_inputs_embed, dec_inputs, dec_inputs_embed):
# (bs, n_enc_seq, d_hidn), [(bs, n_head, n_enc_seq, n_enc_seq)]
enc_outputs, enc_self_attn_probs = self.encoder(enc_inputs, enc_inputs_embed)
# (bs, n_seq, d_hidn), [(bs, n_head, n_dec_seq, n_dec_seq)], [(bs, n_head, n_dec_seq, n_enc_seq)]
dec_outputs, dec_self_attn_probs, dec_enc_attn_probs = self.decoder(
dec_inputs, dec_inputs_embed, enc_inputs, enc_outputs
)
# (bs, n_dec_seq, n_dec_vocab), [(bs, n_head, n_enc_seq, n_enc_seq)], [(bs, n_head, n_dec_seq, n_dec_seq)], [(bs, n_head, n_dec_seq, n_enc_seq)]
return dec_outputs, enc_self_attn_probs, dec_self_attn_probs, dec_enc_attn_probs
""" encoder """
class Encoder(nn.Module):
def __init__(self, config):
super().__init__()
self.config = config
# fixed position embedding
# sinusoid_table = torch.FloatTensor(get_sinusoid_encoding_table(self.config.n_enc_seq+1, self.config.d_hidn))
# self.pos_emb = nn.Embedding.from_pretrained(sinusoid_table, freeze=True)
# learnable position embedding
self.pos_embedding = nn.Parameter(
torch.randn(1, self.config.n_enc_seq + 1, self.config.d_hidn)
)
self.cls_token = nn.Parameter(torch.randn(1, 1, self.config.d_hidn))
self.dropout = nn.Dropout(self.config.emb_dropout)
self.layers = nn.ModuleList(
[EncoderLayer(self.config) for _ in range(self.config.n_layer)]
)
def forward(self, inputs, inputs_embed):
# inputs: batch x (len_seq+1) / inputs_embed: batch x len_seq x n_feat
b, n, _ = inputs_embed.shape
# positions: batch x (len_seq+1)
positions = (
torch.arange(inputs.size(1), device=inputs.device, dtype=torch.int64)
.expand(inputs.size(0), inputs.size(1))
.contiguous()
+ 1
)
pos_mask = inputs.eq(self.config.i_pad)
positions.masked_fill_(pos_mask, 0)
# outputs: batch x (len_seq+1) x n_feat
cls_tokens = repeat(self.cls_token, "() n d -> b n d", b=b)
x = torch.cat((cls_tokens, inputs_embed), dim=1)
x += self.pos_embedding
# x += self.pos_emb(positions)
outputs = self.dropout(x)
# (bs, n_enc_seq+1, n_enc_seq+1)
attn_mask = get_attn_pad_mask(inputs, inputs, self.config.i_pad)
attn_probs = []
for layer in self.layers:
# (bs, n_enc_seq+1, d_hidn), (bs, n_head, n_enc_seq+1, n_enc_seq+1)
outputs, attn_prob = layer(outputs, attn_mask)
attn_probs.append(attn_prob)
# (bs, n_enc_seq+1, d_hidn), [(bs, n_head, n_enc_seq+1, n_enc_seq+1)]
return outputs, attn_probs
""" encoder layer """
class EncoderLayer(nn.Module):
def __init__(self, config):
super().__init__()
self.config = config
self.self_attn = MultiHeadAttention(self.config)
self.layer_norm1 = nn.LayerNorm(
self.config.d_hidn, eps=self.config.layer_norm_epsilon
)
self.pos_ffn = PoswiseFeedForwardNet(self.config)
self.layer_norm2 = nn.LayerNorm(
self.config.d_hidn, eps=self.config.layer_norm_epsilon
)
def forward(self, inputs, attn_mask):
# (bs, n_enc_seq, d_hidn), (bs, n_head, n_enc_seq, n_enc_seq)
att_outputs, attn_prob = self.self_attn(inputs, inputs, inputs, attn_mask)
att_outputs = self.layer_norm1(inputs + att_outputs)
# (bs, n_enc_seq, d_hidn)
ffn_outputs = self.pos_ffn(att_outputs)
ffn_outputs = self.layer_norm2(ffn_outputs + att_outputs)
# (bs, n_enc_seq, d_hidn), (bs, n_head, n_enc_seq, n_enc_seq)
return ffn_outputs, attn_prob
def get_sinusoid_encoding_table(n_seq, d_hidn):
def cal_angle(position, i_hidn):
return position / np.power(10000, 2 * (i_hidn // 2) / d_hidn)
def get_posi_angle_vec(position):
return [cal_angle(position, i_hidn) for i_hidn in range(d_hidn)]
sinusoid_table = np.array([get_posi_angle_vec(i_seq) for i_seq in range(n_seq)])
sinusoid_table[:, 0::2] = np.sin(sinusoid_table[:, 0::2]) # even index sin
sinusoid_table[:, 1::2] = np.cos(sinusoid_table[:, 1::2]) # odd index cos
return sinusoid_table
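# Small illustration of the fixed sinusoidal table above (kept for reference even
# though the encoder/decoder below use a learnable position embedding): even
# columns hold sines, odd columns cosines, one row per sequence position.
# Assumes `numpy` is already imported as `np` at the top of this module.
def _sinusoid_table_example() -> None:
    table = get_sinusoid_encoding_table(n_seq=4, d_hidn=8)
    print(table.shape)  # (4, 8)
    print(np.allclose(table[0, 0::2], 0.0), np.allclose(table[0, 1::2], 1.0))  # True True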
""" attention pad mask """
def get_attn_pad_mask(seq_q, seq_k, i_pad):
batch_size, len_q = seq_q.size()
batch_size, len_k = seq_k.size()
pad_attn_mask = seq_k.data.eq(i_pad)
pad_attn_mask = pad_attn_mask.unsqueeze(1).expand(batch_size, len_q, len_k)
return pad_attn_mask
""" multi head attention """
class MultiHeadAttention(nn.Module):
def __init__(self, config):
super().__init__()
self.config = config
self.W_Q = nn.Linear(
self.config.d_hidn, self.config.n_head * self.config.d_head
)
self.W_K = nn.Linear(
self.config.d_hidn, self.config.n_head * self.config.d_head
)
self.W_V = nn.Linear(
self.config.d_hidn, self.config.n_head * self.config.d_head
)
self.scaled_dot_attn = ScaledDotProductAttention(self.config)
self.linear = nn.Linear(
self.config.n_head * self.config.d_head, self.config.d_hidn
)
self.dropout = nn.Dropout(config.dropout)
def forward(self, Q, K, V, attn_mask):
batch_size = Q.size(0)
# (bs, n_head, n_q_seq, d_head)
q_s = (
self.W_Q(Q)
.view(batch_size, -1, self.config.n_head, self.config.d_head)
.transpose(1, 2)
)
# (bs, n_head, n_k_seq, d_head)
k_s = (
self.W_K(K)
.view(batch_size, -1, self.config.n_head, self.config.d_head)
.transpose(1, 2)
)
# (bs, n_head, n_v_seq, d_head)
v_s = (
self.W_V(V)
.view(batch_size, -1, self.config.n_head, self.config.d_head)
.transpose(1, 2)
)
# (bs, n_head, n_q_seq, n_k_seq)
attn_mask = attn_mask.unsqueeze(1).repeat(1, self.config.n_head, 1, 1)
# (bs, n_head, n_q_seq, d_head), (bs, n_head, n_q_seq, n_k_seq)
context, attn_prob = self.scaled_dot_attn(q_s, k_s, v_s, attn_mask)
# (bs, n_head, n_q_seq, h_head * d_head)
context = (
context.transpose(1, 2)
.contiguous()
.view(batch_size, -1, self.config.n_head * self.config.d_head)
)
# (bs, n_head, n_q_seq, e_embd)
output = self.linear(context)
output = self.dropout(output)
# (bs, n_q_seq, d_hidn), (bs, n_head, n_q_seq, n_k_seq)
return output, attn_prob
""" scale dot product attention """
class ScaledDotProductAttention(nn.Module):
def __init__(self, config):
super().__init__()
self.config = config
self.dropout = nn.Dropout(config.dropout)
self.scale = 1 / (self.config.d_head ** 0.5)
def forward(self, Q, K, V, attn_mask):
# (bs, n_head, n_q_seq, n_k_seq)
scores = torch.matmul(Q, K.transpose(-1, -2))
scores = scores.mul_(self.scale)
scores.masked_fill_(attn_mask, -1e9)
# (bs, n_head, n_q_seq, n_k_seq)
attn_prob = nn.Softmax(dim=-1)(scores)
attn_prob = self.dropout(attn_prob)
# (bs, n_head, n_q_seq, d_v)
context = torch.matmul(attn_prob, V)
# (bs, n_head, n_q_seq, d_v), (bs, n_head, n_q_seq, n_v_seq)
return context, attn_prob
""" feed forward """
class PoswiseFeedForwardNet(nn.Module):
def __init__(self, config):
super().__init__()
self.config = config
self.conv1 = nn.Conv1d(
in_channels=self.config.d_hidn, out_channels=self.config.d_ff, kernel_size=1
)
self.conv2 = nn.Conv1d(
in_channels=self.config.d_ff, out_channels=self.config.d_hidn, kernel_size=1
)
self.active = F.gelu
self.dropout = nn.Dropout(config.dropout)
def forward(self, inputs):
# (bs, d_ff, n_seq)
output = self.conv1(inputs.transpose(1, 2))
output = self.active(output)
# (bs, n_seq, d_hidn)
output = self.conv2(output).transpose(1, 2)
output = self.dropout(output)
# (bs, n_seq, d_hidn)
return output
""" decoder """
class Decoder(nn.Module):
def __init__(self, config):
super().__init__()
self.config = config
self.pos_embedding = nn.Parameter(
torch.randn(1, self.config.n_enc_seq + 1, self.config.d_hidn)
)
self.cls_token = nn.Parameter(torch.randn(1, 1, self.config.d_hidn))
self.dropout = nn.Dropout(self.config.emb_dropout)
self.layers = nn.ModuleList(
[DecoderLayer(self.config) for _ in range(self.config.n_layer)]
)
def forward(self, dec_inputs, dec_inputs_embed, enc_inputs, enc_outputs):
# enc_inputs: batch x (len_seq+1) / enc_outputs: batch x (len_seq+1) x n_feat
# dec_inputs: batch x (len_seq+1) / dec_inputs_embed: batch x len_seq x n_feat
b, n, _ = dec_inputs_embed.shape
cls_tokens = repeat(self.cls_token, "() n d -> b n d", b=b)
x = torch.cat((cls_tokens, dec_inputs_embed), dim=1)
x += self.pos_embedding[:, : (n + 1)]
# (bs, n_dec_seq+1, d_hidn)
dec_outputs = self.dropout(x)
# (bs, n_dec_seq+1, n_dec_seq+1)
dec_attn_pad_mask = get_attn_pad_mask(dec_inputs, dec_inputs, self.config.i_pad)
# (bs, n_dec_seq+1, n_dec_seq+1)
dec_attn_decoder_mask = get_attn_decoder_mask(dec_inputs)
# (bs, n_dec_seq+1, n_dec_seq+1)
dec_self_attn_mask = torch.gt((dec_attn_pad_mask + dec_attn_decoder_mask), 0)
# (bs, n_dec_seq+1, n_enc_seq+1)
dec_enc_attn_mask = get_attn_pad_mask(dec_inputs, enc_inputs, self.config.i_pad)
self_attn_probs, dec_enc_attn_probs = [], []
for layer in self.layers:
# (bs, n_dec_seq+1, d_hidn), (bs, n_dec_seq+1, n_dec_seq+1), (bs, n_dec_seq+1, n_enc_seq+1)
dec_outputs, self_attn_prob, dec_enc_attn_prob = layer(
dec_outputs, enc_outputs, dec_self_attn_mask, dec_enc_attn_mask
)
self_attn_probs.append(self_attn_prob)
dec_enc_attn_probs.append(dec_enc_attn_prob)
# (bs, n_dec_seq+1, d_hidn), [(bs, n_dec_seq+1, n_dec_seq+1)], [(bs, n_dec_seq+1, n_enc_seq+1)]
return dec_outputs, self_attn_probs, dec_enc_attn_probs
""" decoder layer """
class DecoderLayer(nn.Module):
def __init__(self, config):
super().__init__()
self.config = config
self.self_attn = MultiHeadAttention(self.config)
self.layer_norm1 = nn.LayerNorm(
self.config.d_hidn, eps=self.config.layer_norm_epsilon
)
self.dec_enc_attn = MultiHeadAttention(self.config)
self.layer_norm2 = nn.LayerNorm(
self.config.d_hidn, eps=self.config.layer_norm_epsilon
)
self.pos_ffn = PoswiseFeedForwardNet(self.config)
self.layer_norm3 = nn.LayerNorm(
self.config.d_hidn, eps=self.config.layer_norm_epsilon
)
def forward(self, dec_inputs, enc_outputs, self_attn_mask, dec_enc_attn_mask):
# (bs, n_dec_seq, d_hidn), (bs, n_head, n_dec_seq, n_dec_seq)
self_att_outputs, self_attn_prob = self.self_attn(
dec_inputs, dec_inputs, dec_inputs, self_attn_mask
)
self_att_outputs = self.layer_norm1(dec_inputs + self_att_outputs)
# (bs, n_dec_seq, d_hidn), (bs, n_head, n_dec_seq, n_enc_seq)
dec_enc_att_outputs, dec_enc_attn_prob = self.dec_enc_attn(
self_att_outputs, enc_outputs, enc_outputs, dec_enc_attn_mask
)
dec_enc_att_outputs = self.layer_norm2(self_att_outputs + dec_enc_att_outputs)
# (bs, n_dec_seq, d_hidn)
ffn_outputs = self.pos_ffn(dec_enc_att_outputs)
ffn_outputs = self.layer_norm3(dec_enc_att_outputs + ffn_outputs)
# (bs, n_dec_seq, d_hidn), (bs, n_head, n_dec_seq, n_dec_seq), (bs, n_head, n_dec_seq, n_enc_seq)
return ffn_outputs, self_attn_prob, dec_enc_attn_prob
""" attention decoder mask """
def get_attn_decoder_mask(seq):
subsequent_mask = (
torch.ones_like(seq).unsqueeze(-1).expand(seq.size(0), seq.size(1), seq.size(1))
)
subsequent_mask = subsequent_mask.triu(
diagonal=1
) # upper triangular part of a matrix(2-D)
return subsequent_mask
def random_crop(x, y, crop_size, crop_num):
b, c, h, w = x.shape
ch, cw = to_2tuple(crop_size)
crops_x = []
crops_y = []
for i in range(crop_num):
sh = np.random.randint(0, h - ch)
sw = np.random.randint(0, w - cw)
crops_x.append(x[..., sh : sh + ch, sw : sw + cw])
crops_y.append(y[..., sh : sh + ch, sw : sw + cw])
crops_x = torch.stack(crops_x, dim=1)
crops_y = torch.stack(crops_y, dim=1)
return crops_x.reshape(b * crop_num, c, ch, cw), crops_y.reshape(
b * crop_num, c, ch, cw
)
class SaveOutput:
def __init__(self):
self.outputs = {}
def __call__(self, module, module_in, module_out):
if module_out.device in self.outputs.keys():
self.outputs[module_out.device].append(module_out)
else:
self.outputs[module_out.device] = [module_out]
def clear(self, device):
self.outputs[device] = []
class DeformFusion(nn.Module):
def __init__(
self,
patch_size=8,
in_channels=768 * 5,
cnn_channels=256 * 3,
out_channels=256 * 3,
):
super().__init__()
# in_channels, out_channels, kernel_size, stride, padding
self.d_hidn = 512
if patch_size == 8:
stride = 1
else:
stride = 2
self.conv_offset = nn.Conv2d(in_channels, 2 * 3 * 3, 3, 1, 1)
self.deform = DeformConv2d(cnn_channels, out_channels, 3, 1, 1)
self.conv1 = nn.Sequential(
nn.Conv2d(
in_channels=out_channels,
out_channels=self.d_hidn,
kernel_size=3,
padding=1,
stride=2,
),
nn.ReLU(),
nn.Conv2d(
in_channels=self.d_hidn,
out_channels=out_channels,
kernel_size=3,
padding=1,
stride=stride,
),
)
def forward(self, cnn_feat, vit_feat):
vit_feat = F.interpolate(vit_feat, size=cnn_feat.shape[-2:], mode="nearest")
offset = self.conv_offset(vit_feat)
deform_feat = self.deform(cnn_feat, offset)
deform_feat = self.conv1(deform_feat)
return deform_feat
class Pixel_Prediction(nn.Module):
def __init__(self, inchannels=768 * 5 + 256 * 3, outchannels=256, d_hidn=1024):
super().__init__()
self.d_hidn = d_hidn
self.down_channel = nn.Conv2d(inchannels, outchannels, kernel_size=1)
self.feat_smoothing = nn.Sequential(
nn.Conv2d(
in_channels=256 * 3, out_channels=self.d_hidn, kernel_size=3, padding=1
),
nn.ReLU(),
nn.Conv2d(
in_channels=self.d_hidn, out_channels=512, kernel_size=3, padding=1
),
)
self.conv1 = nn.Sequential(
nn.Conv2d(in_channels=512, out_channels=256, kernel_size=3, padding=1),
nn.ReLU(),
)
self.conv_attent = nn.Sequential(
nn.Conv2d(in_channels=256, out_channels=1, kernel_size=1), nn.Sigmoid()
)
self.conv = nn.Sequential(
nn.Conv2d(in_channels=256, out_channels=1, kernel_size=1),
)
def forward(self, f_dis, f_ref, cnn_dis, cnn_ref):
f_dis = torch.cat((f_dis, cnn_dis), 1)
f_ref = torch.cat((f_ref, cnn_ref), 1)
f_dis = self.down_channel(f_dis)
f_ref = self.down_channel(f_ref)
f_cat = torch.cat((f_dis - f_ref, f_dis, f_ref), 1)
feat_fused = self.feat_smoothing(f_cat)
feat = self.conv1(feat_fused)
f = self.conv(feat)
w = self.conv_attent(feat)
pred = (f * w).sum(dim=-1).sum(dim=-1) / w.sum(dim=-1).sum(dim=-1)
return pred
@ARCH_REGISTRY.register()
class IQT(nn.Module):
def __init__(
self,
num_crop=20,
config_dataset="live",
default_mean=timm.data.IMAGENET_INCEPTION_MEAN,
default_std=timm.data.IMAGENET_INCEPTION_STD,
pretrained=False,
pretrained_model_path=None,
):
super().__init__()
self.backbone = timm.create_model("inception_resnet_v2", pretrained=True)
self.fix_network(self.backbone)
class Config:
def __init__(self, dataset=config_dataset) -> None:
if dataset in ["live", "csiq", "tid"]:
                    # model for LIVE / CSIQ / TID configurations
self.n_enc_seq = (
29 * 29
) # feature map dimension (H x W) from backbone, this size is related to crop_size
self.n_dec_seq = (
29 * 29
) # feature map dimension (H x W) from backbone
self.n_layer = 2 # number of encoder/decoder layers
self.d_hidn = (
256 # input channel (C) of encoder / decoder (input: C x N)
)
self.i_pad = 0
self.d_ff = 1024 # feed forward hidden layer dimension
self.d_MLP_head = 512 # hidden layer of final MLP
self.n_head = 4 # number of head (in multi-head attention)
self.d_head = 256 # input channel (C) of each head (input: C x N) -> same as d_hidn
self.dropout = 0.1 # dropout ratio of transformer
self.emb_dropout = 0.1 # dropout ratio of input embedding
self.layer_norm_epsilon = 1e-12
self.n_output = 1 # dimension of final prediction
self.crop_size = 256 # input image crop size
elif dataset == "pipal":
# model for PIPAL (NTIRE2021 Challenge)
self.n_enc_seq = (
21 * 21
) # feature map dimension (H x W) from backbone, this size is related to crop_size
self.n_dec_seq = (
21 * 21
) # feature map dimension (H x W) from backbone
self.n_layer = 1 # number of encoder/decoder layers
self.d_hidn = (
128 # input channel (C) of encoder / decoder (input: C x N)
)
self.i_pad = 0
self.d_ff = 1024 # feed forward hidden layer dimension
self.d_MLP_head = 128 # hidden layer of final MLP
self.n_head = 4 # number of head (in multi-head attention)
self.d_head = 128 # input channel (C) of each head (input: C x N) -> same as d_hidn
self.dropout = 0.1 # dropout ratio of transformer
self.emb_dropout = 0.1 # dropout ratio of input embedding
self.layer_norm_epsilon = 1e-12
self.n_output = 1 # dimension of final prediction
self.crop_size = 192 # input image crop size
config = Config()
self.config = config
self.register_buffer("enc_inputs", torch.ones(1, config.n_enc_seq + 1))
self.register_buffer("dec_inputs", torch.ones(1, config.n_dec_seq + 1))
self.regressor = IQARegression(config)
# register hook to get intermediate features
self.init_saveoutput()
self.default_mean = torch.Tensor(default_mean).view(1, 3, 1, 1)
self.default_std = torch.Tensor(default_std).view(1, 3, 1, 1)
if pretrained_model_path is not None:
load_pretrained_network(
self, pretrained_model_path, False, weight_keys="params"
)
self.eps = 1e-12
self.crops = num_crop
self.crop_size = config.crop_size
def init_saveoutput(self):
self.save_output = SaveOutput()
hook_handles = []
for layer in self.backbone.modules():
if type(layer).__name__ == "Mixed_5b":
handle = layer.register_forward_hook(self.save_output)
hook_handles.append(handle)
elif type(layer).__name__ == "Block35":
handle = layer.register_forward_hook(self.save_output)
def fix_network(self, model):
for p in model.parameters():
p.requires_grad = False
def preprocess(self, x):
x = (x - self.default_mean.to(x)) / self.default_std.to(x)
return x
@torch.no_grad()
def get_backbone_feature(self, x):
self.backbone(x)
feat = torch.cat(
(
self.save_output.outputs[x.device][0],
self.save_output.outputs[x.device][2],
self.save_output.outputs[x.device][4],
self.save_output.outputs[x.device][6],
self.save_output.outputs[x.device][8],
self.save_output.outputs[x.device][10],
),
dim=1,
)
self.save_output.clear(x.device)
return feat
def regress_score(self, dis, ref):
assert (
dis.shape[-1] == dis.shape[-2] == self.config.crop_size
), f"Input shape should be {self.config.crop_size, self.config.crop_size} but got {dis.shape[2:]}"
self.backbone.eval()
dis = self.preprocess(dis)
ref = self.preprocess(ref)
feat_dis = self.get_backbone_feature(dis)
feat_ref = self.get_backbone_feature(ref)
feat_diff = feat_ref - feat_dis
score = self.regressor(self.enc_inputs, feat_diff, self.dec_inputs, feat_ref)
return score
def forward(self, x, y):
bsz = x.shape[0]
if self.crops > 1 and not self.training:
x, y = random_crop(x, y, self.crop_size, self.crops)
score = self.regress_score(x, y)
score = score.reshape(bsz, self.crops, 1)
score = score.mean(dim=1)
else:
score = self.regress_score(x, y)
return score
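# Hedged usage sketch for IQT: the crop size is dictated by `config_dataset`
# (256 for the "live"/"csiq"/"tid" configuration used here), inputs are assumed
# to be RGB tensors in [0, 1], and the random pair below only demonstrates the
# expected shapes; real use would pass `pretrained_model_path` for meaningful
# scores, and the timm backbone weights are downloaded on first construction.
if __name__ == "__main__":
    model = IQT(num_crop=2, config_dataset="live").eval()
    dist = torch.rand(1, 3, 384, 384)
    ref = torch.rand(1, 3, 384, 384)
    with torch.no_grad():
        print(model(dist, ref))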
| 25,389 | 35.323319 | 161 | py |
BVQI | BVQI-master/pyiqa/archs/func_util.py | from typing import Tuple
import torch
import torch.nn.functional as F
from pyiqa.matlab_utils import fspecial, imfilter
from .arch_util import excact_padding_2d
EPS = torch.finfo(torch.float32).eps
def extract_2d_patches(x, kernel, stride=1, dilation=1, padding="same"):
"""
Ref: https://stackoverflow.com/a/65886666
"""
b, c, h, w = x.shape
if padding != "none":
x = excact_padding_2d(x, kernel, stride, dilation, mode=padding)
# Extract patches
patches = F.unfold(x, kernel, dilation, stride=stride)
b, _, pnum = patches.shape
patches = patches.transpose(1, 2).reshape(b, pnum, c, kernel, kernel)
return patches
def torch_cov(tensor, rowvar=True, bias=False):
r"""Estimate a covariance matrix (np.cov)
Ref: https://gist.github.com/ModarTensai/5ab449acba9df1a26c12060240773110
"""
tensor = tensor if rowvar else tensor.transpose(-1, -2)
tensor = tensor - tensor.mean(dim=-1, keepdim=True)
factor = 1 / (tensor.shape[-1] - int(not bool(bias)))
return factor * tensor @ tensor.transpose(-1, -2)
def safe_sqrt(x: torch.Tensor) -> torch.Tensor:
r"""Safe sqrt with EPS to ensure numeric stability.
Args:
x (torch.Tensor): should be non-negative
"""
EPS = torch.finfo(x.dtype).eps
return torch.sqrt(x + EPS)
def diff_round(x: torch.Tensor) -> torch.Tensor:
r"""Differentiable round."""
return x - x.detach() + x.round()
def normalize_img_with_guass(
img: torch.Tensor,
kernel_size: int = 7,
sigma: float = 7.0 / 6,
C: int = 1,
padding: str = "same",
):
kernel = fspecial(kernel_size, sigma, 1).to(img)
mu = imfilter(img, kernel, padding=padding)
std = imfilter(img ** 2, kernel, padding=padding)
sigma = safe_sqrt((std - mu ** 2).abs())
img_normalized = (img - mu) / (sigma + C)
return img_normalized
# Gradient operator kernels
def scharr_filter() -> torch.Tensor:
r"""Utility function that returns a normalized 3x3 Scharr kernel in X direction
Returns:
kernel: Tensor with shape (1, 3, 3)
"""
return torch.tensor([[[-3.0, 0.0, 3.0], [-10.0, 0.0, 10.0], [-3.0, 0.0, 3.0]]]) / 16
def gradient_map(x: torch.Tensor, kernels: torch.Tensor) -> torch.Tensor:
r"""Compute gradient map for a given tensor and stack of kernels.
Args:
x: Tensor with shape (N, C, H, W).
kernels: Stack of tensors for gradient computation with shape (k_N, k_H, k_W)
Returns:
Gradients of x per-channel with shape (N, C, H, W)
"""
padding = kernels.size(-1) // 2
grads = torch.nn.functional.conv2d(x, kernels.to(x), padding=padding)
return safe_sqrt(torch.sum(grads ** 2, dim=-3, keepdim=True))
def similarity_map(
map_x: torch.Tensor, map_y: torch.Tensor, constant: float, alpha: float = 0.0
) -> torch.Tensor:
r"""Compute similarity_map between two tensors using Dice-like equation.
Args:
map_x: Tensor with map to be compared
map_y: Tensor with map to be compared
constant: Used for numerical stability
        alpha: Masking coefficient. Subtracts `alpha` * map_x * map_y from the numerator and denominator
"""
return (2.0 * map_x * map_y - alpha * map_x * map_y + constant) / (
map_x ** 2 + map_y ** 2 - alpha * map_x * map_y + constant + EPS
)
def ifftshift(x: torch.Tensor) -> torch.Tensor:
r"""Similar to np.fft.ifftshift but applies to PyTorch Tensors"""
shift = [-(ax // 2) for ax in x.size()]
return torch.roll(x, shift, tuple(range(len(shift))))
def get_meshgrid(size: Tuple[int, int]) -> torch.Tensor:
r"""Return coordinate grid matrices centered at zero point.
Args:
size: Shape of meshgrid to create
"""
if size[0] % 2:
# Odd
x = torch.arange(-(size[0] - 1) / 2, size[0] / 2) / (size[0] - 1)
else:
# Even
x = torch.arange(-size[0] / 2, size[0] / 2) / size[0]
if size[1] % 2:
# Odd
y = torch.arange(-(size[1] - 1) / 2, size[1] / 2) / (size[1] - 1)
else:
# Even
y = torch.arange(-size[1] / 2, size[1] / 2) / size[1]
return torch.meshgrid(x, y, indexing="ij")
def estimate_ggd_param(x: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
"""Estimate general gaussian distribution.
Args:
x (Tensor): shape (b, 1, h, w)
"""
gamma = torch.arange(0.2, 10 + 0.001, 0.001).to(x)
r_table = (
torch.lgamma(1.0 / gamma)
+ torch.lgamma(3.0 / gamma)
- 2 * torch.lgamma(2.0 / gamma)
).exp()
r_table = r_table.repeat(x.size(0), 1)
sigma_sq = x.pow(2).mean(dim=(-1, -2))
sigma = sigma_sq.sqrt().squeeze(dim=-1)
assert not torch.isclose(
sigma, torch.zeros_like(sigma)
).all(), "Expected image with non zero variance of pixel values"
E = x.abs().mean(dim=(-1, -2))
rho = sigma_sq / E ** 2
indexes = (rho - r_table).abs().argmin(dim=-1)
solution = gamma[indexes]
return solution, sigma
def estimate_aggd_param(
block: torch.Tensor, return_sigma=False
) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
"""Estimate AGGD (Asymmetric Generalized Gaussian Distribution) parameters.
Args:
block (Tensor): Image block with shape (b, 1, h, w).
Returns:
        Tensor: alpha, beta_l and beta_r for the AGGD distribution
            (estimating the parameters in Equation 7 of the paper).
"""
gam = torch.arange(0.2, 10 + 0.001, 0.001).to(block)
r_gam = (
2 * torch.lgamma(2.0 / gam)
- (torch.lgamma(1.0 / gam) + torch.lgamma(3.0 / gam))
).exp()
r_gam = r_gam.repeat(block.shape[0], 1)
mask_left = block < 0
mask_right = block > 0
count_left = mask_left.sum(dim=(-1, -2), dtype=torch.float32)
count_right = mask_right.sum(dim=(-1, -2), dtype=torch.float32)
left_std = torch.sqrt((block * mask_left).pow(2).sum(dim=(-1, -2)) / (count_left))
right_std = torch.sqrt(
(block * mask_right).pow(2).sum(dim=(-1, -2)) / (count_right)
)
gammahat = left_std / right_std
rhat = block.abs().mean(dim=(-1, -2)).pow(2) / block.pow(2).mean(dim=(-1, -2))
rhatnorm = (rhat * (gammahat.pow(3) + 1) * (gammahat + 1)) / (
gammahat.pow(2) + 1
).pow(2)
array_position = (r_gam - rhatnorm).abs().argmin(dim=-1)
alpha = gam[array_position]
beta_l = (
left_std.squeeze(-1)
* (torch.lgamma(1 / alpha) - torch.lgamma(3 / alpha)).exp().sqrt()
)
beta_r = (
right_std.squeeze(-1)
* (torch.lgamma(1 / alpha) - torch.lgamma(3 / alpha)).exp().sqrt()
)
if return_sigma:
return alpha, left_std.squeeze(-1), right_std.squeeze(-1)
else:
return alpha, beta_l, beta_r
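# Small sketch tying the helpers above together: MSCN-style normalisation of a
# random "image" followed by AGGD parameter estimation, mirroring how
# niqe_arch.py consumes these utilities. The random tensor is a placeholder,
# not a real photograph, so the fitted parameters are only illustrative.
def _aggd_on_random_image() -> None:
    img = torch.rand(1, 1, 96, 96) * 255.0
    mscn = normalize_img_with_guass(img, kernel_size=7, sigma=7.0 / 6, padding="replicate")
    alpha, beta_l, beta_r = estimate_aggd_param(mscn)
    print(alpha.shape, beta_l.shape, beta_r.shape)  # one value per image in the batch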
| 6,729 | 31.047619 | 103 | py |
BVQI | BVQI-master/pyiqa/archs/vsi_arch.py | r"""VSI Metric.
Created by: https://github.com/photosynthesis-team/piq/blob/master/piq/vsi.py
Modified by: Jiadi Mo (https://github.com/JiadiMo)
Refer to:
IQA-Optimization from https://github.com/dingkeyan93/IQA-optimization/blob/master/IQA_pytorch/VSI.py
    Official MATLAB code is not available
"""
import functools
import warnings
from typing import Tuple, Union
import torch
import torch.nn as nn
from torch.nn.functional import avg_pool2d, interpolate, pad
from pyiqa.utils.color_util import rgb2lab, rgb2lmn
from pyiqa.utils.registry import ARCH_REGISTRY
from .func_util import (
get_meshgrid,
gradient_map,
ifftshift,
safe_sqrt,
scharr_filter,
similarity_map,
)
def vsi(
x: torch.Tensor,
y: torch.Tensor,
data_range: Union[int, float] = 1.0,
c1: float = 1.27,
c2: float = 386.0,
c3: float = 130.0,
alpha: float = 0.4,
beta: float = 0.02,
omega_0: float = 0.021,
sigma_f: float = 1.34,
sigma_d: float = 145.0,
sigma_c: float = 0.001,
) -> torch.Tensor:
r"""Compute Visual Saliency-induced Index for a batch of images.
Args:
x: An input tensor. Shape :math:`(N, C, H, W)`.
y: A target tensor. Shape :math:`(N, C, H, W)`.
data_range: Maximum value range of images (usually 1.0 or 255).
c1: coefficient to calculate saliency component of VSI.
c2: coefficient to calculate gradient component of VSI.
c3: coefficient to calculate color component of VSI.
alpha: power for gradient component of VSI.
beta: power for color component of VSI.
omega_0: coefficient to get log Gabor filter at SDSP.
sigma_f: coefficient to get log Gabor filter at SDSP.
sigma_d: coefficient to get SDSP.
sigma_c: coefficient to get SDSP.
Returns:
Index of similarity between two images. Usually in [0, 1] range.
References:
L. Zhang, Y. Shen and H. Li, "VSI: A Visual Saliency-Induced Index for Perceptual
Image Quality Assessment," IEEE Transactions on Image Processing, vol. 23, no. 10,
pp. 4270-4281, Oct. 2014, doi: 10.1109/TIP.2014.2346028
https://ieeexplore.ieee.org/document/6873260
Note:
The original method supports only RGB image.
"""
if x.size(1) == 1:
x = x.repeat(1, 3, 1, 1)
y = y.repeat(1, 3, 1, 1)
warnings.warn(
"The original VSI supports only RGB images. The input images were converted to RGB by copying "
"the grey channel 3 times."
)
# Scale to [0, 255] range to match scale of constant
x = x * 255.0 / data_range
y = y * 255.0 / data_range
vs_x = sdsp(
x,
data_range=255,
omega_0=omega_0,
sigma_f=sigma_f,
sigma_d=sigma_d,
sigma_c=sigma_c,
)
vs_y = sdsp(
y,
data_range=255,
omega_0=omega_0,
sigma_f=sigma_f,
sigma_d=sigma_d,
sigma_c=sigma_c,
)
# Convert to LMN colour space
x_lmn = rgb2lmn(x)
y_lmn = rgb2lmn(y)
# Averaging image if the size is large enough
kernel_size = max(1, round(min(vs_x.size()[-2:]) / 256))
padding = kernel_size // 2
if padding:
upper_pad = padding
bottom_pad = (kernel_size - 1) // 2
pad_to_use = [upper_pad, bottom_pad, upper_pad, bottom_pad]
mode = "replicate"
vs_x = pad(vs_x, pad=pad_to_use, mode=mode)
vs_y = pad(vs_y, pad=pad_to_use, mode=mode)
x_lmn = pad(x_lmn, pad=pad_to_use, mode=mode)
y_lmn = pad(y_lmn, pad=pad_to_use, mode=mode)
vs_x = avg_pool2d(vs_x, kernel_size=kernel_size)
vs_y = avg_pool2d(vs_y, kernel_size=kernel_size)
x_lmn = avg_pool2d(x_lmn, kernel_size=kernel_size)
y_lmn = avg_pool2d(y_lmn, kernel_size=kernel_size)
# Calculate gradient map
kernels = torch.stack([scharr_filter(), scharr_filter().transpose(1, 2)]).to(x_lmn)
gm_x = gradient_map(x_lmn[:, :1], kernels)
gm_y = gradient_map(y_lmn[:, :1], kernels)
# Calculate all similarity maps
s_vs = similarity_map(vs_x, vs_y, c1)
s_gm = similarity_map(gm_x, gm_y, c2)
s_m = similarity_map(x_lmn[:, 1:2], y_lmn[:, 1:2], c3)
s_n = similarity_map(x_lmn[:, 2:], y_lmn[:, 2:], c3)
s_c = s_m * s_n
s_c_complex = [s_c.abs(), torch.atan2(torch.zeros_like(s_c), s_c)]
s_c_complex_pow = [s_c_complex[0] ** beta, s_c_complex[1] * beta]
s_c_real_pow = s_c_complex_pow[0] * torch.cos(s_c_complex_pow[1])
s = s_vs * s_gm.pow(alpha) * s_c_real_pow
vs_max = torch.max(vs_x, vs_y)
eps = torch.finfo(vs_max.dtype).eps
output = s * vs_max
output = (
(output.sum(dim=(-1, -2)) + eps) / (vs_max.sum(dim=(-1, -2)) + eps)
).squeeze(-1)
return output
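# Minimal usage sketch for the functional `vsi` above, assuming RGB tensors in
# [0, 1] (matching the default `data_range`); the random pair below only
# illustrates shapes and the one-score-per-image output.
def _vsi_example() -> torch.Tensor:
    ref = torch.rand(2, 3, 128, 128)
    dist = (ref + 0.05 * torch.randn_like(ref)).clamp(0, 1)
    return vsi(dist, ref, data_range=1.0)  # shape (2,); values near 1 mean high similarity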
def sdsp(
x: torch.Tensor,
data_range: Union[int, float] = 255,
omega_0: float = 0.021,
sigma_f: float = 1.34,
sigma_d: float = 145.0,
sigma_c: float = 0.001,
) -> torch.Tensor:
r"""SDSP algorithm for salient region detection from a given image.
Supports only colour images with RGB channel order.
Args:
x: Tensor. Shape :math:`(N, 3, H, W)`.
data_range: Maximum value range of images (usually 1.0 or 255).
omega_0: coefficient for log Gabor filter
sigma_f: coefficient for log Gabor filter
sigma_d: coefficient for the central areas, which have a bias towards attention
sigma_c: coefficient for the warm colors, which have a bias towards attention
Returns:
torch.Tensor: Visual saliency map
"""
x = x / data_range * 255
size = x.size()
size_to_use = (256, 256)
x = interpolate(input=x, size=size_to_use, mode="bilinear", align_corners=False)
x_lab = rgb2lab(x, data_range=255)
lg = _log_gabor(size_to_use, omega_0, sigma_f).to(x).view(1, 1, *size_to_use)
# torch version >= '1.8.0'
x_fft = torch.fft.fft2(x_lab)
x_ifft_real = torch.fft.ifft2(x_fft * lg).real
s_f = safe_sqrt(x_ifft_real.pow(2).sum(dim=1, keepdim=True))
coordinates = torch.stack(get_meshgrid(size_to_use), dim=0).to(x)
coordinates = coordinates * size_to_use[0] + 1
s_d = torch.exp(-torch.sum(coordinates ** 2, dim=0) / sigma_d ** 2).view(
1, 1, *size_to_use
)
eps = torch.finfo(x_lab.dtype).eps
min_x = x_lab.min(dim=-1, keepdim=True).values.min(dim=-2, keepdim=True).values
max_x = x_lab.max(dim=-1, keepdim=True).values.max(dim=-2, keepdim=True).values
normalized = (x_lab - min_x) / (max_x - min_x + eps)
norm = normalized[:, 1:].pow(2).sum(dim=1, keepdim=True)
s_c = 1 - torch.exp(-norm / sigma_c ** 2)
vs_m = s_f * s_d * s_c
vs_m = interpolate(vs_m, size[-2:], mode="bilinear", align_corners=True)
min_vs_m = vs_m.min(dim=-1, keepdim=True).values.min(dim=-2, keepdim=True).values
max_vs_m = vs_m.max(dim=-1, keepdim=True).values.max(dim=-2, keepdim=True).values
return (vs_m - min_vs_m) / (max_vs_m - min_vs_m + eps)
def _log_gabor(size: Tuple[int, int], omega_0: float, sigma_f: float) -> torch.Tensor:
r"""Creates log Gabor filter
Args:
        size: size of the required log Gabor filter
omega_0: center frequency of the filter
sigma_f: bandwidth of the filter
Returns:
log Gabor filter
"""
xx, yy = get_meshgrid(size)
radius = (xx ** 2 + yy ** 2).sqrt()
mask = radius <= 0.5
r = radius * mask
r = ifftshift(r)
r[0, 0] = 1
lg = torch.exp((-(r / omega_0).log().pow(2)) / (2 * sigma_f ** 2))
lg[0, 0] = 0
return lg
@ARCH_REGISTRY.register()
class VSI(nn.Module):
r"""Creates a criterion that measures Visual Saliency-induced Index error between
each element in the input and target.
Args:
data_range: Maximum value range of images (usually 1.0 or 255).
c1: coefficient to calculate saliency component of VSI
c2: coefficient to calculate gradient component of VSI
c3: coefficient to calculate color component of VSI
alpha: power for gradient component of VSI
beta: power for color component of VSI
omega_0: coefficient to get log Gabor filter at SDSP
sigma_f: coefficient to get log Gabor filter at SDSP
sigma_d: coefficient to get SDSP
sigma_c: coefficient to get SDSP
References:
L. Zhang, Y. Shen and H. Li, "VSI: A Visual Saliency-Induced Index for Perceptual
Image Quality Assessment," IEEE Transactions on Image Processing, vol. 23, no. 10,
pp. 4270-4281, Oct. 2014, doi: 10.1109/TIP.2014.2346028
https://ieeexplore.ieee.org/document/6873260
"""
def __init__(
self,
c1: float = 1.27,
c2: float = 386.0,
c3: float = 130.0,
alpha: float = 0.4,
beta: float = 0.02,
data_range: Union[int, float] = 1.0,
omega_0: float = 0.021,
sigma_f: float = 1.34,
sigma_d: float = 145.0,
sigma_c: float = 0.001,
) -> None:
super().__init__()
self.data_range = data_range
self.vsi = functools.partial(
vsi,
c1=c1,
c2=c2,
c3=c3,
alpha=alpha,
beta=beta,
omega_0=omega_0,
sigma_f=sigma_f,
sigma_d=sigma_d,
sigma_c=sigma_c,
data_range=data_range,
)
def forward(self, x, y):
r"""Computation of VSI as a loss function.
Args:
x: An input tensor. Shape :math:`(N, C, H, W)`.
y: A target tensor. Shape :math:`(N, C, H, W)`.
Returns:
            Value of the VSI index in [0, 1] range; higher means more similar.
Note:
Both inputs are supposed to have RGB channels order in accordance with the original approach.
            Nevertheless, the method supports greyscale images, which are converted to RGB by copying the grey
            channel 3 times.
"""
return self.vsi(x=x, y=y)
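# Usage sketch: a minimal smoke test of the VSI module on random tensors. The
# batch size, 256x256 resolution and data_range=1.0 below are illustrative
# assumptions only; any RGB batch in [0, data_range] works the same way.
if __name__ == "__main__":
    import torch

    x = torch.rand(2, 3, 256, 256)  # "distorted" batch, RGB in [0, 1]
    y = torch.rand(2, 3, 256, 256)  # "reference" batch, RGB in [0, 1]
    metric = VSI(data_range=1.0)
    score = metric(x, y)  # shape (2,), values in [0, 1]; higher means more similar
    print(score)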
| 9,991 | 31.868421 | 115 | py |
BVQI | BVQI-master/pyiqa/archs/nrqm_arch.py | r"""NRQM Metric, proposed in
Chao Ma, Chih-Yuan Yang, Xiaokang Yang, Ming-Hsuan Yang
"Learning a No-Reference Quality Metric for Single-Image Super-Resolution"
Computer Vision and Image Understanding (CVIU), 2017
Matlab reference: https://github.com/chaoma99/sr-metric
This PyTorch implementation by: Chaofeng Chen (https://github.com/chaofengc)
"""
import math
from warnings import warn
import scipy.io
import torch
import torch.nn.functional as F
from torch import Tensor
from pyiqa.archs.arch_util import ExactPadding2d
from pyiqa.archs.func_util import extract_2d_patches
from pyiqa.archs.niqe_arch import NIQE
from pyiqa.archs.ssim_arch import SSIM
from pyiqa.matlab_utils import SCFpyr_PyTorch, dct2d, fspecial, im2col, imresize
from pyiqa.utils.color_util import to_y_channel
from pyiqa.utils.download_util import load_file_from_url
from pyiqa.utils.registry import ARCH_REGISTRY
default_model_urls = {
"url": "https://github.com/chaofengc/IQA-PyTorch/releases/download/v0.1-weights/NRQM_model.mat"
}
def get_guass_pyramid(x: Tensor, scale: int = 2):
r"""Get gaussian pyramid images with gaussian kernel."""
pyr = [x]
kernel = fspecial(3, 0.5, x.shape[1]).to(x)
pad_func = ExactPadding2d(3, stride=1, mode="same")
for i in range(scale):
x = F.conv2d(pad_func(x), kernel, groups=x.shape[1])
x = x[:, :, 1::2, 1::2]
pyr.append(x)
return pyr
def get_var_gen_gauss(x, eps=1e-7):
r"""Get mean and variance of input local patch."""
std = x.abs().std(dim=-1, unbiased=True)
mean = x.abs().mean(dim=-1)
rho = std / (mean + eps)
return rho
def gamma_gen_gauss(x: Tensor, block_seg=1e4):
r"""General gaussian distribution estimation.
Args:
block_seg: maximum number of blocks in parallel to avoid OOM
"""
pshape = x.shape[:-1]
x = x.reshape(-1, x.shape[-1])
eps = 1e-7
gamma = torch.arange(0.03, 10 + 0.001, 0.001).to(x)
r_table = (
torch.lgamma(1.0 / gamma)
+ torch.lgamma(3.0 / gamma)
- 2 * torch.lgamma(2.0 / gamma)
).exp()
r_table = r_table.unsqueeze(0)
mean = x.mean(dim=-1, keepdim=True)
var = x.var(dim=-1, keepdim=True, unbiased=True)
mean_abs = (x - mean).abs().mean(dim=-1, keepdim=True) ** 2
rho = var / (mean_abs + eps)
if rho.shape[0] > block_seg:
rho_seg = rho.chunk(int(rho.shape[0] // block_seg))
indexes = []
for r in rho_seg:
tmp_idx = (r - r_table).abs().argmin(dim=-1)
indexes.append(tmp_idx)
indexes = torch.cat(indexes)
else:
indexes = (rho - r_table).abs().argmin(dim=-1)
solution = gamma[indexes].reshape(*pshape)
return solution
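# Worked sanity check (illustrative): for samples drawn from a Gaussian, the
# estimated generalized-Gaussian shape parameter should come out close to 2,
# because var / E[|x - mu|]^2 = pi/2 matches r_table at gamma = 2. The sample
# count below is an arbitrary assumption.
if __name__ == "__main__":
    _samples = torch.randn(3, 10000)
    print(gamma_gen_gauss(_samples))  # expected: values close to 2.0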
def gamma_dct(dct_img_block: torch.Tensor):
r"""Generalized gaussian distribution features"""
b, _, _, h, w = dct_img_block.shape
dct_flatten = dct_img_block.reshape(b, -1, h * w)[:, :, 1:]
g = gamma_gen_gauss(dct_flatten)
g = torch.sort(g, dim=-1)[0]
return g
def coeff_var_dct(dct_img_block: torch.Tensor):
r"""Gaussian var, mean features"""
b, _, _, h, w = dct_img_block.shape
dct_flatten = dct_img_block.reshape(b, -1, h * w)[:, :, 1:]
rho = get_var_gen_gauss(dct_flatten)
rho = torch.sort(rho, dim=-1)[0]
return rho
def oriented_dct_rho(dct_img_block: torch.Tensor):
r"""Oriented frequency features"""
eps = 1e-8
# oriented 1
feat1 = torch.cat(
[
dct_img_block[..., 0, 1:],
dct_img_block[..., 1, 2:],
dct_img_block[..., 2, 4:],
dct_img_block[..., 3, 5:],
],
dim=-1,
).squeeze(-2)
g1 = get_var_gen_gauss(feat1, eps)
# oriented 2
feat2 = torch.cat(
[
dct_img_block[..., 1, [1]],
dct_img_block[..., 2, 2:4],
dct_img_block[..., 3, 2:5],
dct_img_block[..., 4, 3:],
dct_img_block[..., 5, 4:],
dct_img_block[..., 6, 4:],
],
dim=-1,
).squeeze(-2)
g2 = get_var_gen_gauss(feat2, eps)
# oriented 3
feat3 = torch.cat(
[
dct_img_block[..., 1:, 0],
dct_img_block[..., 2:, 1],
dct_img_block[..., 4:, 2],
dct_img_block[..., 5:, 3],
],
dim=-1,
).squeeze(-2)
g3 = get_var_gen_gauss(feat3, eps)
rho = torch.stack([g1, g2, g3], dim=-1).var(dim=-1)
rho = torch.sort(rho, dim=-1)[0]
return rho
def block_dct(img: Tensor):
r"""Get local frequency features"""
img_blocks = extract_2d_patches(img, 3 + 2 * 2, 3)
dct_img_blocks = dct2d(img_blocks)
features = []
# general gaussian distribution features
gamma_L1 = gamma_dct(dct_img_blocks)
p10_gamma_L1 = gamma_L1[:, : math.ceil(0.1 * gamma_L1.shape[-1]) + 1].mean(dim=-1)
p100_gamma_L1 = gamma_L1.mean(dim=-1)
features += [p10_gamma_L1, p100_gamma_L1]
# coefficient variation estimation
coeff_var_L1 = coeff_var_dct(dct_img_blocks)
p10_last_cv_L1 = coeff_var_L1[:, math.floor(0.9 * coeff_var_L1.shape[-1]) :].mean(
dim=-1
)
p100_cv_L1 = coeff_var_L1.mean(dim=-1)
features += [p10_last_cv_L1, p100_cv_L1]
# oriented dct features
ori_dct_feat = oriented_dct_rho(dct_img_blocks)
p10_last_orientation_L1 = ori_dct_feat[
:, math.floor(0.9 * ori_dct_feat.shape[-1]) :
].mean(dim=-1)
p100_orientation_L1 = ori_dct_feat.mean(dim=-1)
features += [p10_last_orientation_L1, p100_orientation_L1]
dct_feat = torch.stack(features, dim=1)
return dct_feat
def norm_sender_normalized(pyr, num_scale=2, num_bands=6, blksz=3, eps=1e-12):
r"""Normalize pyramid with local spatial neighbor and band neighbor"""
border = blksz // 2
guardband = 16
subbands = []
for si in range(num_scale):
for bi in range(num_bands):
idx = si * num_bands + bi
current_band = pyr[idx]
N = blksz ** 2
# 3x3 window pixels
tmp = F.unfold(current_band.unsqueeze(1), 3, stride=1)
tmp = tmp.transpose(1, 2)
b, hw = tmp.shape[:2]
# parent pixels
parent_idx = idx + num_bands
if parent_idx < len(pyr):
tmp_parent = pyr[parent_idx]
tmp_parent = imresize(tmp_parent, sizes=current_band.shape[-2:])
tmp_parent = tmp_parent[:, border:-border, border:-border].reshape(
b, hw, 1
)
tmp = torch.cat((tmp, tmp_parent), dim=-1)
N += 1
# neighbor band pixels
for ni in range(num_bands):
if ni != bi:
ni_idx = si * num_bands + ni
tmp_nei = pyr[ni_idx]
tmp_nei = tmp_nei[:, border:-border, border:-border].reshape(
b, hw, 1
)
tmp = torch.cat((tmp, tmp_nei), dim=-1)
C_x = tmp.transpose(1, 2) @ tmp / tmp.shape[1]
# correct possible negative eigenvalue
L, Q = torch.linalg.eigh(C_x)
L_pos = L * (L > 0)
L_pos_sum = L_pos.sum(dim=1, keepdim=True)
L = (
L_pos
* L.sum(dim=1, keepdim=True)
/ (L_pos_sum + (L_pos_sum == 0).float())
)
C_x = Q @ torch.diag_embed(L) @ Q.transpose(1, 2)
o_c = current_band[:, border:-border, border:-border]
b, h, w = o_c.shape
o_c = o_c.reshape(b, hw)
o_c = o_c - o_c.mean(dim=1, keepdim=True)
if hasattr(torch.linalg, "lstsq"):
tmp_y = (
torch.linalg.lstsq(
C_x.transpose(1, 2), tmp.transpose(1, 2)
).solution.transpose(1, 2)
* tmp
/ N
)
else:
warn(
"For numerical stability, we use torch.linal.lstsq to calculate matrix inverse for PyTorch > 1.9.0. The results might be slightly different if you use older version of PyTorch."
)
tmp_y = (tmp @ torch.linalg.pinv(C_x)) * tmp / N
z = tmp_y.sum(dim=2).sqrt()
mask = z != 0
g_c = o_c * mask / (z * mask + eps)
g_c = g_c.reshape(b, h, w)
gb = int(guardband / (2 ** (si)))
g_c = g_c[:, gb:-gb, gb:-gb]
g_c = g_c - g_c.mean(dim=(1, 2), keepdim=True)
subbands.append(g_c)
return subbands
def global_gsm(img: Tensor):
"""Global feature from gassian scale mixture model"""
batch_size = img.shape[0]
num_bands = 6
pyr = SCFpyr_PyTorch(height=2, nbands=num_bands, device=img.device).build(img)
lp_bands = [x[..., 0] for x in pyr[1]] + [x[..., 0] for x in pyr[2]]
subbands = norm_sender_normalized(lp_bands)
feat = []
# gamma
for sb in subbands:
feat.append(gamma_gen_gauss(sb.reshape(batch_size, -1)))
# gamma cross scale
for i in range(num_bands):
sb1 = subbands[i].reshape(batch_size, -1)
sb2 = subbands[i + num_bands].reshape(batch_size, -1)
gs = gamma_gen_gauss(torch.cat((sb1, sb2), dim=1))
feat.append(gs)
# structure correlation between scales
hp_band = pyr[0]
ssim_func = SSIM(channels=1, test_y_channel=False)
for sb in subbands:
sb_tmp = imresize(sb, sizes=hp_band.shape[1:]).unsqueeze(1)
tmp_ssim = ssim_func(sb_tmp, hp_band.unsqueeze(1))
feat.append(tmp_ssim)
# structure correlation between orientations
for i in range(num_bands):
for j in range(i + 1, num_bands):
feat.append(ssim_func(subbands[i].unsqueeze(1), subbands[j].unsqueeze(1)))
feat = torch.stack(feat, dim=1)
return feat
def tree_regression(feat, ldau, rdau, threshold_value, pred_value, best_attri):
r"""Simple decision tree regression."""
prev_k = k = 0
for i in range(ldau.shape[0]):
best_col = best_attri[k] - 1
threshold = threshold_value[k]
key_value = feat[best_col]
prev_k = k
k = ldau[k] - 1 if key_value <= threshold else rdau[k] - 1
if k == -1:
break
y_pred = pred_value[prev_k]
return y_pred
def random_forest_regression(feat, ldau, rdau, threshold_value, pred_value, best_attri):
r"""Simple random forest regression.
    Note: currently, this is non-differentiable and only supports CPU.
"""
feat = feat.cpu().data.numpy()
b, dim = feat.shape
node_num, tree_num = ldau.shape
pred = []
for i in range(b):
tmp_feat = feat[i]
tmp_pred = []
for i in range(tree_num):
tmp_result = tree_regression(
tmp_feat,
ldau[:, i],
rdau[:, i],
threshold_value[:, i],
pred_value[:, i],
best_attri[:, i],
)
tmp_pred.append(tmp_result)
pred.append(tmp_pred)
pred = torch.Tensor(pred)
return pred.mean(dim=1, keepdim=True)
def nrqm(
img: Tensor,
linear_param,
rf_param,
) -> Tensor:
"""Calculate NRQM
Args:
img (Tensor): Input image.
linear_param (np.array): (4, 1) linear regression params
rf_param: params of 3 random forest for 3 kinds of features
"""
assert (
img.ndim == 4
), "Input image must be a gray or Y (of YCbCr) image with shape (b, c, h, w)."
# crop image
b, c, h, w = img.shape
img_pyr = get_guass_pyramid(img.float() / 255.0)
# DCT features
f1 = []
for im in img_pyr:
f1.append(block_dct(im))
f1 = torch.cat(f1, dim=1)
# gsm features
f2 = global_gsm(img)
# svd features
f3 = []
for im in img_pyr:
col = im2col(im, 5, "distinct")
_, s, _ = torch.linalg.svd(col, full_matrices=False)
f3.append(s)
f3 = torch.cat(f3, dim=1)
    # Random forest regression. Currently not differentiable and only supports CPU
preds = torch.ones(b, 1)
for feat, rf in zip([f1, f2, f3], rf_param):
tmp_pred = random_forest_regression(feat, *rf)
preds = torch.cat((preds, tmp_pred), dim=1)
quality = preds @ torch.Tensor(linear_param)
return quality.squeeze()
def calculate_nrqm(
img: torch.Tensor,
crop_border: int = 0,
test_y_channel: bool = True,
pretrained_model_path: str = None,
color_space: str = "yiq",
**kwargs,
) -> torch.Tensor:
"""Calculate NRQM
Args:
img (Tensor): Input image whose quality needs to be computed.
crop_border (int): Cropped pixels in each edge of an image. These
pixels are not involved in the metric calculation.
        test_y_channel (Bool): Whether to convert the image to the 'y' channel (of MATLAB YCbCr) or 'gray'.
pretrained_model_path (String): The pretrained model path.
Returns:
        Tensor: NRQM result.
"""
params = scipy.io.loadmat(pretrained_model_path)["model"]
linear_param = params["linear"][0, 0]
rf_params_list = []
for i in range(3):
tmp_list = []
tmp_param = params["rf"][0, 0][0, i][0, 0]
tmp_list.append(tmp_param[0]) # ldau
tmp_list.append(tmp_param[1]) # rdau
tmp_list.append(tmp_param[4]) # threshold value
tmp_list.append(tmp_param[5]) # pred value
tmp_list.append(tmp_param[6]) # best attribute index
rf_params_list.append(tmp_list)
if test_y_channel and img.shape[1] == 3:
img = to_y_channel(img, 255, color_space)
if crop_border != 0:
img = img[..., crop_border:-crop_border, crop_border:-crop_border]
nrqm_result = nrqm(img, linear_param, rf_params_list)
return nrqm_result.to(img)
@ARCH_REGISTRY.register()
class NRQM(torch.nn.Module):
r"""NRQM metric
Ma, Chao, Chih-Yuan Yang, Xiaokang Yang, and Ming-Hsuan Yang.
"Learning a no-reference quality metric for single-image super-resolution."
Computer Vision and Image Understanding 158 (2017): 1-16.
Args:
channels (int): Number of processed channel.
test_y_channel (Boolean): whether to use y channel on ycbcr.
crop_border (int): Cropped pixels in each edge of an image. These
pixels are not involved in the metric calculation.
pretrained_model_path (String): The pretrained model path.
"""
def __init__(
self,
test_y_channel: bool = True,
color_space: str = "yiq",
crop_border: int = 0,
pretrained_model_path: str = None,
) -> None:
super(NRQM, self).__init__()
self.test_y_channel = test_y_channel
self.crop_border = crop_border
self.color_space = color_space
if pretrained_model_path is not None:
self.pretrained_model_path = pretrained_model_path
else:
self.pretrained_model_path = load_file_from_url(default_model_urls["url"])
def forward(self, X: torch.Tensor) -> torch.Tensor:
r"""Computation of NRQM metric.
Args:
X: An input tensor. Shape :math:`(N, C, H, W)`.
Returns:
Value of nrqm metric.
"""
score = calculate_nrqm(
X,
self.crop_border,
self.test_y_channel,
self.pretrained_model_path,
self.color_space,
)
return score
@ARCH_REGISTRY.register()
class PI(torch.nn.Module):
r"""Perceptual Index (PI), introduced by
Blau, Yochai, Roey Mechrez, Radu Timofte, Tomer Michaeli, and Lihi Zelnik-Manor.
"The 2018 pirm challenge on perceptual image super-resolution."
In Proceedings of the European Conference on Computer Vision (ECCV) Workshops, pp. 0-0. 2018.
Ref url: https://github.com/roimehrez/PIRM2018
It is a combination of NIQE and NRQM: 1/2 * ((10 - NRQM) + NIQE)
Args:
color_space (str): color space of y channel, default ycbcr.
crop_border (int): Cropped pixels in each edge of an image, default 4.
"""
def __init__(self, crop_border=4, color_space="ycbcr"):
super(PI, self).__init__()
self.nrqm = NRQM(crop_border=crop_border, color_space=color_space)
self.niqe = NIQE(crop_border=crop_border, color_space=color_space)
def forward(self, X: Tensor) -> Tensor:
r"""Computation of PI metric.
Args:
X: An input tensor. Shape :math:`(N, C, H, W)`.
Returns:
Value of PI metric.
"""
nrqm_score = self.nrqm(X)
niqe_score = self.niqe(X)
score = 1 / 2 * (10 - nrqm_score + niqe_score)
return score
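# Usage sketch: a smoke test of NRQM and PI on a random RGB batch in [0, 1].
# The 256x256 input size is an arbitrary assumption; the pretrained .mat
# weights are downloaded on first use, and the random-forest part runs on the
# CPU only, so scores on pure noise are meaningful only as a shape/run check.
if __name__ == "__main__":
    import torch

    x = torch.rand(1, 3, 256, 256)
    print("NRQM:", NRQM()(x))
    print("PI:", PI()(x))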
| 16,689 | 31.157996 | 197 | py |
BVQI | BVQI-master/pyiqa/archs/nima_arch.py | r"""NIMA model.
Reference:
Talebi, Hossein, and Peyman Milanfar. "NIMA: Neural image assessment."
IEEE transactions on image processing 27, no. 8 (2018): 3998-4011.
Created by: https://github.com/yunxiaoshi/Neural-IMage-Assessment/blob/master/model/model.py
Modified by: Chaofeng Chen (https://github.com/chaofengc)
"""
import timm
import torch
import torch.nn as nn
from timm.data import IMAGENET_INCEPTION_MEAN, IMAGENET_INCEPTION_STD
from pyiqa.archs.arch_util import dist_to_mos, load_pretrained_network
from pyiqa.utils.registry import ARCH_REGISTRY
default_model_urls = {
"vgg16-ava": "https://github.com/chaofengc/IQA-PyTorch/releases/download/v0.1-weights/NIMA_VGG16_ava-dc4e8265.pth",
"inception_resnet_v2-ava": "https://github.com/chaofengc/IQA-PyTorch/releases/download/v0.1-weights/NIMA_InceptionV2_ava-b0c77c00.pth",
}
@ARCH_REGISTRY.register()
class NIMA(nn.Module):
"""Neural IMage Assessment model.
Modification:
- for simplicity, we use global average pool for all models
        - we remove the dropout, because the number of parameters with global average pooling is much smaller.
Args:
base_model_name: pretrained model to extract features, can be any models supported by timm.
Models used in the paper: vgg16, inception_resnet_v2, mobilenetv2_100
default input shape:
- vgg and mobilenet: (N, 3, 224, 224)
- inception: (N, 3, 299, 299)
"""
def __init__(
self,
base_model_name="vgg16",
num_classes=10,
dropout_rate=0.0,
pretrained=True,
pretrained_model_path=None,
default_mean=[0.485, 0.456, 0.406],
default_std=[0.229, 0.224, 0.225],
):
super(NIMA, self).__init__()
self.base_model = timm.create_model(
base_model_name, pretrained=True, features_only=True
)
# set output number of classes
num_classes = 10 if "ava" in pretrained else num_classes
self.global_pool = nn.AdaptiveAvgPool2d(1)
in_ch = self.base_model.feature_info.channels()[-1]
self.num_classes = num_classes
self.classifier = [
nn.Flatten(),
nn.Dropout(p=dropout_rate),
nn.Linear(in_features=in_ch, out_features=num_classes),
]
if num_classes > 1:
self.classifier.append(nn.Softmax(dim=-1))
self.classifier = nn.Sequential(*self.classifier)
if "inception" in base_model_name:
default_mean = IMAGENET_INCEPTION_MEAN
default_std = IMAGENET_INCEPTION_STD
self.default_mean = torch.Tensor(default_mean).view(1, 3, 1, 1)
self.default_std = torch.Tensor(default_std).view(1, 3, 1, 1)
if pretrained and pretrained_model_path is None:
url_key = f"{base_model_name}-{pretrained}"
load_pretrained_network(
self, default_model_urls[url_key], True, weight_keys="params"
)
elif pretrained_model_path is not None:
load_pretrained_network(
self, pretrained_model_path, True, weight_keys="params"
)
def preprocess(self, x):
x = (x - self.default_mean.to(x)) / self.default_std.to(x)
return x
def forward(self, x, return_mos=True, return_dist=False):
r"""Computation image quality using NIMA.
Args:
x: An input tensor. Shape :math:`(N, C, H, W)`.
return_mos: Whether to return mos_score.
            return_dist: Whether to return dist_score.
"""
# imagenet normalization of input is hard coded
x = self.preprocess(x)
x = self.base_model(x)[-1]
x = self.global_pool(x)
dist = self.classifier(x)
mos = dist_to_mos(dist)
return_list = []
if return_mos:
return_list.append(mos)
if return_dist:
return_list.append(dist)
if len(return_list) > 1:
return return_list
else:
return return_list[0]
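# Usage sketch: NIMA with the AVA-pretrained VGG16 head. Passing the string
# "ava" for `pretrained` is an assumption inferred from the default_model_urls
# keys above; the 224x224 input follows the class docstring for VGG16.
if __name__ == "__main__":
    import torch

    model = NIMA(base_model_name="vgg16", pretrained="ava")
    model.eval()
    x = torch.rand(1, 3, 224, 224)  # RGB in [0, 1]
    with torch.no_grad():
        mos, dist = model(x, return_mos=True, return_dist=True)
    print(mos, dist.shape)  # dist is the predicted (1, 10) score distribution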
| 4,045 | 33 | 139 | py |
BVQI | BVQI-master/pyiqa/archs/vif_arch.py | r"""VIF Metric
Created by: https://github.com/dingkeyan93/IQA-optimization/blob/master/IQA_pytorch/VIF.py
Modified by: Jiadi Mo (https://github.com/JiadiMo)
Refer to:
Matlab code from http://live.ece.utexas.edu/research/Quality/vifvec_release.zip;
"""
import numpy as np
import torch
from torch.nn import functional as F
from pyiqa.utils.color_util import to_y_channel
from pyiqa.utils.registry import ARCH_REGISTRY
def sp5_filters():
r"""Define spatial filters."""
filters = {}
filters["harmonics"] = np.array([1, 3, 5])
filters["mtx"] = np.array(
[
[0.3333, 0.2887, 0.1667, 0.0000, -0.1667, -0.2887],
[0.0000, 0.1667, 0.2887, 0.3333, 0.2887, 0.1667],
[0.3333, -0.0000, -0.3333, -0.0000, 0.3333, -0.0000],
[0.0000, 0.3333, 0.0000, -0.3333, 0.0000, 0.3333],
[0.3333, -0.2887, 0.1667, -0.0000, -0.1667, 0.2887],
[-0.0000, 0.1667, -0.2887, 0.3333, -0.2887, 0.1667],
]
)
filters["hi0filt"] = np.array(
[
[
-0.00033429,
-0.00113093,
-0.00171484,
-0.00133542,
-0.00080639,
-0.00133542,
-0.00171484,
-0.00113093,
-0.00033429,
],
[
-0.00113093,
-0.00350017,
-0.00243812,
0.00631653,
0.01261227,
0.00631653,
-0.00243812,
-0.00350017,
-0.00113093,
],
[
-0.00171484,
-0.00243812,
-0.00290081,
-0.00673482,
-0.00981051,
-0.00673482,
-0.00290081,
-0.00243812,
-0.00171484,
],
[
-0.00133542,
0.00631653,
-0.00673482,
-0.07027679,
-0.11435863,
-0.07027679,
-0.00673482,
0.00631653,
-0.00133542,
],
[
-0.00080639,
0.01261227,
-0.00981051,
-0.11435863,
0.81380200,
-0.11435863,
-0.00981051,
0.01261227,
-0.00080639,
],
[
-0.00133542,
0.00631653,
-0.00673482,
-0.07027679,
-0.11435863,
-0.07027679,
-0.00673482,
0.00631653,
-0.00133542,
],
[
-0.00171484,
-0.00243812,
-0.00290081,
-0.00673482,
-0.00981051,
-0.00673482,
-0.00290081,
-0.00243812,
-0.00171484,
],
[
-0.00113093,
-0.00350017,
-0.00243812,
0.00631653,
0.01261227,
0.00631653,
-0.00243812,
-0.00350017,
-0.00113093,
],
[
-0.00033429,
-0.00113093,
-0.00171484,
-0.00133542,
-0.00080639,
-0.00133542,
-0.00171484,
-0.00113093,
-0.00033429,
],
]
)
filters["lo0filt"] = np.array(
[
[0.00341614, -0.01551246, -0.03848215, -0.01551246, 0.00341614],
[-0.01551246, 0.05586982, 0.15925570, 0.05586982, -0.01551246],
[-0.03848215, 0.15925570, 0.40304148, 0.15925570, -0.03848215],
[-0.01551246, 0.05586982, 0.15925570, 0.05586982, -0.01551246],
[0.00341614, -0.01551246, -0.03848215, -0.01551246, 0.00341614],
]
)
filters["lofilt"] = 2 * np.array(
[
[
0.00085404,
-0.00244917,
-0.00387812,
-0.00944432,
-0.00962054,
-0.00944432,
-0.00387812,
-0.00244917,
0.00085404,
],
[
-0.00244917,
-0.00523281,
-0.00661117,
0.00410600,
0.01002988,
0.00410600,
-0.00661117,
-0.00523281,
-0.00244917,
],
[
-0.00387812,
-0.00661117,
0.01396746,
0.03277038,
0.03981393,
0.03277038,
0.01396746,
-0.00661117,
-0.00387812,
],
[
-0.00944432,
0.00410600,
0.03277038,
0.06426333,
0.08169618,
0.06426333,
0.03277038,
0.00410600,
-0.00944432,
],
[
-0.00962054,
0.01002988,
0.03981393,
0.08169618,
0.10096540,
0.08169618,
0.03981393,
0.01002988,
-0.00962054,
],
[
-0.00944432,
0.00410600,
0.03277038,
0.06426333,
0.08169618,
0.06426333,
0.03277038,
0.00410600,
-0.00944432,
],
[
-0.00387812,
-0.00661117,
0.01396746,
0.03277038,
0.03981393,
0.03277038,
0.01396746,
-0.00661117,
-0.00387812,
],
[
-0.00244917,
-0.00523281,
-0.00661117,
0.00410600,
0.01002988,
0.00410600,
-0.00661117,
-0.00523281,
-0.00244917,
],
[
0.00085404,
-0.00244917,
-0.00387812,
-0.00944432,
-0.00962054,
-0.00944432,
-0.00387812,
-0.00244917,
0.00085404,
],
]
)
filters["bfilts"] = np.array(
[
[
0.00277643,
0.00496194,
0.01026699,
0.01455399,
0.01026699,
0.00496194,
0.00277643,
-0.00986904,
-0.00893064,
0.01189859,
0.02755155,
0.01189859,
-0.00893064,
-0.00986904,
-0.01021852,
-0.03075356,
-0.08226445,
-0.11732297,
-0.08226445,
-0.03075356,
-0.01021852,
0.00000000,
0.00000000,
0.00000000,
0.00000000,
0.00000000,
0.00000000,
0.00000000,
0.01021852,
0.03075356,
0.08226445,
0.11732297,
0.08226445,
0.03075356,
0.01021852,
0.00986904,
0.00893064,
-0.01189859,
-0.02755155,
-0.01189859,
0.00893064,
0.00986904,
-0.00277643,
-0.00496194,
-0.01026699,
-0.01455399,
-0.01026699,
-0.00496194,
-0.00277643,
],
[
-0.00343249,
-0.00640815,
-0.00073141,
0.01124321,
0.00182078,
0.00285723,
0.01166982,
-0.00358461,
-0.01977507,
-0.04084211,
-0.00228219,
0.03930573,
0.01161195,
0.00128000,
0.01047717,
0.01486305,
-0.04819057,
-0.12227230,
-0.05394139,
0.00853965,
-0.00459034,
0.00790407,
0.04435647,
0.09454202,
-0.00000000,
-0.09454202,
-0.04435647,
-0.00790407,
0.00459034,
-0.00853965,
0.05394139,
0.12227230,
0.04819057,
-0.01486305,
-0.01047717,
-0.00128000,
-0.01161195,
-0.03930573,
0.00228219,
0.04084211,
0.01977507,
0.00358461,
-0.01166982,
-0.00285723,
-0.00182078,
-0.01124321,
0.00073141,
0.00640815,
0.00343249,
],
[
0.00343249,
0.00358461,
-0.01047717,
-0.00790407,
-0.00459034,
0.00128000,
0.01166982,
0.00640815,
0.01977507,
-0.01486305,
-0.04435647,
0.00853965,
0.01161195,
0.00285723,
0.00073141,
0.04084211,
0.04819057,
-0.09454202,
-0.05394139,
0.03930573,
0.00182078,
-0.01124321,
0.00228219,
0.12227230,
-0.00000000,
-0.12227230,
-0.00228219,
0.01124321,
-0.00182078,
-0.03930573,
0.05394139,
0.09454202,
-0.04819057,
-0.04084211,
-0.00073141,
-0.00285723,
-0.01161195,
-0.00853965,
0.04435647,
0.01486305,
-0.01977507,
-0.00640815,
-0.01166982,
-0.00128000,
0.00459034,
0.00790407,
0.01047717,
-0.00358461,
-0.00343249,
],
[
-0.00277643,
0.00986904,
0.01021852,
-0.00000000,
-0.01021852,
-0.00986904,
0.00277643,
-0.00496194,
0.00893064,
0.03075356,
-0.00000000,
-0.03075356,
-0.00893064,
0.00496194,
-0.01026699,
-0.01189859,
0.08226445,
-0.00000000,
-0.08226445,
0.01189859,
0.01026699,
-0.01455399,
-0.02755155,
0.11732297,
-0.00000000,
-0.11732297,
0.02755155,
0.01455399,
-0.01026699,
-0.01189859,
0.08226445,
-0.00000000,
-0.08226445,
0.01189859,
0.01026699,
-0.00496194,
0.00893064,
0.03075356,
-0.00000000,
-0.03075356,
-0.00893064,
0.00496194,
-0.00277643,
0.00986904,
0.01021852,
-0.00000000,
-0.01021852,
-0.00986904,
0.00277643,
],
[
-0.01166982,
-0.00128000,
0.00459034,
0.00790407,
0.01047717,
-0.00358461,
-0.00343249,
-0.00285723,
-0.01161195,
-0.00853965,
0.04435647,
0.01486305,
-0.01977507,
-0.00640815,
-0.00182078,
-0.03930573,
0.05394139,
0.09454202,
-0.04819057,
-0.04084211,
-0.00073141,
-0.01124321,
0.00228219,
0.12227230,
-0.00000000,
-0.12227230,
-0.00228219,
0.01124321,
0.00073141,
0.04084211,
0.04819057,
-0.09454202,
-0.05394139,
0.03930573,
0.00182078,
0.00640815,
0.01977507,
-0.01486305,
-0.04435647,
0.00853965,
0.01161195,
0.00285723,
0.00343249,
0.00358461,
-0.01047717,
-0.00790407,
-0.00459034,
0.00128000,
0.01166982,
],
[
-0.01166982,
-0.00285723,
-0.00182078,
-0.01124321,
0.00073141,
0.00640815,
0.00343249,
-0.00128000,
-0.01161195,
-0.03930573,
0.00228219,
0.04084211,
0.01977507,
0.00358461,
0.00459034,
-0.00853965,
0.05394139,
0.12227230,
0.04819057,
-0.01486305,
-0.01047717,
0.00790407,
0.04435647,
0.09454202,
-0.00000000,
-0.09454202,
-0.04435647,
-0.00790407,
0.01047717,
0.01486305,
-0.04819057,
-0.12227230,
-0.05394139,
0.00853965,
-0.00459034,
-0.00358461,
-0.01977507,
-0.04084211,
-0.00228219,
0.03930573,
0.01161195,
0.00128000,
-0.00343249,
-0.00640815,
-0.00073141,
0.01124321,
0.00182078,
0.00285723,
0.01166982,
],
]
).T
return filters
def corrDn(image, filt, step=1, channels=1):
r"""Compute correlation of image with FILT, followed by downsampling.
Args:
image: A tensor. Shape :math:`(N, C, H, W)`.
filt: A filter.
step: Downsampling factors.
channels: Number of channels.
"""
filt_ = (
torch.from_numpy(filt)
.float()
.unsqueeze(0)
.unsqueeze(0)
.repeat(channels, 1, 1, 1)
.to(image.device)
)
p = (filt_.shape[2] - 1) // 2
image = F.pad(image, (p, p, p, p), "reflect")
img = F.conv2d(image, filt_, stride=step, padding=0, groups=channels)
return img
def SteerablePyramidSpace(image, height=4, order=5, channels=1):
r"""Construct a steerable pyramid on image.
Args:
image: A tensor. Shape :math:`(N, C, H, W)`.
height (int): Number of pyramid levels to build.
order (int): Number of orientations.
channels (int): Number of channels.
"""
num_orientations = order + 1
filters = sp5_filters()
hi0 = corrDn(image, filters["hi0filt"], step=1, channels=channels)
pyr_coeffs = []
pyr_coeffs.append(hi0)
lo = corrDn(image, filters["lo0filt"], step=1, channels=channels)
for _ in range(height):
bfiltsz = int(np.floor(np.sqrt(filters["bfilts"].shape[0])))
for b in range(num_orientations):
filt = filters["bfilts"][:, b].reshape(bfiltsz, bfiltsz).T
band = corrDn(lo, filt, step=1, channels=channels)
pyr_coeffs.append(band)
lo = corrDn(lo, filters["lofilt"], step=2, channels=channels)
pyr_coeffs.append(lo)
return pyr_coeffs
@ARCH_REGISTRY.register()
class VIF(torch.nn.Module):
r"""Image Information and Visual Quality metric
Args:
channels (int): Number of channels.
level (int): Number of levels to build.
ori (int): Number of orientations.
Reference:
Sheikh, Hamid R., and Alan C. Bovik. "Image information and visual quality."
IEEE Transactions on image processing 15, no. 2 (2006): 430-444.
"""
def __init__(self, channels=1, level=4, ori=6):
super(VIF, self).__init__()
self.ori = ori - 1
self.level = level
self.channels = channels
self.M = 3
self.subbands = [4, 7, 10, 13, 16, 19, 22, 25]
self.sigma_nsq = 0.4
self.tol = 1e-12
def corrDn_win(self, image, filt, step=1, channels=1, start=[0, 0], end=[0, 0]):
r"""Compute correlation of image with FILT using window, followed by downsampling.
Args:
image: A tensor. Shape :math:`(N, C, H, W)`.
filt: A filter.
step (int): Downsampling factors.
channels (int): Number of channels.
start (list): The window over which the convolution occurs.
end (list): The window over which the convolution occurs.
"""
filt_ = (
torch.from_numpy(filt)
.float()
.unsqueeze(0)
.unsqueeze(0)
.repeat(channels, 1, 1, 1)
.to(image.device)
)
p = (filt_.shape[2] - 1) // 2
image = F.pad(image, (p, p, p, p), "reflect")
img = F.conv2d(image, filt_, stride=1, padding=0, groups=channels)
img = img[:, :, start[0] : end[0] : step, start[1] : end[1] : step]
return img
def vifsub_est_M(self, org, dist):
r"""Calculate the parameters of the distortion channel.
Args:
org: A reference tensor. Shape :math:`(N, C, H, W)`.
dist: A distortion tensor. Shape :math:`(N, C, H, W)`.
"""
g_all = []
vv_all = []
for i in range(len(self.subbands)):
sub = self.subbands[i] - 1
y = org[sub]
yn = dist[sub]
lev = np.ceil((sub - 1) / 6)
winsize = int(2 ** lev + 1)
win = np.ones((winsize, winsize))
newsizeX = int(np.floor(y.shape[2] / self.M) * self.M)
newsizeY = int(np.floor(y.shape[3] / self.M) * self.M)
y = y[:, :, :newsizeX, :newsizeY]
yn = yn[:, :, :newsizeX, :newsizeY]
winstart = [int(1 * np.floor(self.M / 2)), int(1 * np.floor(self.M / 2))]
winend = [
int(y.shape[2] - np.ceil(self.M / 2)) + 1,
int(y.shape[3] - np.ceil(self.M / 2)) + 1,
]
mean_x = self.corrDn_win(
y,
win / (winsize ** 2),
step=self.M,
channels=self.channels,
start=winstart,
end=winend,
)
mean_y = self.corrDn_win(
yn,
win / (winsize ** 2),
step=self.M,
channels=self.channels,
start=winstart,
end=winend,
)
cov_xy = (
self.corrDn_win(
y * yn,
win,
step=self.M,
channels=self.channels,
start=winstart,
end=winend,
)
- (winsize ** 2) * mean_x * mean_y
)
ss_x = (
self.corrDn_win(
y ** 2,
win,
step=self.M,
channels=self.channels,
start=winstart,
end=winend,
)
- (winsize ** 2) * mean_x ** 2
)
ss_y = (
self.corrDn_win(
yn ** 2,
win,
step=self.M,
channels=self.channels,
start=winstart,
end=winend,
)
- (winsize ** 2) * mean_y ** 2
)
ss_x = F.relu(ss_x)
ss_y = F.relu(ss_y)
g = cov_xy / (ss_x + self.tol)
vv = (ss_y - g * cov_xy) / (winsize ** 2)
g = g.masked_fill(ss_x < self.tol, 0)
vv[ss_x < self.tol] = ss_y[ss_x < self.tol]
ss_x = ss_x.masked_fill(ss_x < self.tol, 0)
g = g.masked_fill(ss_y < self.tol, 0)
vv = vv.masked_fill(ss_y < self.tol, 0)
vv[g < 0] = ss_y[g < 0]
g = F.relu(g)
vv = vv.masked_fill(vv < self.tol, self.tol)
g_all.append(g)
vv_all.append(vv)
return g_all, vv_all
def refparams_vecgsm(self, org):
r"""Calculate the parameters of the reference image.
Args:
org: A reference tensor. Shape :math:`(N, C, H, W)`.
"""
ssarr, l_arr, cu_arr = [], [], []
for i in range(len(self.subbands)):
sub = self.subbands[i] - 1
y = org[sub]
M = self.M
newsizeX = int(np.floor(y.shape[2] / M) * M)
newsizeY = int(np.floor(y.shape[3] / M) * M)
y = y[:, :, :newsizeX, :newsizeY]
B, C, H, W = y.shape
temp = []
for j in range(M):
for k in range(M):
temp.append(
y[:, :, k : H - (M - k) + 1, j : W - (M - j) + 1].reshape(
B, C, -1
)
)
temp = torch.stack(temp, dim=3)
mcu = torch.mean(temp, dim=2).unsqueeze(2).repeat(1, 1, temp.shape[2], 1)
cu = (
torch.matmul((temp - mcu).permute(0, 1, 3, 2), temp - mcu)
/ temp.shape[2]
)
temp = []
for j in range(M):
for k in range(M):
temp.append(y[:, :, k : H + 1 : M, j : W + 1 : M].reshape(B, C, -1))
temp = torch.stack(temp, dim=2)
ss = torch.matmul(torch.pinverse(cu), temp)
ss = torch.sum(ss * temp, dim=2) / (M * M)
ss = ss.reshape(B, C, H // M, W // M)
v, _ = torch.linalg.eigh(cu, UPLO="U")
l_arr.append(v)
ssarr.append(ss)
cu_arr.append(cu)
return ssarr, l_arr, cu_arr
def vif(self, x, y):
r"""VIF metric. Order of input is important.
Args:
x: A distortion tensor. Shape :math:`(N, C, H, W)`.
y: A reference tensor. Shape :math:`(N, C, H, W)`.
"""
# Convert RGB image to YCBCR and use the Y-channel.
x = to_y_channel(x, 255)
y = to_y_channel(y, 255)
sp_x = SteerablePyramidSpace(
x, height=self.level, order=self.ori, channels=self.channels
)[::-1]
sp_y = SteerablePyramidSpace(
y, height=self.level, order=self.ori, channels=self.channels
)[::-1]
g_all, vv_all = self.vifsub_est_M(sp_y, sp_x)
ss_arr, l_arr, cu_arr = self.refparams_vecgsm(sp_y)
num, den = [], []
for i in range(len(self.subbands)):
sub = self.subbands[i]
g = g_all[i]
vv = vv_all[i]
ss = ss_arr[i]
lamda = l_arr[i]
neigvals = lamda.shape[2]
lev = np.ceil((sub - 1) / 6)
winsize = 2 ** lev + 1
offset = (winsize - 1) / 2
offset = int(np.ceil(offset / self.M))
_, _, H, W = g.shape
g = g[:, :, offset : H - offset, offset : W - offset]
vv = vv[:, :, offset : H - offset, offset : W - offset]
ss = ss[:, :, offset : H - offset, offset : W - offset]
temp1 = 0
temp2 = 0
for j in range(neigvals):
cc = lamda[:, :, j].unsqueeze(2).unsqueeze(3)
temp1 = temp1 + torch.sum(
torch.log2(1 + g * g * ss * cc / (vv + self.sigma_nsq)), dim=[2, 3]
)
temp2 = temp2 + torch.sum(
torch.log2(1 + ss * cc / (self.sigma_nsq)), dim=[2, 3]
)
num.append(temp1.mean(1))
den.append(temp2.mean(1))
return torch.stack(num, dim=1).sum(1) / (torch.stack(den, dim=1).sum(1) + 1e-12)
def forward(self, X, Y):
r"""Args:
x: A distortion tensor. Shape :math:`(N, C, H, W)`.
y: A reference tensor. Shape :math:`(N, C, H, W)`.
Order of input is important.
"""
assert (
X.shape == Y.shape
), "Input and reference images should have the same shape, but got"
f"{X.shape} and {Y.shape}"
score = self.vif(X, Y)
return score
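# Usage sketch: VIF on random RGB tensors in [0, 1]. Note the argument order of
# forward(): the distorted image comes first and the reference second. The
# 256x256 resolution is an illustrative assumption.
if __name__ == "__main__":
    dist_img = torch.rand(1, 3, 256, 256)
    ref_img = torch.rand(1, 3, 256, 256)
    vif_metric = VIF(channels=1, level=4, ori=6)
    print(vif_metric(dist_img, ref_img))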
| 25,964 | 28.913594 | 90 | py |
BVQI | BVQI-master/pyiqa/archs/fid_arch.py | """FID and clean-fid metric
Codes are borrowed from the clean-fid project:
- https://github.com/GaParmar/clean-fid
Ref:
[1] GANs Trained by a Two Time-Scale Update Rule Converge to a Local Nash Equilibrium.
Martin Heusel, Hubert Ramsauer, Thomas Unterthiner, Bernhard Nessler, Sepp Hochreiter
NeurIPS, 2017
[2] On Aliased Resizing and Surprising Subtleties in GAN Evaluation
Gaurav Parmar, Richard Zhang, Jun-Yan Zhu
CVPR, 2022
"""
import os
from email.policy import default
from glob import glob
import numpy as np
import torch
import torchvision
from PIL import Image
from scipy import linalg
from torch import nn
from tqdm import tqdm
from pyiqa.utils.download_util import load_file_from_url
from pyiqa.utils.img_util import is_image_file
from pyiqa.utils.registry import ARCH_REGISTRY
from .inception import InceptionV3
default_model_urls = {
"ffhq_clean_trainval70k_512.npz": "https://github.com/chaofengc/IQA-PyTorch/releases/download/v0.1-weights/ffhq_clean_trainval70k_512.npz",
"ffhq_clean_trainval70k_512_kid.npz": "https://github.com/chaofengc/IQA-PyTorch/releases/download/v0.1-weights/ffhq_clean_trainval70k_512_kid.npz",
}
class ResizeDataset(torch.utils.data.Dataset):
"""
A placeholder Dataset that enables parallelizing the resize operation
using multiple CPU cores
files: list of all files in the folder
mode:
        - clean: use PIL resize before calculating features
        - legacy_pytorch: do not resize here; the pytorch inception model resizes internally
"""
def __init__(self, files, mode, size=(299, 299)):
self.files = files
self.transforms = torchvision.transforms.ToTensor()
self.size = size
self.mode = mode
def __len__(self):
return len(self.files)
def __getitem__(self, i):
path = str(self.files[i])
img_pil = Image.open(path).convert("RGB")
if self.mode == "clean":
def resize_single_channel(x_np):
img = Image.fromarray(x_np.astype(np.float32), mode="F")
img = img.resize(self.size, resample=Image.BICUBIC)
return np.asarray(img).clip(0, 255).reshape(*self.size, 1)
img_np = np.array(img_pil)
img_np = [resize_single_channel(img_np[:, :, idx]) for idx in range(3)]
img_np = np.concatenate(img_np, axis=2).astype(np.float32)
img_np = (img_np - 128) / 128
img_t = torch.tensor(img_np).permute(2, 0, 1)
else:
img_np = np.array(img_pil).clip(0, 255)
img_t = self.transforms(img_np)
return img_t
def get_reference_statistics(name, res, mode="clean", split="test", metric="FID"):
r"""
Load precomputed reference statistics for commonly used datasets
"""
base_url = "https://www.cs.cmu.edu/~clean-fid/stats"
if split == "custom":
res = "na"
if metric == "FID":
rel_path = (f"{name}_{mode}_{split}_{res}.npz").lower()
url = f"{base_url}/{rel_path}"
if rel_path in default_model_urls.keys():
fpath = load_file_from_url(default_model_urls[rel_path])
else:
fpath = load_file_from_url(url)
stats = np.load(fpath)
mu, sigma = stats["mu"], stats["sigma"]
return mu, sigma
elif metric == "KID":
rel_path = (f"{name}_{mode}_{split}_{res}_kid.npz").lower()
url = f"{base_url}/{rel_path}"
if rel_path in default_model_urls.keys():
fpath = load_file_from_url(default_model_urls[rel_path])
else:
fpath = load_file_from_url(url)
stats = np.load(fpath)
return stats["feats"]
def frechet_distance(mu1, sigma1, mu2, sigma2, eps=1e-6):
"""
Numpy implementation of the Frechet Distance.
The Frechet distance between two multivariate Gaussians X_1 ~ N(mu_1, C_1)
and X_2 ~ N(mu_2, C_2) is
d^2 = ||mu_1 - mu_2||^2 + Tr(C_1 + C_2 - 2*sqrt(C_1*C_2)).
Stable version by Danica J. Sutherland.
Params:
mu1 : Numpy array containing the activations of a layer of the
inception net (like returned by the function 'get_predictions')
for generated samples.
mu2 : The sample mean over activations, precalculated on an
representative data set.
sigma1: The covariance matrix over activations for generated samples.
sigma2: The covariance matrix over activations, precalculated on an
representative data set.
"""
mu1 = np.atleast_1d(mu1)
mu2 = np.atleast_1d(mu2)
sigma1 = np.atleast_2d(sigma1)
sigma2 = np.atleast_2d(sigma2)
assert (
mu1.shape == mu2.shape
), "Training and test mean vectors have different lengths"
assert (
sigma1.shape == sigma2.shape
), "Training and test covariances have different dimensions"
diff = mu1 - mu2
# Product might be almost singular
covmean, _ = linalg.sqrtm(sigma1.dot(sigma2), disp=False)
if not np.isfinite(covmean).all():
msg = (
"fid calculation produces singular product; "
"adding %s to diagonal of cov estimates"
) % eps
print(msg)
offset = np.eye(sigma1.shape[0]) * eps
covmean = linalg.sqrtm((sigma1 + offset).dot(sigma2 + offset))
# Numerical error might give slight imaginary component
if np.iscomplexobj(covmean):
if not np.allclose(np.diagonal(covmean).imag, 0, atol=1e-3):
m = np.max(np.abs(covmean.imag))
raise ValueError("Imaginary component {}".format(m))
covmean = covmean.real
tr_covmean = np.trace(covmean)
return diff.dot(diff) + np.trace(sigma1) + np.trace(sigma2) - 2 * tr_covmean
def kernel_distance(feats1, feats2, num_subsets=100, max_subset_size=1000):
r"""
Compute the KID score given the sets of features
"""
n = feats1.shape[1]
m = min(min(feats1.shape[0], feats2.shape[0]), max_subset_size)
t = 0
for _subset_idx in range(num_subsets):
x = feats2[np.random.choice(feats2.shape[0], m, replace=False)]
y = feats1[np.random.choice(feats1.shape[0], m, replace=False)]
a = (x @ x.T / n + 1) ** 3 + (y @ y.T / n + 1) ** 3
b = (x @ y.T / n + 1) ** 3
t += (a.sum() - np.diag(a).sum()) / (m - 1) - b.sum() * 2 / m
kid = t / num_subsets / m
return float(kid)
def get_folder_features(
fdir,
model=None,
num_workers=12,
batch_size=32,
device=torch.device("cuda"),
mode="clean",
description="",
verbose=True,
):
r"""
Compute the inception features for a folder of image files
"""
files = sorted(
[file for file in glob(os.path.join(fdir, "*")) if is_image_file(file)]
)
if verbose:
print(f"Found {len(files)} images in the folder {fdir}")
dataset = ResizeDataset(files, mode=mode)
dataloader = torch.utils.data.DataLoader(
dataset,
batch_size=batch_size,
shuffle=False,
drop_last=False,
num_workers=num_workers,
)
# collect all inception features
if verbose:
pbar = tqdm(dataloader, desc=description)
else:
pbar = dataloader
if mode == "clean":
resize_input = normalize_input = False
else:
resize_input = normalize_input = True
l_feats = []
with torch.no_grad():
for batch in pbar:
feat = model(batch.to(device), resize_input, normalize_input)
feat = feat[0].squeeze(-1).squeeze(-1).detach().cpu().numpy()
l_feats.append(feat)
np_feats = np.concatenate(l_feats)
return np_feats
@ARCH_REGISTRY.register()
class FID(nn.Module):
"""FID and Clean-FID metric
Args:
mode: [clean, legacy_pytorch]. Default: clean
"""
def __init__(
self,
dims=2048,
) -> None:
super().__init__()
block_idx = InceptionV3.BLOCK_INDEX_BY_DIM[dims]
self.model = InceptionV3(output_blocks=[block_idx])
self.model.eval()
def forward(
self,
fdir1=None,
fdir2=None,
mode="clean",
dataset_name="FFHQ",
dataset_res=1024,
dataset_split="train",
num_workers=12,
batch_size=32,
device=torch.device("cuda"),
verbose=True,
):
assert mode in [
"clean",
"legacy_pytorch",
"legacy_tensorflow",
], "Invalid calculation mode, should be in [clean, legacy_pytorch, legacy_tensorflow]"
# if both dirs are specified, compute FID between folders
if fdir1 is not None and fdir2 is not None:
            if verbose:
print("compute FID between two folders")
fbname1 = os.path.basename(fdir1)
np_feats1 = get_folder_features(
fdir1,
self.model,
num_workers=num_workers,
batch_size=batch_size,
device=device,
mode=mode,
description=f"FID {fbname1}: ",
verbose=verbose,
)
fbname2 = os.path.basename(fdir2)
np_feats2 = get_folder_features(
fdir2,
self.model,
num_workers=num_workers,
batch_size=batch_size,
device=device,
mode=mode,
description=f"FID {fbname2}: ",
verbose=verbose,
)
mu1, sig1 = np.mean(np_feats1, axis=0), np.cov(np_feats1, rowvar=False)
mu2, sig2 = np.mean(np_feats2, axis=0), np.cov(np_feats2, rowvar=False)
return frechet_distance(mu1, sig1, mu2, sig2)
# compute fid of a folder
elif fdir1 is not None and fdir2 is None:
if verbose:
print(
f"compute FID of a folder with {dataset_name}-{mode}-{dataset_split}-{dataset_res} statistics"
)
fbname1 = os.path.basename(fdir1)
np_feats1 = get_folder_features(
fdir1,
self.model,
num_workers=num_workers,
batch_size=batch_size,
device=device,
mode=mode,
description=f"FID {fbname1}: ",
verbose=verbose,
)
# Load reference FID statistics (download if needed)
ref_mu, ref_sigma = get_reference_statistics(
dataset_name, dataset_res, mode=mode, split=dataset_split
)
mu1, sig1 = np.mean(np_feats1, axis=0), np.cov(np_feats1, rowvar=False)
score = frechet_distance(mu1, sig1, ref_mu, ref_sigma)
return score
else:
raise ValueError("invalid combination of arguments entered")
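# Usage sketch: the folder paths below are placeholders, not paths from this
# repository, so the actual call is left commented out; point them at two
# directories of images before running. Pass device=torch.device("cpu") if no
# GPU is available (the default is cuda).
if __name__ == "__main__":
    fid_metric = FID()
    # score = fid_metric(
    #     fdir1="path/to/real_images",
    #     fdir2="path/to/generated_images",
    #     mode="clean",
    #     batch_size=32,
    # )
    # print(score)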
| 10,849 | 31.779456 | 151 | py |
BVQI | BVQI-master/pyiqa/archs/brisque_arch.py | r"""BRISQUE Metric
Created by: https://github.com/photosynthesis-team/piq/blob/master/piq/brisque.py
Modified by: Jiadi Mo (https://github.com/JiadiMo)
Reference:
MATLAB codes: https://live.ece.utexas.edu/research/Quality/index_algorithms.htm BRISQUE;
Pretrained model from: https://github.com/photosynthesis-team/piq/releases/download/v0.4.0/brisque_svm_weights.pt
"""
import torch
from pyiqa.matlab_utils import imresize
from pyiqa.utils.color_util import to_y_channel
from pyiqa.utils.download_util import load_file_from_url
from pyiqa.utils.registry import ARCH_REGISTRY
from .func_util import estimate_aggd_param, estimate_ggd_param, normalize_img_with_guass
default_model_urls = {
"url": "https://github.com/chaofengc/IQA-PyTorch/releases/download/v0.1-weights/brisque_svm_weights.pth"
}
def brisque(
x: torch.Tensor,
kernel_size: int = 7,
kernel_sigma: float = 7 / 6,
test_y_channel: bool = True,
pretrained_model_path: str = None,
) -> torch.Tensor:
r"""Interface of BRISQUE index.
Args:
x: An input tensor. Shape :math:`(N, C, H, W)`.
kernel_size: The side-length of the sliding window used in comparison. Must be an odd value.
kernel_sigma: Sigma of normal distribution.
        test_y_channel: Whether to use the y-channel of YCbCr.
pretrained_model_path: The model path.
Returns:
Value of BRISQUE index.
References:
Mittal, Anish, Anush Krishna Moorthy, and Alan Conrad Bovik.
"No-reference image quality assessment in the spatial domain."
IEEE Transactions on image processing 21, no. 12 (2012): 4695-4708.
"""
if test_y_channel and x.size(1) == 3:
x = to_y_channel(x, 255.0)
else:
x = x * 255
features = []
num_of_scales = 2
for _ in range(num_of_scales):
features.append(natural_scene_statistics(x, kernel_size, kernel_sigma))
x = imresize(x, scale=0.5, antialiasing=True)
features = torch.cat(features, dim=-1)
scaled_features = scale_features(features)
if pretrained_model_path:
sv_coef, sv = torch.load(pretrained_model_path)
sv_coef = sv_coef.to(x)
sv = sv.to(x)
# gamma and rho are SVM model parameters taken from official implementation of BRISQUE on MATLAB
# Source: https://live.ece.utexas.edu/research/Quality/index_algorithms.htm
gamma = 0.05
rho = -153.591
sv.t_()
kernel_features = rbf_kernel(features=scaled_features, sv=sv, gamma=gamma)
score = kernel_features @ sv_coef
return score - rho
def natural_scene_statistics(
luma: torch.Tensor, kernel_size: int = 7, sigma: float = 7.0 / 6
) -> torch.Tensor:
luma_nrmlzd = normalize_img_with_guass(luma, kernel_size, sigma, padding="same")
alpha, sigma = estimate_ggd_param(luma_nrmlzd)
features = [alpha, sigma.pow(2)]
shifts = [(0, 1), (1, 0), (1, 1), (-1, 1)]
for shift in shifts:
shifted_luma_nrmlzd = torch.roll(luma_nrmlzd, shifts=shift, dims=(-2, -1))
alpha, sigma_l, sigma_r = estimate_aggd_param(
luma_nrmlzd * shifted_luma_nrmlzd, return_sigma=True
)
eta = (sigma_r - sigma_l) * torch.exp(
torch.lgamma(2.0 / alpha)
- (torch.lgamma(1.0 / alpha) + torch.lgamma(3.0 / alpha)) / 2
)
features.extend((alpha, eta, sigma_l.pow(2), sigma_r.pow(2)))
return torch.stack(features, dim=-1)
def scale_features(features: torch.Tensor) -> torch.Tensor:
lower_bound = -1
upper_bound = 1
# Feature range is taken from official implementation of BRISQUE on MATLAB.
# Source: https://live.ece.utexas.edu/research/Quality/index_algorithms.htm
feature_ranges = torch.tensor(
[
[0.338, 10],
[0.017204, 0.806612],
[0.236, 1.642],
[-0.123884, 0.20293],
[0.000155, 0.712298],
[0.001122, 0.470257],
[0.244, 1.641],
[-0.123586, 0.179083],
[0.000152, 0.710456],
[0.000975, 0.470984],
[0.249, 1.555],
[-0.135687, 0.100858],
[0.000174, 0.684173],
[0.000913, 0.534174],
[0.258, 1.561],
[-0.143408, 0.100486],
[0.000179, 0.685696],
[0.000888, 0.536508],
[0.471, 3.264],
[0.012809, 0.703171],
[0.218, 1.046],
[-0.094876, 0.187459],
[1.5e-005, 0.442057],
[0.001272, 0.40803],
[0.222, 1.042],
[-0.115772, 0.162604],
[1.6e-005, 0.444362],
[0.001374, 0.40243],
[0.227, 0.996],
[-0.117188, 0.09832299999999999],
[3e-005, 0.531903],
[0.001122, 0.369589],
[0.228, 0.99],
[-0.12243, 0.098658],
[2.8e-005, 0.530092],
[0.001118, 0.370399],
]
).to(features)
scaled_features = lower_bound + (upper_bound - lower_bound) * (
features - feature_ranges[..., 0]
) / (feature_ranges[..., 1] - feature_ranges[..., 0])
return scaled_features
def rbf_kernel(
features: torch.Tensor, sv: torch.Tensor, gamma: float = 0.05
) -> torch.Tensor:
dist = (features.unsqueeze(dim=-1) - sv.unsqueeze(dim=0)).pow(2).sum(dim=1)
return torch.exp(-dist * gamma)
@ARCH_REGISTRY.register()
class BRISQUE(torch.nn.Module):
r"""Creates a criterion that measures the BRISQUE score.
Args:
kernel_size (int): By default, the mean and covariance of a pixel is obtained
by convolution with given filter_size. Must be an odd value.
kernel_sigma (float): Standard deviation for Gaussian kernel.
        test_y_channel (bool): Whether to use the y-channel of YCbCr.
pretrained_model_path (str): The model path.
"""
def __init__(
self,
kernel_size: int = 7,
kernel_sigma: float = 7 / 6,
test_y_channel: bool = True,
pretrained_model_path: str = None,
) -> None:
super().__init__()
self.kernel_size = kernel_size
# This check might look redundant because kernel size is checked within the brisque function anyway.
# However, this check allows to fail fast when the loss is being initialised and training has not been started.
assert kernel_size % 2 == 1, f"Kernel size must be odd, got [{kernel_size}]"
self.kernel_sigma = kernel_sigma
self.test_y_channel = test_y_channel
if pretrained_model_path is not None:
self.pretrained_model_path = pretrained_model_path
else:
self.pretrained_model_path = load_file_from_url(default_model_urls["url"])
def forward(self, x: torch.Tensor) -> torch.Tensor:
r"""Computation of BRISQUE score as a loss function.
Args:
x: An input tensor with (N, C, H, W) shape. RGB channel order for colour images.
Returns:
Value of BRISQUE metric.
"""
return brisque(
x,
kernel_size=self.kernel_size,
kernel_sigma=self.kernel_sigma,
test_y_channel=self.test_y_channel,
pretrained_model_path=self.pretrained_model_path,
)
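# Usage sketch: BRISQUE on a random RGB batch in [0, 1]. The 384x384 size is an
# arbitrary assumption; the SVM weights are downloaded on first use. Lower
# BRISQUE scores normally indicate better perceptual quality, so the value on
# pure noise is only a smoke test.
if __name__ == "__main__":
    import torch

    x = torch.rand(1, 3, 384, 384)
    print(BRISQUE()(x))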
| 7,328 | 32.774194 | 119 | py |
BVQI | BVQI-master/pyiqa/archs/psnr_arch.py | r"""Peak signal-to-noise ratio (PSNR) Metric
Created by: https://github.com/photosynthesis-team/piq
Modified by: Jiadi Mo (https://github.com/JiadiMo)
Refer to:
Wikipedia from https://en.wikipedia.org/wiki/Peak_signal-to-noise_ratio
QIQA from https://github.com/francois-rozet/piqa/blob/master/piqa/psnr.py
"""
import torch
import torch.nn as nn
from pyiqa.utils.color_util import to_y_channel
from pyiqa.utils.registry import ARCH_REGISTRY
def psnr(x, y, test_y_channel=False, data_range=1.0, eps=1e-8, color_space="yiq"):
r"""Compute Peak Signal-to-Noise Ratio for a batch of images.
Supports both greyscale and color images with RGB channel order.
Args:
x: An input tensor. Shape :math:`(N, C, H, W)`.
y: A target tensor. Shape :math:`(N, C, H, W)`.
        test_y_channel (Boolean): Convert RGB image to YCbCr format and compute PSNR
            only on the luminance channel if `True`. Compute on all 3 channels otherwise.
data_range: Maximum value range of images (default 1.0).
Returns:
        PSNR index of similarity between two images.
"""
if (x.shape[1] == 3) and test_y_channel:
# Convert RGB image to YCbCr and use Y-channel
x = to_y_channel(x, data_range, color_space)
y = to_y_channel(y, data_range, color_space)
mse = torch.mean((x - y) ** 2, dim=[1, 2, 3])
score = 10 * torch.log10(data_range ** 2 / (mse + eps))
return score
@ARCH_REGISTRY.register()
class PSNR(nn.Module):
r"""
Args:
X, Y (torch.Tensor): distorted image and reference image tensor with shape (B, 3, H, W)
        test_y_channel (Boolean): Convert RGB image to YCbCr format and compute PSNR
            only on the luminance channel if `True`. Compute on all 3 channels otherwise.
kwargs: other parameters, including
- data_range: maximun numeric value
- eps: small constant for numeric stability
Return:
score (torch.Tensor): (B, 1)
"""
def __init__(self, test_y_channel=False, crop_border=0, **kwargs):
super().__init__()
self.test_y_channel = test_y_channel
self.kwargs = kwargs
self.crop_border = crop_border
def forward(self, X, Y):
assert (
X.shape == Y.shape
), f"Input and reference images should have the same shape, but got {X.shape} and {Y.shape}"
if self.crop_border != 0:
crop_border = self.crop_border
X = X[..., crop_border:-crop_border, crop_border:-crop_border]
Y = Y[..., crop_border:-crop_border, crop_border:-crop_border]
score = psnr(X, Y, self.test_y_channel, **self.kwargs)
return score
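# Usage sketch: PSNR between a clean batch and a noisy copy; the 5% noise level
# and 64x64 size are arbitrary assumptions. Higher values (in dB) are better.
if __name__ == "__main__":
    import torch

    ref = torch.rand(2, 3, 64, 64)
    noisy = (ref + 0.05 * torch.randn_like(ref)).clamp(0, 1)
    print(PSNR(test_y_channel=False)(noisy, ref))  # shape (2,), roughly 26 dB here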
| 2,691 | 33.961039 | 100 | py |
BVQI | BVQI-master/pyiqa/archs/gmsd_arch.py | r"""GMSD Metric
Created by: https://github.com/dingkeyan93/IQA-optimization/blob/master/IQA_pytorch/GMSD.py
Modified by: Jiadi Mo (https://github.com/JiadiMo)
Refer to:
Matlab code from https://www4.comp.polyu.edu.hk/~cslzhang/IQA/GMSD/GMSD.m;
"""
import torch
from torch import nn
from torch.nn import functional as F
from pyiqa.utils.color_util import to_y_channel
from pyiqa.utils.registry import ARCH_REGISTRY
def gmsd(
x: torch.Tensor,
y: torch.Tensor,
T: int = 170,
channels: int = 3,
test_y_channel: bool = True,
) -> torch.Tensor:
r"""GMSD metric.
Args:
x: A distortion tensor. Shape :math:`(N, C, H, W)`.
y: A reference tensor. Shape :math:`(N, C, H, W)`.
T: A positive constant that supplies numerical stability.
channels: Number of channels.
test_y_channel: bool, whether to use y channel on ycbcr.
"""
if test_y_channel:
x = to_y_channel(x, 255)
y = to_y_channel(y, 255)
channels = 1
else:
x = x * 255.0
y = y * 255.0
dx = (
(torch.Tensor([[1, 0, -1], [1, 0, -1], [1, 0, -1]]) / 3.0)
.unsqueeze(0)
.unsqueeze(0)
.repeat(channels, 1, 1, 1)
.to(x)
)
dy = (
(torch.Tensor([[1, 1, 1], [0, 0, 0], [-1, -1, -1]]) / 3.0)
.unsqueeze(0)
.unsqueeze(0)
.repeat(channels, 1, 1, 1)
.to(x)
)
aveKernel = torch.ones(channels, 1, 2, 2).to(x) / 4.0
Y1 = F.conv2d(x, aveKernel, stride=2, padding=0, groups=channels)
Y2 = F.conv2d(y, aveKernel, stride=2, padding=0, groups=channels)
IxY1 = F.conv2d(Y1, dx, stride=1, padding=1, groups=channels)
IyY1 = F.conv2d(Y1, dy, stride=1, padding=1, groups=channels)
gradientMap1 = torch.sqrt(IxY1 ** 2 + IyY1 ** 2 + 1e-12)
IxY2 = F.conv2d(Y2, dx, stride=1, padding=1, groups=channels)
IyY2 = F.conv2d(Y2, dy, stride=1, padding=1, groups=channels)
gradientMap2 = torch.sqrt(IxY2 ** 2 + IyY2 ** 2 + 1e-12)
quality_map = (2 * gradientMap1 * gradientMap2 + T) / (
gradientMap1 ** 2 + gradientMap2 ** 2 + T
)
score = torch.std(quality_map.view(quality_map.shape[0], -1), dim=1)
return score
@ARCH_REGISTRY.register()
class GMSD(nn.Module):
r"""Gradient Magnitude Similarity Deviation Metric.
Args:
channels: Number of channels.
test_y_channel: bool, whether to use y channel on ycbcr.
Reference:
Xue, Wufeng, Lei Zhang, Xuanqin Mou, and Alan C. Bovik.
"Gradient magnitude similarity deviation: A highly efficient
perceptual image quality index." IEEE Transactions on Image
Processing 23, no. 2 (2013): 684-695.
"""
def __init__(self, channels: int = 3, test_y_channel: bool = True) -> None:
super(GMSD, self).__init__()
self.channels = channels
self.test_y_channel = test_y_channel
def forward(self, x: torch.Tensor, y: torch.Tensor) -> torch.Tensor:
r"""Args:
x: A distortion tensor. Shape :math:`(N, C, H, W)`.
y: A reference tensor. Shape :math:`(N, C, H, W)`.
Order of input is important.
"""
assert (
x.shape == y.shape
), f"Input and reference images should have the same shape, but got {x.shape} and {y.shape}"
score = gmsd(x, y, channels=self.channels, test_y_channel=self.test_y_channel)
return score
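# Usage sketch: GMSD on a random pair of RGB tensors in [0, 1]; shapes are
# illustrative assumptions. Lower GMSD means the gradient structure of the
# distorted image deviates less from the reference.
if __name__ == "__main__":
    import torch

    dist_img = torch.rand(1, 3, 128, 128)
    ref_img = torch.rand(1, 3, 128, 128)
    print(GMSD(test_y_channel=True)(dist_img, ref_img))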
| 3,418 | 30.657407 | 100 | py |
BVQI | BVQI-master/pyiqa/archs/.ipynb_checkpoints/niqe_arch-checkpoint.py | r"""NIQE and ILNIQE Metrics
NIQE Metric
Created by: https://github.com/xinntao/BasicSR/blob/5668ba75eb8a77e8d2dd46746a36fee0fbb0fdcd/basicsr/metrics/niqe.py
Modified by: Jiadi Mo (https://github.com/JiadiMo)
Reference:
MATLAB codes: http://live.ece.utexas.edu/research/quality/niqe_release.zip
ILNIQE Metric
Created by: Chaofeng Chen (https://github.com/chaofengc)
Reference:
- Python codes: https://github.com/IceClear/IL-NIQE/blob/master/IL-NIQE.py
- Matlab codes: https://www4.comp.polyu.edu.hk/~cslzhang/IQA/ILNIQE/Files/ILNIQE.zip
"""
import math
import numpy as np
import scipy
import scipy.io
import torch
from pyiqa.archs.fsim_arch import _construct_filters
from pyiqa.matlab_utils import (
blockproc,
conv2d,
fitweibull,
fspecial,
imfilter,
imresize,
nancov,
nanmean,
)
from pyiqa.utils.color_util import to_y_channel
from pyiqa.utils.download_util import load_file_from_url
from pyiqa.utils.registry import ARCH_REGISTRY
from .func_util import diff_round, estimate_aggd_param, normalize_img_with_guass
default_model_urls = {
"url": "https://github.com/chaofengc/IQA-PyTorch/releases/download/v0.1-weights/niqe_modelparameters.mat",
"niqe": "https://github.com/chaofengc/IQA-PyTorch/releases/download/v0.1-weights/niqe_modelparameters.mat",
"ilniqe": "https://github.com/chaofengc/IQA-PyTorch/releases/download/v0.1-weights/ILNIQE_templateModel.mat",
}
def compute_feature(
block: torch.Tensor,
ilniqe: bool = False,
) -> torch.Tensor:
"""Compute features.
    Args:
        block (Tensor): Image block in shape (b, c, h, w).
        ilniqe (bool): Whether to compute the extended IL-NIQE feature set. Default: False.
    Returns:
        Tensor: Features of shape (b, 18), or a longer feature vector when ``ilniqe`` is True.
"""
bsz = block.shape[0]
aggd_block = block[:, [0]]
alpha, beta_l, beta_r = estimate_aggd_param(aggd_block)
feat = [alpha, (beta_l + beta_r) / 2]
# distortions disturb the fairly regular structure of natural images.
# This deviation can be captured by analyzing the sample distribution of
# the products of pairs of adjacent coefficients computed along
# horizontal, vertical and diagonal orientations.
shifts = [[0, 1], [1, 0], [1, 1], [1, -1]]
for i in range(len(shifts)):
shifted_block = torch.roll(aggd_block, shifts[i], dims=(2, 3))
alpha, beta_l, beta_r = estimate_aggd_param(aggd_block * shifted_block)
# Eq. 8
mean = (beta_r - beta_l) * (
torch.lgamma(2 / alpha) - torch.lgamma(1 / alpha)
).exp()
feat.extend((alpha, mean, beta_l, beta_r))
feat = [x.reshape(bsz, 1) for x in feat]
if ilniqe:
tmp_block = block[:, 1:4]
channels = 4 - 1
shape_scale = fitweibull(tmp_block.reshape(bsz * channels, -1))
scale_shape = shape_scale[:, [1, 0]].reshape(bsz, -1)
feat.append(scale_shape)
mu = torch.mean(block[:, 4:7], dim=(2, 3))
sigmaSquare = torch.var(block[:, 4:7], dim=(2, 3))
mu_sigma = torch.stack((mu, sigmaSquare), dim=-1).reshape(bsz, -1)
feat.append(mu_sigma)
channels = 85 - 7
tmp_block = block[:, 7:85].reshape(bsz * channels, 1, *block.shape[2:])
alpha_data, beta_l_data, beta_r_data = estimate_aggd_param(tmp_block)
alpha_data = alpha_data.reshape(bsz, channels)
beta_l_data = beta_l_data.reshape(bsz, channels)
beta_r_data = beta_r_data.reshape(bsz, channels)
alpha_beta = torch.stack(
[alpha_data, (beta_l_data + beta_r_data) / 2], dim=-1
).reshape(bsz, -1)
feat.append(alpha_beta)
tmp_block = block[:, 85:109]
channels = 109 - 85
shape_scale = fitweibull(tmp_block.reshape(bsz * channels, -1))
scale_shape = shape_scale[:, [1, 0]].reshape(bsz, -1)
feat.append(scale_shape)
feat = torch.cat(feat, dim=-1)
return feat
def niqe(
img: torch.Tensor,
mu_pris_param: torch.Tensor,
cov_pris_param: torch.Tensor,
block_size_h: int = 96,
block_size_w: int = 96,
) -> torch.Tensor:
"""Calculate NIQE (Natural Image Quality Evaluator) metric.
Args:
img (Tensor): Input image.
mu_pris_param (Tensor): Mean of a pre-defined multivariate Gaussian
model calculated on the pristine dataset.
cov_pris_param (Tensor): Covariance of a pre-defined multivariate
Gaussian model calculated on the pristine dataset.
        block_size_h (int): Height of the blocks into which the image is divided.
            Default: 96 (the official recommended value).
        block_size_w (int): Width of the blocks into which the image is divided.
Default: 96 (the official recommended value).
"""
assert (
img.ndim == 4
), "Input image must be a gray or Y (of YCbCr) image with shape (b, c, h, w)."
# crop image
b, c, h, w = img.shape
num_block_h = math.floor(h / block_size_h)
num_block_w = math.floor(w / block_size_w)
img = img[..., 0 : num_block_h * block_size_h, 0 : num_block_w * block_size_w]
distparam = [] # dist param is actually the multiscale features
for scale in (1, 2): # perform on two scales (1, 2)
img_normalized = normalize_img_with_guass(img, padding="replicate")
distparam.append(
blockproc(
img_normalized,
[block_size_h // scale, block_size_w // scale],
fun=compute_feature,
)
)
if scale == 1:
img = imresize(img / 255.0, scale=0.5, antialiasing=True)
img = img * 255.0
distparam = torch.cat(distparam, -1)
# fit a MVG (multivariate Gaussian) model to distorted patch features
mu_distparam = nanmean(distparam, dim=1)
cov_distparam = nancov(distparam)
# compute niqe quality, Eq. 10 in the paper
invcov_param = torch.linalg.pinv((cov_pris_param + cov_distparam) / 2)
diff = (mu_pris_param - mu_distparam).unsqueeze(1)
quality = torch.bmm(torch.bmm(diff, invcov_param), diff.transpose(1, 2)).squeeze()
quality = torch.sqrt(quality)
return quality
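# Added note (illustrative): the quality value above is the Mahalanobis-like distance of
# Eq. 10 in the NIQE paper,
#     D = sqrt( (nu_1 - nu_2)^T ((Sigma_1 + Sigma_2) / 2)^{-1} (nu_1 - nu_2) ),
# where (nu_1, Sigma_1) describe the pristine MVG model and (nu_2, Sigma_2) the MVG fitted
# to the test image's patch features; a smaller distance means more "natural" statistics.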
def calculate_niqe(
img: torch.Tensor,
crop_border: int = 0,
test_y_channel: bool = True,
pretrained_model_path: str = None,
color_space: str = "yiq",
**kwargs,
) -> torch.Tensor:
"""Calculate NIQE (Natural Image Quality Evaluator) metric.
Args:
img (Tensor): Input image whose quality needs to be computed.
crop_border (int): Cropped pixels in each edge of an image. These
pixels are not involved in the metric calculation.
test_y_channel (Bool): Whether converted to 'y' (of MATLAB YCbCr) or 'gray'.
pretrained_model_path (str): The pretrained model path.
Returns:
Tensor: NIQE result.
"""
params = scipy.io.loadmat(pretrained_model_path)
mu_pris_param = np.ravel(params["mu_prisparam"])
cov_pris_param = params["cov_prisparam"]
mu_pris_param = torch.from_numpy(mu_pris_param).to(img)
cov_pris_param = torch.from_numpy(cov_pris_param).to(img)
mu_pris_param = mu_pris_param.repeat(img.size(0), 1)
cov_pris_param = cov_pris_param.repeat(img.size(0), 1, 1)
if test_y_channel and img.shape[1] == 3:
print(img.shape)
img = to_y_channel(img, 255, color_space)
img = diff_round(img)
img = img.to(torch.float64)
if crop_border != 0:
img = img[..., crop_border:-crop_border, crop_border:-crop_border]
niqe_result = niqe(img, mu_pris_param, cov_pris_param)
return niqe_result
def gauDerivative(sigma, in_ch=1, out_ch=1, device=None):
halfLength = math.ceil(3 * sigma)
x, y = np.meshgrid(
np.linspace(-halfLength, halfLength, 2 * halfLength + 1),
np.linspace(-halfLength, halfLength, 2 * halfLength + 1),
)
gauDerX = x * np.exp(-(x ** 2 + y ** 2) / 2 / sigma / sigma)
gauDerY = y * np.exp(-(x ** 2 + y ** 2) / 2 / sigma / sigma)
dx = torch.from_numpy(gauDerX).to(device)
dy = torch.from_numpy(gauDerY).to(device)
dx = dx.repeat(out_ch, in_ch, 1, 1)
dy = dy.repeat(out_ch, in_ch, 1, 1)
return dx, dy
def ilniqe(
img: torch.Tensor,
mu_pris_param: torch.Tensor,
cov_pris_param: torch.Tensor,
principleVectors: torch.Tensor,
meanOfSampleData: torch.Tensor,
resize: bool = True,
block_size_h: int = 84,
block_size_w: int = 84,
) -> torch.Tensor:
"""Calculate IL-NIQE (Integrated Local Natural Image Quality Evaluator) metric.
Args:
img (Tensor): Input image.
mu_pris_param (Tensor): Mean of a pre-defined multivariate Gaussian
model calculated on the pristine dataset.
cov_pris_param (Tensor): Covariance of a pre-defined multivariate
Gaussian model calculated on the pristine dataset.
principleVectors (Tensor): Features from official .mat file.
meanOfSampleData (Tensor): Features from official .mat file.
        resize (bool): Whether to resize the input image. Default: True.
        block_size_h (int): Height of the blocks into which the image is divided.
            Default: 84 (the official recommended value).
        block_size_w (int): Width of the blocks into which the image is divided.
Default: 84 (the official recommended value).
"""
assert (
img.ndim == 4
), "Input image must be a gray or Y (of YCbCr) image with shape (b, c, h, w)."
sigmaForGauDerivative = 1.66
KforLog = 0.00001
normalizedWidth = 524
minWaveLength = 2.4
sigmaOnf = 0.55
mult = 1.31
dThetaOnSigma = 1.10
scaleFactorForLoG = 0.87
scaleFactorForGaussianDer = 0.28
sigmaForDownsample = 0.9
EPS = 1e-8
scales = 3
orientations = 4
infConst = 10000
nanConst = 2000
if resize:
img = imresize(img, sizes=(normalizedWidth, normalizedWidth))
img = img.clamp(0.0, 255.0)
# crop image
b, c, h, w = img.shape
num_block_h = math.floor(h / block_size_h)
num_block_w = math.floor(w / block_size_w)
img = img[..., 0 : num_block_h * block_size_h, 0 : num_block_w * block_size_w]
ospace_weight = torch.tensor(
[
[0.3, 0.04, -0.35],
[0.34, -0.6, 0.17],
[0.06, 0.63, 0.27],
]
).to(img)
O_img = img.permute(0, 2, 3, 1) @ ospace_weight.T
O_img = O_img.permute(0, 3, 1, 2)
distparam = [] # dist param is actually the multiscale features
for scale in (1, 2): # perform on two scales (1, 2)
struct_dis = normalize_img_with_guass(
O_img[:, [2]], kernel_size=5, sigma=5.0 / 6, padding="replicate"
)
dx, dy = gauDerivative(
sigmaForGauDerivative / (scale ** scaleFactorForGaussianDer), device=img
)
Ix = conv2d(O_img, dx.repeat(3, 1, 1, 1), groups=3)
Iy = conv2d(O_img, dy.repeat(3, 1, 1, 1), groups=3)
GM = torch.sqrt(Ix ** 2 + Iy ** 2 + EPS)
Ixy = torch.stack((Ix, Iy), dim=2).reshape(
Ix.shape[0], Ix.shape[1] * 2, *Ix.shape[2:]
) # reshape to (IxO1, IxO1, IxO2, IyO2, IxO3, IyO3)
logRGB = torch.log(img + KforLog)
logRGBMS = logRGB - logRGB.mean(dim=(2, 3), keepdim=True)
Intensity = logRGBMS.sum(dim=1, keepdim=True) / np.sqrt(3)
BY = (logRGBMS[:, [0]] + logRGBMS[:, [1]] - 2 * logRGBMS[:, [2]]) / np.sqrt(6)
RG = (logRGBMS[:, [0]] - logRGBMS[:, [1]]) / np.sqrt(2)
compositeMat = torch.cat([struct_dis, GM, Intensity, BY, RG, Ixy], dim=1)
O3 = O_img[:, [2]]
# gabor filter in shape (b, ori * scale, h, w)
LGFilters = _construct_filters(
O3,
scales=scales,
orientations=orientations,
min_length=minWaveLength / (scale ** scaleFactorForLoG),
sigma_f=sigmaOnf,
mult=mult,
delta_theta=dThetaOnSigma,
use_lowpass_filter=False,
)
# reformat to scale * ori
b, _, h, w = LGFilters.shape
LGFilters = (
LGFilters.reshape(b, orientations, scales, h, w)
.transpose(1, 2)
.reshape(b, -1, h, w)
)
        # TODO: current filters need to be transposed to get the same results as matlab; find the bug
LGFilters = LGFilters.transpose(-1, -2)
fftIm = torch.fft.fft2(O3)
logResponse = []
partialDer = []
GM = []
for index in range(LGFilters.shape[1]):
filter = LGFilters[:, [index]]
response = torch.fft.ifft2(filter * fftIm)
realRes = torch.real(response)
imagRes = torch.imag(response)
partialXReal = conv2d(realRes, dx)
partialYReal = conv2d(realRes, dy)
realGM = torch.sqrt(partialXReal ** 2 + partialYReal ** 2 + EPS)
partialXImag = conv2d(imagRes, dx)
partialYImag = conv2d(imagRes, dy)
imagGM = torch.sqrt(partialXImag ** 2 + partialYImag ** 2 + EPS)
logResponse.append(realRes)
logResponse.append(imagRes)
partialDer.append(partialXReal)
partialDer.append(partialYReal)
partialDer.append(partialXImag)
partialDer.append(partialYImag)
GM.append(realGM)
GM.append(imagGM)
logResponse = torch.cat(logResponse, dim=1)
partialDer = torch.cat(partialDer, dim=1)
GM = torch.cat(GM, dim=1)
compositeMat = torch.cat((compositeMat, logResponse, partialDer, GM), dim=1)
distparam.append(
blockproc(
compositeMat,
[block_size_h // scale, block_size_w // scale],
fun=compute_feature,
ilniqe=True,
)
)
gauForDS = fspecial(math.ceil(6 * sigmaForDownsample), sigmaForDownsample).to(
img
)
filterResult = imfilter(
O_img, gauForDS.repeat(3, 1, 1, 1), padding="replicate", groups=3
)
O_img = filterResult[..., ::2, ::2]
filterResult = imfilter(
img, gauForDS.repeat(3, 1, 1, 1), padding="replicate", groups=3
)
img = filterResult[..., ::2, ::2]
distparam = torch.cat(distparam, dim=-1) # b, block_num, feature_num
distparam[distparam > infConst] = infConst
# fit a MVG (multivariate Gaussian) model to distorted patch features
coefficientsViaPCA = torch.bmm(
principleVectors.transpose(1, 2),
(distparam - meanOfSampleData.unsqueeze(1)).transpose(1, 2),
)
final_features = coefficientsViaPCA.transpose(1, 2)
b, blk_num, feat_num = final_features.shape
# remove block features with nan and compute nonan cov
cov_distparam = nancov(final_features)
# replace nan in final features with mu
mu_final_features = nanmean(final_features, dim=1, keepdim=True)
final_features_withmu = torch.where(
torch.isnan(final_features), mu_final_features, final_features
)
# compute ilniqe quality
invcov_param = torch.linalg.pinv((cov_pris_param + cov_distparam) / 2)
diff = final_features_withmu - mu_pris_param.unsqueeze(1)
quality = (torch.bmm(diff, invcov_param) * diff).sum(dim=-1)
quality = torch.sqrt(quality).mean(dim=1)
return quality
def calculate_ilniqe(
img: torch.Tensor, crop_border: int = 0, pretrained_model_path: str = None, **kwargs
) -> torch.Tensor:
"""Calculate IL-NIQE metric.
Args:
img (Tensor): Input image whose quality needs to be computed.
crop_border (int): Cropped pixels in each edge of an image. These
pixels are not involved in the metric calculation.
pretrained_model_path (str): The pretrained model path.
Returns:
Tensor: IL-NIQE result.
"""
params = scipy.io.loadmat(pretrained_model_path)
img = img * 255.0
img = diff_round(img)
# float64 precision is critical to be consistent with matlab codes
img = img.to(torch.float64)
mu_pris_param = np.ravel(params["templateModel"][0][0])
cov_pris_param = params["templateModel"][0][1]
meanOfSampleData = np.ravel(params["templateModel"][0][2])
principleVectors = params["templateModel"][0][3]
mu_pris_param = torch.from_numpy(mu_pris_param).to(img)
cov_pris_param = torch.from_numpy(cov_pris_param).to(img)
meanOfSampleData = torch.from_numpy(meanOfSampleData).to(img)
principleVectors = torch.from_numpy(principleVectors).to(img)
mu_pris_param = mu_pris_param.repeat(img.size(0), 1)
cov_pris_param = cov_pris_param.repeat(img.size(0), 1, 1)
meanOfSampleData = meanOfSampleData.repeat(img.size(0), 1)
principleVectors = principleVectors.repeat(img.size(0), 1, 1)
if crop_border != 0:
img = img[..., crop_border:-crop_border, crop_border:-crop_border]
ilniqe_result = ilniqe(
img, mu_pris_param, cov_pris_param, principleVectors, meanOfSampleData
)
return ilniqe_result
@ARCH_REGISTRY.register()
class NIQE(torch.nn.Module):
r"""Args:
channels (int): Number of processed channel.
test_y_channel (bool): whether to use y channel on ycbcr.
crop_border (int): Cropped pixels in each edge of an image. These
pixels are not involved in the metric calculation.
pretrained_model_path (str): The pretrained model path.
References:
Mittal, Anish, Rajiv Soundararajan, and Alan C. Bovik.
"Making a “completely blind” image quality analyzer."
IEEE Signal Processing Letters (SPL) 20.3 (2012): 209-212.
"""
def __init__(
self,
channels: int = 1,
test_y_channel: bool = True,
color_space: str = "yiq",
crop_border: int = 0,
pretrained_model_path: str = None,
) -> None:
super(NIQE, self).__init__()
self.channels = channels
self.test_y_channel = test_y_channel
self.color_space = color_space
self.crop_border = crop_border
if pretrained_model_path is not None:
self.pretrained_model_path = pretrained_model_path
else:
self.pretrained_model_path = load_file_from_url(default_model_urls["url"])
def forward(self, X: torch.Tensor) -> torch.Tensor:
r"""Computation of NIQE metric.
Args:
X: An input tensor. Shape :math:`(N, C, H, W)`.
Returns:
            Value of the NIQE metric; lower values indicate better perceptual quality.
"""
score = calculate_niqe(
X,
self.crop_border,
self.test_y_channel,
self.pretrained_model_path,
self.color_space,
)
return score
@ARCH_REGISTRY.register()
class ILNIQE(torch.nn.Module):
r"""Args:
channels (int): Number of processed channel.
test_y_channel (bool): whether to use y channel on ycbcr.
crop_border (int): Cropped pixels in each edge of an image. These
pixels are not involved in the metric calculation.
pretrained_model_path (str): The pretrained model path.
References:
Zhang, Lin, Lei Zhang, and Alan C. Bovik. "A feature-enriched
completely blind image quality evaluator." IEEE Transactions
on Image Processing 24.8 (2015): 2579-2591.
"""
def __init__(
self, channels: int = 3, crop_border: int = 0, pretrained_model_path: str = None
) -> None:
super(ILNIQE, self).__init__()
self.channels = channels
self.crop_border = crop_border
if pretrained_model_path is not None:
self.pretrained_model_path = pretrained_model_path
else:
self.pretrained_model_path = load_file_from_url(
default_model_urls["ilniqe"]
)
def forward(self, X: torch.Tensor) -> torch.Tensor:
r"""Computation of NIQE metric.
Args:
X: An input tensor. Shape :math:`(N, C, H, W)`.
Returns:
            Value of the IL-NIQE metric; lower values indicate better perceptual quality.
"""
score = calculate_ilniqe(X, self.crop_border, self.pretrained_model_path)
return score
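if __name__ == "__main__":
    # Minimal usage sketch (added for illustration; not part of the original pyiqa file).
    # NIQE and IL-NIQE are no-reference metrics, so only the test image is needed. Inputs
    # are assumed to be float tensors in [0, 1] of shape (N, C, H, W); on first use the
    # pretrained .mat parameters are downloaded unless a local path is supplied.
    test_img = torch.rand(1, 3, 384, 384)
    niqe_metric = NIQE(test_y_channel=True)
    print(niqe_metric(test_img))  # lower scores indicate better (more natural) quality
    # ilniqe_metric = ILNIQE()    # same interface, but considerably slower
    # print(ilniqe_metric(test_img))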
| 20,124 | 35.196043 | 120 | py |
BVQI | BVQI-master/pyiqa/losses/losses.py | import math
import torch
from torch import autograd as autograd
from torch import nn as nn
from torch.nn import functional as F
from pyiqa.utils.registry import LOSS_REGISTRY
from .loss_util import weighted_loss
_reduction_modes = ["none", "mean", "sum"]
@weighted_loss
def l1_loss(pred, target):
return F.l1_loss(pred, target, reduction="none")
@weighted_loss
def mse_loss(pred, target):
return F.mse_loss(pred, target, reduction="none")
@weighted_loss
def cross_entropy(pred, target):
return F.cross_entropy(pred, target, reduction="none")
@weighted_loss
def nll_loss(pred, target):
return F.nll_loss(pred, target, reduction="none")
@weighted_loss
def charbonnier_loss(pred, target, eps=1e-12):
return torch.sqrt((pred - target) ** 2 + eps)
@LOSS_REGISTRY.register()
class L1Loss(nn.Module):
"""L1 (mean absolute error, MAE) loss.
Args:
loss_weight (float): Loss weight for L1 loss. Default: 1.0.
reduction (str): Specifies the reduction to apply to the output.
Supported choices are 'none' | 'mean' | 'sum'. Default: 'mean'.
"""
def __init__(self, loss_weight=1.0, reduction="mean"):
super(L1Loss, self).__init__()
if reduction not in ["none", "mean", "sum"]:
raise ValueError(
f"Unsupported reduction mode: {reduction}. Supported ones are: {_reduction_modes}"
)
self.loss_weight = loss_weight
self.reduction = reduction
def forward(self, pred, target, weight=None, **kwargs):
"""
Args:
pred (Tensor): of shape (N, C, H, W). Predicted tensor.
target (Tensor): of shape (N, C, H, W). Ground truth tensor.
weight (Tensor, optional): of shape (N, C, H, W). Element-wise weights. Default: None.
"""
return self.loss_weight * l1_loss(
pred, target, weight, reduction=self.reduction
)
@LOSS_REGISTRY.register()
class MSELoss(nn.Module):
"""MSE (L2) loss.
Args:
loss_weight (float): Loss weight for MSE loss. Default: 1.0.
reduction (str): Specifies the reduction to apply to the output.
Supported choices are 'none' | 'mean' | 'sum'. Default: 'mean'.
"""
def __init__(self, loss_weight=1.0, reduction="mean"):
super(MSELoss, self).__init__()
if reduction not in ["none", "mean", "sum"]:
raise ValueError(
f"Unsupported reduction mode: {reduction}. Supported ones are: {_reduction_modes}"
)
self.loss_weight = loss_weight
self.reduction = reduction
def forward(self, pred, target, weight=None, **kwargs):
"""
Args:
pred (Tensor): of shape (N, C, H, W). Predicted tensor.
target (Tensor): of shape (N, C, H, W). Ground truth tensor.
weight (Tensor, optional): of shape (N, C, H, W). Element-wise weights. Default: None.
"""
return self.loss_weight * mse_loss(
pred, target, weight, reduction=self.reduction
)
@LOSS_REGISTRY.register()
class CrossEntropyLoss(nn.Module):
"""MSE (L2) loss.
Args:
loss_weight (float): Loss weight for MSE loss. Default: 1.0.
reduction (str): Specifies the reduction to apply to the output.
Supported choices are 'none' | 'mean' | 'sum'. Default: 'mean'.
"""
def __init__(self, loss_weight=1.0, reduction="mean"):
super().__init__()
if reduction not in ["none", "mean", "sum"]:
raise ValueError(
f"Unsupported reduction mode: {reduction}. Supported ones are: {_reduction_modes}"
)
self.loss_weight = loss_weight
self.reduction = reduction
def forward(self, pred, target, weight=None, **kwargs):
"""
Args:
pred (Tensor): of shape (N, C, H, W). Predicted tensor.
target (Tensor): of shape (N, C, H, W). Ground truth tensor.
weight (Tensor, optional): of shape (N, C, H, W). Element-wise weights. Default: None.
"""
return self.loss_weight * cross_entropy(
pred, target, weight, reduction=self.reduction
)
@LOSS_REGISTRY.register()
class NLLLoss(nn.Module):
"""MSE (L2) loss.
Args:
loss_weight (float): Loss weight for MSE loss. Default: 1.0.
reduction (str): Specifies the reduction to apply to the output.
Supported choices are 'none' | 'mean' | 'sum'. Default: 'mean'.
"""
def __init__(self, loss_weight=1.0, reduction="mean"):
super().__init__()
if reduction not in ["none", "mean", "sum"]:
raise ValueError(
f"Unsupported reduction mode: {reduction}. Supported ones are: {_reduction_modes}"
)
self.loss_weight = loss_weight
self.reduction = reduction
def forward(self, pred, target, weight=None, **kwargs):
"""
Args:
pred (Tensor): of shape (N, C, H, W). Predicted tensor.
target (Tensor): of shape (N, C, H, W). Ground truth tensor.
weight (Tensor, optional): of shape (N, C, H, W). Element-wise weights. Default: None.
"""
return self.loss_weight * nll_loss(
pred, target, weight, reduction=self.reduction
)
@LOSS_REGISTRY.register()
class CharbonnierLoss(nn.Module):
"""Charbonnier loss (one variant of Robust L1Loss, a differentiable
variant of L1Loss).
Described in "Deep Laplacian Pyramid Networks for Fast and Accurate
Super-Resolution".
Args:
loss_weight (float): Loss weight for L1 loss. Default: 1.0.
reduction (str): Specifies the reduction to apply to the output.
Supported choices are 'none' | 'mean' | 'sum'. Default: 'mean'.
eps (float): A value used to control the curvature near zero. Default: 1e-12.
"""
def __init__(self, loss_weight=1.0, reduction="mean", eps=1e-12):
super(CharbonnierLoss, self).__init__()
if reduction not in ["none", "mean", "sum"]:
raise ValueError(
f"Unsupported reduction mode: {reduction}. Supported ones are: {_reduction_modes}"
)
self.loss_weight = loss_weight
self.reduction = reduction
self.eps = eps
def forward(self, pred, target, weight=None, **kwargs):
"""
Args:
pred (Tensor): of shape (N, C, H, W). Predicted tensor.
target (Tensor): of shape (N, C, H, W). Ground truth tensor.
weight (Tensor, optional): of shape (N, C, H, W). Element-wise weights. Default: None.
"""
return self.loss_weight * charbonnier_loss(
pred, target, weight, eps=self.eps, reduction=self.reduction
)
@LOSS_REGISTRY.register()
class WeightedTVLoss(L1Loss):
"""Weighted TV loss.
Args:
loss_weight (float): Loss weight. Default: 1.0.
"""
def __init__(self, loss_weight=1.0, reduction="mean"):
if reduction not in ["mean", "sum"]:
raise ValueError(
f"Unsupported reduction mode: {reduction}. Supported ones are: mean | sum"
)
super(WeightedTVLoss, self).__init__(
loss_weight=loss_weight, reduction=reduction
)
def forward(self, pred, weight=None):
if weight is None:
y_weight = None
x_weight = None
else:
y_weight = weight[:, :, :-1, :]
x_weight = weight[:, :, :, :-1]
y_diff = super().forward(pred[:, :, :-1, :], pred[:, :, 1:, :], weight=y_weight)
x_diff = super().forward(pred[:, :, :, :-1], pred[:, :, :, 1:], weight=x_weight)
loss = x_diff + y_diff
return loss
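if __name__ == "__main__":
    # Minimal usage sketch (added for illustration; not part of the original file).
    # The regression-style losses share the (pred, target, weight=None) interface, where
    # weight is an optional element-wise mask; WeightedTVLoss only needs the prediction.
    pred = torch.rand(2, 3, 8, 8)
    target = torch.rand(2, 3, 8, 8)
    print(L1Loss()(pred, target))
    print(CharbonnierLoss(eps=1e-9)(pred, target))
    print(WeightedTVLoss()(pred))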
| 7,764 | 31.763713 | 98 | py |
BVQI | BVQI-master/pyiqa/losses/loss_util.py | import functools
from torch.nn import functional as F
def reduce_loss(loss, reduction):
"""Reduce loss as specified.
Args:
loss (Tensor): Elementwise loss tensor.
reduction (str): Options are 'none', 'mean' and 'sum'.
Returns:
Tensor: Reduced loss tensor.
"""
reduction_enum = F._Reduction.get_enum(reduction)
# none: 0, elementwise_mean:1, sum: 2
if reduction_enum == 0:
return loss
elif reduction_enum == 1:
return loss.mean()
else:
return loss.sum()
def weight_reduce_loss(loss, weight=None, reduction="mean"):
"""Apply element-wise weight and reduce loss.
Args:
loss (Tensor): Element-wise loss.
weight (Tensor): Element-wise weights. Default: None.
reduction (str): Same as built-in losses of PyTorch. Options are
'none', 'mean' and 'sum'. Default: 'mean'.
Returns:
Tensor: Loss values.
"""
# if weight is specified, apply element-wise weight
if weight is not None:
assert weight.dim() == loss.dim()
assert weight.size(1) == 1 or weight.size(1) == loss.size(1)
loss = loss * weight
# if weight is not specified or reduction is sum, just reduce the loss
if weight is None or reduction == "sum":
loss = reduce_loss(loss, reduction)
# if reduction is mean, then compute mean over weight region
elif reduction == "mean":
if weight.size(1) > 1:
weight = weight.sum()
else:
weight = weight.sum() * loss.size(1)
loss = loss.sum() / weight
return loss
def weighted_loss(loss_func):
"""Create a weighted version of a given loss function.
To use this decorator, the loss function must have the signature like
`loss_func(pred, target, **kwargs)`. The function only needs to compute
element-wise loss without any reduction. This decorator will add weight
and reduction arguments to the function. The decorated function will have
the signature like `loss_func(pred, target, weight=None, reduction='mean',
**kwargs)`.
:Example:
>>> import torch
>>> @weighted_loss
>>> def l1_loss(pred, target):
>>> return (pred - target).abs()
>>> pred = torch.Tensor([0, 2, 3])
>>> target = torch.Tensor([1, 1, 1])
>>> weight = torch.Tensor([1, 0, 1])
>>> l1_loss(pred, target)
tensor(1.3333)
>>> l1_loss(pred, target, weight)
tensor(1.5000)
>>> l1_loss(pred, target, reduction='none')
tensor([1., 1., 2.])
>>> l1_loss(pred, target, weight, reduction='sum')
tensor(3.)
"""
@functools.wraps(loss_func)
def wrapper(pred, target, weight=None, reduction="mean", **kwargs):
# get element-wise loss
loss = loss_func(pred, target, **kwargs)
loss = weight_reduce_loss(loss, weight, reduction)
return loss
return wrapper
| 2,904 | 28.948454 | 78 | py |
BVQI | BVQI-master/pyiqa/losses/iqa_losses.py | import numpy as np
import torch
from cv2 import reduce
from torch import nn as nn
from torch.nn import functional as F
from pyiqa.utils.registry import LOSS_REGISTRY
from .loss_util import weighted_loss
_reduction_modes = ["none", "mean", "sum"]
@weighted_loss
def emd_loss(pred, target, r=2):
"""
Args:
pred (Tensor): of shape (N, C). Predicted tensor.
target (Tensor): of shape (N, C). Ground truth tensor.
r (float): norm level, default l2 norm.
"""
loss = torch.abs(torch.cumsum(pred, dim=-1) - torch.cumsum(target, dim=-1)) ** r
loss = loss.mean(dim=-1) ** (1.0 / r)
return loss
@LOSS_REGISTRY.register()
class EMDLoss(nn.Module):
"""EMD (earth mover distance) loss."""
def __init__(self, loss_weight=1.0, r=2, reduction="mean"):
super(EMDLoss, self).__init__()
if reduction not in ["none", "mean", "sum"]:
raise ValueError(
f"Unsupported reduction mode: {reduction}. Supported ones are: {_reduction_modes}"
)
self.loss_weight = loss_weight
self.r = r
self.reduction = reduction
def forward(self, pred, target, weight=None, **kwargs):
return self.loss_weight * emd_loss(
pred, target, r=self.r, weight=weight, reduction=self.reduction
)
def plcc_loss(pred, target):
"""
Args:
pred (Tensor): of shape (N, 1). Predicted tensor.
target (Tensor): of shape (N, 1). Ground truth tensor.
"""
batch_size = pred.shape[0]
if batch_size > 1:
vx = pred - pred.mean()
vy = target - target.mean()
loss = F.normalize(vx, p=2, dim=0) * F.normalize(vy, p=2, dim=0)
loss = (1 - loss.sum()) / 2 # normalize to [0, 1]
else:
loss = F.l1_loss(pred, target)
return loss.mean()
@LOSS_REGISTRY.register()
class PLCCLoss(nn.Module):
"""PLCC loss, induced from Pearson’s Linear Correlation Coefficient."""
def __init__(self, loss_weight=1.0):
super(PLCCLoss, self).__init__()
self.loss_weight = loss_weight
def forward(self, pred, target):
return self.loss_weight * plcc_loss(pred, target)
@LOSS_REGISTRY.register()
class RankLoss(nn.Module):
"""Monotonicity regularization loss, will be zero when rankings of pred and target are the same.
Reference:
- https://github.com/lidq92/LinearityIQA/blob/master/IQAloss.py
"""
def __init__(self, detach=False, loss_weight=1.0):
super(RankLoss, self).__init__()
self.loss_weight = loss_weight
def forward(self, pred, target):
        if pred.size(0) > 1:
ranking_loss = F.relu((pred - pred.t()) * torch.sign((target.t() - target)))
scale = 1 + torch.max(ranking_loss.detach())
loss = ranking_loss.mean() / scale
else:
loss = F.l1_loss(pred, target.detach()) # 0 for batch with single sample.
return self.loss_weight * loss
def norm_loss_with_normalization(pred, target, p, q):
"""
Args:
pred (Tensor): of shape (N, 1). Predicted tensor.
target (Tensor): of shape (N, 1). Ground truth tensor.
"""
batch_size = pred.shape[0]
if batch_size > 1:
vx = pred - pred.mean()
vy = target - target.mean()
scale = np.power(2, p) * np.power(batch_size, max(0, 1 - p / q)) # p, q>0
norm_pred = F.normalize(vx, p=q, dim=0)
norm_target = F.normalize(vy, p=q, dim=0)
loss = torch.norm(norm_pred - norm_target, p=p) / scale
else:
loss = F.l1_loss(pred, target)
return loss.mean()
@LOSS_REGISTRY.register()
class NiNLoss(nn.Module):
"""NiN (Norm in Norm) loss
Reference:
- Dingquan Li, Tingting Jiang, and Ming Jiang. Norm-in-Norm Loss with Faster Convergence and Better
        Performance for Image Quality Assessment. ACM MM 2020.
- https://arxiv.org/abs/2008.03889
- https://github.com/lidq92/LinearityIQA
This loss can be simply described as: l1_norm(normalize(pred - pred_mean), normalize(target - target_mean))
"""
def __init__(self, loss_weight=1.0, p=1, q=2):
super(NiNLoss, self).__init__()
self.loss_weight = loss_weight
self.p = p
self.q = q
def forward(self, pred, target):
return self.loss_weight * norm_loss_with_normalization(
pred, target, self.p, self.q
)
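if __name__ == "__main__":
    # Minimal usage sketch (added for illustration; not part of the original file).
    # These losses compare predicted quality scores with subjective (MOS) labels: PLCC,
    # rank and NiN losses take a batch of scalar scores, EMD takes score distributions.
    pred_scores = torch.rand(8, 1)
    mos_labels = torch.rand(8, 1)
    print(PLCCLoss()(pred_scores, mos_labels))
    print(RankLoss()(pred_scores, mos_labels))
    print(NiNLoss()(pred_scores, mos_labels))
    pred_dist = torch.softmax(torch.rand(8, 5), dim=-1)
    gt_dist = torch.softmax(torch.rand(8, 5), dim=-1)
    print(EMDLoss()(pred_dist, gt_dist))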
| 4,414 | 29.874126 | 111 | py |
BVQI | BVQI-master/V1_extraction/extract_multi_scale_v1_features.py | import os
import cv2
import torch
import numpy as np
import pandas as pd
from time import time
from sklearn import decomposition
from torchvision.transforms import transforms
from gabor_filter import GaborFilters
# resolutions: 960*540, 480*270, 240*135, 120*67
# downsample rate: 1.0, 0.5, 0.25, 0.125
if __name__ == '__main__':
data_name = 'konvid1k'
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
# device = torch.device('cpu')
if data_name == 'konvid1k':
data_path = '/mnt/lustre/lliao/Dataset/KoNViD_1k/KoNViD_1k_videos/'
elif data_name == 'livevqc':
data_path = '/mnt/lustre/lliao/Dataset/LIVE-VQC/Video'
else:
raise NotImplementedError
feat_path = './features'
save_path = os.path.join(feat_path, data_name + 'multi_scale')
if not os.path.exists(save_path): os.makedirs(save_path)
meta_data = pd.read_csv(
os.path.join(feat_path, data_name + '_metadata.csv'))
video_num = len(meta_data)
width_list = [960, 480, 240, 120]
height_list = [540, 270, 135, 67]
#downsample_rate_list = [1.0, 0.5, 0.25, 0.125]
scale = 5
orientations = 8
kernel_size = 19
row_downsample = 4
column_downsample = 4
pca_d = 10
gb = GaborFilters(scale,
orientations, (kernel_size - 1) // 2,
row_downsample,
column_downsample,
device=device)
for vn in range(video_num):
start_time = time()
if data_name == 'konvid1k':
video_name = os.path.join(data_path,
'{}.mp4'.format(meta_data.flickr_id[vn]))
elif data_name == 'livevqc':
video_name = os.path.join(data_path, meta_data.File[vn])
video_capture = cv2.VideoCapture(video_name)
frame_num = int(video_capture.get(cv2.CAP_PROP_FRAME_COUNT))
video_width = int(video_capture.get(cv2.CAP_PROP_FRAME_WIDTH))
video_height = int(video_capture.get(cv2.CAP_PROP_FRAME_HEIGHT))
v1_features = []
transform_list = []
for i in range(len(width_list)):
width = width_list[i]
height = height_list[i]
v1_features.append(
torch.zeros(
frame_num,
(scale * orientations * round(width / column_downsample) *
round(height / row_downsample))))
transform_list.append(
transforms.Compose([
transforms.ToTensor(),
transforms.Resize((height, width))
]))
# for i in range(len(downsample_rate_list)):
# width = int(video_width * downsample_rate_list[i])
# height = int(video_height * downsample_rate_list[i])
# v1_features.append(
# torch.zeros(
# frame_num,
# (scale * orientations * round(width / column_downsample) *
# round(height / row_downsample))))
# transform_list.append(
# transforms.Compose([
# transforms.ToTensor(),
# transforms.Resize((height, width))
# ]))
count = 0
while True:
success, frame = video_capture.read()
if not success:
break
frame_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
for i in range(len(width_list)):# + len(downsample_rate_list)):
frame = transform_list[i](frame_gray)
frame_imag = torch.zeros(frame.size())
frame = torch.stack((frame, frame_imag), 3)
frame = torch.view_as_complex(frame)
frame = frame[None, :, :, :]
frame = frame.to(device)
v1_features[i][count, :] = gb(frame).detach().cpu()
count += 1
for i in range(len(width_list)):# + len(downsample_rate_list)):
v1_features[i] = torch.nan_to_num(v1_features[i])
v1_features[i] = v1_features[i].numpy()
pca = decomposition.PCA(pca_d)
v1_features[i] = pca.fit_transform(v1_features[i])
np.save(
os.path.join(
save_path,
'{}_{}.npy'.format(i, os.path.split(video_name)[-1])),
v1_features[i])
end_time = time()
print('Video {}, {}s elapsed running in {}'.format(
vn, end_time - start_time, device))
| 4,553 | 34.578125 | 80 | py |
BVQI | BVQI-master/V1_extraction/extract_v1_features_480.py | import os
import cv2
import torch
import numpy as np
import pandas as pd
from time import time
from sklearn import decomposition
from torchvision.transforms import transforms
from gabor_filter import GaborFilters
if __name__ == '__main__':
data_name = 'livevqc'
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
# device = torch.device('cpu')
if data_name == 'konvid1k':
data_path = '/mnt/lustre/lliao/Dataset/KoNViD_1k/KoNViD_1k_videos/'
elif data_name == 'livevqc':
data_path = '/mnt/lustre/lliao/Dataset/LIVE-VQC/Video'
else:
raise NotImplementedError
width = 480
height = 270
feat_path = './features'
save_path = os.path.join(feat_path, data_name + str(width))
if not os.path.exists(save_path): os.makedirs(save_path)
meta_data = pd.read_csv(
os.path.join(feat_path, data_name + '_metadata.csv'))
video_num = len(meta_data)
scale = 6
orientations = 8
kernel_size = 39
row_downsample = 4
column_downsample = 4
    trasform = transforms.Compose(
        [transforms.ToTensor(),
         transforms.Resize((height, width))])
    # Gabor filter bank used to compute the per-frame V1 responses below
    gb = GaborFilters(scale,
                      orientations, (kernel_size - 1) // 2,
                      row_downsample,
                      column_downsample,
                      device=device)
for vn in range(video_num):
if data_name == 'konvid1k':
video_name = os.path.join(data_path,
'{}.mp4'.format(meta_data.flickr_id[vn]))
elif data_name == 'livevqc':
video_name = os.path.join(data_path, meta_data.File[vn])
video_capture = cv2.VideoCapture(video_name)
frame_num = int(video_capture.get(cv2.CAP_PROP_FRAME_COUNT))
v1_features = torch.zeros(
frame_num,
(scale * orientations * round(width / column_downsample) *
round(height / row_downsample)))
start_time = time()
count = 0
while True:
success, frame = video_capture.read()
if not success:
break
frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
frame = trasform(frame)
frame_imag = torch.zeros(frame.size())
frame = torch.stack((frame, frame_imag), 3)
frame = torch.view_as_complex(frame)
frame = frame[None, :, :, :]
frame = frame.to(device)
v1_features[count, :] = gb(frame).detach().cpu()
count += 1
v1_features = torch.nan_to_num(v1_features)
v1_features = v1_features.numpy()
pca = decomposition.PCA()
v1_features = pca.fit_transform(v1_features)
end_time = time()
print('Video {}, {}s elapsed running in {}'.format(
vn, end_time - start_time, device))
np.save(
os.path.join(save_path,
os.path.split(video_name)[-1] + '.npy'), v1_features)
| 2,798 | 30.1 | 79 | py |
BVQI | BVQI-master/V1_extraction/extract_v1_features.py | import os
import cv2
import torch
import numpy as np
import pandas as pd
from time import time
from sklearn import decomposition
from torchvision.transforms import transforms
from gabor_filter import GaborFilters
if __name__ == '__main__':
data_name = 'livevqc'
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
# device = torch.device('cpu')
if data_name == 'konvid1k':
data_path = '/mnt/data/xkm/datasets/KoNViD_1k_videos/KoNViD_1k_videos/'
elif data_name == 'livevqc':
data_path = '/mnt/data/xkm/datasets/LIVE_VQC/Video'
else:
raise NotImplementedError
feat_path = './features'
save_path = os.path.join(feat_path, data_name)
if not os.path.exists(save_path): os.makedirs(save_path)
meta_data = pd.read_csv(
os.path.join(feat_path, data_name + '_metadata.csv'))
video_num = len(meta_data)
width = 480
height = 270
scale = 5
orientations = 8
kernel_size = 19
row_downsample = 4
column_downsample = 4
trasform = transforms.Compose(
[transforms.ToTensor(),
transforms.Resize((height, width))])
gb = GaborFilters(scale,
orientations, (kernel_size - 1) // 2,
row_downsample,
column_downsample,
device=device)
for vn in range(video_num):
if data_name == 'konvid1k':
video_name = os.path.join(data_path,
'{}.mp4'.format(meta_data.flickr_id[vn]))
elif data_name == 'livevqc':
video_name = os.path.join(data_path, meta_data.File[vn])
video_capture = cv2.VideoCapture(video_name)
frame_num = int(video_capture.get(cv2.CAP_PROP_FRAME_COUNT))
v1_features = torch.zeros(
frame_num,
(scale * orientations * round(width / column_downsample) *
round(height / row_downsample)))
start_time = time()
count = 0
while True:
success, frame = video_capture.read()
if not success:
break
frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
frame = trasform(frame)
frame_imag = torch.zeros(frame.size())
frame = torch.stack((frame, frame_imag), 3)
frame = torch.view_as_complex(frame)
frame = frame[None, :, :, :]
frame = frame.to(device)
v1_features[count, :] = gb(frame).detach().cpu()
count += 1
v1_features = torch.nan_to_num(v1_features)
v1_features = v1_features.numpy()
pca = decomposition.PCA()
v1_features = pca.fit_transform(v1_features)
end_time = time()
print('Video {}, {}s elapsed running in {}'.format(
vn, end_time - start_time, device))
np.save(
os.path.join(save_path,
os.path.split(video_name)[-1] + '.npy'), v1_features)
| 2,986 | 30.776596 | 79 | py |
BVQI | BVQI-master/V1_extraction/gabor_filter.py | import math
import cmath
import torch
import torch.nn as nn
class GaborFilters(nn.Module):
def __init__(self,
n_scale=5,
n_orientation=8,
kernel_radius=9,
row_downsample=4,
column_downsample=4,
device='cpu'):
super().__init__()
self.kernel_size = kernel_radius * 2 + 1
self.kernel_radius = kernel_radius
self.n_scale = n_scale
self.n_orientation = n_orientation
self.row_downsample = row_downsample
self.column_downsample = column_downsample
self.to(device)
self.gb = self.make_gabor_filters().to(device)
def make_gabor_filters(self):
kernel_size = self.kernel_size
n_scale = self.n_scale
n_orientation = self.n_orientation
gb = torch.zeros((n_scale * n_orientation, kernel_size, kernel_size),
dtype=torch.cfloat)
fmax = 0.25
gama = math.sqrt(2)
eta = math.sqrt(2)
for i in range(n_scale):
fu = fmax / (math.sqrt(2)**i)
alpha = fu / gama
beta = fu / eta
for j in range(n_orientation):
tetav = (j / n_orientation) * math.pi
g_filter = torch.zeros((kernel_size, kernel_size),
dtype=torch.cfloat)
for x in range(1, kernel_size + 1):
for y in range(1, kernel_size + 1):
xprime = (x - (
(kernel_size + 1) / 2)) * math.cos(tetav) + (y - (
(kernel_size + 1) / 2)) * math.sin(tetav)
yprime = -(x - (
(kernel_size + 1) / 2)) * math.sin(tetav) + (y - (
(kernel_size + 1) / 2)) * math.cos(tetav)
g_filter[x - 1][
y -
1] = (fu**2 / (math.pi * gama * eta)) * math.exp(-(
(alpha**2) * (xprime**2) + (beta**2) *
(yprime**2))) * cmath.exp(
1j * 2 * math.pi * fu * xprime)
gb[i * n_orientation + j] = g_filter
return gb
def forward(self, x):
batch_size = x.size(0)
cn = x.size(1)
sy = x.size(2)
sx = x.size(3)
assert cn == 1
gb = self.gb
gb = gb[:, None, :, :]
res = nn.functional.conv2d(input=x, weight=gb, padding='same')
res = res.view(batch_size, -1, sy, sx)
res = torch.abs(res)
res = res[:, :, ::self.row_downsample, :]
res = res[:, :, :, ::self.column_downsample]
res = res.reshape(batch_size, res.size(1), -1)
res = (res - torch.mean(res, 2, keepdim=True)) / torch.std(
res, 2, keepdim=True)
res = res.view(batch_size, -1)
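        # Note: the flattened feature vector has
        # n_scale * n_orientation * ceil(H / row_downsample) * ceil(W / column_downsample)
        # entries per sample; with the defaults (5 scales, 8 orientations, stride-4
        # subsampling), a 270x480 frame yields 5 * 8 * 68 * 120 = 326,400 values.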
return res
if __name__ == "__main__":
import time
from PIL import Image
from torchvision.transforms import transforms
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
# device = torch.device('cpu')
img = Image.open(
'/mnt/d/datasets/KonIQ-10k/images_512x384/826373.jpg').convert('L')
img = transforms.ToTensor()(img)
img_imag = torch.zeros(img.size())
img = torch.stack((img, img_imag), 3)
img = torch.view_as_complex(img)
img = img[None, :, :, :]
gb = GaborFilters(device=device)
img = img.to(device)
start_time = time.time()
res = gb(img)
end_time = time.time()
print(res.shape)
print('{}s elapsed running in {}'.format(end_time - start_time, device))
| 3,734 | 31.763158 | 79 | py |
BVQI | BVQI-master/V1_extraction/.ipynb_checkpoints/gabor_filter-checkpoint.py | import math
import cmath
import torch
import torch.nn as nn
class GaborFilters(nn.Module):
def __init__(self,
n_scale=5,
n_orientation=8,
kernel_radius=9,
row_downsample=4,
column_downsample=4,
device='cpu'):
super().__init__()
self.kernel_size = kernel_radius * 2 + 1
self.kernel_radius = kernel_radius
self.n_scale = n_scale
self.n_orientation = n_orientation
self.row_downsample = row_downsample
self.column_downsample = column_downsample
self.to(device)
self.gb = self.make_gabor_filters().to(device)
def make_gabor_filters(self):
kernel_size = self.kernel_size
n_scale = self.n_scale
n_orientation = self.n_orientation
gb = torch.zeros((n_scale * n_orientation, kernel_size, kernel_size),
dtype=torch.cfloat)
fmax = 0.25
gama = math.sqrt(2)
eta = math.sqrt(2)
for i in range(n_scale):
fu = fmax / (math.sqrt(2)**i)
alpha = fu / gama
beta = fu / eta
for j in range(n_orientation):
tetav = (j / n_orientation) * math.pi
g_filter = torch.zeros((kernel_size, kernel_size),
dtype=torch.cfloat)
for x in range(1, kernel_size + 1):
for y in range(1, kernel_size + 1):
xprime = (x - (
(kernel_size + 1) / 2)) * math.cos(tetav) + (y - (
(kernel_size + 1) / 2)) * math.sin(tetav)
yprime = -(x - (
(kernel_size + 1) / 2)) * math.sin(tetav) + (y - (
(kernel_size + 1) / 2)) * math.cos(tetav)
g_filter[x - 1][
y -
1] = (fu**2 / (math.pi * gama * eta)) * math.exp(-(
(alpha**2) * (xprime**2) + (beta**2) *
(yprime**2))) * cmath.exp(
1j * 2 * math.pi * fu * xprime)
gb[i * n_orientation + j] = g_filter
return gb
def forward(self, x):
batch_size = x.size(0)
cn = x.size(1)
sy = x.size(2)
sx = x.size(3)
assert cn == 1
gb = self.gb
gb = gb[:, None, :, :]
res = nn.functional.conv2d(input=x, weight=gb, padding='same')
res = res.view(batch_size, -1, sy, sx)
res = torch.abs(res)
res = res[:, :, ::self.row_downsample, :]
res = res[:, :, :, ::self.column_downsample]
res = res.reshape(batch_size, res.size(1), -1)
res = (res - torch.mean(res, 2, keepdim=True)) / torch.std(
res, 2, keepdim=True)
res = res.view(batch_size, -1)
return res
if __name__ == "__main__":
import time
from PIL import Image
from torchvision.transforms import transforms
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
# device = torch.device('cpu')
img = Image.open(
'/mnt/d/datasets/KonIQ-10k/images_512x384/826373.jpg').convert('L')
img = transforms.ToTensor()(img)
img_imag = torch.zeros(img.size())
img = torch.stack((img, img_imag), 3)
img = torch.view_as_complex(img)
img = img[None, :, :, :]
gb = GaborFilters(device=device)
img = img.to(device)
start_time = time.time()
res = gb(img)
end_time = time.time()
print(res.shape)
print('{}s elapsed running in {}'.format(end_time - start_time, device))
| 3,734 | 31.763158 | 79 | py |
BVQI | BVQI-master/V1_extraction/.ipynb_checkpoints/extract_v1_features-checkpoint.py | import os
import cv2
import torch
import numpy as np
import pandas as pd
from time import time
from sklearn import decomposition
from torchvision.transforms import transforms
from gabor_filter import GaborFilters
if __name__ == '__main__':
data_name = 'livevqc'
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
# device = torch.device('cpu')
if data_name == 'konvid1k':
data_path = '/mnt/data/xkm/datasets/KoNViD_1k_videos/KoNViD_1k_videos/'
elif data_name == 'livevqc':
data_path = '/mnt/data/xkm/datasets/LIVE_VQC/Video'
else:
raise NotImplementedError
feat_path = './features'
save_path = os.path.join(feat_path, data_name)
if not os.path.exists(save_path): os.makedirs(save_path)
meta_data = pd.read_csv(
os.path.join(feat_path, data_name + '_metadata.csv'))
video_num = len(meta_data)
width = 480
height = 270
scale = 5
orientations = 8
kernel_size = 19
row_downsample = 4
column_downsample = 4
trasform = transforms.Compose(
[transforms.ToTensor(),
transforms.Resize((height, width))])
gb = GaborFilters(scale,
orientations, (kernel_size - 1) // 2,
row_downsample,
column_downsample,
device=device)
for vn in range(video_num):
if data_name == 'konvid1k':
video_name = os.path.join(data_path,
'{}.mp4'.format(meta_data.flickr_id[vn]))
elif data_name == 'livevqc':
video_name = os.path.join(data_path, meta_data.File[vn])
video_capture = cv2.VideoCapture(video_name)
frame_num = int(video_capture.get(cv2.CAP_PROP_FRAME_COUNT))
v1_features = torch.zeros(
frame_num,
(scale * orientations * round(width / column_downsample) *
round(height / row_downsample)))
start_time = time()
count = 0
while True:
success, frame = video_capture.read()
if not success:
break
frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
frame = trasform(frame)
frame_imag = torch.zeros(frame.size())
frame = torch.stack((frame, frame_imag), 3)
frame = torch.view_as_complex(frame)
frame = frame[None, :, :, :]
frame = frame.to(device)
v1_features[count, :] = gb(frame).detach().cpu()
count += 1
v1_features = torch.nan_to_num(v1_features)
v1_features = v1_features.numpy()
pca = decomposition.PCA()
v1_features = pca.fit_transform(v1_features)
end_time = time()
print('Video {}, {}s elapsed running in {}'.format(
vn, end_time - start_time, device))
np.save(
os.path.join(save_path,
os.path.split(video_name)[-1] + '.npy'), v1_features)
| 2,986 | 30.776596 | 79 | py |
BVQI | BVQI-master/V1_extraction/.ipynb_checkpoints/extract_v1_features_480-checkpoint.py | import os
import cv2
import torch
import numpy as np
import pandas as pd
from time import time
from sklearn import decomposition
from torchvision.transforms import transforms
from gabor_filter import GaborFilters
if __name__ == '__main__':
data_name = ''
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
# device = torch.device('cpu')
if data_name == 'konvid1k':
data_path = '/mnt/lustre/lliao/Dataset/KoNViD_1k/KoNViD_1k_videos/'
elif data_name == 'livevqc':
data_path = '/mnt/lustre/lliao/Dataset/LIVE-VQC/Video'
else:
raise NotImplementedError
width = 480
height = 270
feat_path = './features'
save_path = os.path.join(feat_path, data_name + str(width))
if not os.path.exists(save_path): os.makedirs(save_path)
meta_data = pd.read_csv(
os.path.join(feat_path, data_name + '_metadata.csv'))
video_num = len(meta_data)
scale = 6
orientations = 8
kernel_size = 39
row_downsample = 4
column_downsample = 4
trasform = transforms.Compose(
[transforms.ToTensor(),
transforms.Resize((height, width))])
gb = GaborFilters(scale,
orientations, (kernel_size - 1) // 2,
row_downsample,
column_downsample,
device=device)
for vn in range(video_num):
if data_name == 'konvid1k':
video_name = os.path.join(data_path,
'{}.mp4'.format(meta_data.flickr_id[vn]))
elif data_name == 'livevqc':
video_name = os.path.join(data_path, meta_data.File[vn])
video_capture = cv2.VideoCapture(video_name)
frame_num = int(video_capture.get(cv2.CAP_PROP_FRAME_COUNT))
v1_features = torch.zeros(
frame_num,
(scale * orientations * round(width / column_downsample) *
round(height / row_downsample)))
start_time = time()
count = 0
while True:
success, frame = video_capture.read()
if not success:
break
frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
frame = trasform(frame)
frame_imag = torch.zeros(frame.size())
frame = torch.stack((frame, frame_imag), 3)
frame = torch.view_as_complex(frame)
frame = frame[None, :, :, :]
frame = frame.to(device)
v1_features[count, :] = gb(frame).detach().cpu()
count += 1
v1_features = torch.nan_to_num(v1_features)
v1_features = v1_features.numpy()
pca = decomposition.PCA()
v1_features = pca.fit_transform(v1_features)
end_time = time()
print('Video {}, {}s elapsed running in {}'.format(
vn, end_time - start_time, device))
np.save(
os.path.join(save_path,
os.path.split(video_name)[-1] + '.npy'), v1_features)
| 2,991 | 30.829787 | 79 | py |
BVQI | BVQI-master/buona_vista/datasets/fusion_datasets.py | import copy
import glob
import os
import os.path as osp
import random
from functools import lru_cache
import cv2
import decord
import numpy as np
import skvideo.io
import torch
import torchvision
from decord import VideoReader, cpu, gpu
from tqdm import tqdm
random.seed(42)
decord.bridge.set_bridge("torch")
def get_spatial_fragments(
video,
fragments_h=7,
fragments_w=7,
fsize_h=32,
fsize_w=32,
aligned=32,
nfrags=1,
random=False,
random_upsample=False,
fallback_type="upsample",
**kwargs,
):
size_h = fragments_h * fsize_h
size_w = fragments_w * fsize_w
## video: [C,T,H,W]
## situation for images
if video.shape[1] == 1:
aligned = 1
dur_t, res_h, res_w = video.shape[-3:]
ratio = min(res_h / size_h, res_w / size_w)
if fallback_type == "upsample" and ratio < 1:
ovideo = video
video = torch.nn.functional.interpolate(
video / 255.0, scale_factor=1 / ratio, mode="bilinear"
)
video = (video * 255.0).type_as(ovideo)
if random_upsample:
randratio = random.random() * 0.5 + 1
video = torch.nn.functional.interpolate(
video / 255.0, scale_factor=randratio, mode="bilinear"
)
video = (video * 255.0).type_as(ovideo)
    assert dur_t % aligned == 0, "Please provide a clip whose length is a multiple of the align size"
size = size_h, size_w
## make sure that sampling will not run out of the picture
hgrids = torch.LongTensor(
[min(res_h // fragments_h * i, res_h - fsize_h) for i in range(fragments_h)]
)
wgrids = torch.LongTensor(
[min(res_w // fragments_w * i, res_w - fsize_w) for i in range(fragments_w)]
)
hlength, wlength = res_h // fragments_h, res_w // fragments_w
if random:
print("This part is deprecated. Please remind that.")
if res_h > fsize_h:
rnd_h = torch.randint(
res_h - fsize_h, (len(hgrids), len(wgrids), dur_t // aligned)
)
else:
rnd_h = torch.zeros((len(hgrids), len(wgrids), dur_t // aligned)).int()
if res_w > fsize_w:
rnd_w = torch.randint(
res_w - fsize_w, (len(hgrids), len(wgrids), dur_t // aligned)
)
else:
rnd_w = torch.zeros((len(hgrids), len(wgrids), dur_t // aligned)).int()
else:
if hlength > fsize_h:
rnd_h = torch.randint(
hlength - fsize_h, (len(hgrids), len(wgrids), dur_t // aligned)
)
else:
rnd_h = torch.zeros((len(hgrids), len(wgrids), dur_t // aligned)).int()
if wlength > fsize_w:
rnd_w = torch.randint(
wlength - fsize_w, (len(hgrids), len(wgrids), dur_t // aligned)
)
else:
rnd_w = torch.zeros((len(hgrids), len(wgrids), dur_t // aligned)).int()
target_video = torch.zeros(video.shape[:-2] + size).to(video.device)
# target_videos = []
for i, hs in enumerate(hgrids):
for j, ws in enumerate(wgrids):
for t in range(dur_t // aligned):
t_s, t_e = t * aligned, (t + 1) * aligned
h_s, h_e = i * fsize_h, (i + 1) * fsize_h
w_s, w_e = j * fsize_w, (j + 1) * fsize_w
if random:
h_so, h_eo = rnd_h[i][j][t], rnd_h[i][j][t] + fsize_h
w_so, w_eo = rnd_w[i][j][t], rnd_w[i][j][t] + fsize_w
else:
h_so, h_eo = hs + rnd_h[i][j][t], hs + rnd_h[i][j][t] + fsize_h
w_so, w_eo = ws + rnd_w[i][j][t], ws + rnd_w[i][j][t] + fsize_w
target_video[:, t_s:t_e, h_s:h_e, w_s:w_e] = video[
:, t_s:t_e, h_so:h_eo, w_so:w_eo
]
# target_videos.append(video[:,t_s:t_e,h_so:h_eo,w_so:w_eo])
# target_video = torch.stack(target_videos, 0).reshape((dur_t // aligned, fragments, fragments,) + target_videos[0].shape).permute(3,0,4,1,5,2,6)
# target_video = target_video.reshape((-1, dur_t,) + size) ## Splicing Fragments
return target_video
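# Shape sketch (added for illustration): with the defaults, 7 x 7 fragments of 32 x 32
# pixels are cropped from a spatial grid over each frame and spliced back together, so a
# video tensor of shape (C, T, H, W) becomes (C, T, 224, 224) as long as T is a multiple
# of `aligned`, e.g.
#     frags = get_spatial_fragments(torch.randint(0, 256, (3, 32, 540, 960)).float())
#     # frags.shape == (3, 32, 224, 224)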
@lru_cache
def get_resize_function(size_h, size_w, target_ratio=1, random_crop=False):
if random_crop:
return torchvision.transforms.RandomResizedCrop(
(size_h, size_w), scale=(0.40, 1.0)
)
if target_ratio > 1:
size_h = int(target_ratio * size_w)
assert size_h > size_w
elif target_ratio < 1:
size_w = int(size_h / target_ratio)
assert size_w > size_h
return torchvision.transforms.Resize((size_h, size_w))
def get_resized_video(
video, size_h=224, size_w=224, random_crop=False, arp=False, **kwargs,
):
video = video.permute(1, 0, 2, 3)
resize_opt = get_resize_function(
size_h, size_w, video.shape[-2] / video.shape[-1] if arp else 1, random_crop
)
video = resize_opt(video).permute(1, 0, 2, 3)
return video
def get_arp_resized_video(
video, short_edge=224, train=False, **kwargs,
):
    if train: ## during training, randomly crop to a square and then resize
res_h, res_w = video.shape[-2:]
ori_short_edge = min(video.shape[-2:])
if res_h > ori_short_edge:
rnd_h = random.randrange(res_h - ori_short_edge)
video = video[..., rnd_h : rnd_h + ori_short_edge, :]
elif res_w > ori_short_edge:
rnd_w = random.randrange(res_w - ori_short_edge)
            video = video[..., :, rnd_w : rnd_w + ori_short_edge]
ori_short_edge = min(video.shape[-2:])
scale_factor = short_edge / ori_short_edge
ovideo = video
video = torch.nn.functional.interpolate(
        video / 255.0, scale_factor=scale_factor, mode="bilinear"
)
video = (video * 255.0).type_as(ovideo)
return video
def get_arp_fragment_video(
video, short_fragments=7, fsize=32, train=False, **kwargs,
):
if (
train
    ): ## during training, randomly crop to a square and then get fragments
res_h, res_w = video.shape[-2:]
ori_short_edge = min(video.shape[-2:])
if res_h > ori_short_edge:
rnd_h = random.randrange(res_h - ori_short_edge)
video = video[..., rnd_h : rnd_h + ori_short_edge, :]
elif res_w > ori_short_edge:
rnd_w = random.randrange(res_w - ori_short_edge)
            video = video[..., :, rnd_w : rnd_w + ori_short_edge]
kwargs["fsize_h"], kwargs["fsize_w"] = fsize, fsize
res_h, res_w = video.shape[-2:]
if res_h > res_w:
kwargs["fragments_w"] = short_fragments
kwargs["fragments_h"] = int(short_fragments * res_h / res_w)
else:
kwargs["fragments_h"] = short_fragments
kwargs["fragments_w"] = int(short_fragments * res_w / res_h)
return get_spatial_fragments(video, **kwargs)
def get_cropped_video(
video, size_h=224, size_w=224, **kwargs,
):
kwargs["fragments_h"], kwargs["fragments_w"] = 1, 1
kwargs["fsize_h"], kwargs["fsize_w"] = size_h, size_w
return get_spatial_fragments(video, **kwargs)
def get_single_view(
video, sample_type="aesthetic", **kwargs,
):
if sample_type.startswith("aesthetic"):
video = get_resized_video(video, **kwargs)
elif sample_type.startswith("technical"):
video = get_spatial_fragments(video, **kwargs)
elif sample_type == "original":
return video
return video
def spatial_temporal_view_decomposition(
video_path, sample_types, samplers, is_train=False, augment=False,
):
video = {}
if video_path.endswith(".yuv"):
print("This part will be deprecated due to large memory cost.")
## This is only an adaptation to LIVE-Qualcomm
ovideo = skvideo.io.vread(
video_path, 1080, 1920, inputdict={"-pix_fmt": "yuvj420p"}
)
for stype in samplers:
frame_inds = samplers[stype](ovideo.shape[0], is_train)
imgs = [torch.from_numpy(ovideo[idx]) for idx in frame_inds]
video[stype] = torch.stack(imgs, 0).permute(3, 0, 1, 2)
del ovideo
else:
vreader = VideoReader(video_path)
### Avoid duplicated video decoding!!! Important!!!!
all_frame_inds = []
frame_inds = {}
for stype in samplers:
frame_inds[stype] = samplers[stype](len(vreader), is_train)
all_frame_inds.append(frame_inds[stype])
### Each frame is only decoded one time!!!
all_frame_inds = np.concatenate(all_frame_inds, 0)
frame_dict = {idx: vreader[idx] for idx in np.unique(all_frame_inds)}
for stype in samplers:
imgs = [frame_dict[idx] for idx in frame_inds[stype]]
video[stype] = torch.stack(imgs, 0).permute(3, 0, 1, 2)
sampled_video = {}
for stype, sopt in sample_types.items():
sampled_video[stype] = get_single_view(video[stype], stype, **sopt)
return sampled_video, frame_inds
class UnifiedFrameSampler:
def __init__(
self, fsize_t, fragments_t, frame_interval=1, num_clips=1, drop_rate=0.0,
):
self.fragments_t = fragments_t
self.fsize_t = fsize_t
self.size_t = fragments_t * fsize_t
self.frame_interval = frame_interval
self.num_clips = num_clips
self.drop_rate = drop_rate
def get_frame_indices(self, num_frames, train=False):
tgrids = np.array(
[num_frames // self.fragments_t * i for i in range(self.fragments_t)],
dtype=np.int32,
)
tlength = num_frames // self.fragments_t
if tlength > self.fsize_t * self.frame_interval:
rnd_t = np.random.randint(
0, tlength - self.fsize_t * self.frame_interval, size=len(tgrids)
)
else:
rnd_t = np.zeros(len(tgrids), dtype=np.int32)
ranges_t = (
np.arange(self.fsize_t)[None, :] * self.frame_interval
+ rnd_t[:, None]
+ tgrids[:, None]
)
drop = random.sample(
list(range(self.fragments_t)), int(self.fragments_t * self.drop_rate)
)
dropped_ranges_t = []
for i, rt in enumerate(ranges_t):
if i not in drop:
dropped_ranges_t.append(rt)
return np.concatenate(dropped_ranges_t)
def __call__(self, total_frames, train=False, start_index=0):
frame_inds = []
if self.fsize_t < 0:
return np.arange(total_frames)
for i in range(self.num_clips):
frame_inds += [self.get_frame_indices(total_frames)]
frame_inds = np.concatenate(frame_inds)
frame_inds = np.mod(frame_inds + start_index, total_frames)
return frame_inds.astype(np.int32)
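# --- Hedged usage sketch (not part of the original file) ---
# With fsize_t=8, fragments_t=4, frame_interval=1 and no dropping, one clip
# draws a short run of 8 frames from each of 4 equal temporal grids, i.e.
# 32 frame indices in total. The frame count of 240 is an assumption.
def _demo_unified_frame_sampler():
    sampler = UnifiedFrameSampler(fsize_t=8, fragments_t=4, frame_interval=1)
    return sampler(240).shape # expected: (32,)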
class ViewDecompositionDataset(torch.utils.data.Dataset):
def __init__(self, opt):
## opt is a dictionary that includes options for video sampling
super().__init__()
self.weight = opt.get("weight", 0.5)
self.video_infos = []
self.ann_file = opt["anno_file"]
self.data_prefix = opt["data_prefix"]
self.opt = opt
self.sample_types = opt["sample_types"]
self.data_backend = opt.get("data_backend", "disk")
self.augment = opt.get("augment", False)
if self.data_backend == "petrel":
from petrel_client import client
self.client = client.Client(enable_mc=True)
self.phase = opt["phase"]
self.crop = opt.get("random_crop", False)
self.mean = torch.FloatTensor([123.675, 116.28, 103.53])
self.std = torch.FloatTensor([58.395, 57.12, 57.375])
self.samplers = {}
for stype, sopt in opt["sample_types"].items():
if "t_frag" not in sopt:
# resized temporal sampling for TQE in DOVER
self.samplers[stype] = UnifiedFrameSampler(
sopt["clip_len"], sopt["num_clips"], sopt["frame_interval"]
)
else:
# temporal sampling for AQE in DOVER
self.samplers[stype] = UnifiedFrameSampler(
sopt["clip_len"] // sopt["t_frag"],
sopt["t_frag"],
sopt["frame_interval"],
sopt["num_clips"],
)
print(
stype + " branch sampled frames:",
self.samplers[stype](240, self.phase == "train"),
)
if isinstance(self.ann_file, list):
self.video_infos = self.ann_file
else:
try:
with open(self.ann_file, "r") as fin:
for line in fin:
line_split = line.strip().split(",")
filename, _, _, label = line_split
label = float(label)
filename = osp.join(self.data_prefix, filename)
self.video_infos.append(dict(filename=filename, label=label))
except:
#### No Label Testing
video_filenames = []
for (root, dirs, files) in os.walk(self.data_prefix, topdown=True):
for file in files:
if file.endswith(".mp4"):
video_filenames += [os.path.join(root, file)]
print(len(video_filenames))
for filename in video_filenames:
self.video_infos.append(dict(filename=filename, label=-1))
def __getitem__(self, index):
video_info = self.video_infos[index]
filename = video_info["filename"]
label = video_info["label"]
try:
## Read Original Frames
## Process Frames
data, frame_inds = spatial_temporal_view_decomposition(
filename,
self.sample_types,
self.samplers,
self.phase == "train",
self.augment and (self.phase == "train"),
)
for k, v in data.items():
data[k] = ((v.permute(1, 2, 3, 0) - self.mean) / self.std).permute(
3, 0, 1, 2
)
data["num_clips"] = {}
for stype, sopt in self.sample_types.items():
data["num_clips"][stype] = sopt["num_clips"]
data["frame_inds"] = frame_inds
data["gt_label"] = label
data["name"] = filename # osp.basename(video_info["filename"])
except:
# exception flow
return {"name": filename}
return data
def __len__(self):
return len(self.video_infos)
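# --- Hedged construction sketch (not part of the original file) ---
# The option dict below only mirrors the keys read in __init__ above; the file
# path, label and sampling options are illustrative placeholders, not values
# taken from any real configuration.
def _demo_view_decomposition_dataset():
    opt = {
        "anno_file": [dict(filename="videos/example.mp4", label=-1)],
        "data_prefix": "videos",
        "phase": "test",
        "sample_types": {
            "technical": dict(
                fragments_h=7, fragments_w=7, fsize_h=32, fsize_w=32,
                clip_len=32, num_clips=1, frame_interval=2, aligned=32,
            ),
        },
    }
    return ViewDecompositionDataset(opt)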
| 14,666 | 34.257212 | 149 | py |
BVQI | BVQI-master/buona_vista/datasets/basic_datasets.py | import os.path as osp
import random
import cv2
import decord
import numpy as np
import skvideo.io
import torch
import torchvision
from decord import VideoReader, cpu, gpu
from tqdm import tqdm
random.seed(42)
decord.bridge.set_bridge("torch")
def get_spatial_fragments(
video,
fragments_h=7,
fragments_w=7,
fsize_h=32,
fsize_w=32,
aligned=32,
nfrags=1,
random=False,
fallback_type="upsample",
):
size_h = fragments_h * fsize_h
size_w = fragments_w * fsize_w
## situation for images
if video.shape[1] == 1:
aligned = 1
dur_t, res_h, res_w = video.shape[-3:]
ratio = min(res_h / size_h, res_w / size_w)
if fallback_type == "upsample" and ratio < 1:
ovideo = video
video = torch.nn.functional.interpolate(
video / 255.0, scale_factor=1 / ratio, mode="bilinear"
)
video = (video * 255.0).type_as(ovideo)
    assert dur_t % aligned == 0, "Please provide a clip length that is a multiple of the alignment"
size = size_h, size_w
## make sure that sampling will not run out of the picture
hgrids = torch.LongTensor(
[min(res_h // fragments_h * i, res_h - fsize_h) for i in range(fragments_h)]
)
wgrids = torch.LongTensor(
[min(res_w // fragments_w * i, res_w - fsize_w) for i in range(fragments_w)]
)
hlength, wlength = res_h // fragments_h, res_w // fragments_w
if random:
        print("This branch is deprecated. Please keep that in mind.")
if res_h > fsize_h:
rnd_h = torch.randint(
res_h - fsize_h, (len(hgrids), len(wgrids), dur_t // aligned)
)
else:
rnd_h = torch.zeros((len(hgrids), len(wgrids), dur_t // aligned)).int()
if res_w > fsize_w:
rnd_w = torch.randint(
res_w - fsize_w, (len(hgrids), len(wgrids), dur_t // aligned)
)
else:
rnd_w = torch.zeros((len(hgrids), len(wgrids), dur_t // aligned)).int()
else:
if hlength > fsize_h:
rnd_h = torch.randint(
hlength - fsize_h, (len(hgrids), len(wgrids), dur_t // aligned)
)
else:
rnd_h = torch.zeros((len(hgrids), len(wgrids), dur_t // aligned)).int()
if wlength > fsize_w:
rnd_w = torch.randint(
wlength - fsize_w, (len(hgrids), len(wgrids), dur_t // aligned)
)
else:
rnd_w = torch.zeros((len(hgrids), len(wgrids), dur_t // aligned)).int()
target_video = torch.zeros(video.shape[:-2] + size).to(video.device)
# target_videos = []
for i, hs in enumerate(hgrids):
for j, ws in enumerate(wgrids):
for t in range(dur_t // aligned):
t_s, t_e = t * aligned, (t + 1) * aligned
h_s, h_e = i * fsize_h, (i + 1) * fsize_h
w_s, w_e = j * fsize_w, (j + 1) * fsize_w
if random:
h_so, h_eo = rnd_h[i][j][t], rnd_h[i][j][t] + fsize_h
w_so, w_eo = rnd_w[i][j][t], rnd_w[i][j][t] + fsize_w
else:
h_so, h_eo = hs + rnd_h[i][j][t], hs + rnd_h[i][j][t] + fsize_h
w_so, w_eo = ws + rnd_w[i][j][t], ws + rnd_w[i][j][t] + fsize_w
target_video[:, t_s:t_e, h_s:h_e, w_s:w_e] = video[
:, t_s:t_e, h_so:h_eo, w_so:w_eo
]
# target_videos.append(video[:,t_s:t_e,h_so:h_eo,w_so:w_eo])
# target_video = torch.stack(target_videos, 0).reshape((dur_t // aligned, fragments, fragments,) + target_videos[0].shape).permute(3,0,4,1,5,2,6)
# target_video = target_video.reshape((-1, dur_t,) + size) ## Splicing Fragments
return target_video
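# --- Hedged usage sketch (not part of the original file) ---
# get_spatial_fragments splices a grid of fragments_h x fragments_w patches of
# fsize_h x fsize_w pixels into one tensor, so a dummy (C, T, H, W) video maps
# to (C, T, fragments_h * fsize_h, fragments_w * fsize_w). The input resolution
# below is an illustrative assumption only.
def _demo_get_spatial_fragments():
    dummy_video = torch.randint(0, 256, (3, 32, 540, 960), dtype=torch.uint8)
    frag = get_spatial_fragments(
        dummy_video, fragments_h=7, fragments_w=7, fsize_h=32, fsize_w=32
    )
    return frag.shape # expected: torch.Size([3, 32, 224, 224])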
class FragmentSampleFrames:
def __init__(self, fsize_t, fragments_t, frame_interval=1, num_clips=1):
self.fragments_t = fragments_t
self.fsize_t = fsize_t
self.size_t = fragments_t * fsize_t
self.frame_interval = frame_interval
self.num_clips = num_clips
def get_frame_indices(self, num_frames):
tgrids = np.array(
[num_frames // self.fragments_t * i for i in range(self.fragments_t)],
dtype=np.int32,
)
tlength = num_frames // self.fragments_t
if tlength > self.fsize_t * self.frame_interval:
rnd_t = np.random.randint(
0, tlength - self.fsize_t * self.frame_interval, size=len(tgrids)
)
else:
rnd_t = np.zeros(len(tgrids), dtype=np.int32)
ranges_t = (
np.arange(self.fsize_t)[None, :] * self.frame_interval
+ rnd_t[:, None]
+ tgrids[:, None]
)
return np.concatenate(ranges_t)
def __call__(self, total_frames, train=False, start_index=0):
frame_inds = []
for i in range(self.num_clips):
frame_inds += [self.get_frame_indices(total_frames)]
frame_inds = np.concatenate(frame_inds)
frame_inds = np.mod(frame_inds + start_index, total_frames)
return frame_inds
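# --- Hedged usage sketch (not part of the original file) ---
# With fsize_t=4, fragments_t=8 and frame_interval=2, one clip draws a short
# run of 4 frames from each of the 8 equal temporal grids, giving 32 frame
# indices in total. The frame count of 240 is an assumption.
def _demo_fragment_sample_frames():
    sampler = FragmentSampleFrames(fsize_t=4, fragments_t=8, frame_interval=2)
    return sampler(240).shape # expected: (32,)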
class SampleFrames:
def __init__(self, clip_len, frame_interval=1, num_clips=1):
self.clip_len = clip_len
self.frame_interval = frame_interval
self.num_clips = num_clips
    def _get_train_clips(self, num_frames):
        """Get clip offsets in train mode.
        It calculates the average interval between the selected clips and
        randomly shifts each clip by an offset within [0, avg_interval).
        If the total number of frames is smaller than the number of clips or
        the original clip length, it returns all-zero offsets.
        Args:
            num_frames (int): Total number of frames in the video.
        Returns:
            np.ndarray: Sampled clip offsets in train mode.
        """
ori_clip_len = self.clip_len * self.frame_interval
avg_interval = (num_frames - ori_clip_len + 1) // self.num_clips
if avg_interval > 0:
base_offsets = np.arange(self.num_clips) * avg_interval
clip_offsets = base_offsets + np.random.randint(
avg_interval, size=self.num_clips
)
elif num_frames > max(self.num_clips, ori_clip_len):
clip_offsets = np.sort(
np.random.randint(num_frames - ori_clip_len + 1, size=self.num_clips)
)
elif avg_interval == 0:
ratio = (num_frames - ori_clip_len + 1.0) / self.num_clips
clip_offsets = np.around(np.arange(self.num_clips) * ratio)
else:
            clip_offsets = np.zeros((self.num_clips,), dtype=np.int32)
return clip_offsets
    def _get_test_clips(self, num_frames, start_index=0):
        """Get clip offsets in test mode.
        Calculate the average interval between the selected clips and shift
        each clip by a fixed offset of avg_interval / 2.
        Args:
            num_frames (int): Total number of frames in the video.
        Returns:
            np.ndarray: Sampled clip offsets in test mode.
        """
ori_clip_len = self.clip_len * self.frame_interval
avg_interval = (num_frames - ori_clip_len + 1) / float(self.num_clips)
if num_frames > ori_clip_len - 1:
base_offsets = np.arange(self.num_clips) * avg_interval
clip_offsets = (base_offsets + avg_interval / 2.0).astype(np.int32)
else:
clip_offsets = np.zeros((self.num_clips,), dtype=np.int32)
return clip_offsets
def __call__(self, total_frames, train=False, start_index=0):
"""Perform the SampleFrames loading.
Args:
results (dict): The resulting dict to be modified and passed
to the next transform in pipeline.
"""
if train:
clip_offsets = self._get_train_clips(total_frames)
else:
clip_offsets = self._get_test_clips(total_frames)
frame_inds = (
clip_offsets[:, None]
+ np.arange(self.clip_len)[None, :] * self.frame_interval
)
frame_inds = np.concatenate(frame_inds)
frame_inds = frame_inds.reshape((-1, self.clip_len))
frame_inds = np.mod(frame_inds, total_frames)
frame_inds = np.concatenate(frame_inds) + start_index
return frame_inds.astype(np.int32)
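# --- Hedged usage sketch (not part of the original file) ---
# In test mode, clip offsets are spread evenly and shifted by half the average
# interval, so clip_len=32, frame_interval=2 and num_clips=4 yield 4 x 32 = 128
# frame indices. The frame count of 240 is an assumption.
def _demo_sample_frames():
    sampler = SampleFrames(clip_len=32, frame_interval=2, num_clips=4)
    return sampler(240, train=False).shape # expected: (128,)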
class FastVQAPlusPlusDataset(torch.utils.data.Dataset):
def __init__(
self,
ann_file,
data_prefix,
frame_interval=2,
aligned=32,
fragments=(8, 8, 8),
fsize=(4, 32, 32),
num_clips=1,
nfrags=1,
cache_in_memory=False,
phase="test",
fallback_type="oversample",
):
"""
        Fragment-based sampling dataset (FAST-VQA++ style).
        Args:
fragments: G_f as in the paper.
fsize: S_f as in the paper.
nfrags: number of samples (spatially) as in the paper.
num_clips: number of samples (temporally) as in the paper.
"""
self.ann_file = ann_file
self.data_prefix = data_prefix
self.frame_interval = frame_interval
self.num_clips = num_clips
self.fragments = fragments
self.fsize = fsize
self.nfrags = nfrags
self.clip_len = fragments[0] * fsize[0]
self.aligned = aligned
self.fallback_type = fallback_type
self.sampler = FragmentSampleFrames(
fsize[0], fragments[0], frame_interval, num_clips
)
self.video_infos = []
self.phase = phase
self.mean = torch.FloatTensor([123.675, 116.28, 103.53])
self.std = torch.FloatTensor([58.395, 57.12, 57.375])
if isinstance(self.ann_file, list):
self.video_infos = self.ann_file
else:
with open(self.ann_file, "r") as fin:
for line in fin:
line_split = line.strip().split(",")
filename, _, _, label = line_split
label = float(label)
filename = osp.join(self.data_prefix, filename)
self.video_infos.append(dict(filename=filename, label=label))
if cache_in_memory:
self.cache = {}
for i in tqdm(range(len(self)), desc="Caching fragments"):
self.cache[i] = self.__getitem__(i, tocache=True)
else:
self.cache = None
def __getitem__(
self, index, tocache=False, need_original_frames=False,
):
if tocache or self.cache is None:
fx, fy = self.fragments[1:]
fsx, fsy = self.fsize[1:]
video_info = self.video_infos[index]
filename = video_info["filename"]
label = video_info["label"]
if filename.endswith(".yuv"):
video = skvideo.io.vread(
filename, 1080, 1920, inputdict={"-pix_fmt": "yuvj420p"}
)
frame_inds = self.sampler(video.shape[0], self.phase == "train")
imgs = [torch.from_numpy(video[idx]) for idx in frame_inds]
else:
vreader = VideoReader(filename)
frame_inds = self.sampler(len(vreader), self.phase == "train")
frame_dict = {idx: vreader[idx] for idx in np.unique(frame_inds)}
imgs = [frame_dict[idx] for idx in frame_inds]
img_shape = imgs[0].shape
video = torch.stack(imgs, 0)
video = video.permute(3, 0, 1, 2)
if self.nfrags == 1:
vfrag = get_spatial_fragments(
video,
fx,
fy,
fsx,
fsy,
aligned=self.aligned,
fallback_type=self.fallback_type,
)
else:
vfrag = get_spatial_fragments(
video,
fx,
fy,
fsx,
fsy,
aligned=self.aligned,
fallback_type=self.fallback_type,
)
for i in range(1, self.nfrags):
vfrag = torch.cat(
(
vfrag,
get_spatial_fragments(
video,
fx,
fy,
fsx,
fsy,
aligned=self.aligned,
fallback_type=self.fallback_type,
),
),
1,
)
if tocache:
return (vfrag, frame_inds, label, img_shape)
else:
vfrag, frame_inds, label, img_shape = self.cache[index]
vfrag = ((vfrag.permute(1, 2, 3, 0) - self.mean) / self.std).permute(3, 0, 1, 2)
data = {
"video": vfrag.reshape(
(-1, self.nfrags * self.num_clips, self.clip_len) + vfrag.shape[2:]
).transpose(
0, 1
), # B, V, T, C, H, W
"frame_inds": frame_inds,
"gt_label": label,
"original_shape": img_shape,
}
if need_original_frames:
data["original_video"] = video.reshape(
(-1, self.nfrags * self.num_clips, self.clip_len) + video.shape[2:]
).transpose(0, 1)
return data
def __len__(self):
return len(self.video_infos)
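# --- Hedged construction sketch (not part of the original file) ---
# Passing a list of dict(filename=..., label=...) as ann_file skips the
# annotation-file parsing; the path and label below are placeholders. The
# defaults keep fragments (G_f) = (8, 8, 8) and fsize (S_f) = (4, 32, 32) as in
# the constructor above.
def _demo_fast_vqa_plus_plus_dataset():
    infos = [dict(filename="videos/example.mp4", label=3.5)]
    return FastVQAPlusPlusDataset(ann_file=infos, data_prefix="videos")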
class FragmentVideoDataset(torch.utils.data.Dataset):
def __init__(
self,
ann_file,
data_prefix,
clip_len=32,
frame_interval=2,
num_clips=4,
aligned=32,
fragments=7,
fsize=32,
nfrags=1,
cache_in_memory=False,
phase="test",
):
"""
        Fragment-based sampling dataset.
        Args:
fragments: G_f as in the paper.
fsize: S_f as in the paper.
nfrags: number of samples as in the paper.
"""
self.ann_file = ann_file
self.data_prefix = data_prefix
self.clip_len = clip_len
self.frame_interval = frame_interval
self.num_clips = num_clips
self.fragments = fragments
self.fsize = fsize
self.nfrags = nfrags
self.aligned = aligned
self.sampler = SampleFrames(clip_len, frame_interval, num_clips)
self.video_infos = []
self.phase = phase
self.mean = torch.FloatTensor([123.675, 116.28, 103.53])
self.std = torch.FloatTensor([58.395, 57.12, 57.375])
if isinstance(self.ann_file, list):
self.video_infos = self.ann_file
else:
with open(self.ann_file, "r") as fin:
for line in fin:
line_split = line.strip().split(",")
filename, _, _, label = line_split
label = float(label)
filename = osp.join(self.data_prefix, filename)
self.video_infos.append(dict(filename=filename, label=label))
if cache_in_memory:
self.cache = {}
for i in tqdm(range(len(self)), desc="Caching fragments"):
self.cache[i] = self.__getitem__(i, tocache=True)
else:
self.cache = None
def __getitem__(
self, index, fragments=-1, fsize=-1, tocache=False, need_original_frames=False,
):
if tocache or self.cache is None:
if fragments == -1:
fragments = self.fragments
if fsize == -1:
fsize = self.fsize
video_info = self.video_infos[index]
filename = video_info["filename"]
label = video_info["label"]
if filename.endswith(".yuv"):
video = skvideo.io.vread(
filename, 1080, 1920, inputdict={"-pix_fmt": "yuvj420p"}
)
frame_inds = self.sampler(video.shape[0], self.phase == "train")
imgs = [torch.from_numpy(video[idx]) for idx in frame_inds]
else:
vreader = VideoReader(filename)
frame_inds = self.sampler(len(vreader), self.phase == "train")
frame_dict = {idx: vreader[idx] for idx in np.unique(frame_inds)}
imgs = [frame_dict[idx] for idx in frame_inds]
img_shape = imgs[0].shape
video = torch.stack(imgs, 0)
video = video.permute(3, 0, 1, 2)
if self.nfrags == 1:
vfrag = get_spatial_fragments(
video, fragments, fragments, fsize, fsize, aligned=self.aligned
)
else:
vfrag = get_spatial_fragments(
video, fragments, fragments, fsize, fsize, aligned=self.aligned
)
for i in range(1, self.nfrags):
vfrag = torch.cat(
(
vfrag,
get_spatial_fragments(
video,
fragments,
fragments,
fsize,
fsize,
aligned=self.aligned,
),
),
1,
)
if tocache:
return (vfrag, frame_inds, label, img_shape)
else:
vfrag, frame_inds, label, img_shape = self.cache[index]
vfrag = ((vfrag.permute(1, 2, 3, 0) - self.mean) / self.std).permute(3, 0, 1, 2)
data = {
"video": vfrag.reshape(
(-1, self.nfrags * self.num_clips, self.clip_len) + vfrag.shape[2:]
).transpose(
0, 1
), # B, V, T, C, H, W
"frame_inds": frame_inds,
"gt_label": label,
"original_shape": img_shape,
}
if need_original_frames:
data["original_video"] = video.reshape(
(-1, self.nfrags * self.num_clips, self.clip_len) + video.shape[2:]
).transpose(0, 1)
return data
def __len__(self):
return len(self.video_infos)
class ResizedVideoDataset(torch.utils.data.Dataset):
def __init__(
self,
ann_file,
data_prefix,
clip_len=32,
frame_interval=2,
num_clips=4,
aligned=32,
size=224,
cache_in_memory=False,
phase="test",
):
"""
        Sample clips uniformly and resize each frame to a fixed square resolution.
"""
self.ann_file = ann_file
self.data_prefix = data_prefix
self.clip_len = clip_len
self.frame_interval = frame_interval
self.num_clips = num_clips
self.size = size
self.aligned = aligned
self.sampler = SampleFrames(clip_len, frame_interval, num_clips)
self.video_infos = []
self.phase = phase
self.mean = torch.FloatTensor([123.675, 116.28, 103.53])
self.std = torch.FloatTensor([58.395, 57.12, 57.375])
if isinstance(self.ann_file, list):
self.video_infos = self.ann_file
else:
with open(self.ann_file, "r") as fin:
for line in fin:
line_split = line.strip().split(",")
filename, _, _, label = line_split
label = float(label)
filename = osp.join(self.data_prefix, filename)
self.video_infos.append(dict(filename=filename, label=label))
if cache_in_memory:
self.cache = {}
for i in tqdm(range(len(self)), desc="Caching resized videos"):
self.cache[i] = self.__getitem__(i, tocache=True)
else:
self.cache = None
def __getitem__(self, index, tocache=False, need_original_frames=False):
if tocache or self.cache is None:
video_info = self.video_infos[index]
filename = video_info["filename"]
label = video_info["label"]
vreader = VideoReader(filename)
frame_inds = self.sampler(len(vreader), self.phase == "train")
frame_dict = {idx: vreader[idx] for idx in np.unique(frame_inds)}
imgs = [frame_dict[idx] for idx in frame_inds]
img_shape = imgs[0].shape
video = torch.stack(imgs, 0)
video = video.permute(3, 0, 1, 2)
            video = torch.nn.functional.interpolate(video, size=(self.size, self.size))
            # The resized tensor is what gets cached and normalized downstream.
            vfrag = video
            if tocache:
                return (vfrag, frame_inds, label, img_shape)
else:
vfrag, frame_inds, label, img_shape = self.cache[index]
vfrag = ((vfrag.permute(1, 2, 3, 0) - self.mean) / self.std).permute(3, 0, 1, 2)
data = {
"video": vfrag.reshape(
(-1, self.num_clips, self.clip_len) + vfrag.shape[2:]
).transpose(
0, 1
), # B, V, T, C, H, W
"frame_inds": frame_inds,
"gt_label": label,
"original_shape": img_shape,
}
if need_original_frames:
data["original_video"] = video.reshape(
                (-1, self.num_clips, self.clip_len) + video.shape[2:]
).transpose(0, 1)
return data
def __len__(self):
return len(self.video_infos)
class CroppedVideoDataset(FragmentVideoDataset):
def __init__(
self,
ann_file,
data_prefix,
clip_len=32,
frame_interval=2,
num_clips=4,
aligned=32,
size=224,
ncrops=1,
cache_in_memory=False,
phase="test",
):
"""
        Treat cropping as a special case of fragment sampling on a 1x1 grid.
"""
super().__init__(
ann_file,
data_prefix,
clip_len=clip_len,
frame_interval=frame_interval,
num_clips=num_clips,
aligned=aligned,
fragments=1,
fsize=224,
nfrags=ncrops,
cache_in_memory=cache_in_memory,
phase=phase,
)
class FragmentImageDataset(torch.utils.data.Dataset):
def __init__(
self,
ann_file,
data_prefix,
fragments=7,
fsize=32,
nfrags=1,
cache_in_memory=False,
phase="test",
):
self.ann_file = ann_file
self.data_prefix = data_prefix
self.fragments = fragments
self.fsize = fsize
self.nfrags = nfrags
self.image_infos = []
self.phase = phase
self.mean = torch.FloatTensor([123.675, 116.28, 103.53])
self.std = torch.FloatTensor([58.395, 57.12, 57.375])
if isinstance(self.ann_file, list):
self.image_infos = self.ann_file
else:
with open(self.ann_file, "r") as fin:
for line in fin:
line_split = line.strip().split(",")
filename, _, _, label = line_split
label = float(label)
filename = osp.join(self.data_prefix, filename)
self.image_infos.append(dict(filename=filename, label=label))
if cache_in_memory:
self.cache = {}
for i in tqdm(range(len(self)), desc="Caching fragments"):
self.cache[i] = self.__getitem__(i, tocache=True)
else:
self.cache = None
def __getitem__(
self, index, fragments=-1, fsize=-1, tocache=False, need_original_frames=False
):
if tocache or self.cache is None:
if fragments == -1:
fragments = self.fragments
if fsize == -1:
fsize = self.fsize
image_info = self.image_infos[index]
filename = image_info["filename"]
label = image_info["label"]
try:
img = torchvision.io.read_image(filename)
except:
img = cv2.imread(filename)
img = torch.from_numpy(img[:, :, [2, 1, 0]]).permute(2, 0, 1)
img_shape = img.shape[1:]
image = img.unsqueeze(1)
if self.nfrags == 1:
ifrag = get_spatial_fragments(image, fragments, fragments, fsize, fsize)
else:
ifrag = get_spatial_fragments(image, fragments, fragments, fsize, fsize)
for i in range(1, self.nfrags):
ifrag = torch.cat(
(
ifrag,
get_spatial_fragments(
image, fragments, fragments, fsize, fsize
),
),
1,
)
if tocache:
return (ifrag, label, img_shape)
else:
ifrag, label, img_shape = self.cache[index]
if self.nfrags == 1:
ifrag = (
((ifrag.permute(1, 2, 3, 0) - self.mean) / self.std)
.squeeze(0)
.permute(2, 0, 1)
)
else:
### During testing, one image as a batch
ifrag = (
((ifrag.permute(1, 2, 3, 0) - self.mean) / self.std)
.squeeze(0)
.permute(0, 3, 1, 2)
)
data = {
"image": ifrag,
"gt_label": label,
"original_shape": img_shape,
"name": filename,
}
if need_original_frames:
data["original_image"] = image.squeeze(1)
return data
def __len__(self):
return len(self.image_infos)
class ResizedImageDataset(torch.utils.data.Dataset):
def __init__(
self, ann_file, data_prefix, size=224, cache_in_memory=False, phase="test",
):
self.ann_file = ann_file
self.data_prefix = data_prefix
self.size = size
self.image_infos = []
self.phase = phase
self.mean = torch.FloatTensor([123.675, 116.28, 103.53])
self.std = torch.FloatTensor([58.395, 57.12, 57.375])
if isinstance(self.ann_file, list):
self.image_infos = self.ann_file
else:
with open(self.ann_file, "r") as fin:
for line in fin:
line_split = line.strip().split(",")
filename, _, _, label = line_split
label = float(label)
filename = osp.join(self.data_prefix, filename)
self.image_infos.append(dict(filename=filename, label=label))
if cache_in_memory:
self.cache = {}
for i in tqdm(range(len(self)), desc="Caching fragments"):
self.cache[i] = self.__getitem__(i, tocache=True)
else:
self.cache = None
    def __getitem__(self, index, tocache=False, need_original_frames=False):
        if tocache or self.cache is None:
            image_info = self.image_infos[index]
            filename = image_info["filename"]
            label = image_info["label"]
            img = torchvision.io.read_image(filename)
            img_shape = img.shape[1:]
            image = img.unsqueeze(1)
            # Resize directly to the fixed square resolution; this dataset does
            # not sample spatial fragments.
            ifrag = torch.nn.functional.interpolate(
                image.float(), size=(self.size, self.size)
            ).type_as(image)
if tocache:
return (ifrag, label, img_shape)
else:
ifrag, label, img_shape = self.cache[index]
ifrag = (
((ifrag.permute(1, 2, 3, 0) - self.mean) / self.std)
.squeeze(0)
.permute(2, 0, 1)
)
data = {
"image": ifrag,
"gt_label": label,
"original_shape": img_shape,
}
if need_original_frames:
data["original_image"] = image.squeeze(1)
return data
def __len__(self):
return len(self.image_infos)
class CroppedImageDataset(FragmentImageDataset):
def __init__(
self,
ann_file,
data_prefix,
size=224,
ncrops=1,
cache_in_memory=False,
phase="test",
):
"""
        Treat cropping as a special case of fragment sampling on a 1x1 grid.
"""
super().__init__(
ann_file,
data_prefix,
fragments=1,
fsize=224,
nfrags=ncrops,
cache_in_memory=cache_in_memory,
phase=phase,
)
| 29,085 | 34.776138 | 149 | py |
MachineUnlearningPy | MachineUnlearningPy-master/doc/conf.py | # -*- coding: utf-8 -*-
#
# Configuration file for the Sphinx documentation builder.
#
# This file does only contain a selection of the most common options. For a
# full list see the documentation:
# http://www.sphinx-doc.org/en/master/config
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
sys.path.insert(0, os.path.abspath('..'))
# -- Project information -----------------------------------------------------
project = 'LensKit'
copyright = '2018 Boise State University'
author = 'Michael D. Ekstrand'
# The short X.Y version
version = '0.6.1'
# The full version, including alpha/beta/rc tags
release = '0.6.1'
# -- General configuration ---------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'nbsphinx',
'recommonmark',
'sphinx.ext.napoleon',
'sphinx.ext.autodoc',
'sphinx.ext.intersphinx',
'sphinx.ext.mathjax'
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path .
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
highlight_language = 'python3'
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
# html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
html_theme_options = {
'github_user': 'lenskit',
'github_repo': 'lkpy',
'travis_button': False,
'canonical_url': 'https://lkpy.lenskit.org/',
'font_family': 'Charter, serif'
# 'font_family': '"Source Sans Pro", "Georgia Pro", Georgia, serif',
# 'font_size': '15px',
# 'head_font_family': '"Merriweather Sans", "Arial", sans-serif',
# 'code_font_size': '1em',
# 'code_font_family': '"Source Code Pro", "Consolas", "Menlo", sans-serif'
}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# The default sidebars (for documents that don't match any pattern) are
# defined by theme itself. Builtin themes are using these templates by
# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',
# 'searchbox.html']``.
#
# html_sidebars = {}
# -- Options for HTMLHelp output ---------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'LensKitdoc'
# -- Options for LaTeX output ------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'LensKit.tex', 'LensKit Documentation',
'Michael D. Ekstrand', 'manual'),
]
# -- Options for manual page output ------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'lenskit', 'LensKit Documentation',
[author], 1)
]
# -- Options for Texinfo output ----------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'LensKit', 'LensKit Documentation',
author, 'LensKit', 'One line description of project.',
'Miscellaneous'),
]
# -- Extension configuration -------------------------------------------------
# -- Options for intersphinx extension ---------------------------------------
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {
'python': ('https://docs.python.org/3/', None),
'pandas': ('http://pandas.pydata.org/pandas-docs/stable/', None),
'numpy': ('https://docs.scipy.org/doc/numpy/', None),
'scipy': ('https://docs.scipy.org/doc/scipy/reference/', None),
'hpfrec': ('https://hpfrec.readthedocs.io/en/latest/', None),
'implicit': ('https://implicit.readthedocs.io/en/latest/', None),
'scikit': ('https://scikit-learn.org/stable/', None),
'tqdm': ('https://tqdm.github.io/', None)
}
| 6,129 | 31.263158 | 79 | py |
battery-historian | battery-historian-master/scripts/historian.py | #!/usr/bin/python
"""Legacy Historian script for analyzing Android bug reports."""
# Copyright 2016 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# TO USE: (see also usage() below)
# adb shell dumpsys batterystats --enable full-wake-history (post-KitKat only)
# adb shell dumpsys batterystats --reset
# Optionally start powermonitor logging:
# For example, if using a Monsoon:
# if device/host clocks are not synced, run historian.py -v
# cts/tools/utils/monsoon.py --serialno 2294 --hz 1 --samples 100000 \
# -timestamp | tee monsoon.out
# ...let device run a while...
# stop monsoon.py
# adb bugreport > bugreport.txt
# ./historian.py -p monsoon.out bugreport.txt
import collections
import datetime
import fileinput
import getopt
import re
import StringIO
import subprocess
import sys
import time
POWER_DATA_FILE_TIME_OFFSET = 0 # deal with any clock mismatch.
BLAME_CATEGORY = "wake_lock_in" # category to assign power blame to.
ROWS_TO_SUMMARIZE = ["wake_lock", "running"] # -s: summarize these rows
getopt_debug = 0
getopt_bill_extra_secs = 0
getopt_power_quanta = 15 # slice powermonitor data this many seconds,
# to avoid crashing visualizer
getopt_power_data_file = False
getopt_proc_name = ""
getopt_highlight_category = ""
getopt_show_all_wakelocks = False
getopt_sort_by_power = True
getopt_summarize_pct = -1
getopt_report_filename = ""
getopt_generate_chart_only = False
getopt_disable_chart_drawing = False
def usage():
"""Print usage of the script."""
print "\nUsage: %s [OPTIONS] [FILE]\n" % sys.argv[0]
print " -a: show all wakelocks (don't abbreviate system wakelocks)"
print " -c: disable drawing of chart"
print " -d: debug mode, output debugging info for this program"
print (" -e TIME: extend billing an extra TIME seconds after each\n"
" wakelock, or until the next wakelock is seen. Useful for\n"
" accounting for modem power overhead.")
print " -h: print this message."
print (" -m: generate output that can be embedded in an existing page.\n"
" HTML header and body tags are not outputted.")
print (" -n [CATEGORY=]PROC: output another row containing only processes\n"
" whose name matches uid of PROC in CATEGORY.\n"
" If CATEGORY is not specified, search in wake_lock_in.")
print (" -p FILE: analyze FILE containing power data. Format per\n"
" line: <timestamp in epoch seconds> <amps>")
print (" -q TIME: quantize data on power row in buckets of TIME\n"
" seconds (default %d)" % getopt_power_quanta)
print " -r NAME: report input file name as NAME in HTML."
print (" -s PCT: summarize certain useful rows with additional rows\n"
" showing percent time spent over PCT% in each.")
print " -t: sort power report by wakelock duration instead of charge"
print " -v: synchronize device time before collecting power data"
print "\n"
sys.exit(1)
def parse_time(s, fmt):
  """Parses a human readable duration string into seconds.
  Takes a human readable duration string like '1d2h3m4s5ms' and returns
  the equivalent duration in seconds as a float.
  Args:
    s: Duration string
    fmt: A regular expression pattern used to parse the duration string
  Returns:
    A float indicating the duration in seconds.
"""
if s == "0": return 0.0
p = re.compile(fmt)
match = p.search(s)
try:
d = match.groupdict()
except IndexError:
return -1.0
ret = 0.0
if d["day"]: ret += float(d["day"])*60*60*24
if d["hrs"]: ret += float(d["hrs"])*60*60
if d["min"]: ret += float(d["min"])*60
if d["sec"]: ret += float(d["sec"])
if d["ms"]: ret += float(d["ms"])/1000
return ret
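# --- Hedged sketch (not part of the original script) ---
# With the legacy "-..."-style duration format used by LegacyFormatConverter
# below, parse_time returns the duration in seconds, e.g.
# "-1d2h3m4s5ms" -> 93784.005.
def _demo_parse_time():
  legacy_fmt = (r"\-((?P<day>\d+)d)?((?P<hrs>\d+)h)?((?P<min>\d+)m)?"
                r"((?P<sec>\d+)s)?((?P<ms>\d+)ms)?$")
  return parse_time("-1d2h3m4s5ms", legacy_fmt) # expected: 93784.005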
def time_float_to_human(t, show_complete_time):
if show_complete_time:
return time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(t))
else:
return time.strftime("%H:%M:%S", time.localtime(t))
def abbrev_timestr(s):
"""Chop milliseconds off of a time string, if present."""
arr = s.split("s")
if len(arr) < 3: return "0s"
return arr[0]+"s"
def timestr_to_jsdate(timestr):
return "new Date(%s * 1000)" % timestr
def format_time(delta_time):
"""Return a time string representing time past since initial event."""
if not delta_time:
return str(0)
timestr = "+"
datet = datetime.datetime.utcfromtimestamp(delta_time)
if delta_time > 24 * 60 * 60:
timestr += str(datet.day - 1) + datet.strftime("d%Hh%Mm%Ss")
elif delta_time > 60 * 60:
timestr += datet.strftime("%Hh%Mm%Ss").lstrip("0")
elif delta_time > 60:
timestr += datet.strftime("%Mm%Ss").lstrip("0")
elif delta_time > 1:
timestr += datet.strftime("%Ss").lstrip("0")
ms = datet.microsecond / 1000.0
timestr += "%03dms" % ms
return timestr
def format_duration(dur_ms):
"""Return a time string representing the duration in human readable format."""
if not dur_ms:
return "0ms"
ms = dur_ms % 1000
dur_ms = (dur_ms - ms) / 1000
secs = dur_ms % 60
dur_ms = (dur_ms - secs) / 60
mins = dur_ms % 60
hrs = (dur_ms - mins) / 60
out = ""
if hrs > 0:
out += "%dh" % hrs
if mins > 0:
out += "%dm" % mins
if secs > 0:
out += "%ds" % secs
if ms > 0 or not out:
out += "%dms" % ms
return out
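# --- Hedged sketch (not part of the original script) ---
# format_duration expects a duration in milliseconds, so 3723004 ms renders as
# "1h2m3s4ms". The value is an illustrative assumption.
def _demo_format_duration():
  return format_duration(3723004) # expected: "1h2m3s4ms"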
def get_event_category(e):
e = e.lstrip("+-")
earr = e.split("=")
return earr[0]
def get_quoted_region(e):
e = e.split("\"")[1]
return e
def get_after_equal(e):
e = e.split("=")[1]
return e
def get_wifi_suppl_state(e):
try:
e = get_after_equal(e)
return e.split("(")[0]
except IndexError:
return ""
def get_event_subcat(cat, e):
  """Get the subcategory of a category from an event string.
  Subcategories can be used to distinguish simultaneous entities
  within one category. To track possible concurrent instances,
  add the category name to concurrent_cat. The default is to track
  events using only the category name.
Args:
cat: Category name
e: Event name
Returns:
A string that is the subcategory of the event. Returns
the substring after category name if not empty and cat
is one of the categories tracked by concurrent_cat.
Default subcategory is the empty string.
"""
concurrent_cat = {"wake_lock_in", "sync", "top", "job", "conn"}
if cat in concurrent_cat:
try:
return get_after_equal(e)
except IndexError:
pass
return ""
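# --- Hedged sketch (not part of the original script) ---
# Categories tracked for concurrency keep the text after "=" as their
# subcategory, while all other categories collapse to the empty subcategory.
# The event strings below are made up.
def _demo_get_event_subcat():
  concurrent = get_event_subcat("wake_lock_in", "+wake_lock_in=1001:\"*alarm*\"")
  simple = get_event_subcat("screen", "+screen")
  return concurrent, simple # expected: ('1001:"*alarm*"', '')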
def get_proc_pair(e):
if ":" in e:
proc_pair = get_after_equal(e)
return proc_pair.split(":", 1)
else:
return ("", "")
def as_to_mah(a):
return a * 1000 / 60 / 60
def apply_fn_over_range(fn, start_time, end_time, arglist):
"""Apply a given function per second quanta over a time range.
Args:
fn: The function to apply
start_time: The starting time of the whole duration
end_time: The ending time of the whole duration
arglist: Additional argument list
Returns:
A list of results generated by applying the function
over the time range.
"""
results = []
cursor = start_time
while cursor < end_time:
cursor_int = int(cursor)
next_cursor = float(cursor_int + 1)
if next_cursor > end_time: next_cursor = end_time
time_this_quanta = next_cursor - cursor
results.append(fn(cursor_int, time_this_quanta, *arglist))
cursor = next_cursor
return results
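# --- Hedged sketch (not part of the original script) ---
# The range is sliced into one-second quanta and
# fn(second, fraction_of_that_second, *arglist) is called per quantum, so
# summing the fractions recovers the total duration. The times are made up.
def _demo_apply_fn_over_range():
  fractions = apply_fn_over_range(lambda sec, frac: frac, 10.5, 13.2, [])
  return sum(fractions) # expected: approximately 2.7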
def space_escape(match):
value = match.group()
p = re.compile(r"\s+")
return p.sub("_", value)
def parse_reset_time(line):
line = line.strip()
line = line.split("RESET:TIME: ", 1)[1]
st = time.strptime(line, "%Y-%m-%d-%H-%M-%S")
return time.mktime(st)
def is_file_legacy_mode(input_file):
"""Autodetect legacy (K and earlier) format."""
detection_on = False
for line in fileinput.input(input_file):
if not detection_on and line.startswith("Battery History"):
detection_on = True
if not detection_on:
continue
split_line = line.split()
if not split_line:
continue
line_time = split_line[0]
if "+" not in line_time and "-" not in line_time:
continue
fileinput.close()
return line_time[0] == "-"
return False
def is_emit_event(e):
return e[0] != "+"
def is_standalone_event(e):
return not (e[0] == "+" or e[0] == "-")
def is_proc_event(e):
return e.startswith("+proc")
def autovivify():
"""Returns a multidimensional dict."""
return collections.defaultdict(autovivify)
def swap(swap_list, first, second):
swap_list[first], swap_list[second] = swap_list[second], swap_list[first]
def add_emit_event(emit_dict, cat, name, start, end):
"""Saves a new event into the dictionary that will be visualized."""
newevent = (name, int(start), int(end))
if end < start:
print "BUG: end time before start time: %s %s %s<br>" % (name,
start,
end)
else:
if getopt_debug:
print "Stored emitted event: %s<br>" % str(newevent)
if cat in emit_dict:
emit_dict[cat].append(newevent)
else:
emit_dict[cat] = [newevent]
def sync_time():
subprocess.call(["adb", "root"])
subprocess.call(["sleep", "3"])
start_time = int(time.time())
while int(time.time()) == start_time:
pass
curr_time = time.strftime("%Y%m%d.%H%M%S", time.localtime())
subprocess.call(["adb", "shell", "date", "-s", curr_time])
sys.exit(0)
def parse_search_option(cmd):
global getopt_proc_name, getopt_highlight_category
if "=" in cmd:
getopt_highlight_category = cmd.split("=")[0]
getopt_proc_name = cmd.split("=")[1]
else:
getopt_highlight_category = "wake_lock_in"
getopt_proc_name = cmd
def parse_argv():
"""Parse argument and set up globals."""
global getopt_debug, getopt_bill_extra_secs, getopt_power_quanta
global getopt_sort_by_power, getopt_power_data_file
global getopt_summarize_pct, getopt_show_all_wakelocks
global getopt_report_filename
global getopt_generate_chart_only
global getopt_disable_chart_drawing
try:
opts, argv_rest = getopt.getopt(sys.argv[1:],
"acde:hmn:p:q:r:s:tv", ["help"])
except getopt.GetoptError as err:
print "<pre>\n"
print str(err)
usage()
try:
for o, a in opts:
if o == "-a": getopt_show_all_wakelocks = True
if o == "-c": getopt_disable_chart_drawing = True
if o == "-d": getopt_debug = True
if o == "-e": getopt_bill_extra_secs = int(a)
if o in ("-h", "--help"): usage()
if o == "-m": getopt_generate_chart_only = True
if o == "-n": parse_search_option(a)
if o == "-p": getopt_power_data_file = a
if o == "-q": getopt_power_quanta = int(a)
if o == "-r": getopt_report_filename = str(a)
if o == "-s": getopt_summarize_pct = int(a)
if o == "-t": getopt_sort_by_power = False
if o == "-v": sync_time()
except ValueError as err:
print str(err)
usage()
if not argv_rest:
usage()
return argv_rest
class Printer(object):
"""Organize and render the visualizer."""
_default_color = "#4070cf"
# -n option is represented by "highlight". All the other names specified
# in _print_setting are the same as category names.
_print_setting = [
("battery_level", "#4070cf"),
("plugged", "#2e8b57"),
("screen", "#cbb69d"),
("top", "#dc3912"),
("sync", "#9900aa"),
("wake_lock_pct", "#6fae11"),
("wake_lock", "#cbb69d"),
("highlight", "#4070cf"),
("running_pct", "#6fae11"),
("running", "#990099"),
("wake_reason", "#b82e2e"),
("wake_lock_in", "#ff33cc"),
("job", "#cbb69d"),
("mobile_radio", "#aa0000"),
("data_conn", "#4070cf"),
("conn", "#ff6a19"),
("activepower", "#dd4477"),
("device_idle", "#37ff64"),
("motion", "#4070cf"),
("active", "#119fc8"),
("power_save", "#ff2222"),
("wifi", "#119fc8"),
("wifi_full_lock", "#888888"),
("wifi_scan", "#888888"),
("wifi_multicast", "#888888"),
("wifi_radio", "#888888"),
("wifi_running", "#109618"),
("wifi_suppl", "#119fc8"),
("wifi_signal_strength", "#9900aa"),
("phone_signal_strength", "#dc3912"),
("phone_scanning", "#dda0dd"),
("audio", "#990099"),
("phone_in_call", "#cbb69d"),
("bluetooth", "#cbb69d"),
("phone_state", "#dc3912"),
("signal_strength", "#119fc8"),
("video", "#cbb69d"),
("flashlight", "#cbb69d"),
("low_power", "#109618"),
("fg", "#dda0dd"),
("gps", "#ff9900"),
("reboot", "#ddff77"),
("power", "#ff2222"),
("status", "#9ac658"),
("health", "#888888"),
("plug", "#888888"),
("charging", "#888888"),
("pkginst", "#cbb69d"),
("pkgunin", "#cbb69d")]
_ignore_categories = ["user", "userfg"]
def __init__(self):
self._print_setting_cats = set()
for cat in self._print_setting:
self._print_setting_cats.add(cat[0])
def combine_wifi_states(self, event_list, start_time):
"""Discard intermediate states and combine events chronologically."""
tracking_states = ["disconn", "completed", "disabled", "scanning"]
selected_event_list = []
for event in event_list:
state = get_wifi_suppl_state(event[0])
if state in tracking_states:
selected_event_list.append(event)
if len(selected_event_list) <= 1:
return set(selected_event_list)
event_name = "wifi_suppl="
for e in selected_event_list:
state = get_wifi_suppl_state(e[0])
event_name += (state + "->")
event_name = event_name[:-2]
sample_event = selected_event_list[0][0]
timestr_start = sample_event.find("(")
event_name += sample_event[timestr_start:]
return set([(event_name, start_time, start_time)])
def aggregate_events(self, emit_dict):
"""Combine events with the same name occurring during the same second.
Aggregate events to keep visualization from being so noisy.
Args:
emit_dict: A dict containing events.
Returns:
A dict with repeated events happening within one sec removed.
"""
output_dict = {}
for cat, events in emit_dict.iteritems():
output_dict[cat] = []
start_dict = {}
for event in events:
start_time = event[1]
if start_time in start_dict:
start_dict[start_time].append(event)
else:
start_dict[start_time] = [event]
for start_time, event_list in start_dict.iteritems():
if cat == "wifi_suppl":
event_set = self.combine_wifi_states(event_list, start_time)
else:
event_set = set(event_list) # uniqify
for event in event_set:
output_dict[cat].append(event)
return output_dict
def print_emit_dict(self, cat, emit_dict):
for e in emit_dict[cat]:
if cat == "wake_lock":
cat_name = "wake_lock *"
else:
cat_name = cat
print "['%s', '%s', %s, %s]," % (cat_name, e[0],
timestr_to_jsdate(e[1]),
timestr_to_jsdate(e[2]))
def print_highlight_dict(self, highlight_dict):
catname = getopt_proc_name + " " + getopt_highlight_category
if getopt_highlight_category in highlight_dict:
for e in highlight_dict[getopt_highlight_category]:
print "['%s', '%s', %s, %s]," % (catname, e[0],
timestr_to_jsdate(e[1]),
timestr_to_jsdate(e[2]))
  def print_events(self, emit_dict, highlight_dict):
    """Print category data in the order of _print_setting.
Args:
emit_dict: Major event dict.
highlight_dict: Additional event information for -n option.
"""
emit_dict = self.aggregate_events(emit_dict)
highlight_dict = self.aggregate_events(highlight_dict)
cat_count = 0
for i in range(0, len(self._print_setting)):
cat = self._print_setting[i][0]
if cat in emit_dict:
self.print_emit_dict(cat, emit_dict)
cat_count += 1
if cat == "highlight":
self.print_highlight_dict(highlight_dict)
# handle category that is not included in _print_setting
if cat_count < len(emit_dict):
for cat in emit_dict:
if (cat not in self._print_setting_cats and
cat not in self._ignore_categories):
sys.stderr.write("event category not found: %s\n" % cat)
self.print_emit_dict(cat, emit_dict)
  def print_chart_options(self, emit_dict, highlight_dict, width, height):
    """Print the chart options provided to the visualizer."""
color_string = ""
cat_count = 0
# construct color string following the order of _print_setting
for i in range(0, len(self._print_setting)):
cat = self._print_setting[i][0]
if cat in emit_dict:
color_string += "'%s', " % self._print_setting[i][1]
cat_count += 1
if cat == "highlight" and highlight_dict:
color_string += "'%s', " % self._print_setting[i][1]
cat_count += 1
if cat_count % 4 == 0:
color_string += "\n\t"
# handle category that is not included in _print_setting
if cat_count < len(emit_dict):
for cat in emit_dict:
if cat not in self._print_setting_cats:
color_string += "'%s', " % self._default_color
print("\toptions = {\n"
"\ttimeline: { colorByRowLabel: true},\n"
"\t'width': %s,\n"
"\t'height': %s, \n"
"\tcolors: [%s]\n"
"\t};" % (width, height, color_string))
class LegacyFormatConverter(object):
  """Convert the KitKat bugreport format to the latest supported format."""
_TIME_FORMAT = (r"\-((?P<day>\d+)d)?((?P<hrs>\d+)h)?((?P<min>\d+)m)?"
r"((?P<sec>\d+)s)?((?P<ms>\d+)ms)?$")
def __init__(self):
self._end_time = 0
self._total_duration = 0
def parse_end_time(self, line):
line = line.strip()
try:
line = line.split("dumpstate: ", 1)[1]
st = time.strptime(line, "%Y-%m-%d %H:%M:%S")
self._end_time = time.mktime(st)
except IndexError:
pass
def get_timestr(self, line_time):
"""Convert backward time string in Kit-Kat to forward time string."""
delta = self._total_duration - parse_time(line_time, self._TIME_FORMAT)
datet = datetime.datetime.utcfromtimestamp(delta)
if delta == 0:
return "0"
timestr = "+"
if delta > 24 * 60 * 60:
timestr += str(datet.day - 1) + datet.strftime("d%Hh%Mm%Ss")
elif delta > 60 * 60:
timestr += datet.strftime("%Hh%Mm%Ss").lstrip("0")
elif delta > 60:
timestr += datet.strftime("%Mm%Ss").lstrip("0")
elif delta > 1:
timestr += datet.strftime("%Ss").lstrip("0")
ms = datet.microsecond / 1000.0
timestr += "%03dms" % ms
return timestr
def get_header(self, line_time):
self._total_duration = parse_time(line_time, self._TIME_FORMAT)
start_time = self._end_time - self._total_duration
header = "Battery History\n"
header += "RESET:TIME: %s\n" % time.strftime("%Y-%m-%d-%H-%M-%S",
time.localtime(start_time))
return header
def convert(self, input_file):
"""Convert legacy format file into string that fits latest format."""
output_string = ""
history_start = False
for line in fileinput.input(input_file):
if "dumpstate:" in line:
self.parse_end_time(line)
if self._end_time:
break
fileinput.close()
if not self._end_time:
print "cannot find end time"
sys.exit(1)
for line in fileinput.input(input_file):
if not history_start and line.startswith("Battery History"):
history_start = True
continue
elif not history_start:
continue
if line.isspace(): break
line = line.strip()
arr = line.split()
if len(arr) < 4: continue
p = re.compile('"[^"]+"')
line = p.sub(space_escape, line)
split_line = line.split()
(line_time, line_battery_level, line_state) = split_line[:3]
line_events = split_line[3:]
if not self._total_duration:
output_string += self.get_header(line_time)
timestr = self.get_timestr(line_time)
event_string = " ".join(line_events)
newline = "%s _ %s %s %s\n" % (timestr, line_battery_level,
line_state, event_string)
output_string += newline
fileinput.close()
return output_string
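# --- Hedged sketch (not part of the original script) ---
# Printer.aggregate_events (defined above) collapses events with identical
# names that start within the same second into a single entry before
# rendering. The category and event strings below are made up.
def _demo_aggregate_events():
  printer = Printer()
  emit_dict = {"top": [("top=u0a12:\"launcher\"", 100, 101),
                       ("top=u0a12:\"launcher\"", 100, 101)]}
  return printer.aggregate_events(emit_dict) # expected: one "top" event remains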
class BHEmitter(object):
"""Process battery history section from bugreport.txt."""
_omit_cats = ["temp", "volt", "brightness", "sensor", "proc"]
# categories that have "+" and "-" events. If we see an event in these
# categories starting at time 0 without +/- sign, treat it as a "+" event.
_transitional_cats = ["plugged", "running", "wake_lock", "gps", "sensor",
"phone_in_call", "mobile_radio", "phone_scanning",
"proc", "fg", "top", "sync", "wifi", "wifi_full_lock",
"wifi_scan", "wifi_multicast", "wifi_running", "conn",
"bluetooth", "audio", "video", "wake_lock_in", "job",
"device_idle", "wifi_radio"]
_in_progress_dict = autovivify() # events that are currently in progress
_proc_dict = {} # mapping of "proc" uid to human-readable name
_search_proc_id = -1 # proc id of the getopt_proc_name
match_list = [] # list of package names that match search string
cat_list = [] # BLAME_CATEGORY summary data
def store_event(self, cat, subcat, event_str, event_time, timestr):
self._in_progress_dict[cat][subcat] = (event_str, event_time, timestr)
if getopt_debug:
print "store_event: %s in %s/%s<br>" % (event_str, cat, subcat)
def retrieve_event(self, cat, subcat):
"""Pop event from in-progress event dict if match exists."""
if cat in self._in_progress_dict:
try:
result = self._in_progress_dict[cat].pop(subcat)
if getopt_debug:
print "retrieve_event: found %s/%s<br>" % (cat, subcat)
return (True, result)
except KeyError:
pass
if getopt_debug:
print "retrieve_event: no match for event %s/%s<br>" % (cat, subcat)
return (False, (None, None, None))
def store_proc(self, e, highlight_dict):
proc_pair = get_after_equal(e)
(proc_id, proc_name) = proc_pair.split(":", 1)
self._proc_dict[proc_id] = proc_name # may overwrite
if getopt_proc_name and getopt_proc_name in proc_name and proc_id:
if proc_pair not in self.match_list:
self.match_list.append(proc_pair)
if self._search_proc_id == -1:
self._search_proc_id = proc_id
elif self._search_proc_id != proc_id:
if (proc_name[1:-1] == getopt_proc_name or
proc_name == getopt_proc_name):
# reinitialize
highlight_dict.clear()
# replace default match with complete match
self._search_proc_id = proc_id
swap(self.match_list, 0, -1)
def procs_to_str(self):
l = sorted(self._proc_dict.items(), key=lambda x: x[0])
result = ""
for i in l:
result += "%s: %s\n" % (i[0], i[1])
return result
def get_proc_name(self, proc_id):
if proc_id in self._proc_dict:
return self._proc_dict[proc_id]
else:
return ""
def annotate_event_name(self, name):
"""Modifies the event name to make it more understandable."""
if "*alarm*" in name:
try:
proc_pair = get_after_equal(name)
except IndexError:
return name
proc_id = proc_pair.split(":", 1)[0]
name = name + ":" + self.get_proc_name(proc_id)
if getopt_debug:
print "annotate_event_name: %s" % name
return name
def abbreviate_event_name(self, name):
"""Abbreviate location-related event name."""
if not getopt_show_all_wakelocks:
if "wake_lock" in name:
if "LocationManagerService" in name or "NlpWakeLock" in name:
return "LOCATION"
if "UlrDispatching" in name:
return "LOCATION"
if "GCoreFlp" in name or "GeofencerStateMachine" in name:
return "LOCATION"
if "NlpCollectorWakeLock" in name or "WAKEUP_LOCATOR" in name:
return "LOCATION"
if "GCM" in name or "C2DM" in name:
return "GCM"
return name
def process_wakelock_event_name(self, start_name, start_id, end_name, end_id):
start_name = self.process_event_name(start_name)
end_name = self.process_event_name(end_name)
event_name = "first=%s:%s, last=%s:%s" % (start_id, start_name,
end_id, end_name)
return event_name
def process_event_timestr(self, start_timestr, end_timestr):
return "(%s-%s)" % (abbrev_timestr(start_timestr),
abbrev_timestr(end_timestr))
def process_event_name(self, event_name):
event_name = self.annotate_event_name(event_name)
event_name = self.abbreviate_event_name(event_name)
return event_name.replace("'", r"\'")
def track_event_parallelism_fn(self, start_time, time_this_quanta, time_dict):
if start_time in time_dict:
time_dict[start_time] += time_this_quanta
else:
time_dict[start_time] = time_this_quanta
if getopt_debug:
print "time_dict[%d] now %f added %f" % (start_time,
time_dict[start_time],
time_this_quanta)
# track total amount of event time held per second quanta
def track_event_parallelism(self, start_time, end_time, time_dict):
apply_fn_over_range(self.track_event_parallelism_fn,
start_time, end_time, [time_dict])
def emit_event(self, cat, event_name, start_time, start_timestr,
end_event_name, end_time, end_timestr,
emit_dict, time_dict, highlight_dict):
"""Saves an event to be later visualized."""
(start_pid, start_pname) = get_proc_pair(event_name)
(end_pid, end_pname) = get_proc_pair(end_event_name)
if cat == "wake_lock" and end_pname and end_pname != start_pname:
short_event_name = self.process_wakelock_event_name(
start_pname, start_pid, end_pname, end_pid)
else:
short_event_name = self.process_event_name(event_name)
event_name = short_event_name + self.process_event_timestr(start_timestr,
end_timestr)
if getopt_highlight_category == cat:
if start_pid == self._search_proc_id or end_pid == self._search_proc_id:
add_emit_event(highlight_dict, cat,
event_name, start_time, end_time)
if cat == BLAME_CATEGORY:
self.cat_list.append((short_event_name, start_time, end_time))
end_time += getopt_bill_extra_secs
self.track_event_parallelism(start_time, end_time, time_dict)
if end_time - start_time < 1:
# HACK: visualizer library doesn't always render sub-second events
end_time += 1
add_emit_event(emit_dict, cat, event_name, start_time, end_time)
def handle_event(self, event_time, time_str, event_str,
emit_dict, time_dict, highlight_dict):
"""Handle an individual event.
Args:
event_time: Event time
time_str: Event time as string
event_str: Event string
emit_dict: A dict tracking events to draw in the timeline, by row
time_dict: A dict tracking BLAME_CATEGORY duration, by seconds
highlight_dict: A separate event dict for -n option
"""
if getopt_debug:
print "<p>handle_event: %s at %s<br>" % (event_str, time_str)
cat = get_event_category(event_str)
subcat = get_event_subcat(cat, event_str)
# events already in progress are treated as starting at time 0
if (time_str == "0" and is_standalone_event(event_str)
and cat in self._transitional_cats):
event_str = "+" + event_str
if is_proc_event(event_str): self.store_proc(event_str, highlight_dict)
if cat in self._omit_cats: return
if not is_emit_event(event_str):
# "+" event, save it until we find a matching "-"
self.store_event(cat, subcat, event_str, event_time, time_str)
return
else:
# "-" or standalone event such as "wake_reason"
start_time = 0.0
(found, event) = self.retrieve_event(cat, subcat)
if found:
(event_name, start_time, start_timestr) = event
else:
event_name = event_str
start_time = event_time
start_timestr = time_str
# Events that were still going on at the time of reboot
# should be marked as ending at the time of reboot.
if event_str == "reboot":
self.emit_remaining_events(event_time, time_str, emit_dict,
time_dict, highlight_dict)
self.emit_event(cat, event_name, start_time, start_timestr,
event_str, event_time, time_str,
emit_dict, time_dict, highlight_dict)
def generate_summary_row(self, row_to_summarize, emit_dict, start_time,
end_time):
"""Generate additional data row showing % time covered by another row."""
summarize_quanta = 60
row_name = row_to_summarize + "_pct"
if row_to_summarize not in emit_dict: return
summarize_list = emit_dict[row_to_summarize]
seconds_dict = {}
# Generate dict of seconds where the row to summarize is seen.
for i in summarize_list:
self.track_event_parallelism(i[1], i[2], seconds_dict)
# Traverse entire range of time we care about and generate % events.
for summary_start_time in range(int(start_time), int(end_time),
summarize_quanta):
summary_end_time = summary_start_time + summarize_quanta
found_ctr = 0
for second_cursor in range(summary_start_time, summary_end_time):
if second_cursor in seconds_dict:
found_ctr += 1
if found_ctr:
pct = int(found_ctr * 100 / summarize_quanta)
if pct > getopt_summarize_pct:
add_emit_event(emit_dict, row_name, "%s=%d" % (row_name, pct),
summary_start_time, summary_end_time)
def generate_summary_rows(self, emit_dict, start_time, end_time):
if getopt_summarize_pct < 0:
return
for i in ROWS_TO_SUMMARIZE:
self.generate_summary_row(i, emit_dict, start_time, end_time)
def emit_remaining_events(self, end_time, end_timestr, emit_dict, time_dict,
highlight_dict):
for cat in self._in_progress_dict:
for subcat in self._in_progress_dict[cat]:
(event_name, s_time, s_timestr) = self._in_progress_dict[cat][subcat]
self.emit_event(cat, event_name, s_time, s_timestr,
event_name, end_time, end_timestr,
emit_dict, time_dict, highlight_dict)
class BlameSynopsis(object):
  """Summary data for a BLAME_CATEGORY instance, used for power accounting."""
def __init__(self):
self.name = ""
self.mah = 0
self.timestr = ""
self._duration_list = []
def add(self, name, duration, mah, t):
self.name = name
self._duration_list.append(duration)
self.mah += mah
if not self.timestr:
self.timestr = time_float_to_human(t, False)
def get_count(self):
return len(self._duration_list)
def get_median_duration(self):
return sorted(self._duration_list)[int(self.get_count() / 2)]
def get_total_duration(self):
return sum(self._duration_list)
def to_str(self, total_mah, show_power):
"""Returns a summary string."""
if total_mah:
pct = self.mah * 100 / total_mah
else:
pct = 0
avg = self.get_total_duration() / self.get_count()
ret = ""
if show_power:
ret += "%.3f mAh (%.1f%%), " % (self.mah, pct)
ret += "%3s events, " % str(self.get_count())
ret += "%6.3fs total " % self.get_total_duration()
ret += "%6.3fs avg " % avg
ret += "%6.3fs median: " % self.get_median_duration()
ret += self.name
ret += " (first at %s)" % self.timestr
return ret
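# Example of the bookkeeping above (hypothetical numbers): three holds of the
# same event lasting 2s, 3s and 10s give count=3, total=15s, avg=5s, median=3s;
# if 1.5 mAh of a 30 mAh total was billed to it, to_str() starts with
# "1.500 mAh (5.0%), ...".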
class PowerEmitter(object):
"""Give power accounting and bill to wake lock."""
_total_amps = 0
_total_top_amps = 0
_line_ctr = 0
_TOP_THRESH = .01
_quanta_amps = 0
_start_secs = 0
_power_dict = {}
_synopsis_dict = {}
def __init__(self, cat_list):
self._cat_list = cat_list
def get_range_power_fn(self, start_time, time_this_quanta, time_dict):
"""Assign proportional share of blame.
During any second, this event might have been held for
less than the second, and others might have been held during
that time. Here we try to assign the proportional share of the
blame.
Args:
start_time: Starting time of this quanta
time_this_quanta: Duration of this quanta
time_dict: A dict tracking total time at different starting time
Returns:
A proportional share of blame for the quanta.
"""
if start_time in self._power_dict:
total_time_held = time_dict[start_time]
multiplier = time_this_quanta / total_time_held
result = self._power_dict[start_time] * multiplier
if getopt_debug:
print("get_range_power: distance %f total time %f "
"base power %f, multiplier %f<br>" %
(time_this_quanta, total_time_held,
self._power_dict[start_time], multiplier))
assert multiplier <= 1.0
else:
if getopt_debug:
print "get_range_power: no power data available"
result = 0.0
return result
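# Worked example of the proportional split above (hypothetical numbers): during
# one second the power log shows 0.8 A and time_dict[start_time] says events
# were held for a combined 2.0 s within that second; an event holding 0.5 s of
# it is blamed for 0.8 * (0.5 / 2.0) = 0.2 A.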
def get_range_power(self, start, end, time_dict):
power_results = apply_fn_over_range(self.get_range_power_fn,
start, end, [time_dict])
result = 0.0
for i in power_results:
result += i
return result
def bill(self, time_dict):
for _, e in enumerate(self._cat_list):
(event_name, start_time, end_time) = e
if event_name in self._synopsis_dict:
sd = self._synopsis_dict[event_name]
else:
sd = BlameSynopsis()
amps = self.get_range_power(start_time,
end_time + getopt_bill_extra_secs,
time_dict)
mah = as_to_mah(amps)
sd.add(event_name, end_time - start_time, mah, start_time)
if getopt_debug:
print "billed range %f %f at %fAs to %s<br>" % (start_time, end_time,
amps, event_name)
self._synopsis_dict[event_name] = sd
def handle_line(self, secs, amps, emit_dict):
"""Handle a power data file line."""
self._line_ctr += 1
if not self._start_secs:
self._start_secs = secs
self._quanta_amps += amps
self._total_amps += amps
self._power_dict[secs] = amps
if secs % getopt_power_quanta:
return
avg = self._quanta_amps / getopt_power_quanta
event_name = "%.3f As (%.3f A avg)" % (self._quanta_amps, avg)
add_emit_event(emit_dict, "power", event_name, self._start_secs, secs)
if self._quanta_amps > self._TOP_THRESH * getopt_power_quanta:
self._total_top_amps += self._quanta_amps
add_emit_event(emit_dict, "activepower", event_name,
self._start_secs, secs)
self._quanta_amps = 0
self._start_secs = secs
def report(self):
"""Report bill of BLAME_CATEGORY."""
mah = as_to_mah(self._total_amps)
report_power = self._line_ctr
if report_power:
avg_ma = self._total_amps/self._line_ctr
print "<p>Total power: %.3f mAh, avg %.3f" % (mah, avg_ma)
top_mah = as_to_mah(self._total_top_amps)
print ("<br>Total power above awake "
"threshold (%.1fmA): %.3f mAh %.3f As" % (self._TOP_THRESH * 1000,
top_mah,
self._total_top_amps))
print "<br>%d samples, %d min<p>" % (self._line_ctr, self._line_ctr / 60)
if report_power and getopt_bill_extra_secs:
print("<b>Power seen during each history event, including %d "
"seconds after each event:" % getopt_bill_extra_secs)
elif report_power:
print "<b>Power seen during each history event:"
else:
print "<b>Event summary:"
print "</b><br><pre>"
report_list = []
total_mah = 0.0
total_count = 0
for _, v in self._synopsis_dict.iteritems():
total_mah += v.mah
total_count += v.get_count()
if getopt_sort_by_power and report_power:
sort_term = v.mah
else:
sort_term = v.get_total_duration()
report_list.append((sort_term, v.to_str(mah, report_power)))
report_list.sort(key=lambda tup: tup[0], reverse=True)
for i in report_list:
print i[1]
print "total: %.3f mAh, %d events" % (total_mah, total_count)
print "</pre>\n"
def adjust_reboot_time(line, event_time):
# Line delta time is not reset after reboot, but wall time will
# be printed after reboot finishes. This function returns how much
# we are off and the actual reboot event time.
line = line.strip()
line = line.split("TIME: ", 1)[1]
st = time.strptime(line, "%Y-%m-%d-%H-%M-%S")
wall_time = time.mktime(st)
return wall_time - event_time, wall_time
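# Example (hypothetical values): if the post-reboot line carries
# "TIME: 2015-06-01-12-00-30" but the accumulated deltas place the event at a
# wall time of 12:00:10, the returned offset is +20 s, which main() folds into
# time_offset for all subsequent lines.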
def get_app_id(uid):
"""Returns the app ID from a string.
Reverses and uses the methods defined in UserHandle.java to get
only the app ID.
Args:
uid: a string representing the uid printed in the history output
Returns:
An integer representing the specific app ID.
"""
abr_uid_re = re.compile(r"u(?P<userId>\d+)(?P<aidType>[ias])(?P<appId>\d+)")
if not uid:
return 0
if uid.isdigit():
# 100000 is the range of uids allocated for a user.
return int(uid) % 100000
if abr_uid_re.match(uid):
match = abr_uid_re.search(uid)
try:
d = match.groupdict()
if d["aidType"] == "i": # first isolated uid
return int(d["appId"]) + 99000
if d["aidType"] == "a": # first application uid
return int(d["appId"]) + 10000
return int(d["appId"]) # app id wasn't modified
except IndexError:
sys.stderr.write("Abbreviated app UID didn't match properly")
return uid
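# Examples of the uid formats handled above (values are illustrative):
# "10123" -> 10123 % 100000 = 10123 (plain numeric uid)
# "1010123" -> 1010123 % 100000 = 10123 (same app, secondary user)
# "u0a123" -> app id 123 + 10000 = 10123 (abbreviated application uid)
# "u0i5" -> app id 5 + 99000 = 99005 (abbreviated isolated uid)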
usr_time = "usrTime"
sys_time = "sysTime"
# A map of app uid to their total CPU usage in terms of user
# and system time (in ms).
app_cpu_usage = {}
def save_app_cpu_usage(uid, usr_cpu_time, sys_cpu_time):
uid = get_app_id(uid)
if uid in app_cpu_usage:
app_cpu_usage[uid][usr_time] += usr_cpu_time
app_cpu_usage[uid][sys_time] += sys_cpu_time
else:
app_cpu_usage[uid] = {usr_time: usr_cpu_time, sys_time: sys_cpu_time}
# Constants defined in android.net.ConnectivityManager
conn_constants = {
"0": "TYPE_MOBILE",
"1": "TYPE_WIFI",
"2": "TYPE_MOBILE_MMS",
"3": "TYPE_MOBILE_SUPL",
"4": "TYPE_MOBILE_DUN",
"5": "TYPE_MOBILE_HIPRI",
"6": "TYPE_WIMAX",
"7": "TYPE_BLUETOOTH",
"8": "TYPE_DUMMY",
"9": "TYPE_ETHERNET",
"17": "TYPE_VPN",
}
def main():
details_re = re.compile(r"^Details:\scpu=\d+u\+\d+s\s*(\((?P<appCpu>.*)\))?")
app_cpu_usage_re = re.compile(
r"(?P<uid>\S+)=(?P<userTime>\d+)u\+(?P<sysTime>\d+)s")
proc_stat_re = re.compile((r"^/proc/stat=(?P<usrTime>-?\d+)\s+usr,\s+"
r"(?P<sysTime>-?\d+)\s+sys,\s+"
r"(?P<ioTime>-?\d+)\s+io,\s+"
r"(?P<irqTime>-?\d+)\s+irq,\s+"
r"(?P<sirqTime>-?\d+)\s+sirq,\s+"
r"(?P<idleTime>-?\d+)\s+idle.*")
)
data_start_time = 0.0
data_stop_time = 0
data_stop_timestr = ""
on_mode = False
time_offset = 0.0
overflowed = False
reboot = False
prev_battery_level = -1
bhemitter = BHEmitter()
emit_dict = {} # maps event categories to events
time_dict = {} # total event time held per second
highlight_dict = {} # search result for -n option
is_first_data_line = True
is_dumpsys_format = False
argv_remainder = parse_argv()
input_file = argv_remainder[0]
legacy_mode = is_file_legacy_mode(input_file)
# A map of /proc/stat names to total times (in ms).
proc_stat_summary = {
"usr": 0,
"sys": 0,
"io": 0,
"irq": 0,
"sirq": 0,
"idle": 0,
}
if legacy_mode:
input_string = LegacyFormatConverter().convert(input_file)
input_file = StringIO.StringIO(input_string)
else:
input_file = open(input_file, "r")
while True:
line = input_file.readline()
if not line: break
if not on_mode and line.startswith("Battery History"):
on_mode = True
continue
elif not on_mode:
continue
if line.isspace(): break
line = line.strip()
if "RESET:TIME: " in line:
data_start_time = parse_reset_time(line)
continue
if "OVERFLOW" in line:
overflowed = True
break
if "START" in line:
reboot = True
continue
if "TIME: " in line:
continue
# escape spaces within quoted regions
p = re.compile('"[^"]+"')
line = p.sub(space_escape, line)
if details_re.match(line):
match = details_re.search(line)
try:
d = match.groupdict()
if d["appCpu"]:
for app in d["appCpu"].split(", "):
app_match = app_cpu_usage_re.search(app)
try:
a = app_match.groupdict()
save_app_cpu_usage(a["uid"],
int(a["userTime"]), int(a["sysTime"]))
except IndexError:
sys.stderr.write("App CPU usage line didn't match properly")
except IndexError:
sys.stderr.write("Details line didn't match properly")
continue
elif proc_stat_re.match(line):
match = proc_stat_re.search(line)
try:
d = match.groupdict()
if d["usrTime"]:
proc_stat_summary["usr"] += int(d["usrTime"])
if d["sysTime"]:
proc_stat_summary["sys"] += int(d["sysTime"])
if d["ioTime"]:
proc_stat_summary["io"] += int(d["ioTime"])
if d["irqTime"]:
proc_stat_summary["irq"] += int(d["irqTime"])
if d["sirqTime"]:
proc_stat_summary["sirq"] += int(d["sirqTime"])
if d["idleTime"]:
proc_stat_summary["idle"] += int(d["idleTime"])
except IndexError:
sys.stderr.write("proc/stat line didn't match properly")
continue
# pull apart input line by spaces
split_line = line.split()
if len(split_line) < 4: continue
(line_time, _, line_battery_level, fourth_field) = split_line[:4]
# "bugreport" output has an extra hex field vs "dumpsys", detect here.
if is_first_data_line:
is_first_data_line = False
try:
int(fourth_field, 16)
except ValueError:
is_dumpsys_format = True
if is_dumpsys_format:
line_events = split_line[3:]
else:
line_events = split_line[4:]
fmt = (r"\+((?P<day>\d+)d)?((?P<hrs>\d+)h)?((?P<min>\d+)m)?"
r"((?P<sec>\d+)s)?((?P<ms>\d+)ms)?$")
time_delta_s = parse_time(line_time, fmt) + time_offset
if time_delta_s < 0:
print "Warning: time went backwards: %s" % line
continue
event_time = data_start_time + time_delta_s
if reboot and "TIME:" in line:
# adjust offset using wall time
offset, event_time = adjust_reboot_time(line, event_time)
if offset < 0:
print "Warning: time went backwards: %s" % line
continue
time_offset += offset
time_delta_s = event_time - data_start_time
reboot = False
line_events = {"reboot"}
if line_battery_level != prev_battery_level:
# battery_level is not an actual event, it's on every line
if line_battery_level.isdigit():
bhemitter.handle_event(event_time, format_time(time_delta_s),
"battery_level=" + line_battery_level,
emit_dict, time_dict, highlight_dict)
for event in line_events:
# conn events need to be parsed in order to be useful
if event.startswith("conn"):
num, ev = get_after_equal(event).split(":")
if ev == "\"CONNECTED\"":
event = "+conn="
else:
event = "-conn="
if num in conn_constants:
event += conn_constants[num]
else:
event += "UNKNOWN"
bhemitter.handle_event(event_time, format_time(time_delta_s), event,
emit_dict, time_dict, highlight_dict)
prev_battery_level = line_battery_level
data_stop_time = event_time
data_stop_timestr = format_time(time_delta_s)
input_file.close()
if not on_mode:
print "Battery history not present in bugreport."
return
bhemitter.emit_remaining_events(data_stop_time, data_stop_timestr,
emit_dict, time_dict, highlight_dict)
bhemitter.generate_summary_rows(emit_dict, data_start_time,
data_stop_time)
power_emitter = PowerEmitter(bhemitter.cat_list)
if getopt_power_data_file:
for line in fileinput.input(getopt_power_data_file):
data = line.split(" ")
secs = float(data[0]) + POWER_DATA_FILE_TIME_OFFSET
amps = float(data[1])
power_emitter.handle_line(secs, amps, emit_dict)
power_emitter.bill(time_dict)
printer = Printer()
if not getopt_generate_chart_only:
print "<!DOCTYPE html>\n<html><head>\n"
report_filename = argv_remainder[0]
if getopt_report_filename:
report_filename = getopt_report_filename
header = "Battery Historian analysis for %s" % report_filename
print "<title>" + header + "</title>"
if overflowed:
print ('<font size="5" color="red">Warning: History overflowed at %s, '
'many events may be missing.</font>' %
time_float_to_human(data_stop_time, True))
print "<p>" + header + "</p>"
if legacy_mode:
print("<p><b>WARNING:</b> legacy format detected; "
"history information is limited</p>\n")
if not getopt_generate_chart_only:
print """
<script src="https://ajax.googleapis.com/ajax/libs/jquery/1.11.1/jquery.min.js"></script>
<script type="text/javascript" src="https://www.google.com/jsapi?autoload={'modules':[{'name':'visualization','version':'1','packages':['timeline']}]}"></script>
"""
print "<script type=\"text/javascript\">"
if not getopt_disable_chart_drawing:
print "google.setOnLoadCallback(drawChart);\n"
print """
var dataTable;
var chart;
var options;
var default_width = 3000
function drawChart() {
container = document.getElementById('chart');
chart = new google.visualization.Timeline(container);
dataTable = new google.visualization.DataTable();
dataTable.addColumn({ type: 'string', id: 'Position' });
dataTable.addColumn({ type: 'string', id: 'Name' });
dataTable.addColumn({ type: 'date', id: 'Start' });
dataTable.addColumn({ type: 'date', id: 'End' });
dataTable.addRows([
"""
printer.print_events(emit_dict, highlight_dict)
print "]);"
width = 3000 # default width
height = 3000 # initial height
printer.print_chart_options(emit_dict, highlight_dict, width, height)
print """
//make sure allocate enough vertical space
options['height'] = dataTable.getNumberOfRows() * 40;
chart.draw(dataTable, options);
//get vertical coordinate of scale bar
var svg = document.getElementById('chart').getElementsByTagName('svg')[0];
var label = svg.children[2].children[0];
var y = label.getAttribute('y');
//plus height of scale bar
var chart_div_height = parseInt(y) + 50;
var chart_height = chart_div_height;
//set chart height to exact height
options['height'] = chart_height;
$('#chart').css('height', chart_div_height);
svg.setAttribute('height', chart_height);
var content = $('#chart').children()[0];
$(content).css('height', chart_height);
var inner = $(content).children()[0];
$(inner).css('height', chart_height);
}
function redrawChart() {
var scale = document.getElementById("scale").value;
scale = scale.replace('%', '') / 100
options['width'] = scale * default_width;
chart.draw(dataTable, options);
}
</script>
<style>
#redrawButton{
width:100px;
}
</style>
"""
if not getopt_generate_chart_only:
print "</head>\n<body>\n"
show_complete_time = False
if data_stop_time - data_start_time > 24 * 60 * 60:
show_complete_time = True
start_localtime = time_float_to_human(data_start_time, show_complete_time)
stop_localtime = time_float_to_human(data_stop_time, show_complete_time)
print "<div id=\"chart\">"
if not getopt_generate_chart_only:
print ("<b>WARNING: Visualizer disabled. "
"If you see this message, download the HTML then open it.</b>")
print "</div>"
print("<p><b>WARNING:</b>\n"
"<br>*: wake_lock field only shows the first/last wakelock held \n"
"when the system is awake. For more detail, use wake_lock_in."
"<br>To enable full wakelock reporting (post-KitKat only) : \n"
"<br>adb shell dumpsys batterystats "
"--enable full-wake-history</p>")
if getopt_proc_name:
if len(bhemitter.match_list) > 1:
print("<p><b>WARNING:</b>\n"
"<br>Multiple match found on -n option <b>%s</b>"
"<ul>" % getopt_proc_name)
for match in bhemitter.match_list:
print "<li>%s</li>" % match
print ("</ul>Showing search result for %s</p>"
% bhemitter.match_list[0].split(":", 1)[0])
elif not bhemitter.match_list:
print("<p><b>WARNING:</b>\n"
"<br>No match on -n option <b>%s</b></p>" % getopt_proc_name)
if not highlight_dict:
print ("Search - <b>%s</b> in <b>%s</b> - did not match any event"
% (getopt_proc_name, getopt_highlight_category))
print ("<pre>(Local time %s - %s, %dm elapsed)</pre>"
% (start_localtime, stop_localtime,
(data_stop_time-data_start_time) / 60))
print ("<p>\n"
"Zoom: <input id=\"scale\" type=\"text\" value=\"100%\"></input>"
"<button type=\"button\" id=\"redrawButton\""
"onclick=\"redrawChart()\">redraw</button></p>\n"
"</p>\n")
power_emitter.report()
if app_cpu_usage:
print "<b>App CPU usage:</b><br />"
print "In user time:<br />"
print "<table border=\"1\"><tr><td>UID</td><td>Duration</td></tr>"
for (uid, use) in sorted(app_cpu_usage.items(),
key=lambda x: -x[1][usr_time]):
print "<tr><td>%s</td>" % uid
print "<td>%s</td></tr>" % format_duration(use[usr_time])
print "</table>"
print "<br />In system time:<br />"
print "<table border=\"1\"><tr><td>UID</td><td>Duration</td></tr>"
for (uid, use) in sorted(app_cpu_usage.items(),
key=lambda x: -x[1][sys_time]):
print "<tr><td>%s</td>" % uid
print "<td>%s</td></tr>" % format_duration(use[sys_time])
print "</table>"
print "<br /><b>Proc/stat summary</b><ul>"
print "<li>Total User Time: %s</li>" % format_duration(
proc_stat_summary["usr"])
print "<li>Total System Time: %s</li>" % format_duration(
proc_stat_summary["sys"])
print "<li>Total IO Time: %s</li>" % format_duration(
proc_stat_summary["io"])
print "<li>Total Irq Time: %s</li>" % format_duration(
proc_stat_summary["irq"])
print "<li>Total Soft Irq Time: %s</li>" % format_duration(
proc_stat_summary["sirq"])
print "<li>Total Idle Time: %s</li>" % format_duration(
proc_stat_summary["idle"])
print "</ul>"
print "<pre>Process table:"
print bhemitter.procs_to_str()
print "</pre>\n"
if not getopt_generate_chart_only:
print "</body>\n</html>"
if __name__ == "__main__":
main()
| 52,562 | 31.87242 | 167 | py |
3DG-STFM | 3DG-STFM-master/train_rgbd_t_s.py | import math
import argparse
import pprint
from distutils.util import strtobool
from pathlib import Path
from loguru import logger as loguru_logger
import pytorch_lightning as pl
from pytorch_lightning.utilities import rank_zero_only
from pytorch_lightning.loggers import TensorBoardLogger
from pytorch_lightning.callbacks import ModelCheckpoint, LearningRateMonitor
from pytorch_lightning.plugins import DDPPlugin
from src.config.default import get_cfg_defaults
from src.utils.misc import get_rank_zero_only_logger, setup_gpus
from src.utils.profiler import build_profiler
from src.lightning.data import RGBDDataModule
from src.lightning.lightning_loftr import PL_LoFTR_RGB_teacher_student
loguru_logger = get_rank_zero_only_logger(loguru_logger)
def parse_args():
# init a custom parser which will be added into pl.Trainer parser
# check documentation: https://pytorch-lightning.readthedocs.io/en/latest/common/trainer.html#trainer-flags
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument(
'data_cfg_path', type=str, help='data config path')
parser.add_argument(
'main_cfg_path', type=str, help='main config path')
parser.add_argument(
'--exp_name', type=str, default='default_exp_name')
parser.add_argument(
'--batch_size', type=int, default=4, help='batch_size per gpu')
parser.add_argument(
'--num_workers', type=int, default=4)
parser.add_argument(
'--pin_memory', type=lambda x: bool(strtobool(x)),
nargs='?', default=True, help='whether to load data into pinned memory')
parser.add_argument(
'--ckpt_path', type=str, default=None,
help='pretrained checkpoint path, helpful for using a pre-trained coarse-only LoFTR')
parser.add_argument(
'--disable_ckpt', action='store_true',
help='disable checkpoint saving (useful for debugging).')
parser.add_argument(
'--profiler_name', type=str, default=None,
help='options: [inference, pytorch], or leave it unset')
parser.add_argument(
'--parallel_load_data', action='store_true',
help='load datasets with multiple processes.')
parser = pl.Trainer.add_argparse_args(parser)
return parser.parse_args()
def main():
# parse arguments
args = parse_args()
rank_zero_only(pprint.pprint)(vars(args))
# init default-cfg and merge it with the main- and data-cfg
config = get_cfg_defaults()
config.merge_from_file(args.main_cfg_path)
config.merge_from_file(args.data_cfg_path)
pl.seed_everything(config.TRAINER.SEED) # reproducibility
# TODO: Use different seeds for each dataloader workers
# This is needed for data augmentation
# scale lr and warmup-step automatically
args.gpus = _n_gpus = setup_gpus(args.gpus)
config.TRAINER.WORLD_SIZE = _n_gpus * args.num_nodes
config.TRAINER.TRUE_BATCH_SIZE = config.TRAINER.WORLD_SIZE * args.batch_size
_scaling = config.TRAINER.TRUE_BATCH_SIZE / config.TRAINER.CANONICAL_BS
config.TRAINER.SCALING = _scaling
config.TRAINER.TRUE_LR = config.TRAINER.CANONICAL_LR * _scaling
config.TRAINER.WARMUP_STEP = math.floor(config.TRAINER.WARMUP_STEP / _scaling)
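# Worked example of the scaling above, assuming the defaults in
# src/config/default.py are not overridden (CANONICAL_BS=64, CANONICAL_LR=6e-3,
# WARMUP_STEP=4800): with 1 node, 4 GPUs and --batch_size 1, TRUE_BATCH_SIZE=4,
# _scaling = 4 / 64 = 0.0625, TRUE_LR = 6e-3 * 0.0625 = 3.75e-4 and
# WARMUP_STEP = floor(4800 / 0.0625) = 76800.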
# lightning module
profiler = build_profiler(args.profiler_name)
model = PL_LoFTR_RGB_teacher_student(config, pretrained_ckpt=args.ckpt_path, profiler=profiler)
#model = PL_LoFTR_RGB_teacher_student_share(config, pretrained_ckpt=args.ckpt_path, profiler=profiler)
loguru_logger.info(f"LoFTR LightningModule initialized!")
# lightning data
data_module = RGBDDataModule(args, config)
loguru_logger.info(f"LoFTR DataModule initialized!")
# TensorBoard Logger
logger = TensorBoardLogger(save_dir='logs/tb_logs', name=args.exp_name, default_hp_metric=False)
ckpt_dir = Path(logger.log_dir) / 'checkpoints'
# Callbacks
# TODO: update ModelCheckpoint to monitor multiple metrics
ckpt_callback = ModelCheckpoint(monitor='auc@10', verbose=True, save_top_k=5, mode='max',
save_last=True,
dirpath=str(ckpt_dir),
filename='{epoch}-{auc@5:.3f}-{auc@10:.3f}-{auc@20:.3f}')
lr_monitor = LearningRateMonitor(logging_interval='step')
callbacks = [lr_monitor]
if not args.disable_ckpt:
callbacks.append(ckpt_callback)
# Lightning Trainer
trainer = pl.Trainer.from_argparse_args(
args,
plugins=DDPPlugin(find_unused_parameters=True,
num_nodes=args.num_nodes,
sync_batchnorm=config.TRAINER.WORLD_SIZE > 0),
gradient_clip_val=config.TRAINER.GRADIENT_CLIPPING,
callbacks=callbacks,
logger=logger,
sync_batchnorm=config.TRAINER.WORLD_SIZE > 0,
replace_sampler_ddp=False, # use custom sampler
reload_dataloaders_every_epoch=False, # avoid repeated samples!
weights_summary='full',
profiler=profiler)
loguru_logger.info(f"Trainer initialized!")
loguru_logger.info(f"Start training!")
trainer.fit(model, datamodule=data_module)
if __name__ == '__main__':
main()
| 5,270 | 41.168 | 111 | py |
3DG-STFM | 3DG-STFM-master/train_rgb.py | import math
import argparse
import pprint
from distutils.util import strtobool
from pathlib import Path
from loguru import logger as loguru_logger
import pytorch_lightning as pl
from pytorch_lightning.utilities import rank_zero_only
from pytorch_lightning.loggers import TensorBoardLogger
from pytorch_lightning.callbacks import ModelCheckpoint, LearningRateMonitor
from pytorch_lightning.plugins import DDPPlugin
from src.config.default import get_cfg_defaults
from src.utils.misc import get_rank_zero_only_logger, setup_gpus
from src.utils.profiler import build_profiler
from src.lightning.data import RGBDataModule
from src.lightning.lightning_loftr import PL_LoFTR_RGB
loguru_logger = get_rank_zero_only_logger(loguru_logger)
def parse_args():
# init a custom parser which will be added into pl.Trainer parser
# check documentation: https://pytorch-lightning.readthedocs.io/en/latest/common/trainer.html#trainer-flags
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument(
'data_cfg_path', type=str, help='data config path')
parser.add_argument(
'main_cfg_path', type=str, help='main config path')
parser.add_argument(
'--exp_name', type=str, default='default_exp_name')
parser.add_argument(
'--batch_size', type=int, default=4, help='batch_size per gpu')
parser.add_argument(
'--num_workers', type=int, default=4)
parser.add_argument(
'--pin_memory', type=lambda x: bool(strtobool(x)),
nargs='?', default=True, help='whether to load data into pinned memory')
parser.add_argument(
'--ckpt_path', type=str, default=None,
help='pretrained checkpoint path, helpful for using a pre-trained coarse-only LoFTR')
parser.add_argument(
'--disable_ckpt', action='store_true',
help='disable checkpoint saving (useful for debugging).')
parser.add_argument(
'--profiler_name', type=str, default=None,
help='options: [inference, pytorch], or leave it unset')
parser.add_argument(
'--parallel_load_data', action='store_true',
help='load datasets with multiple processes.')
parser = pl.Trainer.add_argparse_args(parser)
return parser.parse_args()
def main():
# parse arguments
args = parse_args()
rank_zero_only(pprint.pprint)(vars(args))
# init default-cfg and merge it with the main- and data-cfg
config = get_cfg_defaults()
config.merge_from_file(args.main_cfg_path)
config.merge_from_file(args.data_cfg_path)
pl.seed_everything(config.TRAINER.SEED) # reproducibility
# This is needed for data augmentation
# scale lr and warmup-step automatically
args.gpus = _n_gpus = setup_gpus(args.gpus)
config.TRAINER.WORLD_SIZE = _n_gpus * args.num_nodes
config.TRAINER.TRUE_BATCH_SIZE = config.TRAINER.WORLD_SIZE * args.batch_size
_scaling = config.TRAINER.TRUE_BATCH_SIZE / config.TRAINER.CANONICAL_BS
config.TRAINER.SCALING = _scaling
config.TRAINER.TRUE_LR = config.TRAINER.CANONICAL_LR * _scaling
config.TRAINER.WARMUP_STEP = math.floor(config.TRAINER.WARMUP_STEP / _scaling)
# lightning module
profiler = build_profiler(args.profiler_name)
model = PL_LoFTR_RGB(config, pretrained_ckpt=args.ckpt_path, profiler=profiler)
loguru_logger.info(f"LoFTR LightningModule initialized!")
# lightning data
data_module = RGBDataModule(args, config)
loguru_logger.info(f"LoFTR DataModule initialized!")
# TensorBoard Logger
logger = TensorBoardLogger(save_dir='logs/tb_logs', name=args.exp_name, default_hp_metric=False)
ckpt_dir = Path(logger.log_dir) / 'checkpoints'
# Callbacks
ckpt_callback = ModelCheckpoint(monitor='auc@10', verbose=True, save_top_k=5, mode='max',
save_last=True,
dirpath=str(ckpt_dir),
filename='{epoch}-{auc@5:.3f}-{auc@10:.3f}-{auc@20:.3f}')
lr_monitor = LearningRateMonitor(logging_interval='step')
callbacks = [lr_monitor]
if not args.disable_ckpt:
callbacks.append(ckpt_callback)
# Lightning Trainer
trainer = pl.Trainer.from_argparse_args(
args,
plugins=DDPPlugin(find_unused_parameters=False,
num_nodes=args.num_nodes,
sync_batchnorm=config.TRAINER.WORLD_SIZE > 0),
gradient_clip_val=config.TRAINER.GRADIENT_CLIPPING,
callbacks=callbacks,
logger=logger,
sync_batchnorm=config.TRAINER.WORLD_SIZE > 0,
replace_sampler_ddp=False, # use custom sampler
reload_dataloaders_every_epoch=False, # avoid repeated samples!
weights_summary='full',
profiler=profiler)
loguru_logger.info(f"Trainer initialized!")
loguru_logger.info(f"Start training!")
trainer.fit(model, datamodule=data_module)
if __name__ == '__main__':
main()
| 5,007 | 40.04918 | 111 | py |
3DG-STFM | 3DG-STFM-master/test_rgbd.py | import pytorch_lightning as pl
import argparse
import pprint
from loguru import logger as loguru_logger
from src.config.default import get_cfg_defaults
from src.utils.profiler import build_profiler
from src.lightning.data import MultiSceneDataModule, RGBDDataModule
from src.lightning.lightning_loftr import PL_LoFTR,PL_LoFTR_RGBD
def parse_args():
# init a custom parser which will be added into pl.Trainer parser
# check documentation: https://pytorch-lightning.readthedocs.io/en/latest/common/trainer.html#trainer-flags
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument(
'data_cfg_path', type=str, help='data config path')
parser.add_argument(
'main_cfg_path', type=str, help='main config path')
parser.add_argument(
'--ckpt_path', type=str, default="weights/indoor_ds.ckpt", help='path to the checkpoint')
parser.add_argument(
'--dump_dir', type=str, default=None, help="if set, the matching results will be dump to dump_dir")
parser.add_argument(
'--profiler_name', type=str, default=None, help='options: [inference, pytorch], or leave it unset')
parser.add_argument(
'--batch_size', type=int, default=1, help='batch_size per gpu')
parser.add_argument(
'--num_workers', type=int, default=2)
parser.add_argument(
'--thr', type=float, default=None, help='modify the coarse-level matching threshold.')
parser = pl.Trainer.add_argparse_args(parser)
return parser.parse_args()
if __name__ == '__main__':
# parse arguments
args = parse_args()
pprint.pprint(vars(args))
# init default-cfg and merge it with the main- and data-cfg
config = get_cfg_defaults()
config.merge_from_file(args.main_cfg_path)
config.merge_from_file(args.data_cfg_path)
pl.seed_everything(config.TRAINER.SEED) # reproducibility
# tune when testing
if args.thr is not None:
config.LOFTR.MATCH_COARSE.THR = args.thr
loguru_logger.info(f"Args and config initialized!")
# lightning module
profiler = build_profiler(args.profiler_name)
model = PL_LoFTR_RGBD(config, pretrained_ckpt=args.ckpt_path, profiler=profiler, dump_dir=args.dump_dir)
loguru_logger.info(f"LoFTR-lightning initialized!")
# lightning data
data_module = RGBDDataModule(args, config)
loguru_logger.info(f"DataModule initialized!")
# lightning trainer
trainer = pl.Trainer.from_argparse_args(args, replace_sampler_ddp=False, logger=False)
loguru_logger.info(f"Start testing!")
trainer.test(model, datamodule=data_module, verbose=False)
| 2,657 | 37.521739 | 111 | py |
3DG-STFM | 3DG-STFM-master/test_rgb.py | import pytorch_lightning as pl
import argparse
import pprint
from loguru import logger as loguru_logger
from src.config.default import get_cfg_defaults
from src.utils.profiler import build_profiler
from src.lightning.data import RGBDataModule
from src.lightning.lightning_loftr import PL_LoFTR_RGB
def parse_args():
# init a custom parser which will be added into pl.Trainer parser
# check documentation: https://pytorch-lightning.readthedocs.io/en/latest/common/trainer.html#trainer-flags
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument(
'data_cfg_path', type=str, help='data config path')
parser.add_argument(
'main_cfg_path', type=str, help='main config path')
parser.add_argument(
'--ckpt_path', type=str, default="weights/indoor_ds.ckpt", help='path to the checkpoint')
parser.add_argument(
'--dump_dir', type=str, default=None, help="if set, the matching results will be dump to dump_dir")
parser.add_argument(
'--profiler_name', type=str, default=None, help='options: [inference, pytorch], or leave it unset')
parser.add_argument(
'--batch_size', type=int, default=1, help='batch_size per gpu')
parser.add_argument(
'--num_workers', type=int, default=2)
parser.add_argument(
'--thr', type=float, default=None, help='modify the coarse-level matching threshold.')
parser = pl.Trainer.add_argparse_args(parser)
return parser.parse_args()
if __name__ == '__main__':
# parse arguments
args = parse_args()
pprint.pprint(vars(args))
# init default-cfg and merge it with the main- and data-cfg
config = get_cfg_defaults()
config.merge_from_file(args.main_cfg_path)
config.merge_from_file(args.data_cfg_path)
pl.seed_everything(config.TRAINER.SEED) # reproducibility
# tune when testing
if args.thr is not None:
config.LOFTR.MATCH_COARSE.THR = args.thr
loguru_logger.info(f"Args and config initialized!")
# lightning module
profiler = build_profiler(args.profiler_name)
model = PL_LoFTR_RGB(config, pretrained_ckpt=args.ckpt_path, profiler=profiler, dump_dir=args.dump_dir)
loguru_logger.info(f"LoFTR-lightning initialized!")
# lightning data
data_module = RGBDataModule(args, config)
loguru_logger.info(f"DataModule initialized!")
# lightning trainer
trainer = pl.Trainer.from_argparse_args(args, replace_sampler_ddp=False, logger=False)
loguru_logger.info(f"Start testing!")
trainer.test(model, datamodule=data_module, verbose=False)
| 2,622 | 37.014493 | 111 | py |
3DG-STFM | 3DG-STFM-master/demo.py | import os
import torch
import cv2
import numpy as np
import matplotlib.cm as cm
import matplotlib.colors
import matplotlib.lines # Line2D is used in make_matching_figure; be explicit rather than relying on pyplot's implicit import
from src.loftr import default_cfg, LoFTR_RGBD, LoFTR_RGB
import matplotlib.pyplot as plt
def make_matching_figure(
img0, img1, mkpts0, mkpts1, color,
kpts0=None, kpts1=None, text=[], dpi=75, path=None):
# draw image pair
assert mkpts0.shape[0] == mkpts1.shape[0], f'mkpts0: {mkpts0.shape[0]} v.s. mkpts1: {mkpts1.shape[0]}'
fig, axes = plt.subplots(1, 2, figsize=(10, 6), dpi=dpi)
axes[0].imshow(img0)
axes[1].imshow(img1)
for i in range(2): # clear all frames
axes[i].get_yaxis().set_ticks([])
axes[i].get_xaxis().set_ticks([])
for spine in axes[i].spines.values():
spine.set_visible(False)
plt.tight_layout(pad=1)
if kpts0 is not None:
assert kpts1 is not None
axes[0].scatter(kpts0[:, 0], kpts0[:, 1], c='w', s=2)
axes[1].scatter(kpts1[:, 0], kpts1[:, 1], c='w', s=2)
# draw matches
if mkpts0.shape[0] != 0 and mkpts1.shape[0] != 0:
fig.canvas.draw()
transFigure = fig.transFigure.inverted()
fkpts0 = transFigure.transform(axes[0].transData.transform(mkpts0))
fkpts1 = transFigure.transform(axes[1].transData.transform(mkpts1))
fig.lines = [matplotlib.lines.Line2D((fkpts0[i, 0], fkpts1[i, 0]),
(fkpts0[i, 1], fkpts1[i, 1]),
transform=fig.transFigure, c='b', linewidth=0.5,alpha=0.3)
for i in range(len(mkpts0))]
axes[0].scatter(mkpts0[:, 0], mkpts0[:, 1], c=color, s=5)
axes[1].scatter(mkpts1[:, 0], mkpts1[:, 1], c=color, s=5)
# put txts
txt_color = 'k' if img0[:100, :200].mean() > 200 else 'w'
fig.text(
0.01, 0.99, '\n'.join(text), transform=fig.axes[0].transAxes,
fontsize=15, va='top', ha='left', color=txt_color)
# save or return figure
if path:
plt.savefig(str(path), bbox_inches='tight', pad_inches=0)
print('saved', os.getcwd(), path)
plt.close()
else:
return fig
def pose_filter(mkpts0, mkpts1, K0, K1):
mkpts0 = (mkpts0 - K0[[0, 1], [2, 2]][None]) / K0[[0, 1], [0, 1]][None]
mkpts1 = (mkpts1 - K1[[0, 1], [2, 2]][None]) / K1[[0, 1], [0, 1]][None]
ransac_thr = 0.5 / np.mean([K0[0, 0], K1[1, 1], K0[0, 0], K1[1, 1]])
if len(mkpts0) < 6:
E = None
mask = None
else:
E, mask = cv2.findEssentialMat(
mkpts0, mkpts1, np.eye(3), threshold=ransac_thr, prob=0.99999, method=cv2.RANSAC)
return E, mask
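# Note on pose_filter above: both keypoint sets are normalized by their
# intrinsics, so cv2.findEssentialMat() runs with an identity camera matrix and
# a RANSAC threshold of 0.5 px divided by the mean focal length (i.e. expressed
# in normalized coordinates). Hypothetical usage with a 640x480 pinhole camera:
# K = np.array([[585., 0., 320.], [0., 585., 240.], [0., 0., 1.]])
# E, mask = pose_filter(mkpts0, mkpts1, K, K) # mask marks the RANSAC inliers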
root_dir = 'inference/'
pretrained_ckpt = "weights/indoor_student.ckpt"
matcher = LoFTR_RGB(config=default_cfg)
img0_pth, img1_pth = 'demo1.jpg','demo2.jpg'
img0_pth, img1_pth = root_dir + img0_pth, root_dir + img1_pth
sd = torch.load(pretrained_ckpt, map_location='cpu')['state_dict']
from collections import OrderedDict
new_state_dict = OrderedDict()
for k, v in sd.items():
name = k[8:] # remove `matcher.`
new_state_dict[name] = v
matcher.load_state_dict(new_state_dict, strict=False)
matcher = matcher.eval().cuda()
img0_raw = cv2.imread(img0_pth, cv2.IMREAD_COLOR)
img1_raw = cv2.imread(img1_pth, cv2.IMREAD_COLOR)
img0_raw = cv2.resize(img0_raw, (640, 480))
img1_raw = cv2.resize(img1_raw, (640, 480))
img0 = cv2.cvtColor(img0_raw, cv2.COLOR_BGR2RGB)
img1 = cv2.cvtColor(img1_raw, cv2.COLOR_BGR2RGB)
img0 = np.ascontiguousarray(img0)
img1 = np.ascontiguousarray(img1)
mean = (0.485, 0.456, 0.406)
std = (0.229, 0.224, 0.225)
img0 = img0.astype(float)
img1 = img1.astype(float)
img0[:, :, 0] = (img0[:, :, 0] / 255. - mean[0]) / std[0]
img0[:, :, 1] = (img0[:, :, 1] / 255. - mean[1]) / std[1]
img0[:, :, 2] = (img0[:, :, 2] / 255. - mean[2]) / std[2]
img1[:, :, 0] = (img1[:, :, 0] / 255. - mean[0]) / std[0]
img1[:, :, 1] = (img1[:, :, 1] / 255. - mean[1]) / std[1]
img1[:, :, 2] = (img1[:, :, 2] / 255. - mean[2]) / std[2]
img0 = torch.from_numpy(img0).float()[None].cuda()
img1 = torch.from_numpy(img1).float()[None].cuda()
img0 = img0.permute(0, 3, 1, 2)
img1 = img1.permute(0, 3, 1, 2)
batch = {'image0': img0, 'image1': img1}
# Inference with LoFTR and get prediction
with torch.no_grad():
matcher(batch)
mkpts0 = batch['mkpts0_f'].cpu().numpy()
mkpts1 = batch['mkpts1_f'].cpu().numpy()
mconf = batch['mconf'].cpu().numpy()
#_, mask = pose_filter(mkpts0, mkpts1, K0, K1)
# ind_mask = np.where(mask == 1)
# mkpts0 = mkpts0[ind_mask[0], :]
# mkpts1 = mkpts1[ind_mask[0], :]
# mconf = mconf[ind_mask[0]]
# Draw
if len(mconf) > 0: # comparing a NumPy array to [] is unreliable; check its length instead
mconf=(mconf-mconf.min())/(mconf.max()-mconf.min())
color = cm.jet(mconf)
text = [
'3DG-STFM',
'Matches: {}'.format(len(mkpts0)),
]
fig = make_matching_figure(img0_raw, img1_raw, mkpts0, mkpts1, color, text=text,
path='demo.png')
| 4,874 | 33.574468 | 106 | py |
3DG-STFM | 3DG-STFM-master/train_rgbd.py | import math
import argparse
import pprint
from distutils.util import strtobool
from pathlib import Path
from loguru import logger as loguru_logger
import pytorch_lightning as pl
from pytorch_lightning.utilities import rank_zero_only
from pytorch_lightning.loggers import TensorBoardLogger
from pytorch_lightning.callbacks import ModelCheckpoint, LearningRateMonitor
from pytorch_lightning.plugins import DDPPlugin
from src.config.default import get_cfg_defaults
from src.utils.misc import get_rank_zero_only_logger, setup_gpus
from src.utils.profiler import build_profiler
from src.lightning.data import RGBDDataModule
from src.lightning.lightning_loftr import PL_LoFTR_RGBD
loguru_logger = get_rank_zero_only_logger(loguru_logger)
def parse_args():
# init a custom parser which will be added into pl.Trainer parser
# check documentation: https://pytorch-lightning.readthedocs.io/en/latest/common/trainer.html#trainer-flags
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument(
'data_cfg_path', type=str, help='data config path')
parser.add_argument(
'main_cfg_path', type=str, help='main config path')
parser.add_argument(
'--exp_name', type=str, default='default_exp_name')
parser.add_argument(
'--batch_size', type=int, default=4, help='batch_size per gpu')
parser.add_argument(
'--num_workers', type=int, default=4)
parser.add_argument(
'--pin_memory', type=lambda x: bool(strtobool(x)),
nargs='?', default=True, help='whether to load data into pinned memory')
parser.add_argument(
'--ckpt_path', type=str, default=None,
help='pretrained checkpoint path, helpful for using a pre-trained coarse-only LoFTR')
parser.add_argument(
'--disable_ckpt', action='store_true',
help='disable checkpoint saving (useful for debugging).')
parser.add_argument(
'--profiler_name', type=str, default=None,
help='options: [inference, pytorch], or leave it unset')
parser.add_argument(
'--parallel_load_data', action='store_true',
help='load datasets with multiple processes.')
parser = pl.Trainer.add_argparse_args(parser)
return parser.parse_args()
def main():
# parse arguments
args = parse_args()
rank_zero_only(pprint.pprint)(vars(args))
# init default-cfg and merge it with the main- and data-cfg
config = get_cfg_defaults()
config.merge_from_file(args.main_cfg_path)
config.merge_from_file(args.data_cfg_path)
pl.seed_everything(config.TRAINER.SEED) # reproducibility
# TODO: Use different seeds for each dataloader workers
# This is needed for data augmentation
# scale lr and warmup-step automatically
args.gpus = _n_gpus = setup_gpus(args.gpus)
config.TRAINER.WORLD_SIZE = _n_gpus * args.num_nodes
config.TRAINER.TRUE_BATCH_SIZE = config.TRAINER.WORLD_SIZE * args.batch_size
_scaling = config.TRAINER.TRUE_BATCH_SIZE / config.TRAINER.CANONICAL_BS
config.TRAINER.SCALING = _scaling
config.TRAINER.TRUE_LR = config.TRAINER.CANONICAL_LR * _scaling
config.TRAINER.WARMUP_STEP = math.floor(config.TRAINER.WARMUP_STEP / _scaling)
# lightning module
profiler = build_profiler(args.profiler_name)
model = PL_LoFTR_RGBD(config, pretrained_ckpt=args.ckpt_path, profiler=profiler)
loguru_logger.info(f"LoFTR LightningModule initialized!")
# lightning data
data_module = RGBDDataModule(args, config)
loguru_logger.info(f"LoFTR DataModule initialized!")
# TensorBoard Logger
logger = TensorBoardLogger(save_dir='logs/tb_logs', name=args.exp_name, default_hp_metric=False)
ckpt_dir = Path(logger.log_dir) / 'checkpoints'
# Callbacks
# TODO: update ModelCheckpoint to monitor multiple metrics
ckpt_callback = ModelCheckpoint(monitor='auc@10', verbose=True, save_top_k=5, mode='max',
save_last=True,
dirpath=str(ckpt_dir),
filename='{epoch}-{auc@5:.3f}-{auc@10:.3f}-{auc@20:.3f}')
lr_monitor = LearningRateMonitor(logging_interval='step')
callbacks = [lr_monitor]
if not args.disable_ckpt:
callbacks.append(ckpt_callback)
# Lightning Trainer
trainer = pl.Trainer.from_argparse_args(
args,
plugins=DDPPlugin(find_unused_parameters=False,
num_nodes=args.num_nodes,
sync_batchnorm=config.TRAINER.WORLD_SIZE > 0),
gradient_clip_val=config.TRAINER.GRADIENT_CLIPPING,
callbacks=callbacks,
logger=logger,
sync_batchnorm=config.TRAINER.WORLD_SIZE > 0,
replace_sampler_ddp=False, # use custom sampler
reload_dataloaders_every_epoch=False, # avoid repeated samples!
weights_summary='full',
profiler=profiler)
loguru_logger.info(f"Trainer initialized!")
loguru_logger.info(f"Start training!")
trainer.fit(model, datamodule=data_module)
if __name__ == '__main__':
main()
| 5,134 | 40.41129 | 111 | py |
3DG-STFM | 3DG-STFM-master/src/config/default.py | from yacs.config import CfgNode as CN
_CN = CN()
############## ↓ LoFTR Pipeline ↓ ##############
_CN.LOFTR = CN()
_CN.LOFTR.BACKBONE_TYPE = 'ResNetFPN'
_CN.LOFTR.RESOLUTION = (8, 2) # options: [(8, 2), (16, 4)]
_CN.LOFTR.FINE_WINDOW_SIZE = 5 # window_size in fine_level, must be odd
_CN.LOFTR.FINE_CONCAT_COARSE_FEAT = True
# 1. LoFTR-backbone (local feature CNN) config
_CN.LOFTR.RESNETFPN = CN()
_CN.LOFTR.RESNETFPN.INITIAL_DIM = 128
_CN.LOFTR.RESNETFPN.BLOCK_DIMS = [128, 196, 256] # s1, s2, s3
# 2. LoFTR-coarse module config
_CN.LOFTR.COARSE = CN()
_CN.LOFTR.COARSE.D_MODEL = 256
_CN.LOFTR.COARSE.D_FFN = 256
_CN.LOFTR.COARSE.NHEAD = 8
_CN.LOFTR.COARSE.LAYER_NAMES = ['self', 'cross'] * 4
_CN.LOFTR.COARSE.ATTENTION = 'linear' # options: ['linear', 'full']
# 3. Coarse-Matching config
_CN.LOFTR.MATCH_COARSE = CN()
_CN.LOFTR.MATCH_COARSE.THR = 0.2
_CN.LOFTR.MATCH_COARSE.BORDER_RM = 2
_CN.LOFTR.MATCH_COARSE.MATCH_TYPE = 'dual_softmax' # options: ['dual_softmax, 'sinkhorn']
_CN.LOFTR.MATCH_COARSE.DSMAX_TEMPERATURE = 0.1
_CN.LOFTR.MATCH_COARSE.SKH_ITERS = 3
_CN.LOFTR.MATCH_COARSE.SKH_INIT_BIN_SCORE = 1.0
_CN.LOFTR.MATCH_COARSE.SKH_PREFILTER = False
_CN.LOFTR.MATCH_COARSE.TRAIN_COARSE_PERCENT = 0.2 # training tricks: save GPU memory
_CN.LOFTR.MATCH_COARSE.TRAIN_PAD_NUM_GT_MIN = 200 # training tricks: avoid DDP deadlock
_CN.LOFTR.MATCH_COARSE.SPARSE_SPVS = True
# 4. LoFTR-fine module config
_CN.LOFTR.FINE = CN()
_CN.LOFTR.FINE.D_MODEL = 128
_CN.LOFTR.FINE.D_FFN = 128
_CN.LOFTR.FINE.NHEAD = 8
_CN.LOFTR.FINE.LAYER_NAMES = ['self', 'cross'] * 1
_CN.LOFTR.FINE.ATTENTION = 'linear'
# 5. LoFTR Losses
# -- # coarse-level
_CN.LOFTR.LOSS = CN()
_CN.LOFTR.LOSS.COARSE_TYPE = 'focal' # ['focal', 'cross_entropy']
_CN.LOFTR.LOSS.COARSE_WEIGHT = 1.0
# _CN.LOFTR.LOSS.SPARSE_SPVS = False
# -- - -- # focal loss (coarse)
_CN.LOFTR.LOSS.FOCAL_ALPHA = 0.25
_CN.LOFTR.LOSS.FOCAL_GAMMA = 2.0
_CN.LOFTR.LOSS.POS_WEIGHT = 1.0
_CN.LOFTR.LOSS.NEG_WEIGHT = 1.0
# _CN.LOFTR.LOSS.DUAL_SOFTMAX = False # whether coarse-level use dual-softmax or not.
# use `_CN.LOFTR.MATCH_COARSE.MATCH_TYPE`
# -- # fine-level
_CN.LOFTR.LOSS.FINE_TYPE = 'l2_with_std' # ['l2_with_std', 'l2']
_CN.LOFTR.LOSS.FINE_WEIGHT = 1.0
_CN.LOFTR.LOSS.FINE_CORRECT_THR = 1.0 # for filtering valid fine-level gts (some gt matches might fall out of the fine-level window)
############## Dataset ##############
_CN.DATASET = CN()
# 1. data config
# training and validating
_CN.DATASET.TRAINVAL_DATA_SOURCE = None # options: ['ScanNet', 'MegaDepth']
_CN.DATASET.TRAIN_DATA_ROOT = None
_CN.DATASET.TRAIN_POSE_ROOT = None # (optional directory for poses)
_CN.DATASET.TRAIN_NPZ_ROOT = None
_CN.DATASET.TRAIN_LIST_PATH = None
_CN.DATASET.TRAIN_INTRINSIC_PATH = None
_CN.DATASET.VAL_DATA_ROOT = None
_CN.DATASET.VAL_POSE_ROOT = None # (optional directory for poses)
_CN.DATASET.VAL_NPZ_ROOT = None
_CN.DATASET.VAL_LIST_PATH = None # None if val data from all scenes are bundled into a single npz file
_CN.DATASET.VAL_INTRINSIC_PATH = None
# testing
_CN.DATASET.TEST_DATA_SOURCE = None
_CN.DATASET.TEST_DATA_ROOT = None
_CN.DATASET.TEST_POSE_ROOT = None # (optional directory for poses)
_CN.DATASET.TEST_NPZ_ROOT = None
_CN.DATASET.TEST_LIST_PATH = None # None if test data from all scenes are bundled into a single npz file
_CN.DATASET.TEST_INTRINSIC_PATH = None
# 2. dataset config
# general options
_CN.DATASET.MIN_OVERLAP_SCORE_TRAIN = 0.4 # discard data with overlap_score < min_overlap_score
_CN.DATASET.MIN_OVERLAP_SCORE_TEST = 0.0
_CN.DATASET.AUGMENTATION_TYPE = None # options: [None, 'dark', 'mobile']
# MegaDepth options
_CN.DATASET.MGDPT_IMG_RESIZE = 640 # resize the longer side, zero-pad bottom-right to square.
_CN.DATASET.MGDPT_IMG_PAD = True # pad img to square with size = MGDPT_IMG_RESIZE
_CN.DATASET.MGDPT_DEPTH_PAD = True # pad depthmap to square with size = 2000
_CN.DATASET.MGDPT_DF = 8
############## Trainer ##############
_CN.TRAINER = CN()
_CN.TRAINER.WORLD_SIZE = 1
_CN.TRAINER.CANONICAL_BS = 64
_CN.TRAINER.CANONICAL_LR = 6e-3 #6e-3
_CN.TRAINER.SCALING = None # this will be calculated automatically
_CN.TRAINER.FIND_LR = False # use learning rate finder from pytorch-lightning
# optimizer
_CN.TRAINER.OPTIMIZER = "adamw" # [adam, adamw]
_CN.TRAINER.TRUE_LR = None # this will be calculated automatically at runtime
_CN.TRAINER.ADAM_DECAY = 0. # ADAM: for adam
_CN.TRAINER.ADAMW_DECAY = 0.1
# step-based warm-up
_CN.TRAINER.WARMUP_TYPE = 'linear' # [linear, constant]
_CN.TRAINER.WARMUP_RATIO = 0.
_CN.TRAINER.WARMUP_STEP = 4800#4800
# learning rate scheduler
_CN.TRAINER.SCHEDULER = 'MultiStepLR' # [MultiStepLR, CosineAnnealing, ExponentialLR]
_CN.TRAINER.SCHEDULER_INTERVAL = 'epoch' # [epoch, step]
_CN.TRAINER.MSLR_MILESTONES = [3, 6, 9, 12] # MSLR: MultiStepLR
_CN.TRAINER.MSLR_GAMMA = 0.5
_CN.TRAINER.COSA_TMAX = 30 # COSA: CosineAnnealing
_CN.TRAINER.ELR_GAMMA = 0.999992 # ELR: ExponentialLR, this value for 'step' interval
# plotting related
_CN.TRAINER.ENABLE_PLOTTING = True
_CN.TRAINER.N_VAL_PAIRS_TO_PLOT = 32 # number of val/test paris for plotting
_CN.TRAINER.PLOT_MODE = 'evaluation' # ['evaluation', 'confidence']
_CN.TRAINER.PLOT_MATCHES_ALPHA = 'dynamic'
# geometric metrics and pose solver
_CN.TRAINER.EPI_ERR_THR = 5e-4 # recommendation: 5e-4 for ScanNet, 1e-4 for MegaDepth (from SuperGlue)
_CN.TRAINER.POSE_GEO_MODEL = 'E' # ['E', 'F', 'H']
_CN.TRAINER.POSE_ESTIMATION_METHOD = 'RANSAC' # [RANSAC, DEGENSAC, MAGSAC]
_CN.TRAINER.RANSAC_PIXEL_THR = 0.5
_CN.TRAINER.RANSAC_CONF = 0.99999
_CN.TRAINER.RANSAC_MAX_ITERS = 10000
_CN.TRAINER.USE_MAGSACPP = False
# data sampler for train_dataloader
_CN.TRAINER.DATA_SAMPLER = 'scene_balance' # options: ['scene_balance', 'random', 'normal']
# 'scene_balance' config
_CN.TRAINER.N_SAMPLES_PER_SUBSET = 200
_CN.TRAINER.SB_SUBSET_SAMPLE_REPLACEMENT = True # whether sample each scene with replacement or not
_CN.TRAINER.SB_SUBSET_SHUFFLE = True # after sampling from scenes, whether shuffle within the epoch or not
_CN.TRAINER.SB_REPEAT = 1 # repeat N times for training the sampled data
# 'random' config
_CN.TRAINER.RDM_REPLACEMENT = True
_CN.TRAINER.RDM_NUM_SAMPLES = None
# gradient clipping
_CN.TRAINER.GRADIENT_CLIPPING = 0.5
# reproducibility
# This seed affects the data sampling. With the same seed, the data sampling is promised
# to be the same. When resume training from a checkpoint, it's better to use a different
# seed, otherwise the sampled data will be exactly the same as before resuming, which will
# cause less unique data items sampled during the entire training.
# Use of different seed values might affect the final training result, since not all data items
# are used during training on ScanNet. (60M pairs of images sampled during traing from 230M pairs in total.)
_CN.TRAINER.SEED = 66
def get_cfg_defaults():
"""Get a yacs CfgNode object with default values for my_project."""
# Return a clone so that the defaults will not be altered
# This is for the "local variable" use pattern
return _CN.clone()
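# Illustrative usage sketch (the YAML paths are hypothetical; the real call
# sites are the train/test scripts in this repo):
# cfg = get_cfg_defaults()
# cfg.merge_from_file("configs/main_cfg.yaml") # main config
# cfg.merge_from_file("configs/data_cfg.yaml") # data config
# cfg.LOFTR.MATCH_COARSE.THR = 0.2 # per-run override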
| 7,068 | 40.339181 | 133 | py |
3DG-STFM | 3DG-STFM-master/src/datasets/sampler.py | import torch
from torch.utils.data import Sampler, ConcatDataset
class RandomConcatSampler(Sampler):
""" Random sampler for ConcatDataset. At each epoch, `n_samples_per_subset` samples will be draw from each subset
in the ConcatDataset. If `subset_replacement` is ``True``, sampling within each subset will be done with replacement.
However, it is impossible to sample data without replacement between epochs, unless bulding a stateful sampler lived along the entire training phase.
For current implementation, the randomness of sampling is ensured no matter the sampler is recreated across epochs or not and call `torch.manual_seed()` or not.
Args:
shuffle (bool): shuffle the random sampled indices across all sub-datsets.
repeat (int): repeatedly use the sampled indices multiple times for training.
[arXiv:1902.05509, arXiv:1901.09335]
NOTE: Don't re-initialize the sampler between epochs (will lead to repeated samples)
NOTE: This sampler behaves differently with DistributedSampler.
It assume the dataset is splitted across ranks instead of replicated.
TODO: Add a `set_epoch()` method to fullfill sampling without replacement across epochs.
ref: https://github.com/PyTorchLightning/pytorch-lightning/blob/e9846dd758cfb1500eb9dba2d86f6912eb487587/pytorch_lightning/trainer/training_loop.py#L373
"""
def __init__(self,
data_source: ConcatDataset,
n_samples_per_subset: int,
subset_replacement: bool=True,
shuffle: bool=True,
repeat: int=1,
seed: int=None):
if not isinstance(data_source, ConcatDataset):
raise TypeError("data_source should be torch.utils.data.ConcatDataset")
self.data_source = data_source
self.n_subset = len(self.data_source.datasets)
self.n_samples_per_subset = n_samples_per_subset
self.n_samples = self.n_subset * self.n_samples_per_subset * repeat
self.subset_replacement = subset_replacement
self.repeat = repeat
self.shuffle = shuffle
self.generator = torch.manual_seed(seed)
assert self.repeat >= 1
def __len__(self):
return self.n_samples
def __iter__(self):
indices = []
# sample from each sub-dataset
for d_idx in range(self.n_subset):
low = 0 if d_idx==0 else self.data_source.cumulative_sizes[d_idx-1]
high = self.data_source.cumulative_sizes[d_idx]
if self.subset_replacement:
rand_tensor = torch.randint(low, high, (self.n_samples_per_subset, ),
generator=self.generator, dtype=torch.int64)
else: # sample without replacement
len_subset = len(self.data_source.datasets[d_idx])
rand_tensor = torch.randperm(len_subset, generator=self.generator) + low
if len_subset >= self.n_samples_per_subset:
rand_tensor = rand_tensor[:self.n_samples_per_subset]
else: # padding with replacement
rand_tensor_replacement = torch.randint(low, high, (self.n_samples_per_subset - len_subset, ),
generator=self.generator, dtype=torch.int64)
rand_tensor = torch.cat([rand_tensor, rand_tensor_replacement])
indices.append(rand_tensor)
indices = torch.cat(indices)
if self.shuffle: # shuffle the sampled dataset (from multiple subsets)
rand_tensor = torch.randperm(len(indices), generator=self.generator)
indices = indices[rand_tensor]
# repeat the sampled indices (can be used for RepeatAugmentation or pure RepeatSampling)
if self.repeat > 1:
repeat_indices = [indices.clone() for _ in range(self.repeat - 1)]
if self.shuffle:
_choice = lambda x: x[torch.randperm(len(x), generator=self.generator)]
repeat_indices = map(_choice, repeat_indices)
indices = torch.cat([indices, *repeat_indices], 0)
assert indices.shape[0] == self.n_samples
return iter(indices.tolist())
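# Illustrative usage sketch (the two scene datasets are hypothetical):
# concat = ConcatDataset([scene_a_dataset, scene_b_dataset])
# sampler = RandomConcatSampler(concat, n_samples_per_subset=200,
# subset_replacement=True, shuffle=True, repeat=1, seed=66)
# loader = torch.utils.data.DataLoader(concat, batch_size=4, sampler=sampler)
# Each epoch then draws 200 indices per sub-dataset (400 in total here) and
# shuffles them across the two subsets.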
| 4,293 | 54.051282 | 164 | py |
3DG-STFM | 3DG-STFM-master/src/datasets/megadepth.py | import os.path as osp
import numpy as np
import torch
import torch.nn.functional as F
from torch.utils.data import Dataset
from loguru import logger
import cv2
from src.utils.dataset import read_megadepth_gray, read_megadepth_depth, read_megadepth_rgb
class MegaDepth_RGB_Dataset(Dataset):
def __init__(self,
root_dir,
npz_path,
mode='train',
min_overlap_score=0.4,
img_resize=None,
df=None,
img_padding=False,
depth_padding=False,
augment_fn=None,
**kwargs):
"""
Manage one scene(npz_path) of MegaDepth dataset.
Args:
root_dir (str): megadepth root directory that has `phoenix`.
npz_path (str): {scene_id}.npz path. This contains image pair information of a scene.
mode (str): options are ['train', 'val', 'test']
min_overlap_score (float): how much a pair should have in common. In range of [0, 1]. Set to 0 when testing.
img_resize (int, optional): the longer edge of resized images. None for no resize. 640 is recommended.
This is useful during training with batches and testing with memory intensive algorithms.
df (int, optional): image size division factor. NOTE: this will change the final image size after img_resize.
img_padding (bool): If set to 'True', zero-pad the image to squared size. This is useful during training.
depth_padding (bool): If set to 'True', zero-pad depthmap to (2000, 2000). This is useful during training.
augment_fn (callable, optional): augments images with pre-defined visual effects.
"""
super().__init__()
self.root_dir = root_dir
self.mode = mode
self.scene_id = npz_path.split('.')[0]
# prepare scene_info and pair_info
if mode == 'test' and min_overlap_score != 0:
logger.warning("You are using `min_overlap_score`!=0 in test mode. Set to 0.")
min_overlap_score = 0
self.scene_info = np.load(npz_path, allow_pickle=True)
self.pair_infos = self.scene_info['pair_infos'].copy()
del self.scene_info['pair_infos']
self.pair_infos = [pair_info for pair_info in self.pair_infos if pair_info[1] > min_overlap_score]
# parameters for image resizing, padding and depthmap padding
if mode == 'train':
assert img_resize is not None and img_padding and depth_padding
self.img_resize = img_resize
self.df = df
self.img_padding = img_padding
self.depth_max_size = 2000 if depth_padding else None # the upperbound of depthmaps size in megadepth.
# for training LoFTR
self.augment_fn = augment_fn if mode == 'train' else None
self.coarse_scale = kwargs.get('coarse_scale', 0.125) # getattr() on a dict would always fall back to the default
def __len__(self):
return len(self.pair_infos)
def __getitem__(self, idx):
(idx0, idx1), overlap_score, central_matches = self.pair_infos[idx]
# read grayscale image and mask. (1, h, w) and (h, w)
img_name0 = osp.join(self.root_dir, self.scene_info['image_paths'][idx0])
img_name1 = osp.join(self.root_dir, self.scene_info['image_paths'][idx1])
# TODO: Support augmentation & handle seeds for each worker correctly.
image0, mask0, scale0 = read_megadepth_rgb(
img_name0, self.img_resize, self.df, self.img_padding, None)
# np.random.choice([self.augment_fn, None], p=[0.5, 0.5]))
image1, mask1, scale1 = read_megadepth_rgb(
img_name1, self.img_resize, self.df, self.img_padding, None)
# np.random.choice([self.augment_fn, None], p=[0.5, 0.5]))
# read depth. shape: (h, w)
if self.mode in ['train', 'val']:
depth0 = read_megadepth_depth(
osp.join(self.root_dir, self.scene_info['depth_paths'][idx0]), pad_to=self.depth_max_size)
depth1 = read_megadepth_depth(
osp.join(self.root_dir, self.scene_info['depth_paths'][idx1]), pad_to=self.depth_max_size)
else:
depth0 = depth1 = torch.tensor([])
# read intrinsics of original size
K_0 = torch.tensor(self.scene_info['intrinsics'][idx0].copy(), dtype=torch.float).reshape(3, 3)
K_1 = torch.tensor(self.scene_info['intrinsics'][idx1].copy(), dtype=torch.float).reshape(3, 3)
# read and compute relative poses
T0 = self.scene_info['poses'][idx0]
T1 = self.scene_info['poses'][idx1]
T_0to1 = torch.tensor(np.matmul(T1, np.linalg.inv(T0)), dtype=torch.float)[:4, :4] # (4, 4)
T_1to0 = T_0to1.inverse()
data = {
'image0': image0, # (3, h, w)
'depth0': depth0, # (h, w)
'image1': image1,
'depth1': depth1,
'T_0to1': T_0to1, # (4, 4)
'T_1to0': T_1to0,
'K0': K_0, # (3, 3)
'K1': K_1,
'scale0': scale0, # [scale_w, scale_h]
'scale1': scale1,
'dataset_name': 'MegaDepth',
'scene_id': self.scene_id,
'pair_id': idx,
'pair_names': (self.scene_info['image_paths'][idx0], self.scene_info['image_paths'][idx1]),
}
# for LoFTR training
if mask0 is not None: # img_padding is True
if self.coarse_scale:
[ts_mask_0, ts_mask_1] = F.interpolate(torch.stack([mask0[0], mask1[0]], dim=0)[None].float(),
scale_factor=self.coarse_scale,
mode='nearest',
recompute_scale_factor=False)[0].bool()
data.update({'mask0': ts_mask_0, 'mask1': ts_mask_1})
return data
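# Illustrative construction of the dataset above (paths are hypothetical; real
# values come from the DATASET.* entries in src/config/default.py):
# dataset = MegaDepth_RGB_Dataset(
# root_dir="data/megadepth/train",
# npz_path="data/megadepth/index/0000.npz",
# mode="train", min_overlap_score=0.4,
# img_resize=640, df=8, img_padding=True, depth_padding=True)
# sample = dataset[0] # dict with 'image0' (3,h,w), 'depth0', 'K0', 'T_0to1', ...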
class MegaDepth_RGBD_Dataset(Dataset):
def __init__(self,
root_dir,
npz_path,
mode='train',
min_overlap_score=0.4,
img_resize=None,
df=None,
img_padding=False,
depth_padding=False,
augment_fn=None,
**kwargs):
"""
Manage one scene(npz_path) of MegaDepth dataset.
Args:
root_dir (str): megadepth root directory that has `phoenix`.
npz_path (str): {scene_id}.npz path. This contains image pair information of a scene.
mode (str): options are ['train', 'val', 'test']
min_overlap_score (float): how much a pair should have in common. In range of [0, 1]. Set to 0 when testing.
img_resize (int, optional): the longer edge of resized images. None for no resize. 640 is recommended.
This is useful during training with batches and testing with memory intensive algorithms.
df (int, optional): image size division factor. NOTE: this will change the final image size after img_resize.
img_padding (bool): If set to 'True', zero-pad the image to squared size. This is useful during training.
depth_padding (bool): If set to 'True', zero-pad depthmap to (2000, 2000). This is useful during training.
augment_fn (callable, optional): augments images with pre-defined visual effects.
"""
super().__init__()
self.root_dir = root_dir
self.mode = mode
self.scene_id = npz_path.split('.')[0]
# prepare scene_info and pair_info
if mode == 'test' and min_overlap_score != 0:
logger.warning("You are using `min_overlap_score`!=0 in test mode. Set to 0.")
min_overlap_score = 0
self.scene_info = np.load(npz_path, allow_pickle=True)
self.pair_infos = self.scene_info['pair_infos'].copy()
del self.scene_info['pair_infos']
self.pair_infos = [pair_info for pair_info in self.pair_infos if pair_info[1] > min_overlap_score]
# parameters for image resizing, padding and depthmap padding
if mode == 'train':
assert img_resize is not None and img_padding and depth_padding
self.img_resize = img_resize
self.df = df
self.img_padding = img_padding
        self.depth_max_size = 2000 if depth_padding else None # the upper bound of depthmap sizes in MegaDepth.
# for training LoFTR
self.augment_fn = augment_fn if mode == 'train' else None
        self.coarse_scale = kwargs.get('coarse_scale', 0.125) # kwargs is a plain dict, so getattr() would always fall back to the default
def __len__(self):
return len(self.pair_infos)
def __getitem__(self, idx):
(idx0, idx1), overlap_score, central_matches = self.pair_infos[idx]
# read grayscale image and mask. (1, h, w) and (h, w)
img_name0 = osp.join(self.root_dir, self.scene_info['image_paths'][idx0])
img_name1 = osp.join(self.root_dir, self.scene_info['image_paths'][idx1])
# TODO: Support augmentation & handle seeds for each worker correctly.
image0, mask0, scale0 = read_megadepth_rgb(
img_name0, self.img_resize, self.df, self.img_padding, None)
# np.random.choice([self.augment_fn, None], p=[0.5, 0.5]))
image1, mask1, scale1 = read_megadepth_rgb(
img_name1, self.img_resize, self.df, self.img_padding, None)
# np.random.choice([self.augment_fn, None], p=[0.5, 0.5]))
# read depth. shape: (h, w)
        if self.mode in ['train', 'val', 'test']: # depth is needed even at test time: it becomes the 4th input channel below
depth0 = read_megadepth_depth(
osp.join(self.root_dir, self.scene_info['depth_paths'][idx0]), pad_to=self.depth_max_size)
depth1 = read_megadepth_depth(
osp.join(self.root_dir, self.scene_info['depth_paths'][idx1]), pad_to=self.depth_max_size)
else:
depth0 = depth1 = torch.tensor([])
# read intrinsics of original size
K_0 = torch.tensor(self.scene_info['intrinsics'][idx0].copy(), dtype=torch.float).reshape(3, 3)
K_1 = torch.tensor(self.scene_info['intrinsics'][idx1].copy(), dtype=torch.float).reshape(3, 3)
# read and compute relative poses
T0 = self.scene_info['poses'][idx0]
T1 = self.scene_info['poses'][idx1]
T_0to1 = torch.tensor(np.matmul(T1, np.linalg.inv(T0)), dtype=torch.float)[:4, :4] # (4, 4)
T_1to0 = T_0to1.inverse()
        # image0.shape[-2:] is (h, w); cv2.resize expects dsize=(width, height)
        resize_h, resize_w = image0.shape[-2:]
        #print(osp.join(self.root_dir, self.scene_info['depth_paths'][idx1]))
        resized_dpt0 = cv2.resize(np.float32(depth0), (resize_w, resize_h), interpolation=cv2.INTER_NEAREST)
        resized_dpt1 = cv2.resize(np.float32(depth1), (resize_w, resize_h), interpolation=cv2.INTER_NEAREST)
resized_dpt0 =np.clip(resized_dpt0, 0, 3e8)
resized_dpt1 = np.clip(resized_dpt1, 0, 3e8)
max_ele = max(resized_dpt0.max(),resized_dpt1.max())
min_ele = min(resized_dpt0.min(),resized_dpt1.min())
resized_dpt0 = (resized_dpt0-min_ele)/(max_ele-min_ele)
resized_dpt1 = (resized_dpt1-min_ele)/(max_ele-min_ele)
#resized_dpt0 = np.clip(resized_dpt0, 0.6, 350)
#resized_dpt1 = np.clip(resized_dpt1, 0.6, 350)
#resized_dpt0 = np.log(resized_dpt0+1)
#resized_dpt1 = np.log(resized_dpt1+1)
resized_dpt0 = torch.from_numpy(resized_dpt0).float()
resized_dpt1 = torch.from_numpy(resized_dpt1).float()
image0 = torch.cat((image0, resized_dpt0[None, ...]/1.), dim = 0)
image1 = torch.cat((image1, resized_dpt1[None, ...]/1.), dim = 0)
data = {
            'image0': image0, # (4, h, w): RGB + normalised depth
'depth0': depth0, # (h, w)
'image1': image1,
'depth1': depth1,
'T_0to1': T_0to1, # (4, 4)
'T_1to0': T_1to0,
'K0': K_0, # (3, 3)
'K1': K_1,
'scale0': scale0, # [scale_w, scale_h]
'scale1': scale1,
'dataset_name': 'MegaDepth',
'scene_id': self.scene_id,
'pair_id': idx,
'pair_names': (self.scene_info['image_paths'][idx0], self.scene_info['image_paths'][idx1]),
}
# for LoFTR training
if mask0 is not None: # img_padding is True
if self.coarse_scale:
[ts_mask_0, ts_mask_1] = F.interpolate(torch.stack([mask0[0], mask1[0]], dim=0)[None].float(),
scale_factor=self.coarse_scale,
mode='nearest',
recompute_scale_factor=False)[0].bool()
data.update({'mask0': ts_mask_0, 'mask1': ts_mask_1})
return data | 12,808 | 46.6171 | 129 | py |
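# A minimal sketch of the depth handling in MegaDepth_RGBD_Dataset.__getitem__
# above: each depthmap of a pair is clipped, min-max normalised jointly with
# its partner, and appended to the RGB tensor as a fourth channel (the
# cv2.INTER_NEAREST resize to the image resolution is omitted here for
# brevity). Shapes and value ranges are illustrative assumptions.
def _stack_rgbd_sketch(image, depth, pair_min, pair_max):
    """image: (3, h, w) float tensor; depth: (h, w) array already at image size."""
    import numpy as np
    import torch
    depth = np.clip(np.float32(depth), 0, 3e8)
    depth = (depth - pair_min) / max(pair_max - pair_min, 1e-8)  # pair-wise min/max
    return torch.cat((image, torch.from_numpy(depth)[None]), dim=0)  # (4, h, w)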
3DG-STFM | 3DG-STFM-master/src/datasets/scannet.py | from os import path as osp
from typing import Dict
from unicodedata import name
import numpy as np
import torch
import torch.utils as utils
from numpy.linalg import inv
from src.utils.dataset import (
read_scannet_rgb,
read_scannet_gray,
read_scannet_depth,
read_scannet_pose,
read_scannet_intrinsic
)
class ScanNet_RGB_Dataset(utils.data.Dataset):
def __init__(self,
root_dir,
npz_path,
intrinsic_path,
mode='train',
min_overlap_score=0.4,
augment_fn=None,
pose_dir=None,
**kwargs):
"""Manage one scene of ScanNet Dataset.
Args:
root_dir (str): ScanNet root directory that contains scene folders.
npz_path (str): {scene_id}.npz path. This contains image pair information of a scene.
intrinsic_path (str): path to depth-camera intrinsic file.
mode (str): options are ['train', 'val', 'test'].
augment_fn (callable, optional): augments images with pre-defined visual effects.
pose_dir (str): ScanNet root directory that contains all poses.
(we use a separate (optional) pose_dir since we store images and poses separately.)
"""
super().__init__()
self.root_dir = root_dir
self.pose_dir = pose_dir if pose_dir is not None else root_dir
self.mode = mode
# prepare data_names, intrinsics and extrinsics(T)
with np.load(npz_path) as data:
self.data_names = data['name']
            if 'score' in data.keys() and mode not in ['val', 'test']:
kept_mask = data['score'] > min_overlap_score
self.data_names = self.data_names[kept_mask]
self.intrinsics = dict(np.load(intrinsic_path))
# for training LoFTR
self.augment_fn = augment_fn if mode == 'train' else None
def __len__(self):
return len(self.data_names)
def _read_abs_pose(self, scene_name, name):
pth = osp.join(self.pose_dir,
scene_name,
'pose', f'{name}.txt')
return read_scannet_pose(pth)
def _compute_rel_pose(self, scene_name, name0, name1):
pose0 = self._read_abs_pose(scene_name, name0)
pose1 = self._read_abs_pose(scene_name, name1)
return np.matmul(pose1, inv(pose0)) # (4, 4)
def __getitem__(self, idx):
data_name = self.data_names[idx]
scene_name, scene_sub_name, stem_name_0, stem_name_1 = data_name
scene_name = f'scene{scene_name:04d}_{scene_sub_name:02d}'
        # read the RGB images (resized to 640x480; converted to channel-first below)
img_name0 = osp.join(self.root_dir, scene_name, 'color', f'{stem_name_0}.jpg')
img_name1 = osp.join(self.root_dir, scene_name, 'color', f'{stem_name_1}.jpg')
# img_name0 = osp.join(self.root_dir, scene_name, 'depth', f'{stem_name_0}.png')#depth image as color for inference--Runyu
# img_name1 = osp.join(self.root_dir, scene_name, 'depth', f'{stem_name_1}.png')#depth image as color for inference--Runyu
# TODO: Support augmentation & handle seeds for each worker correctly.
#print(img_name0,img_name1)
image0 = read_scannet_rgb(img_name0, resize=(640, 480), augment_fn=None)
# augment_fn=np.random.choice([self.augment_fn, None], p=[0.5, 0.5]))
image1 = read_scannet_rgb(img_name1, resize=(640, 480), augment_fn=None)
# augment_fn=np.random.choice([self.augment_fn, None], p=[0.5, 0.5]))
# read the depthmap which is stored as (480, 640)
        if self.mode in ['train', 'val', 'test']: # unlike the original LoFTR loader, depth is also read in 'test' mode
depth0 = read_scannet_depth(osp.join(self.root_dir, scene_name, 'depth', f'{stem_name_0}.png'))
depth1 = read_scannet_depth(osp.join(self.root_dir, scene_name, 'depth', f'{stem_name_1}.png'))
else:
depth0 = depth1 = torch.tensor([])
image0 = image0.permute(0, 3, 1, 2)
image1 = image1.permute(0, 3, 1, 2)
# depth0 = read_scannet_depth(osp.join(self.root_dir, scene_name, 'depth', f'{stem_name_0}.png'))
# depth1 = read_scannet_depth(osp.join(self.root_dir, scene_name, 'depth', f'{stem_name_1}.png'))
# image0 = depth0/10.
# image0 = image0[None]
# image1 = depth1/10.
# image1 = image1[None]
# read the intrinsic of depthmap
K_0 = K_1 = torch.tensor(self.intrinsics[scene_name].copy(), dtype=torch.float).reshape(3, 3)
# read and compute relative poses
T_0to1 = torch.tensor(self._compute_rel_pose(scene_name, stem_name_0, stem_name_1),
dtype=torch.float32)
T_1to0 = T_0to1.inverse()
#image0 = torch.cat((image0, depth0[None, ...]/10.), dim = 0)
#image1 = torch.cat((image1, depth1[None, ...]/10.), dim = 0)
data = {
            'image0': image0[0], # (3, h, w)
'depth0': depth0, # (h, w)
'image1': image1[0],
'depth1': depth1,
'T_0to1': T_0to1, # (4, 4)
'T_1to0': T_1to0,
'K0': K_0, # (3, 3)
'K1': K_1,
'dataset_name': 'ScanNet',
'scene_id': scene_name,
'pair_id': idx,
'pair_names': (osp.join(scene_name, 'color', f'{stem_name_0}.jpg'),
osp.join(scene_name, 'color', f'{stem_name_1}.jpg'))
}
return data
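# A minimal sketch of the relative-pose algebra used by _compute_rel_pose
# above: composing the two absolute poses as pose1 @ inv(pose0) yields the
# 0 -> 1 transform, whose inverse is the 1 -> 0 transform. The checks are
# purely algebraic and make no assumption about the pose convention returned
# by read_scannet_pose.
def _relative_pose_sketch(pose0, pose1):
    """pose0, pose1: (4, 4) absolute poses of the two frames."""
    import numpy as np
    T_0to1 = pose1 @ np.linalg.inv(pose0)
    T_1to0 = np.linalg.inv(T_0to1)
    assert np.allclose(T_0to1 @ pose0, pose1, atol=1e-6)       # composing back recovers pose1
    assert np.allclose(T_0to1 @ T_1to0, np.eye(4), atol=1e-6)  # the two transforms invert each other
    return T_0to1, T_1to0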
class ScanNet_RGBD_Dataset(utils.data.Dataset):
def __init__(self,
root_dir,
npz_path,
intrinsic_path,
mode='train',
min_overlap_score=0.4,
augment_fn=None,
pose_dir=None,
**kwargs):
"""Manage one scene of ScanNet Dataset.
Args:
root_dir (str): ScanNet root directory that contains scene folders.
npz_path (str): {scene_id}.npz path. This contains image pair information of a scene.
intrinsic_path (str): path to depth-camera intrinsic file.
mode (str): options are ['train', 'val', 'test'].
augment_fn (callable, optional): augments images with pre-defined visual effects.
pose_dir (str): ScanNet root directory that contains all poses.
(we use a separate (optional) pose_dir since we store images and poses separately.)
"""
super().__init__()
self.root_dir = root_dir
self.pose_dir = pose_dir if pose_dir is not None else root_dir
self.mode = mode
# prepare data_names, intrinsics and extrinsics(T)
with np.load(npz_path) as data:
self.data_names = data['name']
            if 'score' in data.keys() and mode not in ['val', 'test']:
kept_mask = data['score'] > min_overlap_score
self.data_names = self.data_names[kept_mask]
self.intrinsics = dict(np.load(intrinsic_path))
# for training LoFTR
self.augment_fn = augment_fn if mode == 'train' else None
def __len__(self):
return len(self.data_names)
def _read_abs_pose(self, scene_name, name):
pth = osp.join(self.pose_dir,
scene_name,
'pose', f'{name}.txt')
return read_scannet_pose(pth)
def _compute_rel_pose(self, scene_name, name0, name1):
pose0 = self._read_abs_pose(scene_name, name0)
pose1 = self._read_abs_pose(scene_name, name1)
return np.matmul(pose1, inv(pose0)) # (4, 4)
def __getitem__(self, idx):
data_name = self.data_names[idx]
scene_name, scene_sub_name, stem_name_0, stem_name_1 = data_name
scene_name = f'scene{scene_name:04d}_{scene_sub_name:02d}'
        # read the RGB images (resized to 640x480; converted to channel-first below) and their grayscale counterparts
img_name0 = osp.join(self.root_dir, scene_name, 'color', f'{stem_name_0}.jpg')
img_name1 = osp.join(self.root_dir, scene_name, 'color', f'{stem_name_1}.jpg')
# img_name0 = osp.join(self.root_dir, scene_name, 'depth', f'{stem_name_0}.png')#depth image as color for inference--Runyu
# img_name1 = osp.join(self.root_dir, scene_name, 'depth', f'{stem_name_1}.png')#depth image as color for inference--Runyu
# TODO: Support augmentation & handle seeds for each worker correctly.
image0 = read_scannet_rgb(img_name0, resize=(640, 480), augment_fn=None)
# augment_fn=np.random.choice([self.augment_fn, None], p=[0.5, 0.5]))
image1 = read_scannet_rgb(img_name1, resize=(640, 480), augment_fn=None)
# augment_fn=np.random.choice([self.augment_fn, None], p=[0.5, 0.5]))
gray0 = read_scannet_gray(img_name0, resize=(640, 480), augment_fn=None)
# augment_fn=np.random.choice([self.augment_fn, None], p=[0.5, 0.5]))
gray1 = read_scannet_gray(img_name1, resize=(640, 480), augment_fn=None)
# read the depthmap which is stored as (480, 640)
        if self.mode in ['train', 'val', 'test']: # unlike the original LoFTR loader, depth is also read in 'test' mode
depth0 = read_scannet_depth(osp.join(self.root_dir, scene_name, 'depth', f'{stem_name_0}.png'))##changed
depth1 = read_scannet_depth(osp.join(self.root_dir, scene_name, 'depth', f'{stem_name_1}.png'))##changed
else:
depth0 = depth1 = torch.tensor([])
image0 = image0.permute(0, 3, 1, 2)
image0 = image0[0]
image1 = image1.permute(0, 3, 1, 2)
image1 = image1[0]
# depth0 = read_scannet_depth(osp.join(self.root_dir, scene_name, 'depth', f'{stem_name_0}.png'))
# depth1 = read_scannet_depth(osp.join(self.root_dir, scene_name, 'depth', f'{stem_name_1}.png'))
# image0 = depth0/10.
# image0 = image0[None]
# image1 = depth1/10.
# image1 = image1[None]
# read the intrinsic of depthmap
K_0 = K_1 = torch.tensor(self.intrinsics[scene_name].copy(), dtype=torch.float).reshape(3, 3)
# read and compute relative poses
T_0to1 = torch.tensor(self._compute_rel_pose(scene_name, stem_name_0, stem_name_1),
dtype=torch.float32)
T_1to0 = T_0to1.inverse()
#depth0 = depth0*10000./255.##changed
#depth1 = depth1*10000./255.##changed
image0 = torch.cat((image0, depth0[None, ...]/10.), dim = 0)
image1 = torch.cat((image1, depth1[None, ...]/10.), dim = 0)
data = {
'image0': image0, # (4, h, w)
'depth0': depth0, # (h, w)
'gray0':gray0,
'image1': image1,
'depth1': depth1,
'gray1': gray1,
'T_0to1': T_0to1, # (4, 4)
'T_1to0': T_1to0,
'K0': K_0, # (3, 3)
'K1': K_1,
'dataset_name': 'ScanNet',
'scene_id': scene_name,
'pair_id': idx,
'pair_names': (osp.join(scene_name, 'color', f'{stem_name_0}.jpg'),
osp.join(scene_name, 'color', f'{stem_name_1}.jpg'))
}
return data | 11,203 | 43.995984 | 130 | py |
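# A minimal usage sketch for ScanNet_RGBD_Dataset as defined above. Every path
# below is a placeholder, not a file shipped with the repository; the guard
# keeps the module import-safe.
if __name__ == "__main__":
    from torch.utils.data import DataLoader
    dataset = ScanNet_RGBD_Dataset(
        root_dir="/path/to/scannet/scans",         # placeholder
        npz_path="/path/to/scene_index.npz",       # placeholder pair-index file
        intrinsic_path="/path/to/intrinsics.npz",  # placeholder
        mode="train",
        min_overlap_score=0.4)
    loader = DataLoader(dataset, batch_size=1, shuffle=True)
    sample = next(iter(loader))
    # 'image0' carries RGB plus normalised depth as a 4th channel
    print(sample["image0"].shape, sample["depth0"].shape, sample["gray0"].shape)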
3DG-STFM | 3DG-STFM-master/src/lightning/lightning_loftr.py |
from collections import defaultdict
import pprint
from loguru import logger
from pathlib import Path
import torch
import numpy as np
import pytorch_lightning as pl
from matplotlib import pyplot as plt
from src.loftr import LoFTR_RGB,LoFTR_RGBD,LoFTR_RGBD_teacher,LoFTR_RGB_student
from src.loftr.utils.supervision import compute_supervision_coarse, compute_supervision_fine
from src.losses.loftr_loss import LoFTRLoss,LoFTRLoss_t_s
from src.optimizers import build_optimizer, build_scheduler
from src.utils.metrics import (
compute_symmetrical_epipolar_errors,
compute_pose_errors,
compute_homo_errors,
aggregate_metrics_homo,
aggregate_metrics,
filter_depth_inconsist_point,
filter_unsampled_point
)
from src.utils.plotting import make_matching_figures
from src.utils.comm import gather, all_gather
from src.utils.misc import lower_config, flattenList
from src.utils.profiler import PassThroughProfiler
import torch.nn as nn
class PL_LoFTR_RGB(pl.LightningModule):
def __init__(self, config, pretrained_ckpt=None, profiler=None, dump_dir=None):
"""
TODO:
- use the new version of PL logging API.
"""
super().__init__()
# Misc
self.config = config # full config
_config = lower_config(self.config)
self.loftr_cfg = lower_config(_config['loftr'])
self.profiler = profiler or PassThroughProfiler()
self.n_vals_plot = max(config.TRAINER.N_VAL_PAIRS_TO_PLOT // config.TRAINER.WORLD_SIZE, 1)
# Matcher: LoFTR
self.matcher = LoFTR_RGB(config=_config['loftr'])
self.loss = LoFTRLoss(_config)
# Pretrained weights
if pretrained_ckpt:
#self.matcher.load_state_dict(torch.load(pretrained_ckpt, map_location='cpu')['state_dict'])
sd = torch.load(pretrained_ckpt, map_location='cpu')['state_dict']
from collections import OrderedDict
new_state_dict = OrderedDict()
for k, v in sd.items():
name = k[8:] # remove `matcher.`
new_state_dict[name] = v
self.matcher.load_state_dict(new_state_dict, strict=False)
logger.info(f"Load \'{pretrained_ckpt}\' as pretrained checkpoint")
# Testing
self.dump_dir = dump_dir
def configure_optimizers(self):
# FIXME: The scheduler did not work properly when `--resume_from_checkpoint`
optimizer = build_optimizer(self, self.config)
scheduler = build_scheduler(self.config, optimizer)
return [optimizer], [scheduler]
def optimizer_step(
self, epoch, batch_idx, optimizer, optimizer_idx,
optimizer_closure, on_tpu, using_native_amp, using_lbfgs):
# learning rate warm up
warmup_step = self.config.TRAINER.WARMUP_STEP
if self.trainer.global_step < warmup_step:
if self.config.TRAINER.WARMUP_TYPE == 'linear':
base_lr = self.config.TRAINER.WARMUP_RATIO * self.config.TRAINER.TRUE_LR
lr = base_lr + \
(self.trainer.global_step / self.config.TRAINER.WARMUP_STEP) * \
abs(self.config.TRAINER.TRUE_LR - base_lr)
for pg in optimizer.param_groups:
pg['lr'] = lr
elif self.config.TRAINER.WARMUP_TYPE == 'constant':
pass
else:
raise ValueError(f'Unknown lr warm-up strategy: {self.config.TRAINER.WARMUP_TYPE}')
# update params
optimizer.step(closure=optimizer_closure)
optimizer.zero_grad()
def _trainval_inference(self, batch):
with self.profiler.profile("Compute coarse supervision"):
compute_supervision_coarse(batch, self.config)
with self.profiler.profile("LoFTR"):
self.matcher(batch)
with self.profiler.profile("Compute fine supervision"):
compute_supervision_fine(batch, self.config)
with self.profiler.profile("Compute losses"):
self.loss(batch)
def _compute_metrics(self, batch):
with self.profiler.profile("Copmute metrics"):
compute_symmetrical_epipolar_errors(batch) # compute epi_errs for each match
compute_pose_errors(batch, self.config) # compute R_errs, t_errs, pose_errs for each pair
rel_pair_names = list(zip(*batch['pair_names']))
bs = batch['image0'].size(0)
metrics = {
# to filter duplicate pairs caused by DistributedSampler
'identifiers': ['#'.join(rel_pair_names[b]) for b in range(bs)],
'epi_errs': [batch['epi_errs'][batch['m_bids'] == b].cpu().numpy() for b in range(bs)],
'R_errs': batch['R_errs'],
't_errs': batch['t_errs'],
'inliers': batch['inliers']}
ret_dict = {'metrics': metrics}
return ret_dict, rel_pair_names
def _compute_metrics_custom(self, batch):
with self.profiler.profile("Copmute metrics"):
filter_depth_inconsist_point(batch, self.config)
compute_symmetrical_epipolar_errors(batch) # compute epi_errs for each match
compute_pose_errors(batch, self.config) # compute R_errs, t_errs, pose_errs for each pair
rel_pair_names = list(zip(*batch['pair_names']))
bs = batch['image0'].size(0)
metrics = {
# to filter duplicate pairs caused by DistributedSampler
'identifiers': ['#'.join(rel_pair_names[b]) for b in range(bs)],
'epi_errs': [batch['epi_errs'].cpu().numpy()],
'R_errs': batch['R_errs'],
't_errs': batch['t_errs'],
'inliers': batch['inliers']}
ret_dict = {'metrics': metrics}
return ret_dict, rel_pair_names
def _compute_metrics_custom_sample(self, batch):
with self.profiler.profile("Copmute metrics"):
filter_depth_inconsist_point(batch, self.config)
compute_symmetrical_epipolar_errors(batch) # compute epi_errs for each match
compute_pose_errors(batch, self.config) # compute R_errs, t_errs, pose_errs for each pair
rel_pair_names = list(zip(*batch['pair_names']))
bs = batch['image0'].size(0)
metrics = {
# to filter duplicate pairs caused by DistributedSampler
'identifiers': ['#'.join(rel_pair_names[b]) for b in range(bs)],
'epi_errs': [batch['epi_errs'].cpu().numpy()],
'R_errs': batch['R_errs'],
't_errs': batch['t_errs'],
'inliers': batch['inliers']}
ret_dict = {'metrics': metrics}
return ret_dict, rel_pair_names
def training_step(self, batch, batch_idx):
self._trainval_inference(batch)
# logging
if self.trainer.global_rank == 0 and self.global_step % self.trainer.log_every_n_steps == 0:
# scalars
for k, v in batch['loss_scalars'].items():
self.logger.experiment.add_scalar(f'train/{k}', v, self.global_step)
# net-params
if self.config.LOFTR.MATCH_COARSE.MATCH_TYPE == 'sinkhorn':
self.logger.experiment.add_scalar(
f'skh_bin_score', self.matcher.coarse_matching.bin_score.clone().detach().cpu().data,
self.global_step)
# figures
if self.config.TRAINER.ENABLE_PLOTTING:
compute_symmetrical_epipolar_errors(batch) # compute epi_errs for each match
figures = make_matching_figures(batch, self.config, self.config.TRAINER.PLOT_MODE)
for k, v in figures.items():
self.logger.experiment.add_figure(f'train_match/{k}', v, self.global_step)
return {'loss': batch['loss']}
def training_epoch_end(self, outputs):
avg_loss = torch.stack([x['loss'] for x in outputs]).mean()
if self.trainer.global_rank == 0:
self.logger.experiment.add_scalar(
'train/avg_loss_on_epoch', avg_loss,
global_step=self.current_epoch)
def validation_step(self, batch, batch_idx):
self._trainval_inference(batch)
ret_dict, _ = self._compute_metrics(batch)
val_plot_interval = max(self.trainer.num_val_batches[0] // self.n_vals_plot, 1)
figures = {self.config.TRAINER.PLOT_MODE: []}
if batch_idx % val_plot_interval == 0:
figures = make_matching_figures(batch, self.config, mode=self.config.TRAINER.PLOT_MODE)
return {
**ret_dict,
'loss_scalars': batch['loss_scalars'],
'figures': figures,
}
def validation_epoch_end(self, outputs):
# handle multiple validation sets
multi_outputs = [outputs] if not isinstance(outputs[0], (list, tuple)) else outputs
multi_val_metrics = defaultdict(list)
for valset_idx, outputs in enumerate(multi_outputs):
            # since PL performs sanity_check at the very beginning of training
cur_epoch = self.trainer.current_epoch
if not self.trainer.resume_from_checkpoint and self.trainer.running_sanity_check:
cur_epoch = -1
# 1. loss_scalars: dict of list, on cpu
_loss_scalars = [o['loss_scalars'] for o in outputs]
loss_scalars = {k: flattenList(all_gather([_ls[k] for _ls in _loss_scalars])) for k in _loss_scalars[0]}
# 2. val metrics: dict of list, numpy
_metrics = [o['metrics'] for o in outputs]
metrics = {k: flattenList(all_gather(flattenList([_me[k] for _me in _metrics]))) for k in _metrics[0]}
            # NOTE: all ranks need to `aggregate_metrics`, but only log at rank-0
val_metrics_4tb = aggregate_metrics(metrics, self.config.TRAINER.EPI_ERR_THR)
for thr in [5, 10, 20]:
multi_val_metrics[f'auc@{thr}'].append(val_metrics_4tb[f'auc@{thr}'])
# 3. figures
_figures = [o['figures'] for o in outputs]
figures = {k: flattenList(gather(flattenList([_me[k] for _me in _figures]))) for k in _figures[0]}
# tensorboard records only on rank 0
if self.trainer.global_rank == 0:
for k, v in loss_scalars.items():
mean_v = torch.stack(v).mean()
self.logger.experiment.add_scalar(f'val_{valset_idx}/avg_{k}', mean_v, global_step=cur_epoch)
for k, v in val_metrics_4tb.items():
self.logger.experiment.add_scalar(f"metrics_{valset_idx}/{k}", v, global_step=cur_epoch)
for k, v in figures.items():
if self.trainer.global_rank == 0:
for plot_idx, fig in enumerate(v):
self.logger.experiment.add_figure(
f'val_match_{valset_idx}/{k}/pair-{plot_idx}', fig, cur_epoch, close=True)
plt.close('all')
for thr in [5, 10, 20]:
# log on all ranks for ModelCheckpoint callback to work properly
self.log(f'auc@{thr}', torch.tensor(np.mean(multi_val_metrics[f'auc@{thr}']))) # ckpt monitors on this
def test_step(self, batch, batch_idx):
with self.profiler.profile("LoFTR"):
self.matcher(batch)
setting = 'Normal'
if setting == 'Normal':
ret_dict, rel_pair_names = self._compute_metrics(batch)
elif setting == 'depth_check':
# print("Using the depth information to remove the matching outliers")
ret_dict, rel_pair_names = self._compute_metrics_custom(batch)
elif setting == 'depth_sample':
ret_dict, rel_pair_names = self._compute_metrics_custom_sample(batch)
with self.profiler.profile("dump_results"):
if self.dump_dir is not None:
# dump results for further analysis
keys_to_save = {'mkpts0_f', 'mkpts1_f', 'mconf', 'epi_errs'}
pair_names = list(zip(*batch['pair_names']))
bs = batch['image0'].shape[0]
dumps = []
for b_id in range(bs):
item = {}
mask = batch['m_bids'] == b_id
item['pair_names'] = pair_names[b_id]
item['identifier'] = '#'.join(rel_pair_names[b_id])
for key in keys_to_save:
if setting == 'depth_check':
item[key] = batch[key][:].cpu().numpy()
elif setting == 'Normal':
item[key] = batch[key][mask].cpu().numpy()
elif setting == 'depth_sample':
print('here')
for key in ['R_errs', 't_errs', 'inliers']:
item[key] = batch[key][b_id]
dumps.append(item)
ret_dict['dumps'] = dumps
return ret_dict
def test_epoch_end(self, outputs):
# metrics: dict of list, numpy
_metrics = [o['metrics'] for o in outputs]
metrics = {k: flattenList(gather(flattenList([_me[k] for _me in _metrics]))) for k in _metrics[0]}
# [{key: [{...}, *#bs]}, *#batch]
if self.dump_dir is not None:
Path(self.dump_dir).mkdir(parents=True, exist_ok=True)
_dumps = flattenList([o['dumps'] for o in outputs]) # [{...}, #bs*#batch]
dumps = flattenList(gather(_dumps)) # [{...}, #proc*#bs*#batch]
logger.info(f'Prediction and evaluation results will be saved to: {self.dump_dir}')
if self.trainer.global_rank == 0:
print(self.profiler.summary())
val_metrics_4tb = aggregate_metrics(metrics, self.config.TRAINER.EPI_ERR_THR)
logger.info('\n' + pprint.pformat(val_metrics_4tb))
if self.dump_dir is not None:
np.save(Path(self.dump_dir) / 'LoFTR_pred_eval_our', dumps)
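# A minimal sketch of the linear warm-up schedule applied in optimizer_step
# above: the learning rate ramps from WARMUP_RATIO * TRUE_LR up to TRUE_LR over
# WARMUP_STEP optimisation steps, after which the scheduler built in
# configure_optimizers takes over. The default values below are illustrative;
# the real ones come from config.TRAINER.
def _warmup_lr_sketch(step, true_lr=8e-3, warmup_ratio=0.1, warmup_step=4800):
    base_lr = warmup_ratio * true_lr
    if step >= warmup_step:
        return true_lr
    return base_lr + (step / warmup_step) * abs(true_lr - base_lr)
# e.g. _warmup_lr_sketch(0) is ~8e-4 and _warmup_lr_sketch(2400) is ~4.4e-3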
class PL_LoFTR_RGBD(pl.LightningModule):
def __init__(self, config, pretrained_ckpt=None, profiler=None, dump_dir=None):
"""
TODO:
- use the new version of PL logging API.
"""
super().__init__()
# Misc
self.config = config # full config
_config = lower_config(self.config)
self.loftr_cfg = lower_config(_config['loftr'])
self.profiler = profiler or PassThroughProfiler()
self.n_vals_plot = max(config.TRAINER.N_VAL_PAIRS_TO_PLOT // config.TRAINER.WORLD_SIZE, 1)
# Matcher: LoFTR
self.matcher = LoFTR_RGBD(config=_config['loftr'])
self.loss = LoFTRLoss(_config)
# Pretrained weights
if pretrained_ckpt:
#self.matcher.load_state_dict(torch.load(pretrained_ckpt, map_location='cpu')['state_dict'])
sd = torch.load(pretrained_ckpt, map_location='cpu')['state_dict']
from collections import OrderedDict
new_state_dict = OrderedDict()
for k, v in sd.items():
name = k[8:] # remove `matcher.`
new_state_dict[name] = v
self.matcher.load_state_dict(new_state_dict, strict=False)
logger.info(f"Load \'{pretrained_ckpt}\' as pretrained checkpoint")
# Testing
self.dump_dir = dump_dir
def configure_optimizers(self):
# FIXME: The scheduler did not work properly when `--resume_from_checkpoint`
optimizer = build_optimizer(self, self.config)
scheduler = build_scheduler(self.config, optimizer)
return [optimizer], [scheduler]
def optimizer_step(
self, epoch, batch_idx, optimizer, optimizer_idx,
optimizer_closure, on_tpu, using_native_amp, using_lbfgs):
# learning rate warm up
warmup_step = self.config.TRAINER.WARMUP_STEP
#print(self.trainer.global_step, self.config.TRAINER.WARMUP_STEP)
if self.trainer.global_step < warmup_step:
if self.config.TRAINER.WARMUP_TYPE == 'linear':
base_lr = self.config.TRAINER.WARMUP_RATIO * self.config.TRAINER.TRUE_LR
lr = base_lr + \
(self.trainer.global_step / self.config.TRAINER.WARMUP_STEP) * \
abs(self.config.TRAINER.TRUE_LR - base_lr)
for pg in optimizer.param_groups:
pg['lr'] = lr
elif self.config.TRAINER.WARMUP_TYPE == 'constant':
pass
else:
raise ValueError(f'Unknown lr warm-up strategy: {self.config.TRAINER.WARMUP_TYPE}')
# update params
optimizer.step(closure=optimizer_closure)
optimizer.zero_grad()
def _trainval_inference(self, batch):
with self.profiler.profile("Compute coarse supervision"):
compute_supervision_coarse(batch, self.config)
with self.profiler.profile("LoFTR"):
self.matcher(batch)
with self.profiler.profile("Compute fine supervision"):
compute_supervision_fine(batch, self.config)
with self.profiler.profile("Compute losses"):
self.loss(batch)
def _compute_metrics(self, batch):
with self.profiler.profile("Copmute metrics"):
compute_symmetrical_epipolar_errors(batch) # compute epi_errs for each match
compute_pose_errors(batch, self.config) # compute R_errs, t_errs, pose_errs for each pair
rel_pair_names = list(zip(*batch['pair_names']))
bs = batch['image0'].size(0)
metrics = {
# to filter duplicate pairs caused by DistributedSampler
'identifiers': ['#'.join(rel_pair_names[b]) for b in range(bs)],
'epi_errs': [batch['epi_errs'][batch['m_bids'] == b].cpu().numpy() for b in range(bs)],
'R_errs': batch['R_errs'],
't_errs': batch['t_errs'],
'inliers': batch['inliers']}
ret_dict = {'metrics': metrics}
return ret_dict, rel_pair_names
def _compute_metrics_custom(self, batch):
with self.profiler.profile("Copmute metrics"):
filter_depth_inconsist_point(batch, self.config)
compute_symmetrical_epipolar_errors(batch) # compute epi_errs for each match
compute_pose_errors(batch, self.config) # compute R_errs, t_errs, pose_errs for each pair
rel_pair_names = list(zip(*batch['pair_names']))
bs = batch['image0'].size(0)
metrics = {
# to filter duplicate pairs caused by DistributedSampler
'identifiers': ['#'.join(rel_pair_names[b]) for b in range(bs)],
'epi_errs': [batch['epi_errs'].cpu().numpy()],
'R_errs': batch['R_errs'],
't_errs': batch['t_errs'],
'inliers': batch['inliers']}
ret_dict = {'metrics': metrics}
return ret_dict, rel_pair_names
def _compute_metrics_custom_sample(self, batch):
with self.profiler.profile("Copmute metrics"):
filter_depth_inconsist_point(batch, self.config)
compute_symmetrical_epipolar_errors(batch) # compute epi_errs for each match
compute_pose_errors(batch, self.config) # compute R_errs, t_errs, pose_errs for each pair
rel_pair_names = list(zip(*batch['pair_names']))
bs = batch['image0'].size(0)
metrics = {
# to filter duplicate pairs caused by DistributedSampler
'identifiers': ['#'.join(rel_pair_names[b]) for b in range(bs)],
'epi_errs': [batch['epi_errs'].cpu().numpy()],
'R_errs': batch['R_errs'],
't_errs': batch['t_errs'],
'inliers': batch['inliers']}
ret_dict = {'metrics': metrics}
return ret_dict, rel_pair_names
def training_step(self, batch, batch_idx):
self._trainval_inference(batch)
# logging
if self.trainer.global_rank == 0 and self.global_step % self.trainer.log_every_n_steps == 0:
# scalars
for k, v in batch['loss_scalars'].items():
self.logger.experiment.add_scalar(f'train/{k}', v, self.global_step)
# net-params
if self.config.LOFTR.MATCH_COARSE.MATCH_TYPE == 'sinkhorn':
self.logger.experiment.add_scalar(
f'skh_bin_score', self.matcher.coarse_matching.bin_score.clone().detach().cpu().data,
self.global_step)
# figures
if self.config.TRAINER.ENABLE_PLOTTING:
compute_symmetrical_epipolar_errors(batch) # compute epi_errs for each match
figures = make_matching_figures(batch, self.config, self.config.TRAINER.PLOT_MODE)
for k, v in figures.items():
self.logger.experiment.add_figure(f'train_match/{k}', v, self.global_step)
return {'loss': batch['loss']}
def training_epoch_end(self, outputs):
avg_loss = torch.stack([x['loss'] for x in outputs]).mean()
if self.trainer.global_rank == 0:
self.logger.experiment.add_scalar(
'train/avg_loss_on_epoch', avg_loss,
global_step=self.current_epoch)
def validation_step(self, batch, batch_idx):
self._trainval_inference(batch)
ret_dict, _ = self._compute_metrics(batch)
val_plot_interval = max(self.trainer.num_val_batches[0] // self.n_vals_plot, 1)
figures = {self.config.TRAINER.PLOT_MODE: []}
if batch_idx % val_plot_interval == 0:
figures = make_matching_figures(batch, self.config, mode=self.config.TRAINER.PLOT_MODE)
return {
**ret_dict,
'loss_scalars': batch['loss_scalars'],
'figures': figures,
}
def validation_epoch_end(self, outputs):
# handle multiple validation sets
multi_outputs = [outputs] if not isinstance(outputs[0], (list, tuple)) else outputs
multi_val_metrics = defaultdict(list)
for valset_idx, outputs in enumerate(multi_outputs):
            # since PL performs sanity_check at the very beginning of training
cur_epoch = self.trainer.current_epoch
if not self.trainer.resume_from_checkpoint and self.trainer.running_sanity_check:
cur_epoch = -1
# 1. loss_scalars: dict of list, on cpu
_loss_scalars = [o['loss_scalars'] for o in outputs]
loss_scalars = {k: flattenList(all_gather([_ls[k] for _ls in _loss_scalars])) for k in _loss_scalars[0]}
# 2. val metrics: dict of list, numpy
_metrics = [o['metrics'] for o in outputs]
metrics = {k: flattenList(all_gather(flattenList([_me[k] for _me in _metrics]))) for k in _metrics[0]}
            # NOTE: all ranks need to `aggregate_metrics`, but only log at rank-0
val_metrics_4tb = aggregate_metrics(metrics, self.config.TRAINER.EPI_ERR_THR)
for thr in [5, 10, 20]:
multi_val_metrics[f'auc@{thr}'].append(val_metrics_4tb[f'auc@{thr}'])
# 3. figures
_figures = [o['figures'] for o in outputs]
figures = {k: flattenList(gather(flattenList([_me[k] for _me in _figures]))) for k in _figures[0]}
# tensorboard records only on rank 0
if self.trainer.global_rank == 0:
for k, v in loss_scalars.items():
mean_v = torch.stack(v).mean()
self.logger.experiment.add_scalar(f'val_{valset_idx}/avg_{k}', mean_v, global_step=cur_epoch)
for k, v in val_metrics_4tb.items():
self.logger.experiment.add_scalar(f"metrics_{valset_idx}/{k}", v, global_step=cur_epoch)
for k, v in figures.items():
if self.trainer.global_rank == 0:
for plot_idx, fig in enumerate(v):
self.logger.experiment.add_figure(
f'val_match_{valset_idx}/{k}/pair-{plot_idx}', fig, cur_epoch, close=True)
plt.close('all')
for thr in [5, 10, 20]:
# log on all ranks for ModelCheckpoint callback to work properly
self.log(f'auc@{thr}', torch.tensor(np.mean(multi_val_metrics[f'auc@{thr}']))) # ckpt monitors on this
def test_step(self, batch, batch_idx):
with self.profiler.profile("LoFTR"):
self.matcher(batch)
setting = 'Normal'
if setting == 'Normal':
ret_dict, rel_pair_names = self._compute_metrics(batch)
elif setting == 'depth_check':
# print("Using the depth information to remove the matching outliers")
ret_dict, rel_pair_names = self._compute_metrics_custom(batch)
elif setting == 'depth_sample':
ret_dict, rel_pair_names = self._compute_metrics_custom_sample(batch)
with self.profiler.profile("dump_results"):
if self.dump_dir is not None:
# dump results for further analysis
keys_to_save = {'mkpts0_f', 'mkpts1_f', 'mconf', 'epi_errs'}
pair_names = list(zip(*batch['pair_names']))
bs = batch['image0'].shape[0]
dumps = []
for b_id in range(bs):
item = {}
mask = batch['m_bids'] == b_id
item['pair_names'] = pair_names[b_id]
item['identifier'] = '#'.join(rel_pair_names[b_id])
for key in keys_to_save:
if setting == 'depth_check':
item[key] = batch[key][:].cpu().numpy()
elif setting == 'Normal':
item[key] = batch[key][mask].cpu().numpy()
elif setting == 'depth_sample':
print('here')
for key in ['R_errs', 't_errs', 'inliers']:
item[key] = batch[key][b_id]
dumps.append(item)
ret_dict['dumps'] = dumps
return ret_dict
def test_epoch_end(self, outputs):
# metrics: dict of list, numpy
_metrics = [o['metrics'] for o in outputs]
metrics = {k: flattenList(gather(flattenList([_me[k] for _me in _metrics]))) for k in _metrics[0]}
# [{key: [{...}, *#bs]}, *#batch]
if self.dump_dir is not None:
Path(self.dump_dir).mkdir(parents=True, exist_ok=True)
_dumps = flattenList([o['dumps'] for o in outputs]) # [{...}, #bs*#batch]
dumps = flattenList(gather(_dumps)) # [{...}, #proc*#bs*#batch]
logger.info(f'Prediction and evaluation results will be saved to: {self.dump_dir}')
if self.trainer.global_rank == 0:
print(self.profiler.summary())
val_metrics_4tb = aggregate_metrics(metrics, self.config.TRAINER.EPI_ERR_THR)
logger.info('\n' + pprint.pformat(val_metrics_4tb))
if self.dump_dir is not None:
np.save(Path(self.dump_dir) / 'LoFTR_pred_eval', dumps)
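# A sketch of the pose-error AUC reported as auc@{5,10,20} above, assuming the
# standard recall-vs-threshold integration used in the matching literature.
# The authoritative implementation lives in src.utils.metrics.aggregate_metrics;
# this stand-alone version is only meant to make the metric concrete.
def _pose_auc_sketch(errors, thresholds=(5, 10, 20)):
    """errors: per-pair pose errors in degrees, e.g. max(R_err, t_err)."""
    import numpy as np
    errors = np.sort(np.asarray(errors, dtype=float))
    recall = (np.arange(len(errors)) + 1) / len(errors)
    errors = np.r_[0.0, errors]
    recall = np.r_[0.0, recall]
    aucs = {}
    for thr in thresholds:
        last = np.searchsorted(errors, thr)
        y = np.r_[recall[:last], recall[last - 1]]
        x = np.r_[errors[:last], thr]
        aucs[f'auc@{thr}'] = np.trapz(y, x) / thr
    return aucs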
class PL_LoFTR_RGB_teacher_student(pl.LightningModule):
def __init__(self, config, pretrained_ckpt=None, profiler=None, dump_dir=None):
"""
TODO:
- use the new version of PL logging API.
"""
super().__init__()
# Misc
self.config = config # full config
_config = lower_config(self.config)
self.loftr_cfg = lower_config(_config['loftr'])
self.profiler = profiler or PassThroughProfiler()
self.n_vals_plot = max(config.TRAINER.N_VAL_PAIRS_TO_PLOT // config.TRAINER.WORLD_SIZE, 1)
# Matcher: LoFTR
self.matcher = LoFTR_RGB_student(config=_config['loftr'])
self.loss = LoFTRLoss_t_s(_config)
#pretrained_rgb = "./logs/tb_logs/4gpu_mini_rgb_rgbd/rgb/checkpoints/epoch=28-auc@5=0.151-auc@10=0.313-auc@20=0.484.ckpt"
#sd = torch.load(pretrained_rgb, map_location='cpu')['state_dict']
#from collections import OrderedDict
#new_state_dict = OrderedDict()
#for k, v in sd.items():
# name = k[8:] # remove `matcher.`
# new_state_dict[name] = v
#self.matcher.load_state_dict(new_state_dict, strict=False)
# Pretrained weights
if pretrained_ckpt:
self.matcher.load_state_dict(torch.load(pretrained_ckpt, map_location='cpu')['state_dict'])
logger.info(f"Load \'{pretrained_ckpt}\' as pretrained checkpoint")
# Testing
self.dump_dir = dump_dir
def configure_optimizers(self):
# FIXME: The scheduler did not work properly when `--resume_from_checkpoint`
optimizer = build_optimizer(self, self.config)
scheduler = build_scheduler(self.config, optimizer)
return [optimizer], [scheduler]
def optimizer_step(
self, epoch, batch_idx, optimizer, optimizer_idx,
optimizer_closure, on_tpu, using_native_amp, using_lbfgs):
# learning rate warm up
warmup_step = self.config.TRAINER.WARMUP_STEP
#print(self.trainer.global_step,self.config.TRAINER.WARMUP_STEP)
if self.trainer.global_step < warmup_step:
if self.config.TRAINER.WARMUP_TYPE == 'linear':
base_lr = self.config.TRAINER.WARMUP_RATIO * self.config.TRAINER.TRUE_LR
lr = base_lr + \
(self.trainer.global_step / self.config.TRAINER.WARMUP_STEP) * \
abs(self.config.TRAINER.TRUE_LR - base_lr)
for pg in optimizer.param_groups:
pg['lr'] = lr
elif self.config.TRAINER.WARMUP_TYPE == 'constant':
pass
else:
raise ValueError(f'Unknown lr warm-up strategy: {self.config.TRAINER.WARMUP_TYPE}')
# update params
optimizer.step(closure=optimizer_closure)
optimizer.zero_grad()
def _trainval_inference(self, batch):
with self.profiler.profile("Compute coarse supervision"):
compute_supervision_coarse(batch, self.config)
with self.profiler.profile("LoFTR"):
self.matcher(batch)
with self.profiler.profile("Compute fine supervision"):
compute_supervision_fine(batch, self.config)
with self.profiler.profile("Compute losses"):
self.loss(batch)
def _compute_metrics(self, batch):
with self.profiler.profile("Copmute metrics"):
compute_symmetrical_epipolar_errors(batch) # compute epi_errs for each match
compute_pose_errors(batch, self.config) # compute R_errs, t_errs, pose_errs for each pair
rel_pair_names = list(zip(*batch['pair_names']))
bs = batch['image0'].size(0)
metrics = {
# to filter duplicate pairs caused by DistributedSampler
'identifiers': ['#'.join(rel_pair_names[b]) for b in range(bs)],
'epi_errs': [batch['epi_errs'][batch['m_bids'] == b].cpu().numpy() for b in range(bs)],
'R_errs': batch['R_errs'],
't_errs': batch['t_errs'],
'inliers': batch['inliers']}
ret_dict = {'metrics': metrics}
return ret_dict, rel_pair_names
def _compute_metrics_custom(self, batch):
with self.profiler.profile("Copmute metrics"):
filter_depth_inconsist_point(batch, self.config)
compute_symmetrical_epipolar_errors(batch) # compute epi_errs for each match
compute_pose_errors(batch, self.config) # compute R_errs, t_errs, pose_errs for each pair
rel_pair_names = list(zip(*batch['pair_names']))
bs = batch['image0'].size(0)
metrics = {
# to filter duplicate pairs caused by DistributedSampler
'identifiers': ['#'.join(rel_pair_names[b]) for b in range(bs)],
'epi_errs': [batch['epi_errs'].cpu().numpy()],
'R_errs': batch['R_errs'],
't_errs': batch['t_errs'],
'inliers': batch['inliers']}
ret_dict = {'metrics': metrics}
return ret_dict, rel_pair_names
def _compute_metrics_custom_sample(self, batch):
with self.profiler.profile("Copmute metrics"):
filter_depth_inconsist_point(batch, self.config)
compute_symmetrical_epipolar_errors(batch) # compute epi_errs for each match
compute_pose_errors(batch, self.config) # compute R_errs, t_errs, pose_errs for each pair
rel_pair_names = list(zip(*batch['pair_names']))
bs = batch['image0'].size(0)
metrics = {
# to filter duplicate pairs caused by DistributedSampler
'identifiers': ['#'.join(rel_pair_names[b]) for b in range(bs)],
'epi_errs': [batch['epi_errs'].cpu().numpy()],
'R_errs': batch['R_errs'],
't_errs': batch['t_errs'],
'inliers': batch['inliers']}
ret_dict = {'metrics': metrics}
return ret_dict, rel_pair_names
def training_step(self, batch, batch_idx):
self._trainval_inference(batch)
# logging
if self.trainer.global_rank == 0 and self.global_step % self.trainer.log_every_n_steps == 0:
# scalars
for k, v in batch['loss_scalars'].items():
self.logger.experiment.add_scalar(f'train/{k}', v, self.global_step)
# net-params
if self.config.LOFTR.MATCH_COARSE.MATCH_TYPE == 'sinkhorn':
self.logger.experiment.add_scalar(
f'skh_bin_score', self.matcher.coarse_matching.bin_score.clone().detach().cpu().data,
self.global_step)
# figures
if self.config.TRAINER.ENABLE_PLOTTING:
compute_symmetrical_epipolar_errors(batch) # compute epi_errs for each match
figures = make_matching_figures(batch, self.config, self.config.TRAINER.PLOT_MODE)
for k, v in figures.items():
self.logger.experiment.add_figure(f'train_match/{k}', v, self.global_step)
return {'loss': batch['loss']}
def training_epoch_end(self, outputs):
avg_loss = torch.stack([x['loss'] for x in outputs]).mean()
if self.trainer.global_rank == 0:
self.logger.experiment.add_scalar(
'train/avg_loss_on_epoch', avg_loss,
global_step=self.current_epoch)
def validation_step(self, batch, batch_idx):
self._trainval_inference(batch)
ret_dict, _ = self._compute_metrics(batch)
val_plot_interval = max(self.trainer.num_val_batches[0] // self.n_vals_plot, 1)
figures = {self.config.TRAINER.PLOT_MODE: []}
if batch_idx % val_plot_interval == 0:
figures = make_matching_figures(batch, self.config, mode=self.config.TRAINER.PLOT_MODE)
return {
**ret_dict,
'loss_scalars': batch['loss_scalars'],
'figures': figures,
}
def validation_epoch_end(self, outputs):
# handle multiple validation sets
multi_outputs = [outputs] if not isinstance(outputs[0], (list, tuple)) else outputs
multi_val_metrics = defaultdict(list)
for valset_idx, outputs in enumerate(multi_outputs):
            # since PL performs sanity_check at the very beginning of training
cur_epoch = self.trainer.current_epoch
if not self.trainer.resume_from_checkpoint and self.trainer.running_sanity_check:
cur_epoch = -1
# 1. loss_scalars: dict of list, on cpu
_loss_scalars = [o['loss_scalars'] for o in outputs]
loss_scalars = {k: flattenList(all_gather([_ls[k] for _ls in _loss_scalars])) for k in _loss_scalars[0]}
# 2. val metrics: dict of list, numpy
_metrics = [o['metrics'] for o in outputs]
metrics = {k: flattenList(all_gather(flattenList([_me[k] for _me in _metrics]))) for k in _metrics[0]}
            # NOTE: all ranks need to `aggregate_metrics`, but only log at rank-0
val_metrics_4tb = aggregate_metrics(metrics, self.config.TRAINER.EPI_ERR_THR)
for thr in [5, 10, 20]:
multi_val_metrics[f'auc@{thr}'].append(val_metrics_4tb[f'auc@{thr}'])
# 3. figures
_figures = [o['figures'] for o in outputs]
figures = {k: flattenList(gather(flattenList([_me[k] for _me in _figures]))) for k in _figures[0]}
# tensorboard records only on rank 0
if self.trainer.global_rank == 0:
for k, v in loss_scalars.items():
mean_v = torch.stack(v).mean()
self.logger.experiment.add_scalar(f'val_{valset_idx}/avg_{k}', mean_v, global_step=cur_epoch)
for k, v in val_metrics_4tb.items():
self.logger.experiment.add_scalar(f"metrics_{valset_idx}/{k}", v, global_step=cur_epoch)
for k, v in figures.items():
if self.trainer.global_rank == 0:
for plot_idx, fig in enumerate(v):
self.logger.experiment.add_figure(
f'val_match_{valset_idx}/{k}/pair-{plot_idx}', fig, cur_epoch, close=True)
plt.close('all')
for thr in [5, 10, 20]:
# log on all ranks for ModelCheckpoint callback to work properly
self.log(f'auc@{thr}', torch.tensor(np.mean(multi_val_metrics[f'auc@{thr}']))) # ckpt monitors on this
def test_step(self, batch, batch_idx):
with self.profiler.profile("LoFTR"):
self.matcher(batch)
setting = 'Normal'
if setting == 'Normal':
ret_dict, rel_pair_names = self._compute_metrics(batch)
elif setting == 'depth_check':
# print("Using the depth information to remove the matching outliers")
ret_dict, rel_pair_names = self._compute_metrics_custom(batch)
elif setting == 'depth_sample':
ret_dict, rel_pair_names = self._compute_metrics_custom_sample(batch)
with self.profiler.profile("dump_results"):
if self.dump_dir is not None:
# dump results for further analysis
keys_to_save = {'mkpts0_f', 'mkpts1_f', 'mconf', 'epi_errs'}
pair_names = list(zip(*batch['pair_names']))
bs = batch['image0'].shape[0]
dumps = []
for b_id in range(bs):
item = {}
mask = batch['m_bids'] == b_id
item['pair_names'] = pair_names[b_id]
item['identifier'] = '#'.join(rel_pair_names[b_id])
for key in keys_to_save:
if setting == 'depth_check':
item[key] = batch[key][:].cpu().numpy()
elif setting == 'Normal':
item[key] = batch[key][mask].cpu().numpy()
elif setting == 'depth_sample':
print('here')
for key in ['R_errs', 't_errs', 'inliers']:
item[key] = batch[key][b_id]
dumps.append(item)
ret_dict['dumps'] = dumps
return ret_dict
def test_epoch_end(self, outputs):
# metrics: dict of list, numpy
_metrics = [o['metrics'] for o in outputs]
metrics = {k: flattenList(gather(flattenList([_me[k] for _me in _metrics]))) for k in _metrics[0]}
# [{key: [{...}, *#bs]}, *#batch]
if self.dump_dir is not None:
Path(self.dump_dir).mkdir(parents=True, exist_ok=True)
_dumps = flattenList([o['dumps'] for o in outputs]) # [{...}, #bs*#batch]
dumps = flattenList(gather(_dumps)) # [{...}, #proc*#bs*#batch]
logger.info(f'Prediction and evaluation results will be saved to: {self.dump_dir}')
if self.trainer.global_rank == 0:
print(self.profiler.summary())
val_metrics_4tb = aggregate_metrics(metrics, self.config.TRAINER.EPI_ERR_THR)
logger.info('\n' + pprint.pformat(val_metrics_4tb))
if self.dump_dir is not None:
np.save(Path(self.dump_dir) / 'LoFTR_pred_eval', dumps)
| 40,636 | 45.021518 | 129 | py |
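# A minimal sketch of how the .npy dump written by test_epoch_end above can be
# read back for offline analysis. The path below is a placeholder; the keys
# mirror keys_to_save plus the per-pair pose errors stored in test_step.
def _load_dumps_sketch(path="dump/LoFTR_pred_eval.npy"):  # placeholder path
    import numpy as np
    dumps = np.load(path, allow_pickle=True)
    for item in dumps[:3]:
        print(item["identifier"],
              len(item["mkpts0_f"]), "matches,",
              "R_err:", item["R_errs"], "t_err:", item["t_errs"])
    return dumps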
3DG-STFM | 3DG-STFM-master/src/lightning/data.py | import os
import math
from collections import abc
from loguru import logger
from torch.utils.data.dataset import Dataset
from tqdm import tqdm
from os import path as osp
from pathlib import Path
from joblib import Parallel, delayed
import pytorch_lightning as pl
from torch import distributed as dist
from torch.utils.data import (
Dataset,
DataLoader,
ConcatDataset,
DistributedSampler,
RandomSampler,
dataloader
)
from src.utils.augment import build_augmentor
from src.utils.dataloader import get_local_split
from src.utils.misc import tqdm_joblib
from src.utils import comm
from src.datasets.megadepth import MegaDepth_RGB_Dataset,MegaDepth_RGBD_Dataset
from src.datasets.scannet import ScanNet_RGB_Dataset,ScanNet_RGBD_Dataset
from src.datasets.sampler import RandomConcatSampler
class RGBDataModule(pl.LightningDataModule):
"""
    For distributed training, each training process is assigned
only a part of the training scenes to reduce memory overhead.
"""
def __init__(self, args, config):
super().__init__()
# 1. data config
# Train and Val should from the same data source
self.trainval_data_source = config.DATASET.TRAINVAL_DATA_SOURCE
self.test_data_source = config.DATASET.TEST_DATA_SOURCE
# training and validating
self.train_data_root = config.DATASET.TRAIN_DATA_ROOT
self.train_pose_root = config.DATASET.TRAIN_POSE_ROOT # (optional)
self.train_npz_root = config.DATASET.TRAIN_NPZ_ROOT
self.train_list_path = config.DATASET.TRAIN_LIST_PATH
self.train_intrinsic_path = config.DATASET.TRAIN_INTRINSIC_PATH
self.val_data_root = config.DATASET.VAL_DATA_ROOT
self.val_pose_root = config.DATASET.VAL_POSE_ROOT # (optional)
self.val_npz_root = config.DATASET.VAL_NPZ_ROOT
self.val_list_path = config.DATASET.VAL_LIST_PATH
self.val_intrinsic_path = config.DATASET.VAL_INTRINSIC_PATH
# testing
self.test_data_root = config.DATASET.TEST_DATA_ROOT
self.test_pose_root = config.DATASET.TEST_POSE_ROOT # (optional)
self.test_npz_root = config.DATASET.TEST_NPZ_ROOT
self.test_list_path = config.DATASET.TEST_LIST_PATH
self.test_intrinsic_path = config.DATASET.TEST_INTRINSIC_PATH
# 2. dataset config
# general options
self.min_overlap_score_test = config.DATASET.MIN_OVERLAP_SCORE_TEST # 0.4, omit data with overlap_score < min_overlap_score
self.min_overlap_score_train = config.DATASET.MIN_OVERLAP_SCORE_TRAIN
self.augment_fn = build_augmentor(config.DATASET.AUGMENTATION_TYPE) # None, options: [None, 'dark', 'mobile']
# MegaDepth options
self.mgdpt_img_resize = config.DATASET.MGDPT_IMG_RESIZE # 840
self.mgdpt_img_pad = config.DATASET.MGDPT_IMG_PAD # True
self.mgdpt_depth_pad = config.DATASET.MGDPT_DEPTH_PAD # True
self.mgdpt_df = config.DATASET.MGDPT_DF # 8
self.coarse_scale = 1 / config.LOFTR.RESOLUTION[0] # 0.125. for training loftr.
# 3.loader parameters
self.train_loader_params = {
'batch_size': args.batch_size,
'num_workers': args.num_workers,
'pin_memory': getattr(args, 'pin_memory', True)
}
self.val_loader_params = {
'batch_size': 1,
'shuffle': False,
'num_workers': args.num_workers,
'pin_memory': getattr(args, 'pin_memory', True)
}
self.test_loader_params = {
'batch_size': 1,
'shuffle': False,
'num_workers': args.num_workers,
'pin_memory': True
}
# 4. sampler
self.data_sampler = config.TRAINER.DATA_SAMPLER
self.n_samples_per_subset = config.TRAINER.N_SAMPLES_PER_SUBSET
self.subset_replacement = config.TRAINER.SB_SUBSET_SAMPLE_REPLACEMENT
self.shuffle = config.TRAINER.SB_SUBSET_SHUFFLE
self.repeat = config.TRAINER.SB_REPEAT
# (optional) RandomSampler for debugging
# misc configurations
self.parallel_load_data = getattr(args, 'parallel_load_data', False)
self.seed = config.TRAINER.SEED # 66
def setup(self, stage=None):
"""
Setup train / val / test dataset. This method will be called by PL automatically.
Args:
stage (str): 'fit' in training phase, and 'test' in testing phase.
"""
assert stage in ['fit', 'test'], "stage must be either fit or test"
try:
self.world_size = dist.get_world_size()
self.rank = dist.get_rank()
logger.info(f"[rank:{self.rank}] world_size: {self.world_size}")
except AssertionError as ae:
self.world_size = 1
self.rank = 0
logger.warning(str(ae) + " (set wolrd_size=1 and rank=0)")
if stage == 'fit':
self.train_dataset = self._setup_dataset(
self.train_data_root,
self.train_npz_root,
self.train_list_path,
self.train_intrinsic_path,
mode='train',
min_overlap_score=self.min_overlap_score_train,
pose_dir=self.train_pose_root)
# setup multiple (optional) validation subsets
if isinstance(self.val_list_path, (list, tuple)):
self.val_dataset = []
if not isinstance(self.val_npz_root, (list, tuple)):
self.val_npz_root = [self.val_npz_root for _ in range(len(self.val_list_path))]
for npz_list, npz_root in zip(self.val_list_path, self.val_npz_root):
self.val_dataset.append(self._setup_dataset(
self.val_data_root,
npz_root,
npz_list,
self.val_intrinsic_path,
mode='val',
min_overlap_score=self.min_overlap_score_test,
pose_dir=self.val_pose_root))
else:
self.val_dataset = self._setup_dataset(
self.val_data_root,
self.val_npz_root,
self.val_list_path,
self.val_intrinsic_path,
mode='val',
min_overlap_score=self.min_overlap_score_test,
pose_dir=self.val_pose_root)
logger.info(f'[rank:{self.rank}] Train & Val Dataset loaded!')
        else: # stage == 'test'
self.test_dataset = self._setup_dataset(
self.test_data_root,
self.test_npz_root,
self.test_list_path,
self.test_intrinsic_path,
mode='test',
min_overlap_score=self.min_overlap_score_test,
pose_dir=self.test_pose_root)
logger.info(f'[rank:{self.rank}]: Test Dataset loaded!')
def _setup_dataset(self,
data_root,
split_npz_root,
scene_list_path,
intri_path,
mode='train',
min_overlap_score=0.,
pose_dir=None):
""" Setup train / val / test set"""
with open(scene_list_path, 'r') as f:
npz_names = [name.split()[0] for name in f.readlines()]
if mode == 'train':
local_npz_names = get_local_split(npz_names, self.world_size, self.rank, self.seed)
else:
local_npz_names = npz_names
logger.info(f'[rank {self.rank}]: {len(local_npz_names)} scene(s) assigned.')
dataset_builder = self._build_concat_dataset_parallel \
if self.parallel_load_data \
else self._build_concat_dataset
return dataset_builder(data_root, local_npz_names, split_npz_root, intri_path,
mode=mode, min_overlap_score=min_overlap_score, pose_dir=pose_dir)
def _build_concat_dataset(
self,
data_root,
npz_names,
npz_dir,
intrinsic_path,
mode,
min_overlap_score=0.,
pose_dir=None
):
datasets = []
augment_fn = self.augment_fn if mode == 'train' else None
data_source = self.trainval_data_source if mode in ['train', 'val'] else self.test_data_source
if str(data_source).lower() == 'megadepth':
npz_names = [f'{n}.npz' for n in npz_names]
for npz_name in tqdm(npz_names,
desc=f'[rank:{self.rank}] loading {mode} datasets',
disable=int(self.rank) != 0):
# `ScanNetDataset`/`MegaDepthDataset` load all data from npz_path when initialized, which might take time.
npz_path = osp.join(npz_dir, npz_name)
if data_source == 'ScanNet':
datasets.append(
ScanNet_RGB_Dataset(data_root,
npz_path,
intrinsic_path,
mode=mode,
min_overlap_score=min_overlap_score,
augment_fn=augment_fn,
pose_dir=pose_dir))
elif data_source == 'MegaDepth':
datasets.append(
MegaDepth_RGB_Dataset(data_root,
npz_path,
mode=mode,
min_overlap_score=min_overlap_score,
img_resize=self.mgdpt_img_resize,
df=self.mgdpt_df,
img_padding=self.mgdpt_img_pad,
depth_padding=self.mgdpt_depth_pad,
augment_fn=augment_fn,
coarse_scale=self.coarse_scale))
else:
raise NotImplementedError()
return ConcatDataset(datasets)
def _build_concat_dataset_parallel(
self,
data_root,
npz_names,
npz_dir,
intrinsic_path,
mode,
min_overlap_score=0.,
pose_dir=None,
):
augment_fn = self.augment_fn if mode == 'train' else None
data_source = self.trainval_data_source if mode in ['train', 'val'] else self.test_data_source
if str(data_source).lower() == 'megadepth':
npz_names = [f'{n}.npz' for n in npz_names]
with tqdm_joblib(tqdm(desc=f'[rank:{self.rank}] loading {mode} datasets',
total=len(npz_names), disable=int(self.rank) != 0)):
if data_source == 'ScanNet':
datasets = Parallel(n_jobs=math.floor(len(os.sched_getaffinity(0)) * 0.9 / comm.get_local_size()))(
delayed(lambda x: _build_dataset(
ScanNet_RGB_Dataset,
data_root,
osp.join(npz_dir, x),
intrinsic_path,
mode=mode,
min_overlap_score=min_overlap_score,
augment_fn=augment_fn,
pose_dir=pose_dir))(name)
for name in npz_names)
elif data_source == 'MegaDepth':
# TODO: _pickle.PicklingError: Could not pickle the task to send it to the workers.
raise NotImplementedError()
datasets = Parallel(n_jobs=math.floor(len(os.sched_getaffinity(0)) * 0.9 / comm.get_local_size()))(
delayed(lambda x: _build_dataset(
MegaDepth_RGB_Dataset,
data_root,
osp.join(npz_dir, x),
mode=mode,
min_overlap_score=min_overlap_score,
img_resize=self.mgdpt_img_resize,
df=self.mgdpt_df,
img_padding=self.mgdpt_img_pad,
depth_padding=self.mgdpt_depth_pad,
augment_fn=augment_fn,
coarse_scale=self.coarse_scale))(name)
for name in npz_names)
else:
raise ValueError(f'Unknown dataset: {data_source}')
return ConcatDataset(datasets)
def train_dataloader(self):
""" Build training dataloader for ScanNet / MegaDepth. """
assert self.data_sampler in ['scene_balance']
logger.info(
f'[rank:{self.rank}/{self.world_size}]: Train Sampler and DataLoader re-init (should not re-init between epochs!).')
if self.data_sampler == 'scene_balance':
sampler = RandomConcatSampler(self.train_dataset,
self.n_samples_per_subset,
self.subset_replacement,
self.shuffle, self.repeat, self.seed)
else:
sampler = None
dataloader = DataLoader(self.train_dataset, sampler=sampler, **self.train_loader_params)
return dataloader
def val_dataloader(self):
""" Build validation dataloader for ScanNet / MegaDepth. """
logger.info(f'[rank:{self.rank}/{self.world_size}]: Val Sampler and DataLoader re-init.')
if not isinstance(self.val_dataset, abc.Sequence):
sampler = DistributedSampler(self.val_dataset, shuffle=False)
return DataLoader(self.val_dataset, sampler=sampler, **self.val_loader_params)
else:
dataloaders = []
for dataset in self.val_dataset:
sampler = DistributedSampler(dataset, shuffle=False)
dataloaders.append(DataLoader(dataset, sampler=sampler, **self.val_loader_params))
return dataloaders
def test_dataloader(self, *args, **kwargs):
logger.info(f'[rank:{self.rank}/{self.world_size}]: Test Sampler and DataLoader re-init.')
sampler = DistributedSampler(self.test_dataset, shuffle=False)
return DataLoader(self.test_dataset, sampler=sampler, **self.test_loader_params)
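# A sketch of the 'scene_balance' sampling idea behind train_dataloader above:
# draw a fixed number of indices from every per-scene subset of the
# ConcatDataset so that large scenes do not dominate an epoch. This is an
# illustrative re-implementation, not the actual RandomConcatSampler from
# src.datasets.sampler.
def _scene_balanced_indices_sketch(concat_dataset, n_samples_per_subset, seed=66):
    import torch
    gen = torch.Generator().manual_seed(seed)
    picks, lo = [], 0
    for hi in concat_dataset.cumulative_sizes:  # exclusive upper bound of each subset
        picks.append(torch.randint(lo, hi, (n_samples_per_subset,), generator=gen))
        lo = hi
    flat = torch.cat(picks)                     # sampled with replacement per subset
    return flat[torch.randperm(len(flat), generator=gen)].tolist()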
class RGBDDataModule(pl.LightningDataModule):
"""
    For distributed training, each training process is assigned
only a part of the training scenes to reduce memory overhead.
"""
def __init__(self, args, config):
super().__init__()
# 1. data config
# Train and Val should from the same data source
self.trainval_data_source = config.DATASET.TRAINVAL_DATA_SOURCE
self.test_data_source = config.DATASET.TEST_DATA_SOURCE
# training and validating
self.train_data_root = config.DATASET.TRAIN_DATA_ROOT
self.train_pose_root = config.DATASET.TRAIN_POSE_ROOT # (optional)
self.train_npz_root = config.DATASET.TRAIN_NPZ_ROOT
self.train_list_path = config.DATASET.TRAIN_LIST_PATH
self.train_intrinsic_path = config.DATASET.TRAIN_INTRINSIC_PATH
self.val_data_root = config.DATASET.VAL_DATA_ROOT
self.val_pose_root = config.DATASET.VAL_POSE_ROOT # (optional)
self.val_npz_root = config.DATASET.VAL_NPZ_ROOT
self.val_list_path = config.DATASET.VAL_LIST_PATH
self.val_intrinsic_path = config.DATASET.VAL_INTRINSIC_PATH
# testing
self.test_data_root = config.DATASET.TEST_DATA_ROOT
self.test_pose_root = config.DATASET.TEST_POSE_ROOT # (optional)
self.test_npz_root = config.DATASET.TEST_NPZ_ROOT
self.test_list_path = config.DATASET.TEST_LIST_PATH
self.test_intrinsic_path = config.DATASET.TEST_INTRINSIC_PATH
# 2. dataset config
# general options
self.min_overlap_score_test = config.DATASET.MIN_OVERLAP_SCORE_TEST # 0.4, omit data with overlap_score < min_overlap_score
self.min_overlap_score_train = config.DATASET.MIN_OVERLAP_SCORE_TRAIN
self.augment_fn = build_augmentor(config.DATASET.AUGMENTATION_TYPE) # None, options: [None, 'dark', 'mobile']
# MegaDepth options
self.mgdpt_img_resize = config.DATASET.MGDPT_IMG_RESIZE # 840
self.mgdpt_img_pad = config.DATASET.MGDPT_IMG_PAD # True
self.mgdpt_depth_pad = config.DATASET.MGDPT_DEPTH_PAD # True
self.mgdpt_df = config.DATASET.MGDPT_DF # 8
        self.coarse_scale = 1 / config.LOFTR.RESOLUTION[0]  # 0.125, used for training LoFTR
# 3.loader parameters
self.train_loader_params = {
'batch_size': args.batch_size,
'num_workers': args.num_workers,
'pin_memory': getattr(args, 'pin_memory', True)
}
self.val_loader_params = {
'batch_size': 1,
'shuffle': False,
'num_workers': args.num_workers,
'pin_memory': getattr(args, 'pin_memory', True)
}
self.test_loader_params = {
'batch_size': 1,
'shuffle': False,
'num_workers': args.num_workers,
'pin_memory': True
}
# 4. sampler
self.data_sampler = config.TRAINER.DATA_SAMPLER
self.n_samples_per_subset = config.TRAINER.N_SAMPLES_PER_SUBSET
self.subset_replacement = config.TRAINER.SB_SUBSET_SAMPLE_REPLACEMENT
self.shuffle = config.TRAINER.SB_SUBSET_SHUFFLE
self.repeat = config.TRAINER.SB_REPEAT
# (optional) RandomSampler for debugging
# misc configurations
self.parallel_load_data = getattr(args, 'parallel_load_data', False)
self.seed = config.TRAINER.SEED # 66
def setup(self, stage=None):
"""
Setup train / val / test dataset. This method will be called by PL automatically.
Args:
stage (str): 'fit' in training phase, and 'test' in testing phase.
"""
assert stage in ['fit', 'test'], "stage must be either fit or test"
try:
self.world_size = dist.get_world_size()
self.rank = dist.get_rank()
logger.info(f"[rank:{self.rank}] world_size: {self.world_size}")
except AssertionError as ae:
self.world_size = 1
self.rank = 0
            logger.warning(str(ae) + " (set world_size=1 and rank=0)")
if stage == 'fit':
self.train_dataset = self._setup_dataset(
self.train_data_root,
self.train_npz_root,
self.train_list_path,
self.train_intrinsic_path,
mode='train',
min_overlap_score=self.min_overlap_score_train,
pose_dir=self.train_pose_root)
# setup multiple (optional) validation subsets
if isinstance(self.val_list_path, (list, tuple)):
self.val_dataset = []
if not isinstance(self.val_npz_root, (list, tuple)):
self.val_npz_root = [self.val_npz_root for _ in range(len(self.val_list_path))]
for npz_list, npz_root in zip(self.val_list_path, self.val_npz_root):
self.val_dataset.append(self._setup_dataset(
self.val_data_root,
npz_root,
npz_list,
self.val_intrinsic_path,
mode='val',
min_overlap_score=self.min_overlap_score_test,
pose_dir=self.val_pose_root))
else:
self.val_dataset = self._setup_dataset(
self.val_data_root,
self.val_npz_root,
self.val_list_path,
self.val_intrinsic_path,
mode='val',
min_overlap_score=self.min_overlap_score_test,
pose_dir=self.val_pose_root)
logger.info(f'[rank:{self.rank}] Train & Val Dataset loaded!')
        else:  # stage == 'test'
self.test_dataset = self._setup_dataset(
self.test_data_root,
self.test_npz_root,
self.test_list_path,
self.test_intrinsic_path,
mode='test',
min_overlap_score=self.min_overlap_score_test,
pose_dir=self.test_pose_root)
logger.info(f'[rank:{self.rank}]: Test Dataset loaded!')
def _setup_dataset(self,
data_root,
split_npz_root,
scene_list_path,
intri_path,
mode='train',
min_overlap_score=0.,
pose_dir=None):
""" Setup train / val / test set"""
with open(scene_list_path, 'r') as f:
npz_names = [name.split()[0] for name in f.readlines()]
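        # Scenes are partitioned across ranks only for training (via get_local_split),
        # so each process loads just its own subset of npz files; for val/test every
        # rank keeps the full scene list.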
if mode == 'train':
local_npz_names = get_local_split(npz_names, self.world_size, self.rank, self.seed)
else:
local_npz_names = npz_names
logger.info(f'[rank {self.rank}]: {len(local_npz_names)} scene(s) assigned.')
dataset_builder = self._build_concat_dataset_parallel \
if self.parallel_load_data \
else self._build_concat_dataset
return dataset_builder(data_root, local_npz_names, split_npz_root, intri_path,
mode=mode, min_overlap_score=min_overlap_score, pose_dir=pose_dir)
def _build_concat_dataset(
self,
data_root,
npz_names,
npz_dir,
intrinsic_path,
mode,
min_overlap_score=0.,
pose_dir=None
):
datasets = []
augment_fn = self.augment_fn if mode == 'train' else None
data_source = self.trainval_data_source if mode in ['train', 'val'] else self.test_data_source
if str(data_source).lower() == 'megadepth':
npz_names = [f'{n}.npz' for n in npz_names]
for npz_name in tqdm(npz_names,
desc=f'[rank:{self.rank}] loading {mode} datasets',
disable=int(self.rank) != 0):
            # `ScanNet_RGBD_Dataset` / `MegaDepth_RGBD_Dataset` load all data from npz_path when initialized, which may take some time.
npz_path = osp.join(npz_dir, npz_name)
if data_source == 'ScanNet':
datasets.append(
ScanNet_RGBD_Dataset(data_root,
npz_path,
intrinsic_path,
mode=mode,
min_overlap_score=min_overlap_score,
augment_fn=augment_fn,
pose_dir=pose_dir))
elif data_source == 'MegaDepth':
datasets.append(
MegaDepth_RGBD_Dataset(data_root,
npz_path,
mode=mode,
min_overlap_score=min_overlap_score,
img_resize=self.mgdpt_img_resize,
df=self.mgdpt_df,
img_padding=self.mgdpt_img_pad,
depth_padding=self.mgdpt_depth_pad,
augment_fn=augment_fn,
coarse_scale=self.coarse_scale))
else:
raise NotImplementedError()
return ConcatDataset(datasets)
def _build_concat_dataset_parallel(
self,
data_root,
npz_names,
npz_dir,
intrinsic_path,
mode,
min_overlap_score=0.,
pose_dir=None,
):
augment_fn = self.augment_fn if mode == 'train' else None
data_source = self.trainval_data_source if mode in ['train', 'val'] else self.test_data_source
if str(data_source).lower() == 'megadepth':
npz_names = [f'{n}.npz' for n in npz_names]
with tqdm_joblib(tqdm(desc=f'[rank:{self.rank}] loading {mode} datasets',
total=len(npz_names), disable=int(self.rank) != 0)):
if data_source == 'ScanNet':
datasets = Parallel(n_jobs=math.floor(len(os.sched_getaffinity(0)) * 0.9 / comm.get_local_size()))(
delayed(lambda x: _build_dataset(
ScanNet_RGBD_Dataset,
data_root,
osp.join(npz_dir, x),
intrinsic_path,
mode=mode,
min_overlap_score=min_overlap_score,
augment_fn=augment_fn,
pose_dir=pose_dir))(name)
for name in npz_names)
elif data_source == 'MegaDepth':
                # TODO: parallel MegaDepth loading currently fails with
                # `_pickle.PicklingError: Could not pickle the task to send it to the workers.`
                # The Parallel(...) block below is kept for reference but is unreachable until this is fixed.
                raise NotImplementedError()
datasets = Parallel(n_jobs=math.floor(len(os.sched_getaffinity(0)) * 0.9 / comm.get_local_size()))(
delayed(lambda x: _build_dataset(
                        MegaDepth_RGBD_Dataset,
data_root,
osp.join(npz_dir, x),
mode=mode,
min_overlap_score=min_overlap_score,
img_resize=self.mgdpt_img_resize,
df=self.mgdpt_df,
img_padding=self.mgdpt_img_pad,
depth_padding=self.mgdpt_depth_pad,
augment_fn=augment_fn,
coarse_scale=self.coarse_scale))(name)
for name in npz_names)
else:
raise ValueError(f'Unknown dataset: {data_source}')
return ConcatDataset(datasets)
def train_dataloader(self):
""" Build training dataloader for ScanNet / MegaDepth. """
assert self.data_sampler in ['scene_balance']
logger.info(
f'[rank:{self.rank}/{self.world_size}]: Train Sampler and DataLoader re-init (should not re-init between epochs!).')
if self.data_sampler == 'scene_balance':
sampler = RandomConcatSampler(self.train_dataset,
self.n_samples_per_subset,
self.subset_replacement,
self.shuffle, self.repeat, self.seed)
else:
sampler = None
dataloader = DataLoader(self.train_dataset, sampler=sampler, **self.train_loader_params)
return dataloader
def val_dataloader(self):
""" Build validation dataloader for ScanNet / MegaDepth. """
logger.info(f'[rank:{self.rank}/{self.world_size}]: Val Sampler and DataLoader re-init.')
if not isinstance(self.val_dataset, abc.Sequence):
sampler = DistributedSampler(self.val_dataset, shuffle=False)
return DataLoader(self.val_dataset, sampler=sampler, **self.val_loader_params)
else:
dataloaders = []
for dataset in self.val_dataset:
sampler = DistributedSampler(dataset, shuffle=False)
dataloaders.append(DataLoader(dataset, sampler=sampler, **self.val_loader_params))
return dataloaders
def test_dataloader(self, *args, **kwargs):
logger.info(f'[rank:{self.rank}/{self.world_size}]: Test Sampler and DataLoader re-init.')
sampler = DistributedSampler(self.test_dataset, shuffle=False)
return DataLoader(self.test_dataset, sampler=sampler, **self.test_loader_params)
def _build_dataset(dataset: Dataset, *args, **kwargs):
return dataset(*args, **kwargs)
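# A minimal usage sketch (not part of the original file): assuming `args` carries the
# training CLI options used above (batch_size, num_workers, ...) and `config` is the
# project's yacs config, the data module is handed to a PyTorch Lightning Trainer,
# which calls setup('fit'/'test') before requesting the dataloaders, e.g.:
#
#     data_module = RGBDDataModule(args, config)
#     trainer = pl.Trainer(...)  # DDP settings as in the repo's training script
#     trainer.fit(model, datamodule=data_module)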
| 27,968 | 44.626427 | 132 | py |
3DG-STFM | 3DG-STFM-master/src/loftr/loftr.py | import torch
import torch.nn as nn
import torch.nn.functional as F
from einops.einops import rearrange, repeat
from .backbone import build_backbone_rgb, build_backbone_rgbd
from .utils.position_encoding import PositionEncodingSine
from .loftr_module import LocalFeatureTransformer, FinePreprocess
from .utils.coarse_matching import CoarseMatching, CoarseMatching_t
from .utils.fine_matching import FineMatching, FineMatching_t
class LoFTR_RGB(nn.Module):
def __init__(self, config):
super().__init__()
# Misc
self.config = config
# Modules
self.backbone = build_backbone_rgb(config)
self.pos_encoding = PositionEncodingSine(config['coarse']['d_model'])
self.loftr_coarse = LocalFeatureTransformer(config['coarse'])
self.coarse_matching = CoarseMatching(config['match_coarse'])
self.fine_preprocess = FinePreprocess(config)
self.loftr_fine = LocalFeatureTransformer(config["fine"])
self.fine_matching = FineMatching()
def forward(self, data):
"""
Update:
data (dict): {
                'image0': (torch.Tensor): (N, 3, H, W), RGB image
                'image1': (torch.Tensor): (N, 3, H, W), RGB image
'mask0'(optional) : (torch.Tensor): (N, H, W) '0' indicates a padded position
'mask1'(optional) : (torch.Tensor): (N, H, W)
}
"""
# 1. Local Feature CNN
data.update({
'bs': data['image0'].size(0),
'hw0_i': data['image0'].shape[2:], 'hw1_i': data['image1'].shape[2:]
})
if data['hw0_i'] == data['hw1_i']: # faster & better BN convergence
feats_c, feats_f = self.backbone(torch.cat([data['image0'], data['image1']], dim=0))
(feat_c0, feat_c1), (feat_f0, feat_f1) = feats_c.split(data['bs']), feats_f.split(data['bs'])
else: # handle different input shapes
(feat_c0, feat_f0), (feat_c1, feat_f1) = self.backbone(data['image0']), self.backbone(data['image1'])
data.update({
'hw0_c': feat_c0.shape[2:], 'hw1_c': feat_c1.shape[2:],
'hw0_f': feat_f0.shape[2:], 'hw1_f': feat_f1.shape[2:]
})
# 2. coarse-level loftr module
# add featmap with positional encoding, then flatten it to sequence [N, HW, C]
feat_c0 = rearrange(self.pos_encoding(feat_c0), 'n c h w -> n (h w) c')
feat_c1 = rearrange(self.pos_encoding(feat_c1), 'n c h w -> n (h w) c')
mask_c0 = mask_c1 = None # mask is useful in training
if 'mask0' in data:
mask_c0, mask_c1 = data['mask0'].flatten(-2), data['mask1'].flatten(-2)
feat_c0, feat_c1 = self.loftr_coarse(feat_c0, feat_c1, mask_c0, mask_c1)
# 3. match coarse-level
self.coarse_matching(feat_c0, feat_c1, data, mask_c0=mask_c0, mask_c1=mask_c1)
# 4. fine-level refinement
feat_f0_unfold, feat_f1_unfold = self.fine_preprocess(feat_f0, feat_f1, feat_c0, feat_c1, data)
if feat_f0_unfold.size(0) != 0: # at least one coarse level predicted
feat_f0_unfold, feat_f1_unfold = self.loftr_fine(feat_f0_unfold, feat_f1_unfold)
# 5. match fine-level
self.fine_matching(feat_f0_unfold, feat_f1_unfold, data)
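        # As in the original LoFTR, the matching modules write their results into `data`
        # in place (coarse matches, refined keypoints and confidences); forward() itself
        # returns nothing.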
class LoFTR_RGBD(nn.Module):
def __init__(self, config):
super().__init__()
# Misc
self.config = config
# Modules
self.backbone = build_backbone_rgbd(config)
self.pos_encoding = PositionEncodingSine(config['coarse']['d_model'])
self.loftr_coarse = LocalFeatureTransformer(config['coarse'])
self.coarse_matching = CoarseMatching(config['match_coarse'])
self.fine_preprocess = FinePreprocess(config)
self.loftr_fine = LocalFeatureTransformer(config["fine"])
self.fine_matching = FineMatching()
def forward(self, data):
"""
Update:
data (dict): {
                'image0': (torch.Tensor): (N, 4, H, W), RGB-D (RGB + depth)
                'image1': (torch.Tensor): (N, 4, H, W), RGB-D (RGB + depth)
'mask0'(optional) : (torch.Tensor): (N, H, W) '0' indicates a padded position
'mask1'(optional) : (torch.Tensor): (N, H, W)
}
"""
# 1. Local Feature CNN
data.update({
'bs': data['image0'].size(0),
'hw0_i': data['image0'].shape[2:], 'hw1_i': data['image1'].shape[2:]
})
if data['hw0_i'] == data['hw1_i']: # faster & better BN convergence
feats_c, feats_f = self.backbone(torch.cat([data['image0'], data['image1']], dim=0))
(feat_c0, feat_c1), (feat_f0, feat_f1) = feats_c.split(data['bs']), feats_f.split(data['bs'])
else: # handle different input shapes
(feat_c0, feat_f0), (feat_c1, feat_f1) = self.backbone(data['image0']), self.backbone(data['image1'])
data.update({
'hw0_c': feat_c0.shape[2:], 'hw1_c': feat_c1.shape[2:],
'hw0_f': feat_f0.shape[2:], 'hw1_f': feat_f1.shape[2:]
})
# 2. coarse-level loftr module
# add featmap with positional encoding, then flatten it to sequence [N, HW, C]
feat_c0 = rearrange(self.pos_encoding(feat_c0), 'n c h w -> n (h w) c')
feat_c1 = rearrange(self.pos_encoding(feat_c1), 'n c h w -> n (h w) c')
mask_c0 = mask_c1 = None # mask is useful in training
if 'mask0' in data:
mask_c0, mask_c1 = data['mask0'].flatten(-2), data['mask1'].flatten(-2)
feat_c0, feat_c1 = self.loftr_coarse(feat_c0, feat_c1, mask_c0, mask_c1)
# 3. match coarse-level
self.coarse_matching(feat_c0, feat_c1, data, mask_c0=mask_c0, mask_c1=mask_c1)
# 4. fine-level refinement
feat_f0_unfold, feat_f1_unfold = self.fine_preprocess(feat_f0, feat_f1, feat_c0, feat_c1, data)
if feat_f0_unfold.size(0) != 0: # at least one coarse level predicted
feat_f0_unfold, feat_f1_unfold = self.loftr_fine(feat_f0_unfold, feat_f1_unfold)
# 5. match fine-level
self.fine_matching(feat_f0_unfold, feat_f1_unfold, data)
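        # Same pipeline as LoFTR_RGB above; the only difference is the 4-channel backbone
        # from build_backbone_rgbd, so the inputs are expected to include a depth channel.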
class LoFTR_RGBD_teacher(nn.Module):
def __init__(self, config):
super().__init__()
# Misc
self.config = config
# Modules
self.backbone = build_backbone_rgbd(config)
self.pos_encoding = PositionEncodingSine(config['coarse']['d_model'])
self.loftr_coarse = LocalFeatureTransformer(config['coarse'])
self.coarse_matching = CoarseMatching_t(config['match_coarse'])
self.fine_preprocess = FinePreprocess(config)
self.loftr_fine = LocalFeatureTransformer(config["fine"])
self.fine_matching = FineMatching_t()
def forward(self, data):
"""
Update:
data (dict): {
                'image0': (torch.Tensor): (N, 4, H, W), RGB-D (RGB + depth)
                'image1': (torch.Tensor): (N, 4, H, W), RGB-D (RGB + depth)
'mask0'(optional) : (torch.Tensor): (N, H, W) '0' indicates a padded position
'mask1'(optional) : (torch.Tensor): (N, H, W)
}
"""
# 1. Local Feature CNN
if data['hw0_i'] == data['hw1_i']: # faster & better BN convergence
feats_c, feats_f = self.backbone(torch.cat([data['image0'], data['image1']], dim=0))
(feat_c0, feat_c1), (feat_f0, feat_f1) = feats_c.split(data['bs']), feats_f.split(data['bs'])
else: # handle different input shapes
(feat_c0, feat_f0), (feat_c1, feat_f1) = self.backbone(data['image0']), self.backbone(data['image1'])
data.update({
'hw0_c_t': feat_c0, 'hw1_c_t': feat_c1,
'hw0_f_t': feat_f0, 'hw1_f_t': feat_f1
})
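        # Note: this forward only runs the backbone and caches the raw coarse/fine feature
        # maps in `data` under the *_t keys. The student below calls the teacher's
        # sub-modules (pos_encoding, loftr_coarse, matching, loftr_fine) directly rather
        # than going through this method.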
class LoFTR_RGB_student(nn.Module):
def __init__(self, config):
super().__init__()
# Misc
self.config = config
# Modules
self.backbone = build_backbone_rgb(config)
self.pos_encoding = PositionEncodingSine(config['coarse']['d_model'])
self.loftr_coarse = LocalFeatureTransformer(config['coarse'])
self.coarse_matching = CoarseMatching(config['match_coarse'])
self.fine_preprocess = FinePreprocess(config)
self.loftr_fine = LocalFeatureTransformer(config["fine"])
self.fine_matching = FineMatching()
self.teacher = LoFTR_RGBD_teacher(config)
        pretrained_t = "./logs/tb_logs/indoor/indoor_rgbd_teacher.ckpt"  # hard-coded path to the pretrained teacher checkpoint; to be made configurable in the future
sd = torch.load(pretrained_t, map_location='cpu')['state_dict']
from collections import OrderedDict
new_state_dict = OrderedDict()
for k, v in sd.items():
            name = k[8:]  # strip the leading 'matcher.' prefix
new_state_dict[name] = v
self.teacher.load_state_dict(new_state_dict,strict=True)
self.teacher.eval()
for param in self.teacher.parameters():
param.requires_grad = False
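        # The RGB-D teacher stays in eval mode with frozen weights; it is only used to
        # produce targets for student-teacher distillation and never receives gradients.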
def fine_preprocess_teacher_branch(self, feat_f0, feat_f1, feat_c0, feat_c1, data,b_ids,i_ids,j_ids):
W = data['W']
stride = data['hw0_f'][0] // data['hw0_c'][0]
d_model_f = 128
if b_ids.shape[0] == 0:
feat0 = torch.empty(0, W**2, d_model_f, device=feat_f0.device)
feat1 = torch.empty(0, W**2, d_model_f, device=feat_f0.device)
return feat0, feat1
# 1. unfold(crop) all local windows
feat_f0_unfold = F.unfold(feat_f0, kernel_size=(W, W), stride=stride, padding=W//2)
feat_f0_unfold = rearrange(feat_f0_unfold, 'n (c ww) l -> n l ww c', ww=W**2)
feat_f1_unfold = F.unfold(feat_f1, kernel_size=(W, W), stride=stride, padding=W//2)
feat_f1_unfold = rearrange(feat_f1_unfold, 'n (c ww) l -> n l ww c', ww=W**2)
# 2. select only the predicted matches
feat_f0_unfold = feat_f0_unfold[b_ids, i_ids] # [n, ww, cf]
feat_f1_unfold = feat_f1_unfold[b_ids, j_ids]
# option: use coarse-level loftr feature as context: concat and linear
        if True:  # originally gated by self.cat_c_feat; always enabled here
feat_c_win = self.teacher.fine_preprocess.down_proj(torch.cat([feat_c0[b_ids, i_ids],
feat_c1[b_ids, j_ids]], 0)) # [2n, c]
feat_cf_win = self.teacher.fine_preprocess.merge_feat(torch.cat([
torch.cat([feat_f0_unfold, feat_f1_unfold], 0), # [2n, ww, cf]
repeat(feat_c_win, 'n c -> n ww c', ww=W**2), # [2n, ww, cf]
], -1))
feat_f0_unfold, feat_f1_unfold = torch.chunk(feat_cf_win, 2, dim=0)
return feat_f0_unfold, feat_f1_unfold
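    # The helper above mirrors FinePreprocess.forward but reuses the student's coarse-match
    # indices (b_ids / i_ids / j_ids), keeping the teacher's fine-level windows aligned
    # one-to-one with the student's, presumably for the distillation loss.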
def forward(self, data):
"""
Update:
data (dict): {
                'image0': (torch.Tensor): (N, 4, H, W), RGB-D; the student uses channels 0-2 (RGB), the frozen teacher all 4
                'image1': (torch.Tensor): (N, 4, H, W), RGB-D
'mask0'(optional) : (torch.Tensor): (N, H, W) '0' indicates a padded position
'mask1'(optional) : (torch.Tensor): (N, H, W)
}
"""
# 1. Local Feature CNN
data.update({
'bs': data['image0'].size(0),
'hw0_i': data['image0'].shape[2:], 'hw1_i': data['image1'].shape[2:]
})
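        # The student consumes only the first three (RGB) channels; the full RGB-D tensors
        # remain in `data` and are fed to the frozen teacher further below.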
image0 = data['image0'][:, :3, :, :].clone()
image1 = data['image1'][:, :3, :, :].clone()
if data['hw0_i'] == data['hw1_i']: # faster & better BN convergence
feats_c, feats_f = self.backbone(torch.cat([image0, image1], dim=0))
(feat_c0, feat_c1), (feat_f0, feat_f1) = feats_c.split(data['bs']), feats_f.split(data['bs'])
else: # handle different input shapes
(feat_c0, feat_f0), (feat_c1, feat_f1) = self.backbone(image0), self.backbone(image1)
data.update({
'hw0_c': feat_c0.shape[2:], 'hw1_c': feat_c1.shape[2:],
'hw0_f': feat_f0.shape[2:], 'hw1_f': feat_f1.shape[2:]
})
# 2. coarse-level loftr module
# add featmap with positional encoding, then flatten it to sequence [N, HW, C]
feat_c0 = rearrange(self.pos_encoding(feat_c0), 'n c h w -> n (h w) c')
feat_c1 = rearrange(self.pos_encoding(feat_c1), 'n c h w -> n (h w) c')
mask_c0 = mask_c1 = None # mask is useful in training
if 'mask0' in data:
mask_c0, mask_c1 = data['mask0'].flatten(-2), data['mask1'].flatten(-2)
feat_c0, feat_c1 = self.loftr_coarse(feat_c0, feat_c1, mask_c0, mask_c1)
# 3. match coarse-level
self.coarse_matching(feat_c0, feat_c1, data, mask_c0=mask_c0, mask_c1=mask_c1)
# 4. fine-level refinement
feat_f0_unfold, feat_f1_unfold = self.fine_preprocess(feat_f0, feat_f1, feat_c0, feat_c1, data)
save_bi,save_ii,save_ji = data['b_ids'], data['i_ids'],data['j_ids']
#feat_f0_unfold_t, feat_f1_unfold_t = feat_f0_unfold.clone(), feat_f1_unfold.clone()
if feat_f0_unfold.size(0) != 0: # at least one coarse level predicted
feat_f0_unfold, feat_f1_unfold = self.loftr_fine(feat_f0_unfold, feat_f1_unfold)
# 5. match fine-level
self.fine_matching(feat_f0_unfold, feat_f1_unfold, data)
## teacher inference
if data['hw0_i'] == data['hw1_i']: # faster & better BN convergence
feats_c_t, feats_f_t = self.teacher.backbone(torch.cat([data['image0'], data['image1']], dim=0))
(feat_c0_t, feat_c1_t), (feat_f0_t, feat_f1_t) = feats_c_t.split(data['bs']), feats_f_t.split(data['bs'])
else: # handle different input shapes
(feat_c0_t, feat_f0_t), (feat_c1_t, feat_f1_t) = self.teacher.backbone(data['image0']), self.teacher.backbone(data['image1'])
feat_c0_t = rearrange(self.teacher.pos_encoding(feat_c0_t), 'n c h w -> n (h w) c')
feat_c1_t = rearrange(self.teacher.pos_encoding(feat_c1_t), 'n c h w -> n (h w) c')
mask_c0_t = mask_c1_t = None # mask is useful in training
#if 'mask0' in data:
# mask_c0_t, mask_c1_t = data['mask0'].flatten(-2), data['mask1'].flatten(-2)
feat_c0_t, feat_c1_t = self.teacher.loftr_coarse(feat_c0_t, feat_c1_t, mask_c0_t, mask_c1_t)
self.teacher.coarse_matching(feat_c0_t, feat_c1_t, data, mask_c0=None, mask_c1=None)
feat_f0_unfold_t, feat_f1_unfold_t = self.fine_preprocess_teacher_branch(feat_f0_t, feat_f1_t, feat_c0_t, feat_c1_t, data,save_bi,save_ii,save_ji)
if feat_f0_unfold.size(0) != 0: # at least one coarse level predicted
feat_f0_unfold_t, feat_f1_unfold_t = self.teacher.loftr_fine(feat_f0_unfold_t, feat_f1_unfold_t)
else:
feat_f0_unfold_t, feat_f1_unfold_t = feat_f0_unfold.clone(), feat_f1_unfold.clone()
#feat_f0_unfold_t, feat_f1_unfold_t = self.teacher.loftr_fine(feat_f0_unfold_t, feat_f1_unfold_t)
self.teacher.fine_matching(feat_f0_unfold_t, feat_f1_unfold_t, data)
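        # CoarseMatching_t / FineMatching_t presumably record the teacher's soft match
        # predictions in `data` alongside the student's outputs, so a distillation loss
        # can be computed outside this module.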
| 14,794 | 45.671924 | 154 | py |