# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import math
import torch
import torch.nn.functional as F
from torch.autograd import grad
def gPenalty(inputs, loss, lam, q):
# Gradient penalty
bs, c, h, w = inputs.size()
d_in = c * h * w
g = grad(loss, inputs, create_graph=True)[0] * bs
g = g.view(bs, -1)
qnorms = g.norm(q, 1).mean()
lam = lam * math.pow(d_in, 1. - 1. / q)
return lam * qnorms.mean() / 2.
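# (Added note, derived from the code above) gPenalty returns
#     lam * d_in^(1 - 1/q) * mean_batch( || d loss_i / d x_i ||_q ) / 2,
# i.e. a q-norm penalty on each per-sample input gradient, rescaled by the
# input dimension d_in = c * h * w; the factor `bs` undoes the 1/bs that
# F.cross_entropy's batch mean puts into grad(loss, inputs).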
def advAugment(net, inputs, targets, loss, lam, q):
# Single-step adversarial augmentation (e.g. FGSM)
bs, c, h, w = inputs.size()
d_in = c * h * w
g = grad(loss, inputs, retain_graph=True)[0] * bs
g = g.view(bs, -1).detach()
if q == 1:
lam = lam
dx = lam * g.sign()
else:
p = 1. / (1. - 1. / q)
lam = lam * math.pow(d_in, 1. - 1. / q)
dx = g.sign() * g.abs().pow(q - 1)  # explicit sign() needed when q is odd
pnorms = dx.norm(p, 1, keepdim=True)
dx = lam * dx / pnorms
dx = dx.view_as(inputs)
advInputs = (inputs + dx).detach()
advOutputs = net(advInputs)
advLoss = F.cross_entropy(advOutputs, targets)
return (advLoss - loss) / 2.
def pgd(net, inputs, targets, loss, lam, steps, step_size,
random_start=True, train=True):
# Projected gradient descent (i.e. iterative FGSM) with random starts
bs, c, h, w = inputs.size()
if random_start:
if torch.cuda.is_available():
noise = torch.cuda.FloatTensor(bs, c, h, w).uniform_(-lam, lam)
else:
noise = torch.FloatTensor(bs, c, h, w).uniform_(-lam, lam)
else:
if torch.cuda.is_available():
noise = torch.cuda.FloatTensor(bs, c, h, w).fill_(0)
else:
noise = torch.FloatTensor(bs, c, h, w).fill_(0)
advInputs = (inputs + noise).detach()
advInputs.requires_grad = True
advOutputs = net(advInputs)
advLoss = F.cross_entropy(advOutputs, targets)
for i in range(steps):
retain_graph = ((i + 1 == steps) and train)
g = grad(advLoss, advInputs, retain_graph=retain_graph)[0] * bs
g = g.view(bs, -1).detach()
dx = step_size * g.sign()
dx = dx.view_as(advInputs)
advInputs = advInputs + dx
advInputs = inputs + torch.clamp(advInputs - inputs, -lam, lam)
advInputs = advInputs.detach()
advInputs.requires_grad = True
advOutputs = net(advInputs)
advLoss = F.cross_entropy(advOutputs, targets)
return advLoss - loss, advOutputs
def crossLip(inputs, outputs, lam):
gk = []
n, K, cLpen = outputs.size(0), outputs.size(1), 0.
for k in range(K):
gk.append(grad(outputs[:, k].sum(), inputs, create_graph=True)[0])
for l in range(K):
for m in range(l + 1, K):
cLpen += (gk[l] - gk[m]) ** 2
cLpen = 2. / n / K ** 2 * cLpen.sum()
return lam * cLpen
def addPenalty(net, inputs, outputs, targets, loss, args):
if args.penalty == 'grad':
penalty = gPenalty(inputs, loss, args.lam, args.q)
elif args.penalty == 'adv':
penalty = advAugment(net, inputs, targets, loss, args.lam, args.q)
elif args.penalty == 'pgd':
penalty, _ = pgd( # uses linf attacks
net, inputs, targets, loss, args.lam,
args.steps, step_size=args.lam / (.75 * args.steps))
elif args.penalty == 'crossLip':
penalty = crossLip(inputs, outputs, args.lam)
else:
raise NotImplementedError("Unknown penalty %r" % args.penalty)
return penalty
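# A minimal, hypothetical usage sketch of the penalties above (not part of the
# original file). It builds a toy linear classifier on random CIFAR-sized
# inputs; the names `demo_penalty` and `toy_net` are illustrative only.
def demo_penalty():
    import argparse
    toy_net = torch.nn.Sequential(
        torch.nn.Flatten(), torch.nn.Linear(3 * 32 * 32, 10))
    x = torch.randn(4, 3, 32, 32, requires_grad=True)
    y = torch.randint(0, 10, (4,))
    outputs = toy_net(x)
    loss = F.cross_entropy(outputs, y)
    # gradient penalty with weight lam=0.1 and defense norm q=2
    args = argparse.Namespace(penalty='grad', lam=0.1, q=2, steps=None)
    return loss + addPenalty(toy_net, x, outputs, y, loss, args)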
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
""" Some utilities """
import os
import math
import warnings
import configargparse
import torch
from nets import ConvNet
def argument_parser():
parser = configargparse.ArgParser(
description='First-order vulnerability and input dimension')
parser.add(
'--config', required=True, is_config_file=True,
help='configuration file path')
parser.add_argument(
'--name', type=str,
help='Experiment name. Results will be saved/loaded from directory '
'./results/name (which will be created if needed).')
parser.add_argument(
'--datapath', type=str, default=None,
help="Data location. Default: '~/datasets/' + `dataset`")
parser.add_argument(
'--dataset', type=str, default='cifar',
help='mnist, cifar, imgnet12 (default: cifar)')
parser.add_argument(
'--img_size', type=int, default=None,
help='only for imgnet. Resize img to 32, 64, 128 or 256.')
parser.add_argument(
'--n_layers', type=int, default=5,
help='number of hidden layers')
parser.add_argument(
'--bs', type=int, default=128,
help='batch size')
parser.add_argument(
'--epochs', type=int, default=200,
help='number of training epochs')
parser.add_argument(
'--no_BN', action='store_true',
help='Do not use batch norms (except before the very 1st layer)')
parser.add_argument(
'--no_act', action='store_true',
help='No activation functions (e.g. no ReLUs)')
parser.add_argument(
'--raw_inputs', action='store_true',
help='Do not normalize inputs (hence no bn as first network layer)')
parser.add_argument(
'--log_step', type=int, default=None,
help='print training info every log_step batches (default: None)')
# training
parser.add_argument(
'--lr', type=float, default=.01,
help='Initial learning rate')
parser.add_argument(
'--no_training', action='store_true',
help='Do not train the network')
parser.add_argument(
'--crop', action='store_true',
help='Use cropping instead of resizing image.')
# Penalties/Regularizers
penalties = ['grad', 'adv', 'pgd', 'crossLip']
parser.add_argument(
'--lam', type=float, default=0.,
help='global regularization weight')
parser.add_argument(
'--penalty', type=str, choices=penalties, default=None,
help='penalty type:' + ' | '.join(penalties))
parser.add_argument(
'--q', type=int, default=None,
help="defense-norm q; dual of attack-norm p. "
"For FGSM, use penalty='adv' and 'q=1'")
parser.add_argument(
'--steps', type=int, default=None,
help='number of optimization steps per attack when using PGD')
# Vulnerability.py specific
parser.add_argument(
'--n_attacks', type=int, default=-1,
help='number of attack iterations; -1 for whole dataset')
parser.add_argument(
'--log_vul', action='store_true',
help='Print detailed logs of vulnerability computation')
# ConvNet specific
pooltypes = ['avgpool', 'maxpool', 'weightpool', 'subsamp']
last_layers = ['maxpool', 'avgpool', 'fc', 'weightpool']
parser.add_argument(
'--poolings', nargs='*', type=int, default=[],
help='Where to do poolings. Should be a list of '
'integers smaller than n_layers. Defaults to [] (no pooling). (ConvNet)')
parser.add_argument(
'--pooltype', type=str,
choices=pooltypes, default='subsamp',
help='pooling type: ' + ' | '.join(pooltypes) + ' (default: subsamp)')
parser.add_argument(
'--dilations', nargs='*', type=int, default=None,
help='Dilations to use for each layer. List of n_layers int. '
'Defaults to 1 for all layers. (ConvNet)')
parser.add_argument(
'--last_layers', type=str, choices=last_layers,
default='avgpool', help='last-layers type: ' + ' | '.join(last_layers))
args = parser.parse_args()
if args.datapath is None:
args.datapath = os.path.join('~/datasets/', args.dataset)
args.datapath = os.path.expanduser(args.datapath)
# DATASET SPECIFIC SETTINGS
if args.dataset == 'mnist':
if args.img_size is None:
args.img_size = 32
elif args.img_size not in {32, 64, 128, 256, 512}:
raise Exception(
"img_size must be 32, 64, 128, 256. "
"But provided %r" % args.img_size)
args.categories = 10
args.in_planes = 1
elif args.dataset == 'cifar':
if args.img_size is None:
args.img_size = 32
elif args.img_size not in {32, 64, 128, 256, 512}:
raise Exception(
"img_size must be 32, 64, 128, 256, or 512. "
"But provided %r" % args.img_size)
args.categories = 10
args.in_planes = 3
elif args.dataset == 'imgnet12':
if args.img_size is None:
args.img_size = 256
elif args.img_size not in {32, 64, 128, 256}:
raise Exception(
"img_size must be 32, 64, 128, or 256. "
"But provided %r" % args.img_size)
if args.bs > 32:
raise Exception(
"With imgnet12, Batchsize bs should be <= 32. "
"Otherwise, you'll probably run out of GPU memory")
args.categories = 12
args.in_planes = 3
else:
raise NotImplementedError("Dataset unknown")
# NETWORK DOUBLE-CHECKS/WARNINGS
if args.no_BN and args.raw_inputs:
warnings.warn(
"no_BN also removes the first BN layer before the net "
"which serves as normalization of data when using raw_inputs. "
"Thus data input data stays unnormalized between 0 and 1")
if args.dilations is None:
dilation = 1 if args.crop else int(args.img_size / 32)
args.dilations = [dilation] * args.n_layers
elif len(args.dilations) == 1:
args.dilations = args.dilations * args.n_layers
elif len(args.dilations) != args.n_layers:
raise Exception(
'Argument dilations must be single integer, or a list of '
'integers of length n_layers')
# PENALTY/REGULARIZATION WARNINGS
if (args.lam, args.penalty, args.q) != (0., None, None):
if args.lam == 0.:
warnings.warn(
"Arguments penalty and/or q are given, but lam = 0. "
"Set lam > 0., otherwise not penalty is used")
elif args.penalty is None:
raise Exception("Argument lam > 0., but no penalty is defined.")
elif (args.penalty in {'adv', 'grad'}) and (args.q is None):
raise Exception(
"If argument penalty is 'adv' or 'grad', q must be in "
"[1, infty]")
if (args.penalty == 'pgd') and (args.steps is None):
raise Exception(
"Arguments steps must be specified with "
"penalty-option pgd")
return parser, args
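# Hypothetical example of a config file accepted by the parser above (values
# are illustrative, not the authors' settings). configargparse reads simple
# `key = value` lines, so a file passed via `--config cifar_grad.cfg` could
# contain, e.g.:
#
#     name = cifar_L5_grad
#     dataset = cifar
#     n_layers = 5
#     bs = 128
#     epochs = 200
#     lr = 0.01
#     lam = 0.01
#     penalty = grad
#     q = 2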
def create_net(args):
net = ConvNet(
args.categories, args.n_layers, args.img_size, args.poolings,
args.pooltype, args.no_BN, args.no_act, args.dilations,
normalize_inputs=(not args.raw_inputs),
last_layers=args.last_layers, in_planes=args.in_planes)
return net
def initialize_params(m, no_act=False, distribution='normal'):
# gain = sqrt 2 for ReLU
gain = 1. if no_act else math.sqrt(2)
try: # if last layer, then gain = 1.
if m.unit_gain: # test if module has attribute 'unit_gain'
gain = 1.
except AttributeError:
pass
if type(m) in {torch.nn.Conv2d, torch.nn.Linear}:
if m.bias is not None:
torch.nn.init.constant_(m.bias, 0.)
out_ = m.weight.data.size(0)
in_ = m.weight.data.view(out_, -1).size(1)
sigma = gain / math.sqrt(in_)
if distribution == 'uniform':
xmax = math.sqrt(3) * sigma
torch.nn.init.uniform_(m.weight, a=-xmax, b=xmax)
elif distribution == 'normal':
torch.nn.init.normal_(m.weight, std=sigma)
else:
raise NotImplementedError(
"Argument distribution must be 'uniform' or 'normal'. "
"Got: '%r'" % distribution)
elif type(m) == torch.nn.BatchNorm2d:
if m.affine:
torch.nn.init.constant_(m.bias, 0.)
torch.nn.init.constant_(m.weight, 1.)
if m.track_running_stats:
torch.nn.init.constant_(m.running_mean, 0.)
torch.nn.init.constant_(m.running_var, 1.)
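# (Added worked example, derived from the code above) For a 3x3 conv with 64
# input channels and ReLU activations, fan-in is in_ = 3 * 3 * 64 = 576, so
# sigma = sqrt(2) / sqrt(576) ≈ 0.059 for distribution='normal', and the
# uniform bound would be xmax = sqrt(3) * sigma ≈ 0.102.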
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import os
import time
import torch
import torch.nn.functional as F
from torch.autograd import grad
from data import CIFAR10, IMGNET12, MNIST
from vulnerability import compute_vulnerability
from utils import argument_parser, create_net, initialize_params
from penalties import addPenalty, pgd
# NB: Logger cannot be pushed to utils.py, because of eval(name)
class Logger(object):
def __init__(self):
self.logs = dict()
def log(self, step, delta_time, *args):
for name in args:
if type(name) != str:
raise Exception(
"Logger takes strings as inputs. "
"But got %s" % type(name))
if name not in self.logs:
self.logs[name] = []
self.logs[name].append([eval(name), step, delta_time])
def get_logs(self):
return self.logs
def set_logs(self, logs):
self.logs = logs # logs : dict
return
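# (Added note on the eval(name) contract) Logger.log resolves each string with
# eval() in this module's scope, so only names that are module-level globals
# at call time can be logged, e.g.
#
#     logger.log(epoch, time_per_epoch, 'lr', 'tr_res')
#
# appends [eval('lr'), step, delta_time] to logger.logs['lr'], and likewise
# for 'tr_res'. This is why the class lives here rather than in utils.py.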
def grad_norms(loss, inputs, train=False):
bs = inputs.size(0)
g = grad(loss, inputs, retain_graph=train)[0] * bs
g = g.view(bs, -1)
norm1, norm2 = g.norm(1, 1).mean(), g.norm(2, 1).mean()
return norm1.item(), norm2.item()
def do_epoch(epoch, net, optimizer, loader, mode, args):
if mode not in {'train', 'eval', 'test', 'init'}:
# 'init' -> for initialization of batchnorms
# 'train' -> training (but no logging of vul & dam)
# 'eval' -> compute acc & gnorms but not vul & dam on validation
# 'test' -> compute all logged values on test set
raise Exception('Argument mode must be train, eval, test or init')
net.eval() if mode in {'eval', 'test'} else net.train()
device = next(net.parameters()).device
cum_loss = cum_pen = cum_norm1 = cum_norm2 = total = correct = 0.
advVul = advCorrect = cum_dam = 0.
predictedAdv = None
for i, (inputs, targets) in enumerate(loader):
optimizer.zero_grad()
inputs, targets = inputs.to(device), targets.to(device)
inputs.requires_grad = True
outputs = net(inputs)
loss = F.cross_entropy(outputs, targets)
norm1, norm2 = grad_norms(loss, inputs, mode == 'train')
if mode == 'train':
if args.lam > 0.:
penalty = addPenalty(net, inputs, outputs, targets, loss, args)
loss += penalty
cum_pen += penalty.item()
cum_loss += loss.item()
loss.backward()
optimizer.step()
elif mode == 'test': # compute adv vul & damage using custom PGD
eps = .004
advDam, advOutputs = pgd(
net, inputs, targets, loss, lam=eps, steps=10,
step_size=eps / (.75 * 10), random_start=False, train=False)
# Compute logging info
cum_norm1 += norm1
cum_norm2 += norm2
cum_loss += loss.item()
total += targets.size(0)
_, predicted = torch.max(outputs.data, 1)
correct += predicted.eq(targets.data).float().cpu().sum().item()
if mode == 'test':
cum_dam += advDam.item() / eps
_, predictedAdv = torch.max(advOutputs.data, 1)
advVul += predicted.size(0) - (
predictedAdv.eq(predicted.data).float().cpu().sum().item())
advCorrect += predictedAdv.eq(
targets.data).float().cpu().sum().item()
results = {
'acc': 100 * correct / total, # accuracy
'loss': cum_loss / (i + 1), # loss
'pen': cum_pen / (i + 1), # penalty
'norm1': cum_norm1 / (i + 1), # avg l1-gradient norm
'norm2': cum_norm2 / (i + 1), # avg l2-gradient norm
'av': 100 * advVul / total, # adversarial vulnerability
'da': cum_dam / (i + 1), # adversarial damage
'aa': 100 * advCorrect / total # adversarial accuracy
}
if args.log_step is not None and i % args.log_step == 0:
print("Epoch: %03d Batch: %04d Mode: %-5s Acc: %4.1f Loss: %4.2f "
"Pen: %5.3f gNorm1: %6.2f gNorm2: %6.3f Vul: %4.1f "
"Dam: %6.2f AdAcc %4.1f" % (
epoch, i, mode, *[
results[i] for i in ['acc', 'loss', 'pen', 'norm1',
'norm2', 'av', 'da', 'aa']]))
return results
if __name__ == '__main__':
parser, args = argument_parser()
logger = Logger()
args.path = os.path.join('results', args.name)
net = create_net(args)
# print(net)
if not os.path.exists(args.path):
os.makedirs(args.path, exist_ok=True) # requires Python >= 3.2
if os.path.isfile(os.path.join(args.path, 'last.pt')):
print('> Loading last saved state/network...')
state = torch.load(os.path.join(args.path, 'last.pt'))
net.load_state_dict(state['state_dict'])
lr = state['lr']
optimizer = torch.optim.SGD(net.parameters(), lr=lr, momentum=0.9)
optimizer.load_state_dict(state['optimizer'])
best_va_acc = state['best_va_acc']
start_ep = state['epoch'] + 1
logger.set_logs(state['logs'])
else: # initialize new net
print('> Initializing new network...')
net.apply(lambda m: initialize_params(m, args.no_act, 'normal'))
lr = args.lr
optimizer = torch.optim.SGD(net.parameters(), lr=lr, momentum=0.9)
best_va_acc = -1.
start_ep = -1
print('> Done.')
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
net = net.to(device)
torch.backends.cudnn.benchmark = True
print('> Loading dataset...')
if args.dataset == 'mnist':
tr_loader, va_loader, te_loader = MNIST(
root=args.datapath, bs=args.bs, valid_size=.1,
size=args.img_size, normalize=(not args.raw_inputs))
elif args.dataset == 'cifar':
tr_loader, va_loader, te_loader = CIFAR10(
root=args.datapath, bs=args.bs, valid_size=.1,
size=args.img_size, normalize=(not args.raw_inputs))
elif args.dataset == 'imgnet12':
tr_loader, va_loader, te_loader = IMGNET12(
root=args.datapath, bs=args.bs, valid_size=.1,
size=args.img_size, normalize=(not args.raw_inputs))
else:
raise NotImplementedError
print('> Done.')
print('> Starting training.')
time_start = time.time()
epochs = 0 if args.no_training else args.epochs
for epoch in range(start_ep, epochs):
time_start = time.time()
if epoch % 30 == 0 and epoch > 0:
# reload best parameters on validation set
net.load_state_dict(
torch.load(os.path.join(
args.path, 'best.pt'))['state_dict'])
# update learning rate
lr *= .5
for param_group in optimizer.param_groups:
param_group['lr'] = lr
mode = 'init' if epoch < 0 else 'train'
tr_res = do_epoch(epoch, net, optimizer, tr_loader, mode, args)
va_res = do_epoch(epoch, net, optimizer, va_loader, 'eval', args)
te_res = do_epoch(epoch, net, optimizer, te_loader, 'test', args)
time_per_epoch = time.time() - time_start
print("epoch %3d lr %.1e te_norm1 %7.3f te_norm2 %6.4f tr_loss %6.3f "
"tr_acc %5.2f te_acc %5.2f te_aa %5.2f te_av %5.2f te_da %6.3f "
"va_acc %5.2f be_va_acc %5.2f time %d" % (
epoch, lr, te_res['norm1'], te_res['norm2'], tr_res['loss'],
tr_res['acc'], te_res['acc'], te_res['aa'], te_res['av'],
te_res['da'], va_res['acc'], best_va_acc,
time_per_epoch))
# Log and save results
logger.log(epoch, time_per_epoch, 'lr', 'tr_res', 'va_res', 'te_res')
state = {
'lr': lr,
'epoch': epoch,
'state_dict': net.state_dict(),
'optimizer': optimizer.state_dict(),
'args': args,
'logs': logger.get_logs(),
'best_va_acc': best_va_acc
}
torch.save(state, os.path.join(args.path, 'last.pt'))
if va_res['acc'] > best_va_acc:
best_va_acc = va_res['acc']
torch.save(state, os.path.join(args.path, 'best.pt'))
print('> Finished Training')
# Compute adversarial vulnerability with foolbox
print('\n> Starting attacks.')
attacks = {'l1'}
# attacks = {'l1', 'l2', 'itl1', 'itl2', 'deepFool', 'pgd', 'boundary'}
for attack in attacks:
vulnerability = compute_vulnerability(
args, attack, net, args.n_attacks)
torch.save(vulnerability,
os.path.join(args.path, 'vulnerability_%s.pt' % attack))
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import math
import time
import numpy as np
import scipy.stats as st
from functools import partial
import torch
from torch.autograd import grad
import foolbox
from foolbox.distances import Linfinity, MSE
from data import CIFAR10, IMGNET12, MNIST
def do_pass(net, loader, args, means, stds):
correct = total = 0.
device = next(net.parameters()).device
means = torch.FloatTensor(means).to(device)
stds = torch.FloatTensor(stds).to(device)
for i, (inputs, targets) in enumerate(loader):
inputs, targets = inputs.to(device), targets.to(device)
inputs = (inputs - means) / stds
outputs = net(inputs)
_, predicted = torch.max(outputs.data, 1)
total += targets.size(0)
correct += predicted.eq(targets.data).float().sum().item()
if args.log_step is not None and i % args.log_step == 0:
print("Batch: %03d Acc: %4.1f" % (i, 100 * correct / total))
return 100 * correct / total
def classify(net, x, args, means, stds):
device = next(net.parameters()).device
x = x.to(device).view(1, 3, args.img_size, args.img_size)
means = torch.FloatTensor(means).to(device)
stds = torch.FloatTensor(stds).to(device)
x = ((x - means) / stds).detach()
x.requires_grad = True
y = net(x)
g = grad(y.sum(), x)[0].view(x.size(0), -1).norm().item()
_, top_indices = y.data.cpu().view(-1).topk(2)
return top_indices[0].item(), g
def myPrint(string, args):
if args.log_vul:
print(string)
def conf95(a):
return st.t.interval(
0.95, len(a) - 1, loc=np.nanmean(a),
scale=st.sem(a, nan_policy='omit'))
def compute_vulnerability(args, attack_name, net, n_attacks=-1):
"""
Computes vulnerability using foolbox package of net
Parameters
----------
args : :class:`argparse.ArgumentParser`
The arguments passed to main.py
attack_name : string
The attack type. Must be one of
{'l1', 'l2', 'itl1', 'itl2', 'pgd', 'deepFool', 'boundary'}
net : :class:`torch.nn.Module`
The network whose vulnerability is computed.
n_attacks : int
The number of attacks to use for the computation of vulnerability.
If -1 or greater than dataset-size, uses the entire dataset.
Default: -1.
"""
print('\nStarting attacks of type ' + attack_name)
# Reload data without normalizing it
print('> Loading dataset %s...' % args.dataset)
if args.dataset == 'mnist':
_, loader = MNIST(
root=args.datapath, bs=args.bs, valid_size=0.,
size=args.img_size, normalize=False)
elif args.dataset == 'cifar':
_, loader = CIFAR10(
root=args.datapath, bs=args.bs, valid_size=0.,
size=args.img_size, normalize=False)
elif args.dataset == 'imgnet12':
_, loader = IMGNET12(
root=args.datapath, bs=args.bs, valid_size=0.,
size=args.img_size, normalize=False)
else:
raise NotImplementedError
print('> Done.')
# Image-normalizations (must be same as in data.py)
if args.raw_inputs:
means = [0., 0., 0.]
stds = [1., 1., 1.]
elif args.dataset == "mnist":
means = [0.1307]
stds = [0.3081]
elif args.dataset == "cifar":
means = [0.4914, 0.4822, 0.4465]
stds = [0.2023, 0.1994, 0.2010]
elif args.dataset == "imgnet12":
means = [.453, .443, .403]
stds = {
256: [.232, .226, .225],
128: [.225, .218, .218],
64: [.218, .211, .211],
32: [.206, .200, .200]
}[args.img_size]
else:
raise NotImplementedError
means = np.array(means).reshape(-1, 1, 1)
stds = np.array(stds).reshape(-1, 1, 1)
net.eval()
print('> Computing test accuracy...')
te_acc = do_pass(net, loader, args, means, stds)
print('> Done. Computed test accuracy: %5.2f' % te_acc)
# construct attack
bounds = (0, 1)
model = foolbox.models.PyTorchModel(net, bounds=bounds,
preprocessing=(means, stds),
num_classes=args.categories)
# Choosing attack type
if attack_name == 'l1':
# vulnerability increases like sqrt(d) \propto img_size
# therefore, we divide the linfty-threshold by img_size
attack = partial(foolbox.attacks.FGSM(model, distance=Linfinity),
epsilons=1000, max_epsilon=1. / args.img_size)
elif attack_name == 'l2':
# to be visually constant, the l2-threshold increases like sqrt d;
# but vulnerability also increases like sqrt d;
# therefore, use a constant max_epsilon across dimensions d
attack = partial(foolbox.attacks.GradientAttack(model, distance=MSE),
epsilons=1000, max_epsilon=1.)
elif attack_name == 'itl1':
it, eps = 10, 1. / args.img_size
attack = partial(
foolbox.attacks.LinfinityBasicIterativeAttack(
model, distance=Linfinity),
iterations=it, epsilon=eps,
stepsize=eps / (.75 * it), binary_search=True)
elif attack_name == 'itl2':
it, eps = 10, 1.
attack = partial(
foolbox.attacks.L2BasicIterativeAttack(
model, distance=MSE),
iterations=it, epsilon=eps,
stepsize=eps / (.75 * it), binary_search=True)
elif attack_name == 'pgd':
it, eps = 10, 1. / args.img_size
attack = partial(foolbox.attacks.RandomPGD(model, distance=Linfinity),
iterations=it, epsilon=eps,
stepsize=eps / (.75 * it), binary_search=True)
elif attack_name == 'deepFool':
attack = foolbox.attacks.DeepFoolAttack(model, distance=MSE)
elif attack_name == 'boundary':
attack = partial(foolbox.attacks.BoundaryAttack(model, distance=MSE),
iterations=2000, log_every_n_steps=np.inf,
verbose=False)
else:
raise NotImplementedError(
"attack_name must be 'l1', 'l2', 'itl1', 'itl2', "
"'deepFool' or 'boundary'")
n_iterations = 0
results = {}
results['l2_snr'] = []
results['clean_grad'] = []
results['dirty_grad'] = []
results['l2_norm'] = []
results['linf_norm'] = []
n_fooled = 0
print('> Creating empty image-tensors')
n_saved = 64 if (n_attacks == -1) else min(n_attacks, 64)
clean_images = torch.zeros(n_saved, 3, args.img_size, args.img_size)
dirty_images = torch.zeros(n_saved, 3, args.img_size, args.img_size)
print('> Done.')
myPrint(("{:>15} " * 5).format(
"clean_grad", "dirty_grad", "linf_norm", "l2_norm", "l2_snr"), args)
t0 = time.time()
for i, (images, labels) in enumerate(loader):
if n_iterations == n_attacks:
break
for i, clean_image in enumerate(images):
clean_label, clean_grad = classify(net, clean_image,
args, means, stds)
dirty_image_np = attack(clean_image.numpy(), clean_label)
if dirty_image_np is not None: # i.e. if adversarial was found
dirty_image = torch.Tensor(dirty_image_np)
_, dirty_grad = classify(net, dirty_image,
args, means, stds)
if i < n_saved: # only save n_saved first images
dirty_images[i] = dirty_image.clone()
clean_images[i] = clean_image.clone()
l2_norm = (clean_image - dirty_image).norm().item()
linf_norm = (clean_image - dirty_image).abs().max().item()
l2_snr = 20. * math.log10(
clean_image.norm().item() / (l2_norm + 1e-6))
else:
l2_snr = dirty_grad = l2_norm = linf_norm = np.nan
results['l2_snr'].append(l2_snr)
results['clean_grad'].append(clean_grad)
results['dirty_grad'].append(dirty_grad)
results['l2_norm'].append(l2_norm)
results['linf_norm'].append(linf_norm)
fmt_str = "{:>15.6f} " * 5
if ((attack.func._default_distance == MSE and
l2_norm < .005 * np.sqrt(args.img_size)) or
(attack.func._default_distance == Linfinity and
linf_norm < .005)):
fmt_str += " * fooled!"
n_fooled += 1
myPrint(fmt_str.format(clean_grad, dirty_grad, linf_norm,
l2_norm, l2_snr),
args)
n_iterations += 1
if n_iterations == n_attacks:
break
# Printing summary
summary = {}
print("\n Summary for network in '{}' of test accuracy {}".format(
args.path, te_acc))
for key, value in results.items():
low95, high95 = conf95(value)
print("{:>10} mean:{:>10.5f} std:{:>10.5f} conf95:({:>10.5f}, "
"{:>10.5f}) minmax:({:>10.5f}, {:>10.5f})".format(
key, np.nanmean(value), np.nanstd(value), low95, high95,
np.nanmin(value), np.nanmax(value)))
summary[key] = [np.nanmean(value), np.nanstd(value), low95, high95]
percent = 100 * n_fooled / float(n_iterations)
print("{:>10} {:10d}s".format("Time", int(time.time() - t0)))
print("{:>10} {:10.1f}%".format("percent", percent))
# Preparing the output
output = dict()
output['summary'] = summary
output['results'] = results
output['clean_images'] = clean_images
output['dirty_images'] = dirty_images
output['percent'] = percent
output['te_acc'] = te_acc
return output
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import os
import numpy as np
from PIL import Image
import torch
from torch.utils.data.sampler import SubsetRandomSampler
import torchvision.transforms as transforms
import torchvision.datasets as datasets
def IMGNET12(root='~/datasets/imgnet12/', bs=32, bs_test=None, num_workers=32,
valid_size=.1, size=256, crop=False, normalize=False):
# Datafolder '~/datasets/imgnet12/' should contain folders train/ and val/,
# each of which should contain 12 subfolders (1 per class) with .jpg files
root = os.path.expanduser(root)
# original means = [.485, .456, .406]
# original stds = [0.229, 0.224, 0.225]
means = [.453, .443, .403]
stds = {
256: [.232, .226, .225],
128: [.225, .218, .218],
64: [.218, .211, .211],
32: [.206, .200, .200]
}
if normalize:
normalize = transforms.Normalize(mean=means,
std=stds[size])
else:
normalize = transforms.Normalize((0., 0., 0),
(1., 1., 1.))
if bs_test is None:
bs_test = bs
if crop:
tr_downsamplingOp = transforms.RandomCrop(size)
te_downsamplingOp = transforms.CenterCrop(size)
else:
tr_downsamplingOp = transforms.Resize(size)
te_downsamplingOp = transforms.Resize(size)
preprocess = [transforms.Resize(256), transforms.CenterCrop(256)]
tr_transforms = transforms.Compose([
*preprocess,
tr_downsamplingOp,
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
normalize, ])
te_transforms = transforms.Compose([
*preprocess,
te_downsamplingOp,
transforms.ToTensor(),
normalize, ])
tr_dataset = datasets.ImageFolder(root + '/train', transform=tr_transforms)
te_dataset = datasets.ImageFolder(root + '/val', transform=te_transforms)
# Split training in train and valid set
num_train = len(tr_dataset)
indices = list(range(num_train))
split = int(np.floor(valid_size * num_train))
np.random.seed(42)
np.random.shuffle(indices)
tr_idx, va_idx = indices[split:], indices[:split]
tr_sampler = SubsetRandomSampler(tr_idx)
va_sampler = SubsetRandomSampler(va_idx)
tr_loader = torch.utils.data.DataLoader(
tr_dataset, batch_size=bs,
num_workers=num_workers, pin_memory=True, sampler=tr_sampler)
va_loader = torch.utils.data.DataLoader(
tr_dataset, batch_size=bs_test,
num_workers=num_workers, pin_memory=True, sampler=va_sampler)
te_loader = torch.utils.data.DataLoader(
te_dataset, batch_size=bs_test, shuffle=False,
num_workers=num_workers, pin_memory=True)
if valid_size > 0.:
return tr_loader, va_loader, te_loader
else:
return tr_loader, te_loader
def CIFAR10(root='~/datasets/cifar10/', bs=128, bs_test=None,
augment_training=True, valid_size=0., size=32, num_workers=1,
normalize=False):
root = os.path.expanduser(root)
if bs_test is None:
bs_test = bs
if normalize:
normalize = transforms.Normalize((0.4914, 0.4822, 0.4465),
(0.2023, 0.1994, 0.2010))
else:
normalize = transforms.Normalize((0., 0., 0),
(1., 1., 1.))
transform_train = transforms.Compose([
transforms.RandomCrop(32, padding=4),
transforms.RandomHorizontalFlip(),
transforms.Resize(size, Image.NEAREST),
transforms.ToTensor(),
normalize
])
transform_test = transforms.Compose([
transforms.Resize(size, Image.NEAREST),
transforms.ToTensor(),
normalize
])
transform_valid = transform_test
if augment_training is False:
transform_train = transform_test
dataset_tr = datasets.CIFAR10(root=root,
train=True,
transform=transform_train)
dataset_va = datasets.CIFAR10(root=root,
train=True,
transform=transform_valid)
dataset_te = datasets.CIFAR10(root=root,
train=False,
transform=transform_test)
# Split training in train and valid set
num_train = len(dataset_tr)
indices = list(range(num_train))
split = int(np.floor(valid_size * num_train))
np.random.seed(42)
np.random.shuffle(indices)
train_idx, valid_idx = indices[split:], indices[:split]
train_sampler = SubsetRandomSampler(train_idx)
valid_sampler = SubsetRandomSampler(valid_idx)
loader_tr = torch.utils.data.DataLoader(dataset_tr,
batch_size=bs,
sampler=train_sampler,
num_workers=num_workers)
loader_va = torch.utils.data.DataLoader(dataset_va,
batch_size=bs,
sampler=valid_sampler,
num_workers=num_workers)
# add pin_memory
loader_te = torch.utils.data.DataLoader(dataset_te,
batch_size=bs_test,
shuffle=False,
num_workers=num_workers)
if valid_size > 0:
return loader_tr, loader_va, loader_te
else:
return loader_tr, loader_te
def MNIST(root='~/datasets/mnist/', bs=128, bs_test=None,
augment_training=True, valid_size=0., size=32, num_workers=1,
normalize=False):
root = os.path.expanduser(root)
if bs_test is None:
bs_test = bs
if normalize:
normalize = transforms.Normalize((0.1307,), (0.3081,))
else:
normalize = transforms.Normalize((0.,), (1.,))
transform = transforms.Compose([
transforms.Resize(32, Image.BILINEAR),
transforms.Resize(size, Image.NEAREST),
transforms.Grayscale(num_output_channels=1),
transforms.ToTensor(),
normalize
])
dataset_tr = datasets.MNIST(root=root,
train=True,
transform=transform)
dataset_va = datasets.MNIST(root=root,
train=True,
transform=transform)
dataset_te = datasets.MNIST(root=root,
train=False,
transform=transform)
# Split training in train and valid set
num_train = len(dataset_tr)
indices = list(range(num_train))
split = int(np.floor(valid_size * num_train))
np.random.seed(42)
np.random.shuffle(indices)
train_idx, valid_idx = indices[split:], indices[:split]
train_sampler = SubsetRandomSampler(train_idx)
valid_sampler = SubsetRandomSampler(valid_idx)
loader_tr = torch.utils.data.DataLoader(dataset_tr,
batch_size=bs,
sampler=train_sampler,
num_workers=num_workers)
loader_va = torch.utils.data.DataLoader(dataset_va,
batch_size=bs,
sampler=valid_sampler,
num_workers=num_workers)
# add pin_memory
loader_te = torch.utils.data.DataLoader(dataset_te,
batch_size=bs_test,
shuffle=False,
num_workers=num_workers)
if valid_size > 0:
return loader_tr, loader_va, loader_te
else:
return loader_tr, loader_te
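# A minimal, hypothetical usage sketch of the loaders above (illustrative
# only; it assumes the CIFAR-10 files are already present under `root`):
#
#     tr_loader, va_loader, te_loader = CIFAR10(
#         root='~/datasets/cifar10/', bs=128, valid_size=.1,
#         size=32, normalize=True)
#     images, labels = next(iter(tr_loader))   # images: (128, 3, 32, 32)
#
# With valid_size=0. only the (train, test) pair is returned.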
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
from functools import reduce
import torch.nn as nn
import torch.nn.functional as F
class Identity(nn.Module):
def __init__(self):
super(Identity, self).__init__()
def forward(self, x):
return x
class FlexibleAvgPool2d(nn.Module):
def __init__(self):
super().__init__()
def forward(self, inputs):
return F.avg_pool2d(inputs, kernel_size=inputs.size(2))
class WeightPool(nn.Module):
def __init__(self, in_planes, kernel_size):
super(WeightPool, self).__init__()
self.conv = nn.Conv2d(in_planes, in_planes, kernel_size=kernel_size,
stride=kernel_size, groups=in_planes, bias=False)
self.conv.unit_gain = True
def forward(self, x):
return self.conv(x)
class WeightPoolOut(nn.Module):
def __init__(self, in_planes, plane_size, categories, unit_gain=False):
super(WeightPoolOut, self).__init__()
self.in_planes = in_planes
self.conv = nn.Conv2d(in_planes, in_planes, kernel_size=plane_size,
groups=in_planes, bias=False)
self.linear = nn.Linear(in_planes, categories)
self.linear.unit_gain = unit_gain
def forward(self, x):
out = self.conv(x)
out = out.view(-1, self.in_planes)
return self.linear(out)
class MaxPoolOut(nn.Module):
def __init__(self, in_planes, plane_size, categories, unit_gain=False):
super(MaxPoolOut, self).__init__()
self.in_planes = in_planes
self.maxpool = nn.MaxPool2d(kernel_size=plane_size)
self.linear = nn.Linear(in_planes, categories)
self.linear.unit_gain = unit_gain
def forward(self, x):
out = self.maxpool(x)
out = out.view(-1, self.in_planes)
return self.linear(out)
class AvgPoolOut(nn.Module):
def __init__(self, in_planes, plane_size, categories, unit_gain=False):
super(AvgPoolOut, self).__init__()
self.in_planes = in_planes
self.avgpool = nn.AvgPool2d(kernel_size=plane_size)
self.linear = nn.Linear(in_planes, categories)
self.linear.unit_gain = unit_gain
def forward(self, x):
out = self.avgpool(x)
out = out.view(-1, self.in_planes)
return self.linear(out)
class FCout(nn.Module):
def __init__(self, in_planes, plane_size, categories, unit_gain=False):
super(FCout, self).__init__()
if type(plane_size) == tuple and len(plane_size) == 2:
plane_size = reduce(lambda x, y: x * y, plane_size)
else:
plane_size = plane_size ** 2
print('Plane size = ', plane_size)
self.in_planes = in_planes
self.plane_size = plane_size
self.linear = nn.Linear(in_planes * plane_size, categories)
self.linear.unit_gain = unit_gain
def forward(self, x):
out = x.view(-1, self.in_planes * self.plane_size)
return self.linear(out)
class ConvLayer(nn.Module):
def __init__(self, in_planes, planes, pooltype=None, no_BN=False,
no_act=False, dilation=1):
super(ConvLayer, self).__init__()
self.pad = nn.ReflectionPad2d(dilation)
if pooltype is None: # Usual conv
self.conv = nn.Conv2d(in_planes, planes, 3, padding=0,
stride=1, dilation=dilation)
elif pooltype == 'avgpool': # Average Pool
self.conv = nn.Sequential(
nn.Conv2d(in_planes, planes, 3, dilation=dilation),
nn.AvgPool2d(2))
elif pooltype == 'subsamp': # Strided Conv
self.conv = nn.Conv2d(
in_planes, planes, 3, stride=2, dilation=dilation)
elif pooltype == 'maxpool': # Max Pool
self.conv = nn.Sequential(
nn.Conv2d(in_planes, planes, 3, dilation=dilation),
nn.MaxPool2d(2))
elif pooltype == 'weightpool':
self.conv = nn.Sequential(
nn.Conv2d(in_planes, planes, 3, dilation=dilation),
WeightPool(planes, 2))
else:
raise NotImplementedError
if no_act:
self.act = lambda x: x
else:
self.act = nn.ReLU()
if no_BN:
self.bn = lambda x: x # Identity()
else:
self.bn = nn.BatchNorm2d(planes)
def forward(self, x):
out = self.act(self.bn(self.conv(self.pad(x))))
return out
class ConvNet(nn.Module):
def __init__(
self, categories=10, n_layers=3, in_size=32, poolings=None,
pooltype='avgpool', no_BN=False, no_act=False, dilations=1,
normalize_inputs=False, last_layers='maxpool', in_planes=3):
# last_layers in {'maxpool', 'fc', 'weightpool'}
super(ConvNet, self).__init__()
poolings = [] if poolings is None else poolings
if type(dilations) != list:
dilations = [dilations] * n_layers
self.in_planes = in_planes
if normalize_inputs or no_BN:
self.bn = (lambda x: x)
else:
self.bn = nn.BatchNorm2d(self.in_planes)
self.layers = self._make_layers(
ConvLayer, 64, n_layers, poolings, pooltype,
no_BN, no_act, dilations)
# compute input-size to last layers from input-size of the net
# self.in_planes is changed by _make_layers to the nbr of out-planes
out_size = int(in_size / (2 ** (len(poolings))))
self.last_layers = self._make_last_layers(
out_size, categories, last_layers)
def _make_layers(self, block, planes, num_blocks, poolings,
pooltype, no_BN, no_act, dilations):
# pooltypes = [0] + [0] * (num_blocks - 1)
pooltypes = [None] * num_blocks
for pool in poolings:
pooltypes[pool] = pooltype
layers = []
for pool, dilation in zip(pooltypes, dilations):
layers.append(block(self.in_planes, planes, pool, no_BN, no_act,
dilation))
self.in_planes = planes
return nn.Sequential(*layers)
def _make_last_layers(self, in_size, categories, last_layers):
if last_layers == 'maxpool':
last_layers = MaxPoolOut(
self.in_planes, in_size, categories, unit_gain=True)
elif last_layers == 'avgpool':
last_layers = AvgPoolOut(
self.in_planes, in_size, categories, unit_gain=True)
elif last_layers == 'weightpool':
last_layers = WeightPoolOut(
self.in_planes, in_size, categories, unit_gain=True)
elif last_layers == 'fc':
last_layers = FCout(
self.in_planes, in_size, categories, unit_gain=True)
else:
raise NotImplementedError(
'Argument last_layers must be maxpool, avgpool, fc, or weightpool. '
'But got: %s' % last_layers)
return last_layers
def forward(self, x):
out = self.layers(self.bn(x))
out = self.last_layers(out)
return out
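# A minimal, hypothetical smoke test for ConvNet (not part of the original
# file): it builds a small 3-layer net with one strided-conv pooling and
# checks the output shape on random CIFAR-sized inputs.
def _demo_convnet():
    import torch
    net = ConvNet(categories=10, n_layers=3, in_size=32, poolings=[1],
                  pooltype='subsamp', last_layers='avgpool', in_planes=3)
    x = torch.randn(2, 3, 32, 32)
    out = net(x)
    assert out.shape == (2, 10)
    return out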
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import detectron2.utils.comm as comm
from detectron2.checkpoint import DetectionCheckpointer
from detectron2.config import get_cfg
from detectron2.engine import default_argument_parser, default_setup, launch
from adapteacher import add_ateacher_config
from adapteacher.engine.trainer import ATeacherTrainer, BaselineTrainer
# hacky way to register
from adapteacher.modeling.meta_arch.rcnn import TwoStagePseudoLabGeneralizedRCNN, DAobjTwoStagePseudoLabGeneralizedRCNN
from adapteacher.modeling.meta_arch.vgg import build_vgg_backbone # noqa
from adapteacher.modeling.proposal_generator.rpn import PseudoLabRPN
from adapteacher.modeling.roi_heads.roi_heads import StandardROIHeadsPseudoLab
import adapteacher.data.datasets.builtin
from adapteacher.modeling.meta_arch.ts_ensemble import EnsembleTSModel
def setup(args):
"""
Create configs and perform basic setups.
"""
cfg = get_cfg()
add_ateacher_config(cfg)
cfg.merge_from_file(args.config_file)
cfg.merge_from_list(args.opts)
cfg.freeze()
default_setup(cfg, args)
return cfg
def main(args):
cfg = setup(args)
if cfg.SEMISUPNET.Trainer == "ateacher":
Trainer = ATeacherTrainer
elif cfg.SEMISUPNET.Trainer == "baseline":
Trainer = BaselineTrainer
else:
raise ValueError("Trainer Name is not found.")
if args.eval_only:
if cfg.SEMISUPNET.Trainer == "ateacher":
model = Trainer.build_model(cfg)
model_teacher = Trainer.build_model(cfg)
ensem_ts_model = EnsembleTSModel(model_teacher, model)
DetectionCheckpointer(
ensem_ts_model, save_dir=cfg.OUTPUT_DIR
).resume_or_load(cfg.MODEL.WEIGHTS, resume=args.resume)
res = Trainer.test(cfg, ensem_ts_model.modelTeacher)
else:
model = Trainer.build_model(cfg)
DetectionCheckpointer(model, save_dir=cfg.OUTPUT_DIR).resume_or_load(
cfg.MODEL.WEIGHTS, resume=args.resume
)
res = Trainer.test(cfg, model)
return res
trainer = Trainer(cfg)
trainer.resume_or_load(resume=args.resume)
return trainer.train()
if __name__ == "__main__":
args = default_argument_parser().parse_args()
print("Command Line Args:", args)
launch(
main,
args.num_gpus,
num_machines=args.num_machines,
machine_rank=args.machine_rank,
dist_url=args.dist_url,
args=(args,),
)
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
from detectron2.config import CfgNode as CN
def add_ateacher_config(cfg):
"""
Add config for semisupnet.
"""
_C = cfg
_C.TEST.VAL_LOSS = True
_C.MODEL.RPN.UNSUP_LOSS_WEIGHT = 1.0
_C.MODEL.RPN.LOSS = "CrossEntropy"
_C.MODEL.ROI_HEADS.LOSS = "CrossEntropy"
_C.SOLVER.IMG_PER_BATCH_LABEL = 1
_C.SOLVER.IMG_PER_BATCH_UNLABEL = 1
_C.SOLVER.FACTOR_LIST = (1,)
_C.DATASETS.TRAIN_LABEL = ("coco_2017_train",)
_C.DATASETS.TRAIN_UNLABEL = ("coco_2017_train",)
_C.DATASETS.CROSS_DATASET = True
_C.TEST.EVALUATOR = "COCOeval"
_C.SEMISUPNET = CN()
# Output dimension of the MLP projector after `res5` block
_C.SEMISUPNET.MLP_DIM = 128
# Semi-supervised training
_C.SEMISUPNET.Trainer = "ateacher"
_C.SEMISUPNET.BBOX_THRESHOLD = 0.7
_C.SEMISUPNET.PSEUDO_BBOX_SAMPLE = "thresholding"
_C.SEMISUPNET.TEACHER_UPDATE_ITER = 1
_C.SEMISUPNET.BURN_UP_STEP = 12000
_C.SEMISUPNET.EMA_KEEP_RATE = 0.0
_C.SEMISUPNET.UNSUP_LOSS_WEIGHT = 4.0
_C.SEMISUPNET.SUP_LOSS_WEIGHT = 0.5
_C.SEMISUPNET.LOSS_WEIGHT_TYPE = "standard"
_C.SEMISUPNET.DIS_TYPE = "res4"
_C.SEMISUPNET.DIS_LOSS_WEIGHT = 0.1
# dataloader
# supervision level
_C.DATALOADER.SUP_PERCENT = 100.0 # 5 = 5% dataset as labeled set
_C.DATALOADER.RANDOM_DATA_SEED = 0 # random seed to read data
_C.DATALOADER.RANDOM_DATA_SEED_PATH = "dataseed/COCO_supervision.txt"
_C.EMAMODEL = CN()
_C.EMAMODEL.SUP_CONSIST = True
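# Hypothetical sketch of a YAML fragment exercising the keys registered above
# (values are illustrative, not the released configs):
#
#   SEMISUPNET:
#     Trainer: "ateacher"
#     BURN_UP_STEP: 12000
#     EMA_KEEP_RATE: 0.9996
#     UNSUP_LOSS_WEIGHT: 1.0
#     DIS_TYPE: "res4"
#   SOLVER:
#     IMG_PER_BATCH_LABEL: 8
#     IMG_PER_BATCH_UNLABEL: 8
#   DATALOADER:
#     SUP_PERCENT: 100.0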
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
from .config import add_ateacher_config
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
from detectron2.checkpoint.c2_model_loading import align_and_update_state_dicts
from detectron2.checkpoint import DetectionCheckpointer
# for load_student_model
from typing import Any
from fvcore.common.checkpoint import _strip_prefix_if_present, _IncompatibleKeys
class DetectionTSCheckpointer(DetectionCheckpointer):
def _load_model(self, checkpoint):
if checkpoint.get("__author__", None) == "Caffe2":
# pretrained model weight: only update student model
if checkpoint.get("matching_heuristics", False):
self._convert_ndarray_to_tensor(checkpoint["model"])
# convert weights by name-matching heuristics
model_state_dict = self.model.modelStudent.state_dict()
align_and_update_state_dicts(
model_state_dict,
checkpoint["model"],
c2_conversion=checkpoint.get("__author__", None) == "Caffe2",
)
checkpoint["model"] = model_state_dict
# for non-caffe2 models, use standard ways to load it
incompatible = self._load_student_model(checkpoint)
model_buffers = dict(self.model.modelStudent.named_buffers(recurse=False))
for k in ["pixel_mean", "pixel_std"]:
# Ignore missing key message about pixel_mean/std.
# Though they may be missing in old checkpoints, they will be correctly
# initialized from config anyway.
if k in model_buffers:
try:
incompatible.missing_keys.remove(k)
except ValueError:
pass
return incompatible
else: # whole model
if checkpoint.get("matching_heuristics", False):
self._convert_ndarray_to_tensor(checkpoint["model"])
# convert weights by name-matching heuristics
model_state_dict = self.model.state_dict()
align_and_update_state_dicts(
model_state_dict,
checkpoint["model"],
c2_conversion=checkpoint.get("__author__", None) == "Caffe2",
)
checkpoint["model"] = model_state_dict
# for non-caffe2 models, use standard ways to load it
incompatible = super()._load_model(checkpoint)
model_buffers = dict(self.model.named_buffers(recurse=False))
for k in ["pixel_mean", "pixel_std"]:
# Ignore missing key message about pixel_mean/std.
# Though they may be missing in old checkpoints, they will be correctly
# initialized from config anyway.
if k in model_buffers:
try:
incompatible.missing_keys.remove(k)
except ValueError:
pass
return incompatible
def _load_student_model(self, checkpoint: Any) -> _IncompatibleKeys: # pyre-ignore
checkpoint_state_dict = checkpoint.pop("model")
self._convert_ndarray_to_tensor(checkpoint_state_dict)
# if the state_dict comes from a model that was wrapped in a
# DataParallel or DistributedDataParallel during serialization,
# remove the "module" prefix before performing the matching.
_strip_prefix_if_present(checkpoint_state_dict, "module.")
# work around https://github.com/pytorch/pytorch/issues/24139
model_state_dict = self.model.modelStudent.state_dict()
incorrect_shapes = []
for k in list(checkpoint_state_dict.keys()):
if k in model_state_dict:
shape_model = tuple(model_state_dict[k].shape)
shape_checkpoint = tuple(checkpoint_state_dict[k].shape)
if shape_model != shape_checkpoint:
incorrect_shapes.append((k, shape_checkpoint, shape_model))
checkpoint_state_dict.pop(k)
# pyre-ignore
incompatible = self.model.modelStudent.load_state_dict(
checkpoint_state_dict, strict=False
)
return _IncompatibleKeys(
missing_keys=incompatible.missing_keys,
unexpected_keys=incompatible.unexpected_keys,
incorrect_shapes=incorrect_shapes,
)
# class DetectionCheckpointer(Checkpointer):
# """
# Same as :class:`Checkpointer`, but is able to handle models in detectron & detectron2
# model zoo, and apply conversions for legacy models.
# """
# def __init__(self, model, save_dir="", *, save_to_disk=None, **checkpointables):
# is_main_process = comm.is_main_process()
# super().__init__(
# model,
# save_dir,
# save_to_disk=is_main_process if save_to_disk is None else save_to_disk,
# **checkpointables,
# )
# def _load_file(self, filename):
# if filename.endswith(".pkl"):
# with PathManager.open(filename, "rb") as f:
# data = pickle.load(f, encoding="latin1")
# if "model" in data and "__author__" in data:
# # file is in Detectron2 model zoo format
# self.logger.info("Reading a file from '{}'".format(data["__author__"]))
# return data
# else:
# # assume file is from Caffe2 / Detectron1 model zoo
# if "blobs" in data:
# # Detection models have "blobs", but ImageNet models don't
# data = data["blobs"]
# data = {k: v for k, v in data.items() if not k.endswith("_momentum")}
# return {"model": data, "__author__": "Caffe2", "matching_heuristics": True}
# loaded = super()._load_file(filename) # load native pth checkpoint
# if "model" not in loaded:
# loaded = {"model": loaded}
# return loaded
# def _load_model(self, checkpoint):
# if checkpoint.get("matching_heuristics", False):
# self._convert_ndarray_to_tensor(checkpoint["model"])
# # convert weights by name-matching heuristics
# model_state_dict = self.model.state_dict()
# align_and_update_state_dicts(
# model_state_dict,
# checkpoint["model"],
# c2_conversion=checkpoint.get("__author__", None) == "Caffe2",
# )
# checkpoint["model"] = model_state_dict
# # for non-caffe2 models, use standard ways to load it
# super()._load_model(checkpoint)
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import torch
from detectron2.config import CfgNode
from detectron2.solver.lr_scheduler import WarmupCosineLR, WarmupMultiStepLR
from .lr_scheduler import WarmupTwoStageMultiStepLR
def build_lr_scheduler(
cfg: CfgNode, optimizer: torch.optim.Optimizer
) -> torch.optim.lr_scheduler._LRScheduler:
"""
Build a LR scheduler from config.
"""
name = cfg.SOLVER.LR_SCHEDULER_NAME
if name == "WarmupMultiStepLR":
return WarmupMultiStepLR(
optimizer,
cfg.SOLVER.STEPS,
cfg.SOLVER.GAMMA,
warmup_factor=cfg.SOLVER.WARMUP_FACTOR,
warmup_iters=cfg.SOLVER.WARMUP_ITERS,
warmup_method=cfg.SOLVER.WARMUP_METHOD,
)
elif name == "WarmupCosineLR":
return WarmupCosineLR(
optimizer,
cfg.SOLVER.MAX_ITER,
warmup_factor=cfg.SOLVER.WARMUP_FACTOR,
warmup_iters=cfg.SOLVER.WARMUP_ITERS,
warmup_method=cfg.SOLVER.WARMUP_METHOD,
)
elif name == "WarmupTwoStageMultiStepLR":
return WarmupTwoStageMultiStepLR(
optimizer,
cfg.SOLVER.STEPS,
factor_list=cfg.SOLVER.FACTOR_LIST,
gamma=cfg.SOLVER.GAMMA,
warmup_factor=cfg.SOLVER.WARMUP_FACTOR,
warmup_iters=cfg.SOLVER.WARMUP_ITERS,
warmup_method=cfg.SOLVER.WARMUP_METHOD,
)
else:
raise ValueError("Unknown LR scheduler: {}".format(name))
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
from bisect import bisect_right
from typing import List
import torch
from detectron2.solver.lr_scheduler import _get_warmup_factor_at_iter
class WarmupTwoStageMultiStepLR(torch.optim.lr_scheduler._LRScheduler):
def __init__(
self,
optimizer: torch.optim.Optimizer,
milestones: List[int],
factor_list: List[int],
gamma: float = 0.1,
warmup_factor: float = 0.001,
warmup_iters: int = 1000,
warmup_method: str = "linear",
last_epoch: int = -1,
):
if not list(milestones) == sorted(milestones):
raise ValueError(
"Milestones should be a list of" " increasing integers. Got {}",
milestones,
)
if len(milestones) + 1 != len(factor_list):
raise ValueError("Length of milestones should match length of factor_list.")
self.milestones = milestones
self.gamma = gamma
self.warmup_factor = warmup_factor
self.warmup_iters = warmup_iters
self.warmup_method = warmup_method
self.factor_list = factor_list
super().__init__(optimizer, last_epoch)
def get_lr(self) -> List[float]:
warmup_factor = _get_warmup_factor_at_iter(
self.warmup_method, self.last_epoch, self.warmup_iters, self.warmup_factor
)
return [
base_lr
* warmup_factor
* self.factor_list[bisect_right(self.milestones, self.last_epoch)]
for base_lr in self.base_lrs
]
def _compute_values(self) -> List[float]:
# The new interface
return self.get_lr()
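# (Added worked example, illustrative values) With milestones=[60000, 80000],
# factor_list=(1, 0.1, 0.01) and base_lr=0.02, get_lr() returns, after warmup,
#     0.02 * 1     for iterations in [0, 60000),
#     0.02 * 0.1   for iterations in [60000, 80000),
#     0.02 * 0.01  for iterations >= 80000,
# because bisect_right(milestones, last_epoch) picks the factor_list entry of
# the current milestone bucket; note that `gamma` is stored but unused here.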
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import numpy as np
import torch
import torch.nn as nn
from torch.nn import functional as F
from detectron2.modeling.meta_arch.build import META_ARCH_REGISTRY
from detectron2.modeling.meta_arch.rcnn import GeneralizedRCNN
from detectron2.config import configurable
# from detectron2.modeling.meta_arch.build import META_ARCH_REGISTRY
# from detectron2.modeling.meta_arch.rcnn import GeneralizedRCNN
import logging
from typing import Dict, Tuple, List, Optional
from collections import OrderedDict
from detectron2.modeling.proposal_generator import build_proposal_generator
from detectron2.modeling.backbone import build_backbone, Backbone
from detectron2.modeling.roi_heads import build_roi_heads
from detectron2.utils.events import get_event_storage
from detectron2.structures import ImageList
from detectron2.data.detection_utils import convert_image_to_rgb
############### Image discriminator ##############
class FCDiscriminator_img(nn.Module):
def __init__(self, num_classes, ndf1=256, ndf2=128):
super(FCDiscriminator_img, self).__init__()
self.conv1 = nn.Conv2d(num_classes, ndf1, kernel_size=3, padding=1)
self.conv2 = nn.Conv2d(ndf1, ndf2, kernel_size=3, padding=1)
self.conv3 = nn.Conv2d(ndf2, ndf2, kernel_size=3, padding=1)
self.classifier = nn.Conv2d(ndf2, 1, kernel_size=3, padding=1)
self.leaky_relu = nn.LeakyReLU(negative_slope=0.2, inplace=True)
def forward(self, x):
x = self.conv1(x)
x = self.leaky_relu(x)
x = self.conv2(x)
x = self.leaky_relu(x)
x = self.conv3(x)
x = self.leaky_relu(x)
x = self.classifier(x)
return x
#################################
################ Gradient reverse function
class GradReverse(torch.autograd.Function):
@staticmethod
def forward(ctx, x):
return x.view_as(x)
@staticmethod
def backward(ctx, grad_output):
return grad_output.neg()
def grad_reverse(x):
return GradReverse.apply(x)
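# A minimal, hypothetical sketch of the gradient reversal above (illustrative
# only): the forward pass is the identity while the backward pass negates the
# gradient, which is what lets the backbone and the image-level discriminator
# below play an adversarial game with a single backward pass.
def _demo_grad_reverse():
    x = torch.ones(3, requires_grad=True)
    grad_reverse(x).sum().backward()
    return x.grad  # tensor([-1., -1., -1.])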
#######################
@META_ARCH_REGISTRY.register()
class DAobjTwoStagePseudoLabGeneralizedRCNN(GeneralizedRCNN):
@configurable
def __init__(
self,
*,
backbone: Backbone,
proposal_generator: nn.Module,
roi_heads: nn.Module,
pixel_mean: Tuple[float],
pixel_std: Tuple[float],
input_format: Optional[str] = None,
vis_period: int = 0,
dis_type: str,
# dis_loss_weight: float = 0,
):
"""
Args:
backbone: a backbone module, must follow detectron2's backbone interface
proposal_generator: a module that generates proposals using backbone features
roi_heads: a ROI head that performs per-region computation
pixel_mean, pixel_std: list or tuple with #channels element, representing
the per-channel mean and std to be used to normalize the input image
input_format: describe the meaning of channels of input. Needed by visualization
vis_period: the period to run visualization. Set to 0 to disable.
"""
super(GeneralizedRCNN, self).__init__()
self.backbone = backbone
self.proposal_generator = proposal_generator
self.roi_heads = roi_heads
self.input_format = input_format
self.vis_period = vis_period
if vis_period > 0:
assert input_format is not None, "input_format is required for visualization!"
self.register_buffer("pixel_mean", torch.tensor(pixel_mean).view(-1, 1, 1), False)
self.register_buffer("pixel_std", torch.tensor(pixel_std).view(-1, 1, 1), False)
assert (
self.pixel_mean.shape == self.pixel_std.shape
), f"{self.pixel_mean} and {self.pixel_std} have different shapes!"
# @yujheli: you may need to build your discriminator here
self.dis_type = dis_type
self.D_img = None
# self.D_img = FCDiscriminator_img(self.backbone._out_feature_channels['res4']) # Need to know the channel
# self.D_img = None
self.D_img = FCDiscriminator_img(self.backbone._out_feature_channels[self.dis_type]) # Need to know the channel
# self.bceLoss_func = nn.BCEWithLogitsLoss()
def build_discriminator(self):
self.D_img = FCDiscriminator_img(self.backbone._out_feature_channels[self.dis_type]).to(self.device) # Need to know the channel
@classmethod
def from_config(cls, cfg):
backbone = build_backbone(cfg)
return {
"backbone": backbone,
"proposal_generator": build_proposal_generator(cfg, backbone.output_shape()),
"roi_heads": build_roi_heads(cfg, backbone.output_shape()),
"input_format": cfg.INPUT.FORMAT,
"vis_period": cfg.VIS_PERIOD,
"pixel_mean": cfg.MODEL.PIXEL_MEAN,
"pixel_std": cfg.MODEL.PIXEL_STD,
"dis_type": cfg.SEMISUPNET.DIS_TYPE,
# "dis_loss_ratio": cfg.xxx,
}
def preprocess_image_train(self, batched_inputs: List[Dict[str, torch.Tensor]]):
"""
Normalize, pad and batch the input images.
"""
images = [x["image"].to(self.device) for x in batched_inputs]
images = [(x - self.pixel_mean) / self.pixel_std for x in images]
images = ImageList.from_tensors(images, self.backbone.size_divisibility)
images_t = [x["image_unlabeled"].to(self.device) for x in batched_inputs]
images_t = [(x - self.pixel_mean) / self.pixel_std for x in images_t]
images_t = ImageList.from_tensors(images_t, self.backbone.size_divisibility)
return images, images_t
def forward(
self, batched_inputs, branch="supervised", given_proposals=None, val_mode=False
):
"""
Args:
batched_inputs: a list, batched outputs of :class:`DatasetMapper` .
Each item in the list contains the inputs for one image.
For now, each item in the list is a dict that contains:
* image: Tensor, image in (C, H, W) format.
* instances (optional): groundtruth :class:`Instances`
* proposals (optional): :class:`Instances`, precomputed proposals.
Other information that's included in the original dicts, such as:
* "height", "width" (int): the output resolution of the model, used in inference.
See :meth:`postprocess` for details.
Returns:
list[dict]:
Each dict is the output for one input image.
The dict contains one key "instances" whose value is a :class:`Instances`.
The :class:`Instances` object has the following keys:
"pred_boxes", "pred_classes", "scores", "pred_masks", "pred_keypoints"
"""
if self.D_img is None:
self.build_discriminator()
if (not self.training) and (not val_mode): # only conduct when testing mode
return self.inference(batched_inputs)
source_label = 0
target_label = 1
if branch == "domain":
# self.D_img.train()
# source_label = 0
# target_label = 1
# images = self.preprocess_image(batched_inputs)
images_s, images_t = self.preprocess_image_train(batched_inputs)
features = self.backbone(images_s.tensor)
# import pdb
# pdb.set_trace()
features_s = grad_reverse(features[self.dis_type])
D_img_out_s = self.D_img(features_s)
loss_D_img_s = F.binary_cross_entropy_with_logits(D_img_out_s, torch.FloatTensor(D_img_out_s.data.size()).fill_(source_label).to(self.device))
features_t = self.backbone(images_t.tensor)
features_t = grad_reverse(features_t[self.dis_type])
# features_t = grad_reverse(features_t['p2'])
D_img_out_t = self.D_img(features_t)
loss_D_img_t = F.binary_cross_entropy_with_logits(D_img_out_t, torch.FloatTensor(D_img_out_t.data.size()).fill_(target_label).to(self.device))
# import pdb
# pdb.set_trace()
losses = {}
losses["loss_D_img_s"] = loss_D_img_s
losses["loss_D_img_t"] = loss_D_img_t
return losses, [], [], None
# self.D_img.eval()
images = self.preprocess_image(batched_inputs)
if "instances" in batched_inputs[0]:
gt_instances = [x["instances"].to(self.device) for x in batched_inputs]
else:
gt_instances = None
features = self.backbone(images.tensor)
# TODO: remove the usage of if else here. This needs to be re-organized
if branch == "supervised":
features_s = grad_reverse(features[self.dis_type])
D_img_out_s = self.D_img(features_s)
loss_D_img_s = F.binary_cross_entropy_with_logits(D_img_out_s, torch.FloatTensor(D_img_out_s.data.size()).fill_(source_label).to(self.device))
# Region proposal network
proposals_rpn, proposal_losses = self.proposal_generator(
images, features, gt_instances
)
# roi_head lower branch
_, detector_losses = self.roi_heads(
images,
features,
proposals_rpn,
compute_loss=True,
targets=gt_instances,
branch=branch,
)
# visualization
if self.vis_period > 0:
storage = get_event_storage()
if storage.iter % self.vis_period == 0:
self.visualize_training(batched_inputs, proposals_rpn, branch)
losses = {}
losses.update(detector_losses)
losses.update(proposal_losses)
losses["loss_D_img_s"] = loss_D_img_s*0.001
return losses, [], [], None
elif branch == "supervised_target":
# features_t = grad_reverse(features_t[self.dis_type])
# D_img_out_t = self.D_img(features_t)
# loss_D_img_t = F.binary_cross_entropy_with_logits(D_img_out_t, torch.FloatTensor(D_img_out_t.data.size()).fill_(target_label).to(self.device))
# Region proposal network
proposals_rpn, proposal_losses = self.proposal_generator(
images, features, gt_instances
)
# roi_head lower branch
_, detector_losses = self.roi_heads(
images,
features,
proposals_rpn,
compute_loss=True,
targets=gt_instances,
branch=branch,
)
# visualization
if self.vis_period > 0:
storage = get_event_storage()
if storage.iter % self.vis_period == 0:
self.visualize_training(batched_inputs, proposals_rpn, branch)
losses = {}
losses.update(detector_losses)
losses.update(proposal_losses)
# losses["loss_D_img_t"] = loss_D_img_t*0.001
# losses["loss_D_img_s"] = loss_D_img_s*0.001
return losses, [], [], None
elif branch == "unsup_data_weak":
"""
unsupervised weak branch: input image without any ground-truth label; output proposals of rpn and roi-head
"""
# Region proposal network
proposals_rpn, _ = self.proposal_generator(
images, features, None, compute_loss=False
)
# roi_head lower branch (keep this for further production)
# notice that we do not use any target in ROI head to do inference!
proposals_roih, ROI_predictions = self.roi_heads(
images,
features,
proposals_rpn,
targets=None,
compute_loss=False,
branch=branch,
)
# if self.vis_period > 0:
# storage = get_event_storage()
# if storage.iter % self.vis_period == 0:
# self.visualize_training(batched_inputs, proposals_rpn, branch)
return {}, proposals_rpn, proposals_roih, ROI_predictions
elif branch == "unsup_data_strong":
raise NotImplementedError()
elif branch == "val_loss":
raise NotImplementedError()
def visualize_training(self, batched_inputs, proposals, branch=""):
"""
        This function differs from the original one:
- it adds "branch" to the `vis_name`.
A function used to visualize images and proposals. It shows ground truth
bounding boxes on the original image and up to 20 predicted object
proposals on the original image. Users can implement different
visualization functions for different models.
Args:
batched_inputs (list): a list that contains input to the model.
proposals (list): a list that contains predicted proposals. Both
batched_inputs and proposals should have the same length.
"""
from detectron2.utils.visualizer import Visualizer
storage = get_event_storage()
max_vis_prop = 20
for input, prop in zip(batched_inputs, proposals):
img = input["image"]
img = convert_image_to_rgb(img.permute(1, 2, 0), self.input_format)
v_gt = Visualizer(img, None)
v_gt = v_gt.overlay_instances(boxes=input["instances"].gt_boxes)
anno_img = v_gt.get_image()
box_size = min(len(prop.proposal_boxes), max_vis_prop)
v_pred = Visualizer(img, None)
v_pred = v_pred.overlay_instances(
boxes=prop.proposal_boxes[0:box_size].tensor.cpu().numpy()
)
prop_img = v_pred.get_image()
vis_img = np.concatenate((anno_img, prop_img), axis=1)
vis_img = vis_img.transpose(2, 0, 1)
vis_name = (
"Left: GT bounding boxes "
+ branch
+ "; Right: Predicted proposals "
+ branch
)
storage.put_image(vis_name, vis_img)
break # only visualize one image in a batch
@META_ARCH_REGISTRY.register()
class TwoStagePseudoLabGeneralizedRCNN(GeneralizedRCNN):
def forward(
self, batched_inputs, branch="supervised", given_proposals=None, val_mode=False
):
if (not self.training) and (not val_mode):
return self.inference(batched_inputs)
images = self.preprocess_image(batched_inputs)
if "instances" in batched_inputs[0]:
gt_instances = [x["instances"].to(self.device) for x in batched_inputs]
else:
gt_instances = None
features = self.backbone(images.tensor)
if branch == "supervised":
# Region proposal network
proposals_rpn, proposal_losses = self.proposal_generator(
images, features, gt_instances
)
# # roi_head lower branch
_, detector_losses = self.roi_heads(
images, features, proposals_rpn, gt_instances, branch=branch
)
losses = {}
losses.update(detector_losses)
losses.update(proposal_losses)
return losses, [], [], None
elif branch == "unsup_data_weak":
# Region proposal network
proposals_rpn, _ = self.proposal_generator(
images, features, None, compute_loss=False
)
# roi_head lower branch (keep this for further production) # notice that we do not use any target in ROI head to do inference !
proposals_roih, ROI_predictions = self.roi_heads(
images,
features,
proposals_rpn,
targets=None,
compute_loss=False,
branch=branch,
)
return {}, proposals_rpn, proposals_roih, ROI_predictions
elif branch == "val_loss":
# Region proposal network
proposals_rpn, proposal_losses = self.proposal_generator(
images, features, gt_instances, compute_val_loss=True
)
# roi_head lower branch
_, detector_losses = self.roi_heads(
images,
features,
proposals_rpn,
gt_instances,
branch=branch,
compute_val_loss=True,
)
losses = {}
losses.update(detector_losses)
losses.update(proposal_losses)
return losses, [], [], None
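# Illustrative sketch (not part of the original file): the `grad_reverse`
# calls above assume a gradient-reversal layer defined elsewhere in this
# repo. A minimal version of that mechanism looks like the following.
class _GradReverseSketch(torch.autograd.Function):
    """Identity in the forward pass, negated gradient in the backward pass."""
    @staticmethod
    def forward(ctx, x):
        return x.view_as(x)
    @staticmethod
    def backward(ctx, grad_output):
        # Flipping the gradient sign makes the backbone maximize the
        # discriminator loss that D_img minimizes, i.e. adversarial alignment.
        return grad_output.neg()
def _grad_reverse_sketch(x):
    return _GradReverseSketch.apply(x)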
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import torch.nn as nn
import copy
import torch
from typing import Union, List, Dict, Any, cast
from detectron2.modeling.backbone import (
ResNet,
Backbone,
build_resnet_backbone,
BACKBONE_REGISTRY
)
from detectron2.modeling.backbone.fpn import FPN, LastLevelMaxPool, LastLevelP6P7
def make_layers(cfg: List[Union[str, int]], batch_norm: bool = False) -> nn.Sequential:
layers: List[nn.Module] = []
in_channels = 3
for v in cfg:
if v == 'M':
layers += [nn.MaxPool2d(kernel_size=2, stride=2)]
else:
v = cast(int, v)
conv2d = nn.Conv2d(in_channels, v, kernel_size=3, padding=1)
if batch_norm:
layers += [conv2d, nn.BatchNorm2d(v), nn.ReLU(inplace=True)]
else:
layers += [conv2d, nn.ReLU(inplace=True)]
in_channels = v
return nn.Sequential(*layers)
cfgs: Dict[str, List[Union[str, int]]] = {
'vgg11': [64, 'M', 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'],
'vgg13': [64, 64, 'M', 128, 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'],
'vgg16': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 'M', 512, 512, 512, 'M', 512, 512, 512, 'M'],
'vgg19': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 256, 'M', 512, 512, 512, 512, 'M', 512, 512, 512, 512, 'M'],
}
class vgg_backbone(Backbone):
"""
    VGG backbone (bottom-up).
    The torchvision-style VGG-16 trunk built by ``make_layers`` is split into
    five stages, one per pooling level, registered as modules "vgg0" ... "vgg4".
    Output features:
        The output from each stage, keyed by its stage name ("vggX").
"""
def __init__(self, cfg):
super().__init__()
self.vgg = make_layers(cfgs['vgg16'],batch_norm=True)
self._initialize_weights()
# self.stage_names_index = {'vgg1':3, 'vgg2':8 , 'vgg3':15, 'vgg4':22, 'vgg5':29}
_out_feature_channels = [64, 128, 256, 512, 512]
_out_feature_strides = [2, 4, 8, 16, 32]
# stages, shape_specs = build_fbnet(
# cfg,
# name="trunk",
# in_channels=cfg.MODEL.FBNET_V2.STEM_IN_CHANNELS
# )
# nn.Sequential(*list(self.vgg.features._modules.values())[:14])
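        # With batch_norm=True every conv block is Conv2d + BatchNorm2d + ReLU
        # (3 modules) and every 'M' adds one MaxPool2d, so the slice boundaries
        # 7/14/24/34 below split vgg16 into its five pooling stages.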
        self.stages = [
            nn.Sequential(*list(self.vgg._modules.values())[0:7]),
            nn.Sequential(*list(self.vgg._modules.values())[7:14]),
            nn.Sequential(*list(self.vgg._modules.values())[14:24]),
            nn.Sequential(*list(self.vgg._modules.values())[24:34]),
            nn.Sequential(*list(self.vgg._modules.values())[34:]),
        ]
self._out_feature_channels = {}
self._out_feature_strides = {}
self._stage_names = []
for i, stage in enumerate(self.stages):
name = "vgg{}".format(i)
self.add_module(name, stage)
self._stage_names.append(name)
self._out_feature_channels[name] = _out_feature_channels[i]
self._out_feature_strides[name] = _out_feature_strides[i]
self._out_features = self._stage_names
del self.vgg
def forward(self, x):
features = {}
for name, stage in zip(self._stage_names, self.stages):
x = stage(x)
# if name in self._out_features:
# outputs[name] = x
features[name] = x
return features
def _initialize_weights(self) -> None:
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
if m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.BatchNorm2d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.Linear):
nn.init.normal_(m.weight, 0, 0.01)
nn.init.constant_(m.bias, 0)
@BACKBONE_REGISTRY.register()  # already registered in the baseline model
def build_vgg_backbone(cfg, _):
return vgg_backbone(cfg)
@BACKBONE_REGISTRY.register()  # already registered in the baseline model
def build_vgg_fpn_backbone(cfg, _):
# backbone = FPN(
# bottom_up=build_vgg_backbone(cfg),
# in_features=cfg.MODEL.FPN.IN_FEATURES,
# out_channels=cfg.MODEL.FPN.OUT_CHANNELS,
# norm=cfg.MODEL.FPN.NORM,
# top_block=LastLevelMaxPool(),
# )
bottom_up = vgg_backbone(cfg)
in_features = cfg.MODEL.FPN.IN_FEATURES
out_channels = cfg.MODEL.FPN.OUT_CHANNELS
backbone = FPN(
bottom_up=bottom_up,
in_features=in_features,
out_channels=out_channels,
norm=cfg.MODEL.FPN.NORM,
top_block=LastLevelMaxPool(),
# fuse_type=cfg.MODEL.FPN.FUSE_TYPE,
)
return backbone
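# Minimal shape-check sketch (assumption: not part of the original file).
# vgg_backbone currently ignores cfg in __init__, so None is enough here;
# expected output strides per stage are 2/4/8/16/32 (see _out_feature_strides).
def _vgg_backbone_demo(cfg=None):
    backbone = vgg_backbone(cfg)
    x = torch.randn(1, 3, 224, 224)
    # e.g. "vgg0" -> (1, 64, 112, 112), ..., "vgg4" -> (1, 512, 7, 7)
    return {name: tuple(f.shape) for name, f in backbone(x).items()}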
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
from torch.nn.parallel import DataParallel, DistributedDataParallel
import torch.nn as nn
class EnsembleTSModel(nn.Module):
def __init__(self, modelTeacher, modelStudent):
super(EnsembleTSModel, self).__init__()
if isinstance(modelTeacher, (DistributedDataParallel, DataParallel)):
modelTeacher = modelTeacher.module
if isinstance(modelStudent, (DistributedDataParallel, DataParallel)):
modelStudent = modelStudent.module
self.modelTeacher = modelTeacher
self.modelStudent = modelStudent |
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
from typing import Dict, Optional
import torch
from detectron2.structures import ImageList, Instances
from detectron2.modeling.proposal_generator import RPN
from detectron2.modeling.proposal_generator.build import PROPOSAL_GENERATOR_REGISTRY
@PROPOSAL_GENERATOR_REGISTRY.register()
class PseudoLabRPN(RPN):
"""
Region Proposal Network, introduced by :paper:`Faster R-CNN`.
"""
def forward(
self,
images: ImageList,
features: Dict[str, torch.Tensor],
gt_instances: Optional[Instances] = None,
compute_loss: bool = True,
compute_val_loss: bool = False,
):
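        # Differences from the stock RPN.forward: `compute_loss=False` lets
        # callers (e.g. the unsupervised weak branch) generate proposals without
        # computing RPN losses, and `compute_val_loss` forces loss computation
        # even when the module is not in training mode.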
features = [features[f] for f in self.in_features]
anchors = self.anchor_generator(features)
pred_objectness_logits, pred_anchor_deltas = self.rpn_head(features)
pred_objectness_logits = [
# (N, A, Hi, Wi) -> (N, Hi, Wi, A) -> (N, Hi*Wi*A)
score.permute(0, 2, 3, 1).flatten(1)
for score in pred_objectness_logits
]
pred_anchor_deltas = [
# (N, A*B, Hi, Wi) -> (N, A, B, Hi, Wi) -> (N, Hi, Wi, A, B) -> (N, Hi*Wi*A, B)
x.view(
x.shape[0], -1, self.anchor_generator.box_dim, x.shape[-2], x.shape[-1]
)
.permute(0, 3, 4, 1, 2)
.flatten(1, -2)
for x in pred_anchor_deltas
]
if (self.training and compute_loss) or compute_val_loss:
gt_labels, gt_boxes = self.label_and_sample_anchors(anchors, gt_instances)
losses = self.losses(
anchors, pred_objectness_logits, gt_labels, pred_anchor_deltas, gt_boxes
)
losses = {k: v * self.loss_weight.get(k, 1.0) for k, v in losses.items()}
else: # inference
losses = {}
proposals = self.predict_proposals(
anchors, pred_objectness_logits, pred_anchor_deltas, images.image_sizes
)
return proposals, losses |
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import torch
from torch import nn
from torch.nn import functional as F
from detectron2.modeling.roi_heads.fast_rcnn import (
FastRCNNOutputLayers,
FastRCNNOutputs,
)
# focal loss
class FastRCNNFocaltLossOutputLayers(FastRCNNOutputLayers):
def __init__(self, cfg, input_shape):
super(FastRCNNFocaltLossOutputLayers, self).__init__(cfg, input_shape)
self.num_classes = cfg.MODEL.ROI_HEADS.NUM_CLASSES
def losses(self, predictions, proposals):
"""
Args:
predictions: return values of :meth:`forward()`.
proposals (list[Instances]): proposals that match the features
that were used to compute predictions.
"""
scores, proposal_deltas = predictions
losses = FastRCNNFocalLoss(
self.box2box_transform,
scores,
proposal_deltas,
proposals,
self.smooth_l1_beta,
self.box_reg_loss_type,
num_classes=self.num_classes,
).losses()
return losses
class FastRCNNFocalLoss(FastRCNNOutputs):
"""
    A variant of FastRCNNOutputs that replaces the cross-entropy classification
    loss with a focal loss; the box regression loss is unchanged.
"""
def __init__(
self,
box2box_transform,
pred_class_logits,
pred_proposal_deltas,
proposals,
smooth_l1_beta=0.0,
box_reg_loss_type="smooth_l1",
num_classes=80,
):
super(FastRCNNFocalLoss, self).__init__(
box2box_transform,
pred_class_logits,
pred_proposal_deltas,
proposals,
smooth_l1_beta,
box_reg_loss_type,
)
self.num_classes = num_classes
def losses(self):
return {
"loss_cls": self.comput_focal_loss(),
"loss_box_reg": self.box_reg_loss(),
}
    def compute_focal_loss(self):
if self._no_instances:
return 0.0 * self.pred_class_logits.sum()
else:
FC_loss = FocalLoss(
gamma=1.5,
num_classes=self.num_classes,
)
total_loss = FC_loss(input=self.pred_class_logits, target=self.gt_classes)
total_loss = total_loss / self.gt_classes.shape[0]
return total_loss
class FocalLoss(nn.Module):
def __init__(
self,
weight=None,
gamma=1.0,
num_classes=80,
):
super(FocalLoss, self).__init__()
assert gamma >= 0
self.gamma = gamma
self.weight = weight
self.num_classes = num_classes
def forward(self, input, target):
# focal loss
CE = F.cross_entropy(input, target, reduction="none")
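        # exp(-CE) recovers the softmax probability assigned to the true class,
        # so the (1 - p) ** gamma factor below down-weights easy examples.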
p = torch.exp(-CE)
loss = (1 - p) ** self.gamma * CE
return loss.sum()
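# Illustrative sketch (not part of the original file): compares the focal loss
# above against plain cross-entropy on random logits; shapes are made up.
def _focal_loss_demo():
    logits = torch.randn(8, 81)            # 8 proposals, 80 classes + background
    targets = torch.randint(0, 81, (8,))
    focal = FocalLoss(gamma=1.5, num_classes=80)(input=logits, target=targets)
    ce = F.cross_entropy(logits, targets, reduction="sum")
    # focal <= ce because every term is scaled by (1 - p) ** gamma <= 1
    return focal.item(), ce.item()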
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import torch
from typing import Dict, List, Optional, Tuple, Union
from detectron2.structures import Boxes, ImageList, Instances, pairwise_iou
from detectron2.modeling.proposal_generator.proposal_utils import (
add_ground_truth_to_proposals,
)
from detectron2.utils.events import get_event_storage
from detectron2.modeling.roi_heads.box_head import build_box_head
from detectron2.layers import ShapeSpec
from detectron2.modeling.roi_heads import (
ROI_HEADS_REGISTRY,
StandardROIHeads,
)
from detectron2.modeling.roi_heads.fast_rcnn import FastRCNNOutputLayers
from adapteacher.modeling.roi_heads.fast_rcnn import FastRCNNFocaltLossOutputLayers
import numpy as np
from detectron2.modeling.poolers import ROIPooler
@ROI_HEADS_REGISTRY.register()
class StandardROIHeadsPseudoLab(StandardROIHeads):
@classmethod
def _init_box_head(cls, cfg, input_shape):
# fmt: off
in_features = cfg.MODEL.ROI_HEADS.IN_FEATURES
pooler_resolution = cfg.MODEL.ROI_BOX_HEAD.POOLER_RESOLUTION
pooler_scales = tuple(1.0 / input_shape[k].stride for k in in_features)
sampling_ratio = cfg.MODEL.ROI_BOX_HEAD.POOLER_SAMPLING_RATIO
pooler_type = cfg.MODEL.ROI_BOX_HEAD.POOLER_TYPE
# fmt: on
in_channels = [input_shape[f].channels for f in in_features]
# Check all channel counts are equal
assert len(set(in_channels)) == 1, in_channels
in_channels = in_channels[0]
box_pooler = ROIPooler(
output_size=pooler_resolution,
scales=pooler_scales,
sampling_ratio=sampling_ratio,
pooler_type=pooler_type,
)
box_head = build_box_head(
cfg,
ShapeSpec(
channels=in_channels, height=pooler_resolution, width=pooler_resolution
),
)
if cfg.MODEL.ROI_HEADS.LOSS == "CrossEntropy":
box_predictor = FastRCNNOutputLayers(cfg, box_head.output_shape)
elif cfg.MODEL.ROI_HEADS.LOSS == "FocalLoss":
box_predictor = FastRCNNFocaltLossOutputLayers(cfg, box_head.output_shape)
else:
raise ValueError("Unknown ROI head loss.")
return {
"box_in_features": in_features,
"box_pooler": box_pooler,
"box_head": box_head,
"box_predictor": box_predictor,
}
def forward(
self,
images: ImageList,
features: Dict[str, torch.Tensor],
proposals: List[Instances],
targets: Optional[List[Instances]] = None,
compute_loss=True,
branch="",
compute_val_loss=False,
) -> Tuple[List[Instances], Dict[str, torch.Tensor]]:
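        # `branch` only tags the logged metrics; `compute_loss` follows the
        # usual training path, while `compute_val_loss` computes losses outside
        # training mode without appending ground truth to the proposals.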
del images
if self.training and compute_loss: # apply if training loss
assert targets
# 1000 --> 512
proposals = self.label_and_sample_proposals(
proposals, targets, branch=branch
)
elif compute_val_loss: # apply if val loss
assert targets
# 1000 --> 512
temp_proposal_append_gt = self.proposal_append_gt
self.proposal_append_gt = False
proposals = self.label_and_sample_proposals(
proposals, targets, branch=branch
) # do not apply target on proposals
self.proposal_append_gt = temp_proposal_append_gt
del targets
if (self.training and compute_loss) or compute_val_loss:
losses, _ = self._forward_box(
features, proposals, compute_loss, compute_val_loss, branch
)
return proposals, losses
else:
pred_instances, predictions = self._forward_box(
features, proposals, compute_loss, compute_val_loss, branch
)
return pred_instances, predictions
def _forward_box(
self,
features: Dict[str, torch.Tensor],
proposals: List[Instances],
compute_loss: bool = True,
compute_val_loss: bool = False,
branch: str = "",
) -> Union[Dict[str, torch.Tensor], List[Instances]]:
features = [features[f] for f in self.box_in_features]
box_features = self.box_pooler(features, [x.proposal_boxes for x in proposals])
box_features = self.box_head(box_features)
predictions = self.box_predictor(box_features)
del box_features
if (
self.training and compute_loss
) or compute_val_loss: # apply if training loss or val loss
losses = self.box_predictor.losses(predictions, proposals)
if self.train_on_pred_boxes:
with torch.no_grad():
pred_boxes = self.box_predictor.predict_boxes_for_gt_classes(
predictions, proposals
)
for proposals_per_image, pred_boxes_per_image in zip(
proposals, pred_boxes
):
proposals_per_image.proposal_boxes = Boxes(pred_boxes_per_image)
return losses, predictions
else:
pred_instances, _ = self.box_predictor.inference(predictions, proposals)
return pred_instances, predictions
@torch.no_grad()
def label_and_sample_proposals(
self, proposals: List[Instances], targets: List[Instances], branch: str = ""
) -> List[Instances]:
gt_boxes = [x.gt_boxes for x in targets]
if self.proposal_append_gt:
proposals = add_ground_truth_to_proposals(gt_boxes, proposals)
proposals_with_gt = []
num_fg_samples = []
num_bg_samples = []
for proposals_per_image, targets_per_image in zip(proposals, targets):
has_gt = len(targets_per_image) > 0
match_quality_matrix = pairwise_iou(
targets_per_image.gt_boxes, proposals_per_image.proposal_boxes
)
matched_idxs, matched_labels = self.proposal_matcher(match_quality_matrix)
sampled_idxs, gt_classes = self._sample_proposals(
matched_idxs, matched_labels, targets_per_image.gt_classes
)
proposals_per_image = proposals_per_image[sampled_idxs]
proposals_per_image.gt_classes = gt_classes
if has_gt:
sampled_targets = matched_idxs[sampled_idxs]
for (trg_name, trg_value) in targets_per_image.get_fields().items():
if trg_name.startswith("gt_") and not proposals_per_image.has(
trg_name
):
proposals_per_image.set(trg_name, trg_value[sampled_targets])
else:
gt_boxes = Boxes(
targets_per_image.gt_boxes.tensor.new_zeros((len(sampled_idxs), 4))
)
proposals_per_image.gt_boxes = gt_boxes
num_bg_samples.append((gt_classes == self.num_classes).sum().item())
num_fg_samples.append(gt_classes.numel() - num_bg_samples[-1])
proposals_with_gt.append(proposals_per_image)
storage = get_event_storage()
storage.put_scalar(
"roi_head/num_target_fg_samples_" + branch, np.mean(num_fg_samples)
)
storage.put_scalar(
"roi_head/num_target_bg_samples_" + branch, np.mean(num_bg_samples)
)
return proposals_with_gt
|
# Copyright (c) Facebook, Inc. and its affiliates.
from .coco_evaluation import COCOEvaluator
from .pascal_voc_evaluation import PascalVOCDetectionEvaluator
# __all__ = [k for k in globals().keys() if not k.startswith("_")]
__all__ = [
"COCOEvaluator",
"PascalVOCDetectionEvaluator"
]
|
# Copyright (c) Facebook, Inc. and its affiliates.
import contextlib
import copy
import io
import itertools
import json
import logging
import numpy as np
import os
import pickle
from collections import OrderedDict
import pycocotools.mask as mask_util
import torch
from pycocotools.coco import COCO
from pycocotools.cocoeval import COCOeval
from tabulate import tabulate
import detectron2.utils.comm as comm
from detectron2.config import CfgNode
from detectron2.data import MetadataCatalog
from detectron2.data.datasets.coco import convert_to_coco_dict
from detectron2.evaluation.fast_eval_api import COCOeval_opt
from detectron2.structures import Boxes, BoxMode, pairwise_iou
from detectron2.utils.file_io import PathManager
from detectron2.utils.logger import create_small_table
from detectron2.evaluation import DatasetEvaluator
from iopath.common.file_io import file_lock
logger = logging.getLogger(__name__)
def convert_to_coco_json(dataset_name, output_file, allow_cached=True):
"""
Converts dataset into COCO format and saves it to a json file.
dataset_name must be registered in DatasetCatalog and in detectron2's standard format.
Args:
dataset_name:
reference from the config file to the catalogs
must be registered in DatasetCatalog and in detectron2's standard format
output_file: path of json file that will be saved to
allow_cached: if json file is already present then skip conversion
"""
# TODO: The dataset or the conversion script *may* change,
# a checksum would be useful for validating the cached data
PathManager.mkdirs(os.path.dirname(output_file))
with file_lock(output_file):
if PathManager.exists(output_file) and allow_cached:
logger.warning(
f"Using previously cached COCO format annotations at '{output_file}'. "
"You need to clear the cache file if your dataset has been modified."
)
else:
logger.info(f"Converting annotations of dataset '{dataset_name}' to COCO format ...)")
coco_dict = convert_to_coco_dict(dataset_name)
logger.info(f"Caching COCO format annotations at '{output_file}' ...")
tmp_file = output_file #+ ".tmp"
# with PathManager.open(tmp_file, "w") as f:
# json.dump(coco_dict, f)
# shutil.move(tmp_file, output_file)
with PathManager.open(tmp_file, "w") as f:
json.dump(coco_dict, f)
class COCOEvaluator(DatasetEvaluator):
"""
Evaluate AR for object proposals, AP for instance detection/segmentation, AP
for keypoint detection outputs using COCO's metrics.
See http://cocodataset.org/#detection-eval and
http://cocodataset.org/#keypoints-eval to understand its metrics.
The metrics range from 0 to 100 (instead of 0 to 1), where a -1 or NaN means
the metric cannot be computed (e.g. due to no predictions made).
In addition to COCO, this evaluator is able to support any bounding box detection,
instance segmentation, or keypoint detection dataset.
"""
def __init__(
self,
dataset_name,
tasks=None,
distributed=True,
output_dir=None,
*,
use_fast_impl=True,
kpt_oks_sigmas=(),
):
"""
Args:
dataset_name (str): name of the dataset to be evaluated.
It must have either the following corresponding metadata:
"json_file": the path to the COCO format annotation
Or it must be in detectron2's standard dataset format
so it can be converted to COCO format automatically.
tasks (tuple[str]): tasks that can be evaluated under the given
configuration. A task is one of "bbox", "segm", "keypoints".
By default, will infer this automatically from predictions.
distributed (True): if True, will collect results from all ranks and run evaluation
in the main process.
Otherwise, will only evaluate the results in the current process.
output_dir (str): optional, an output directory to dump all
results predicted on the dataset. The dump contains two files:
1. "instances_predictions.pth" a file that can be loaded with `torch.load` and
contains all the results in the format they are produced by the model.
2. "coco_instances_results.json" a json file in COCO's result format.
use_fast_impl (bool): use a fast but **unofficial** implementation to compute AP.
Although the results should be very close to the official implementation in COCO
API, it is still recommended to compute results with the official API for use in
papers. The faster implementation also uses more RAM.
kpt_oks_sigmas (list[float]): The sigmas used to calculate keypoint OKS.
See http://cocodataset.org/#keypoints-eval
When empty, it will use the defaults in COCO.
Otherwise it should be the same length as ROI_KEYPOINT_HEAD.NUM_KEYPOINTS.
"""
self._logger = logging.getLogger(__name__)
self._distributed = distributed
self._output_dir = output_dir
self._use_fast_impl = use_fast_impl
if tasks is not None and isinstance(tasks, CfgNode):
kpt_oks_sigmas = (
tasks.TEST.KEYPOINT_OKS_SIGMAS if not kpt_oks_sigmas else kpt_oks_sigmas
)
            self._logger.warning(
                "COCO Evaluator instantiated using config, this is deprecated behavior."
                " Please pass in explicit arguments instead."
            )
            self._tasks = None  # Inferring it from predictions should be better
else:
self._tasks = tasks
self._cpu_device = torch.device("cpu")
self._metadata = MetadataCatalog.get(dataset_name)
if not hasattr(self._metadata, "json_file"):
self._logger.info(
f"'{dataset_name}' is not registered by `register_coco_instances`."
" Therefore trying to convert it to COCO format ..."
)
cache_path = os.path.join(output_dir, f"{dataset_name}_coco_format.json")
self._metadata.json_file = cache_path
convert_to_coco_json(dataset_name, cache_path)
json_file = PathManager.get_local_path(self._metadata.json_file)
with contextlib.redirect_stdout(io.StringIO()):
self._coco_api = COCO(json_file)
# Test set json files do not contain annotations (evaluation must be
# performed using the COCO evaluation server).
self._do_evaluation = "annotations" in self._coco_api.dataset
if self._do_evaluation:
self._kpt_oks_sigmas = kpt_oks_sigmas
def reset(self):
self._predictions = []
def process(self, inputs, outputs):
"""
Args:
inputs: the inputs to a COCO model (e.g., GeneralizedRCNN).
It is a list of dict. Each dict corresponds to an image and
contains keys like "height", "width", "file_name", "image_id".
outputs: the outputs of a COCO model. It is a list of dicts with key
"instances" that contains :class:`Instances`.
"""
for input, output in zip(inputs, outputs):
prediction = {"image_id": input["image_id"]}
if "instances" in output:
instances = output["instances"].to(self._cpu_device)
prediction["instances"] = instances_to_coco_json(instances, input["image_id"])
if "proposals" in output:
prediction["proposals"] = output["proposals"].to(self._cpu_device)
if len(prediction) > 1:
self._predictions.append(prediction)
def evaluate(self, img_ids=None):
"""
Args:
img_ids: a list of image IDs to evaluate on. Default to None for the whole dataset
"""
if self._distributed:
comm.synchronize()
predictions = comm.gather(self._predictions, dst=0)
predictions = list(itertools.chain(*predictions))
if not comm.is_main_process():
return {}
else:
predictions = self._predictions
if len(predictions) == 0:
self._logger.warning("[COCOEvaluator] Did not receive valid predictions.")
return {}
if self._output_dir:
PathManager.mkdirs(self._output_dir)
file_path = os.path.join(self._output_dir, "instances_predictions.pth")
with PathManager.open(file_path, "wb") as f:
torch.save(predictions, f)
self._results = OrderedDict()
if "proposals" in predictions[0]:
self._eval_box_proposals(predictions)
if "instances" in predictions[0]:
self._eval_predictions(predictions, img_ids=img_ids)
# Copy so the caller can do whatever with results
return copy.deepcopy(self._results)
def _tasks_from_predictions(self, predictions):
"""
Get COCO API "tasks" (i.e. iou_type) from COCO-format predictions.
"""
tasks = {"bbox"}
for pred in predictions:
if "segmentation" in pred:
tasks.add("segm")
if "keypoints" in pred:
tasks.add("keypoints")
return sorted(tasks)
def _eval_predictions(self, predictions, img_ids=None):
"""
Evaluate predictions. Fill self._results with the metrics of the tasks.
"""
self._logger.info("Preparing results for COCO format ...")
coco_results = list(itertools.chain(*[x["instances"] for x in predictions]))
tasks = self._tasks or self._tasks_from_predictions(coco_results)
# unmap the category ids for COCO
if hasattr(self._metadata, "thing_dataset_id_to_contiguous_id"):
dataset_id_to_contiguous_id = self._metadata.thing_dataset_id_to_contiguous_id
all_contiguous_ids = list(dataset_id_to_contiguous_id.values())
num_classes = len(all_contiguous_ids)
assert min(all_contiguous_ids) == 0 and max(all_contiguous_ids) == num_classes - 1
reverse_id_mapping = {v: k for k, v in dataset_id_to_contiguous_id.items()}
for result in coco_results:
category_id = result["category_id"]
assert category_id < num_classes, (
f"A prediction has class={category_id}, "
f"but the dataset only has {num_classes} classes and "
f"predicted class id should be in [0, {num_classes - 1}]."
)
result["category_id"] = reverse_id_mapping[category_id]
if self._output_dir:
file_path = os.path.join(self._output_dir, "coco_instances_results.json")
self._logger.info("Saving results to {}".format(file_path))
with PathManager.open(file_path, "w") as f:
f.write(json.dumps(coco_results))
f.flush()
if not self._do_evaluation:
self._logger.info("Annotations are not available for evaluation.")
return
self._logger.info(
"Evaluating predictions with {} COCO API...".format(
"unofficial" if self._use_fast_impl else "official"
)
)
for task in sorted(tasks):
assert task in {"bbox", "segm", "keypoints"}, f"Got unknown task: {task}!"
coco_eval = (
_evaluate_predictions_on_coco(
self._coco_api,
coco_results,
task,
kpt_oks_sigmas=self._kpt_oks_sigmas,
use_fast_impl=self._use_fast_impl,
img_ids=img_ids,
)
if len(coco_results) > 0
else None # cocoapi does not handle empty results very well
)
res = self._derive_coco_results(
coco_eval, task, class_names=self._metadata.get("thing_classes")
)
self._results[task] = res
def _eval_box_proposals(self, predictions):
"""
Evaluate the box proposals in predictions.
Fill self._results with the metrics for "box_proposals" task.
"""
if self._output_dir:
# Saving generated box proposals to file.
# Predicted box_proposals are in XYXY_ABS mode.
bbox_mode = BoxMode.XYXY_ABS.value
ids, boxes, objectness_logits = [], [], []
for prediction in predictions:
ids.append(prediction["image_id"])
boxes.append(prediction["proposals"].proposal_boxes.tensor.numpy())
objectness_logits.append(prediction["proposals"].objectness_logits.numpy())
proposal_data = {
"boxes": boxes,
"objectness_logits": objectness_logits,
"ids": ids,
"bbox_mode": bbox_mode,
}
with PathManager.open(os.path.join(self._output_dir, "box_proposals.pkl"), "wb") as f:
pickle.dump(proposal_data, f)
if not self._do_evaluation:
self._logger.info("Annotations are not available for evaluation.")
return
self._logger.info("Evaluating bbox proposals ...")
res = {}
areas = {"all": "", "small": "s", "medium": "m", "large": "l"}
for limit in [100, 1000]:
for area, suffix in areas.items():
stats = _evaluate_box_proposals(predictions, self._coco_api, area=area, limit=limit)
key = "AR{}@{:d}".format(suffix, limit)
res[key] = float(stats["ar"].item() * 100)
self._logger.info("Proposal metrics: \n" + create_small_table(res))
self._results["box_proposals"] = res
def _derive_coco_results(self, coco_eval, iou_type, class_names=None):
"""
Derive the desired score numbers from summarized COCOeval.
Args:
coco_eval (None or COCOEval): None represents no predictions from model.
iou_type (str):
class_names (None or list[str]): if provided, will use it to predict
per-category AP.
Returns:
a dict of {metric name: score}
"""
metrics = {
"bbox": ["AP", "AP50", "AP75", "APs", "APm", "APl"],
"segm": ["AP", "AP50", "AP75", "APs", "APm", "APl"],
"keypoints": ["AP", "AP50", "AP75", "APm", "APl"],
}[iou_type]
if coco_eval is None:
self._logger.warn("No predictions from the model!")
return {metric: float("nan") for metric in metrics}
# the standard metrics
results = {
metric: float(coco_eval.stats[idx] * 100 if coco_eval.stats[idx] >= 0 else "nan")
for idx, metric in enumerate(metrics)
}
self._logger.info(
"Evaluation results for {}: \n".format(iou_type) + create_small_table(results)
)
if not np.isfinite(sum(results.values())):
self._logger.info("Some metrics cannot be computed and is shown as NaN.")
if class_names is None or len(class_names) <= 1:
return results
# Compute per-category AP
# from https://github.com/facebookresearch/Detectron/blob/a6a835f5b8208c45d0dce217ce9bbda915f44df7/detectron/datasets/json_dataset_evaluator.py#L222-L252 # noqa
precisions = coco_eval.eval["precision"]
# precision has dims (iou, recall, cls, area range, max dets)
assert len(class_names) == precisions.shape[2]
results_per_category = []
for idx, name in enumerate(class_names):
# area range index 0: all area ranges
# max dets index -1: typically 100 per image
precision = precisions[:, :, idx, 0, -1]
precision = precision[precision > -1]
ap = np.mean(precision) if precision.size else float("nan")
results_per_category.append(("{}".format(name), float(ap * 100)))
# tabulate it
N_COLS = min(6, len(results_per_category) * 2)
results_flatten = list(itertools.chain(*results_per_category))
results_2d = itertools.zip_longest(*[results_flatten[i::N_COLS] for i in range(N_COLS)])
table = tabulate(
results_2d,
tablefmt="pipe",
floatfmt=".3f",
headers=["category", "AP"] * (N_COLS // 2),
numalign="left",
)
self._logger.info("Per-category {} AP: \n".format(iou_type) + table)
# results.update({"AP-" + name: ap for name, ap in results_per_category})
results_per_category_AP50 = []
for idx, name in enumerate(class_names):
# area range index 0: all area ranges
# max dets index -1: typically 100 per image
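            # select the slice of the precision array at the IoU=0.50 threshold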
t = np.where(.5 == coco_eval.params.iouThrs)[0]
precisions_50 = precisions[t]
precisions_50 = precisions_50[:, :, idx, 0, -1]
precisions_50 = precisions_50[precisions_50 > -1]
ap = np.mean(precisions_50) if precisions_50.size else float("nan")
results_per_category_AP50.append(("{}".format(name), float(ap * 100)))
# tabulate it
N_COLS = min(6, len(results_per_category_AP50) * 2)
results_flatten = list(itertools.chain(*results_per_category_AP50))
results_2d = itertools.zip_longest(*[results_flatten[i::N_COLS] for i in range(N_COLS)])
table = tabulate(
results_2d,
tablefmt="pipe",
floatfmt=".3f",
headers=["category", "AP50"] * (N_COLS // 2),
numalign="left",
)
self._logger.info("Per-category {} AP50: \n".format(iou_type) + table)
results.update({"AP50-" + name: ap for name, ap in results_per_category_AP50})
return results
def instances_to_coco_json(instances, img_id):
"""
Dump an "Instances" object to a COCO-format json that's used for evaluation.
Args:
instances (Instances):
img_id (int): the image id
Returns:
list[dict]: list of json annotations in COCO format.
"""
num_instance = len(instances)
if num_instance == 0:
return []
boxes = instances.pred_boxes.tensor.numpy()
boxes = BoxMode.convert(boxes, BoxMode.XYXY_ABS, BoxMode.XYWH_ABS)
boxes = boxes.tolist()
scores = instances.scores.tolist()
classes = instances.pred_classes.tolist()
has_mask = instances.has("pred_masks")
if has_mask:
# use RLE to encode the masks, because they are too large and takes memory
# since this evaluator stores outputs of the entire dataset
rles = [
mask_util.encode(np.array(mask[:, :, None], order="F", dtype="uint8"))[0]
for mask in instances.pred_masks
]
for rle in rles:
# "counts" is an array encoded by mask_util as a byte-stream. Python3's
# json writer which always produces strings cannot serialize a bytestream
# unless you decode it. Thankfully, utf-8 works out (which is also what
# the pycocotools/_mask.pyx does).
rle["counts"] = rle["counts"].decode("utf-8")
has_keypoints = instances.has("pred_keypoints")
if has_keypoints:
keypoints = instances.pred_keypoints
results = []
for k in range(num_instance):
result = {
"image_id": img_id,
"category_id": classes[k],
"bbox": boxes[k],
"score": scores[k],
}
if has_mask:
result["segmentation"] = rles[k]
if has_keypoints:
# In COCO annotations,
# keypoints coordinates are pixel indices.
# However our predictions are floating point coordinates.
# Therefore we subtract 0.5 to be consistent with the annotation format.
# This is the inverse of data loading logic in `datasets/coco.py`.
keypoints[k][:, :2] -= 0.5
result["keypoints"] = keypoints[k].flatten().tolist()
results.append(result)
return results
# inspired from Detectron:
# https://github.com/facebookresearch/Detectron/blob/a6a835f5b8208c45d0dce217ce9bbda915f44df7/detectron/datasets/json_dataset_evaluator.py#L255 # noqa
def _evaluate_box_proposals(dataset_predictions, coco_api, thresholds=None, area="all", limit=None):
"""
Evaluate detection proposal recall metrics. This function is a much
faster alternative to the official COCO API recall evaluation code. However,
it produces slightly different results.
"""
# Record max overlap value for each gt box
# Return vector of overlap values
areas = {
"all": 0,
"small": 1,
"medium": 2,
"large": 3,
"96-128": 4,
"128-256": 5,
"256-512": 6,
"512-inf": 7,
}
area_ranges = [
[0 ** 2, 1e5 ** 2], # all
[0 ** 2, 32 ** 2], # small
[32 ** 2, 96 ** 2], # medium
[96 ** 2, 1e5 ** 2], # large
[96 ** 2, 128 ** 2], # 96-128
[128 ** 2, 256 ** 2], # 128-256
[256 ** 2, 512 ** 2], # 256-512
[512 ** 2, 1e5 ** 2],
] # 512-inf
assert area in areas, "Unknown area range: {}".format(area)
area_range = area_ranges[areas[area]]
gt_overlaps = []
num_pos = 0
for prediction_dict in dataset_predictions:
predictions = prediction_dict["proposals"]
# sort predictions in descending order
# TODO maybe remove this and make it explicit in the documentation
inds = predictions.objectness_logits.sort(descending=True)[1]
predictions = predictions[inds]
ann_ids = coco_api.getAnnIds(imgIds=prediction_dict["image_id"])
anno = coco_api.loadAnns(ann_ids)
gt_boxes = [
BoxMode.convert(obj["bbox"], BoxMode.XYWH_ABS, BoxMode.XYXY_ABS)
for obj in anno
if obj["iscrowd"] == 0
]
gt_boxes = torch.as_tensor(gt_boxes).reshape(-1, 4) # guard against no boxes
gt_boxes = Boxes(gt_boxes)
gt_areas = torch.as_tensor([obj["area"] for obj in anno if obj["iscrowd"] == 0])
if len(gt_boxes) == 0 or len(predictions) == 0:
continue
valid_gt_inds = (gt_areas >= area_range[0]) & (gt_areas <= area_range[1])
gt_boxes = gt_boxes[valid_gt_inds]
num_pos += len(gt_boxes)
if len(gt_boxes) == 0:
continue
if limit is not None and len(predictions) > limit:
predictions = predictions[:limit]
overlaps = pairwise_iou(predictions.proposal_boxes, gt_boxes)
_gt_overlaps = torch.zeros(len(gt_boxes))
for j in range(min(len(predictions), len(gt_boxes))):
# find which proposal box maximally covers each gt box
# and get the iou amount of coverage for each gt box
max_overlaps, argmax_overlaps = overlaps.max(dim=0)
# find which gt box is 'best' covered (i.e. 'best' = most iou)
gt_ovr, gt_ind = max_overlaps.max(dim=0)
assert gt_ovr >= 0
# find the proposal box that covers the best covered gt box
box_ind = argmax_overlaps[gt_ind]
# record the iou coverage of this gt box
_gt_overlaps[j] = overlaps[box_ind, gt_ind]
assert _gt_overlaps[j] == gt_ovr
# mark the proposal box and the gt box as used
overlaps[box_ind, :] = -1
overlaps[:, gt_ind] = -1
# append recorded iou coverage level
gt_overlaps.append(_gt_overlaps)
gt_overlaps = (
torch.cat(gt_overlaps, dim=0) if len(gt_overlaps) else torch.zeros(0, dtype=torch.float32)
)
gt_overlaps, _ = torch.sort(gt_overlaps)
if thresholds is None:
step = 0.05
thresholds = torch.arange(0.5, 0.95 + 1e-5, step, dtype=torch.float32)
recalls = torch.zeros_like(thresholds)
# compute recall for each iou threshold
for i, t in enumerate(thresholds):
recalls[i] = (gt_overlaps >= t).float().sum() / float(num_pos)
# ar = 2 * np.trapz(recalls, thresholds)
ar = recalls.mean()
return {
"ar": ar,
"recalls": recalls,
"thresholds": thresholds,
"gt_overlaps": gt_overlaps,
"num_pos": num_pos,
}
def _evaluate_predictions_on_coco(
coco_gt, coco_results, iou_type, kpt_oks_sigmas=None, use_fast_impl=True, img_ids=None
):
"""
Evaluate the coco results using COCOEval API.
"""
assert len(coco_results) > 0
if iou_type == "segm":
coco_results = copy.deepcopy(coco_results)
# When evaluating mask AP, if the results contain bbox, cocoapi will
# use the box area as the area of the instance, instead of the mask area.
# This leads to a different definition of small/medium/large.
# We remove the bbox field to let mask AP use mask area.
for c in coco_results:
c.pop("bbox", None)
coco_dt = coco_gt.loadRes(coco_results)
coco_eval = (COCOeval_opt if use_fast_impl else COCOeval)(coco_gt, coco_dt, iou_type)
if img_ids is not None:
coco_eval.params.imgIds = img_ids
if iou_type == "keypoints":
# Use the COCO default keypoint OKS sigmas unless overrides are specified
if kpt_oks_sigmas:
assert hasattr(coco_eval.params, "kpt_oks_sigmas"), "pycocotools is too old!"
coco_eval.params.kpt_oks_sigmas = np.array(kpt_oks_sigmas)
# COCOAPI requires every detection and every gt to have keypoints, so
# we just take the first entry from both
num_keypoints_dt = len(coco_results[0]["keypoints"]) // 3
num_keypoints_gt = len(next(iter(coco_gt.anns.values()))["keypoints"]) // 3
num_keypoints_oks = len(coco_eval.params.kpt_oks_sigmas)
assert num_keypoints_oks == num_keypoints_dt == num_keypoints_gt, (
f"[COCOEvaluator] Prediction contain {num_keypoints_dt} keypoints. "
f"Ground truth contains {num_keypoints_gt} keypoints. "
f"The length of cfg.TEST.KEYPOINT_OKS_SIGMAS is {num_keypoints_oks}. "
"They have to agree with each other. For meaning of OKS, please refer to "
"http://cocodataset.org/#keypoints-eval."
)
coco_eval.evaluate()
coco_eval.accumulate()
coco_eval.summarize()
return coco_eval
|
# -*- coding: utf-8 -*-
# Copyright (c) Facebook, Inc. and its affiliates.
import logging
import numpy as np
import os
import tempfile
import xml.etree.ElementTree as ET
from collections import OrderedDict, defaultdict
from functools import lru_cache
import torch
from detectron2.data import MetadataCatalog
from detectron2.utils import comm
from detectron2.utils.file_io import PathManager
from detectron2.evaluation import DatasetEvaluator
class PascalVOCDetectionEvaluator(DatasetEvaluator):
"""
Evaluate Pascal VOC style AP for Pascal VOC dataset.
It contains a synchronization, therefore has to be called from all ranks.
Note that the concept of AP can be implemented in different ways and may not
produce identical results. This class mimics the implementation of the official
Pascal VOC Matlab API, and should produce similar but not identical results to the
official API.
"""
def __init__(self, dataset_name, target_classnames=None):
"""
Args:
dataset_name (str): name of the dataset, e.g., "voc_2007_test"
"""
self._dataset_name = dataset_name
meta = MetadataCatalog.get(dataset_name)
# Too many tiny files, download all to local for speed.
annotation_dir_local = PathManager.get_local_path(
os.path.join(meta.dirname, "Annotations/")
)
self._anno_file_template = os.path.join(annotation_dir_local, "{}.xml")
self._image_set_path = os.path.join(meta.dirname, "ImageSets", "Main", meta.split + ".txt")
self._class_names = meta.thing_classes
assert meta.year in [2007, 2012], meta.year
self._is_2007 = meta.year == 2007
self._cpu_device = torch.device("cpu")
self._logger = logging.getLogger(__name__)
        if target_classnames is None:
self.target_classnames = self._class_names
else:
self.target_classnames = target_classnames
def reset(self):
self._predictions = defaultdict(list) # class name -> list of prediction strings
def process(self, inputs, outputs):
for input, output in zip(inputs, outputs):
image_id = input["image_id"]
instances = output["instances"].to(self._cpu_device)
boxes = instances.pred_boxes.tensor.numpy()
scores = instances.scores.tolist()
classes = instances.pred_classes.tolist()
for box, score, cls in zip(boxes, scores, classes):
xmin, ymin, xmax, ymax = box
# The inverse of data loading logic in `datasets/pascal_voc.py`
xmin += 1
ymin += 1
self._predictions[cls].append(
f"{image_id} {score:.3f} {xmin:.1f} {ymin:.1f} {xmax:.1f} {ymax:.1f}"
)
def evaluate(self):
"""
Returns:
            dict: has a key "bbox", whose value is a dict of "AP", "AP50", and "AP75".
"""
all_predictions = comm.gather(self._predictions, dst=0)
if not comm.is_main_process():
return
predictions = defaultdict(list)
for predictions_per_rank in all_predictions:
for clsid, lines in predictions_per_rank.items():
predictions[clsid].extend(lines)
del all_predictions
self._logger.info(
"Evaluating {} using {} metric. "
"Note that results do not use the official Matlab API.".format(
self._dataset_name, 2007 if self._is_2007 else 2012
)
)
with tempfile.TemporaryDirectory(prefix="pascal_voc_eval_") as dirname:
res_file_template = os.path.join(dirname, "{}.txt")
aps = defaultdict(list) # iou -> ap per class
for cls_id, cls_name in enumerate(self._class_names):
if cls_name not in self.target_classnames:
continue
lines = predictions.get(cls_id, [""])
with open(res_file_template.format(cls_name), "w") as f:
f.write("\n".join(lines))
for thresh in range(50, 100, 5):
rec, prec, ap = voc_eval(
res_file_template,
self._anno_file_template,
self._image_set_path,
cls_name,
ovthresh=thresh / 100.0,
use_07_metric=self._is_2007,
)
aps[thresh].append(ap * 100)
ret = OrderedDict()
mAP = {iou: np.mean(x) for iou, x in aps.items()}
ret["bbox"] = {"AP": np.mean(list(mAP.values())), "AP50": mAP[50], "AP75": mAP[75]}
        # Add per-class AP50 entries (aps[50] is ordered by the classes that
        # were actually evaluated above, so iterate in that same order).
        evaluated_classnames = [c for c in self._class_names if c in self.target_classnames]
        for idx, name in enumerate(evaluated_classnames):
            ret["bbox"].update({"AP50-" + name: aps[50][idx]})
return ret
##############################################################################
#
# Below code is modified from
# https://github.com/rbgirshick/py-faster-rcnn/blob/master/lib/datasets/voc_eval.py
# --------------------------------------------------------
# Fast/er R-CNN
# Licensed under The MIT License [see LICENSE for details]
# Written by Bharath Hariharan
# --------------------------------------------------------
"""Python implementation of the PASCAL VOC devkit's AP evaluation code."""
@lru_cache(maxsize=None)
def parse_rec(filename):
"""Parse a PASCAL VOC xml file."""
with PathManager.open(filename) as f:
tree = ET.parse(f)
objects = []
for obj in tree.findall("object"):
obj_struct = {}
obj_struct["name"] = obj.find("name").text
obj_struct["pose"] = obj.find("pose").text
obj_struct["truncated"] = int(obj.find("truncated").text)
obj_struct["difficult"] = int(obj.find("difficult").text)
bbox = obj.find("bndbox")
obj_struct["bbox"] = [
int(bbox.find("xmin").text),
int(bbox.find("ymin").text),
int(bbox.find("xmax").text),
int(bbox.find("ymax").text),
]
objects.append(obj_struct)
return objects
def voc_ap(rec, prec, use_07_metric=False):
"""Compute VOC AP given precision and recall. If use_07_metric is true, uses
the VOC 07 11-point method (default:False).
"""
if use_07_metric:
# 11 point metric
ap = 0.0
for t in np.arange(0.0, 1.1, 0.1):
if np.sum(rec >= t) == 0:
p = 0
else:
p = np.max(prec[rec >= t])
ap = ap + p / 11.0
else:
# correct AP calculation
# first append sentinel values at the end
mrec = np.concatenate(([0.0], rec, [1.0]))
mpre = np.concatenate(([0.0], prec, [0.0]))
# compute the precision envelope
for i in range(mpre.size - 1, 0, -1):
mpre[i - 1] = np.maximum(mpre[i - 1], mpre[i])
# to calculate area under PR curve, look for points
# where X axis (recall) changes value
i = np.where(mrec[1:] != mrec[:-1])[0]
# and sum (\Delta recall) * prec
ap = np.sum((mrec[i + 1] - mrec[i]) * mpre[i + 1])
return ap
def voc_eval(detpath, annopath, imagesetfile, classname, ovthresh=0.5, use_07_metric=False):
"""rec, prec, ap = voc_eval(detpath,
annopath,
imagesetfile,
classname,
[ovthresh],
[use_07_metric])
Top level function that does the PASCAL VOC evaluation.
detpath: Path to detections
detpath.format(classname) should produce the detection results file.
annopath: Path to annotations
annopath.format(imagename) should be the xml annotations file.
imagesetfile: Text file containing the list of images, one image per line.
classname: Category name (duh)
[ovthresh]: Overlap threshold (default = 0.5)
[use_07_metric]: Whether to use VOC07's 11 point AP computation
(default False)
"""
# assumes detections are in detpath.format(classname)
# assumes annotations are in annopath.format(imagename)
# assumes imagesetfile is a text file with each line an image name
# first load gt
# read list of images
with PathManager.open(imagesetfile, "r") as f:
lines = f.readlines()
imagenames = [x.strip() for x in lines]
# load annots
recs = {}
for imagename in imagenames:
recs[imagename] = parse_rec(annopath.format(imagename))
# extract gt objects for this class
class_recs = {}
npos = 0
for imagename in imagenames:
R = [obj for obj in recs[imagename] if obj["name"] == classname]
bbox = np.array([x["bbox"] for x in R])
        difficult = np.array([x["difficult"] for x in R]).astype(bool)
# difficult = np.array([False for x in R]).astype(np.bool) # treat all "difficult" as GT
det = [False] * len(R)
npos = npos + sum(~difficult)
class_recs[imagename] = {"bbox": bbox, "difficult": difficult, "det": det}
# read dets
detfile = detpath.format(classname)
with open(detfile, "r") as f:
lines = f.readlines()
splitlines = [x.strip().split(" ") for x in lines]
image_ids = [x[0] for x in splitlines]
confidence = np.array([float(x[1]) for x in splitlines])
BB = np.array([[float(z) for z in x[2:]] for x in splitlines]).reshape(-1, 4)
# sort by confidence
sorted_ind = np.argsort(-confidence)
BB = BB[sorted_ind, :]
image_ids = [image_ids[x] for x in sorted_ind]
# go down dets and mark TPs and FPs
nd = len(image_ids)
tp = np.zeros(nd)
fp = np.zeros(nd)
for d in range(nd):
R = class_recs[image_ids[d]]
bb = BB[d, :].astype(float)
ovmax = -np.inf
BBGT = R["bbox"].astype(float)
if BBGT.size > 0:
# compute overlaps
# intersection
ixmin = np.maximum(BBGT[:, 0], bb[0])
iymin = np.maximum(BBGT[:, 1], bb[1])
ixmax = np.minimum(BBGT[:, 2], bb[2])
iymax = np.minimum(BBGT[:, 3], bb[3])
iw = np.maximum(ixmax - ixmin + 1.0, 0.0)
ih = np.maximum(iymax - iymin + 1.0, 0.0)
inters = iw * ih
# union
uni = (
(bb[2] - bb[0] + 1.0) * (bb[3] - bb[1] + 1.0)
+ (BBGT[:, 2] - BBGT[:, 0] + 1.0) * (BBGT[:, 3] - BBGT[:, 1] + 1.0)
- inters
)
overlaps = inters / uni
ovmax = np.max(overlaps)
jmax = np.argmax(overlaps)
if ovmax > ovthresh:
if not R["difficult"][jmax]:
if not R["det"][jmax]:
tp[d] = 1.0
R["det"][jmax] = 1
else:
fp[d] = 1.0
else:
fp[d] = 1.0
# compute precision recall
fp = np.cumsum(fp)
tp = np.cumsum(tp)
rec = tp / float(npos)
# avoid divide by zero in case the first detection matches a difficult
# ground truth
prec = tp / np.maximum(tp + fp, np.finfo(np.float64).eps)
ap = voc_ap(rec, prec, use_07_metric)
return rec, prec, ap
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import logging
import numpy as np
import operator
import json
import torch.utils.data
from detectron2.utils.comm import get_world_size
from detectron2.data.common import (
DatasetFromList,
MapDataset,
)
from detectron2.data.dataset_mapper import DatasetMapper
from detectron2.data.samplers import (
InferenceSampler,
RepeatFactorTrainingSampler,
TrainingSampler,
)
from detectron2.data.build import (
trivial_batch_collator,
worker_init_reset_seed,
get_detection_dataset_dicts,
build_batch_data_loader,
)
from adapteacher.data.common import (
AspectRatioGroupedSemiSupDatasetTwoCrop,
)
"""
This file contains the default logic to build a dataloader for training or testing.
"""
def divide_label_unlabel(
dataset_dicts, SupPercent, random_data_seed, random_data_seed_path
):
num_all = len(dataset_dicts)
num_label = int(SupPercent / 100.0 * num_all)
# read from pre-generated data seed
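    # Expected seed-file layout (inferred from the lookup below):
    #   {"<sup_percent>": {"<random_seed>": [indices of labeled images]}}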
with open(random_data_seed_path) as COCO_sup_file:
coco_random_idx = json.load(COCO_sup_file)
labeled_idx = np.array(coco_random_idx[str(SupPercent)][str(random_data_seed)])
    assert labeled_idx.shape[0] == num_label, "Number of labeled indices in the seed file does not match the expected count."
label_dicts = []
unlabel_dicts = []
labeled_idx = set(labeled_idx)
for i in range(len(dataset_dicts)):
if i in labeled_idx:
label_dicts.append(dataset_dicts[i])
else:
unlabel_dicts.append(dataset_dicts[i])
return label_dicts, unlabel_dicts
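# Tiny usage sketch (not part of the original file): writes a throwaway seed
# file in the layout divide_label_unlabel expects and splits a toy dataset.
def _divide_label_unlabel_demo():
    import tempfile
    dataset_dicts = [{"image_id": i} for i in range(10)]
    with tempfile.NamedTemporaryFile("w", suffix=".json", delete=False) as f:
        json.dump({"20.0": {"1": [2, 7]}}, f)
        seed_path = f.name
    label_dicts, unlabel_dicts = divide_label_unlabel(dataset_dicts, 20.0, 1, seed_path)
    assert len(label_dicts) == 2 and len(unlabel_dicts) == 8
    return label_dicts, unlabel_dicts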
# used by the supervised-only baseline trainer
def build_detection_semisup_train_loader(cfg, mapper=None):
dataset_dicts = get_detection_dataset_dicts(
cfg.DATASETS.TRAIN,
filter_empty=cfg.DATALOADER.FILTER_EMPTY_ANNOTATIONS,
min_keypoints=cfg.MODEL.ROI_KEYPOINT_HEAD.MIN_KEYPOINTS_PER_IMAGE
if cfg.MODEL.KEYPOINT_ON
else 0,
proposal_files=cfg.DATASETS.PROPOSAL_FILES_TRAIN
if cfg.MODEL.LOAD_PROPOSALS
else None,
)
# Divide into labeled and unlabeled sets according to supervision percentage
label_dicts, unlabel_dicts = divide_label_unlabel(
dataset_dicts,
cfg.DATALOADER.SUP_PERCENT,
cfg.DATALOADER.RANDOM_DATA_SEED,
cfg.DATALOADER.RANDOM_DATA_SEED_PATH,
)
dataset = DatasetFromList(label_dicts, copy=False)
if mapper is None:
mapper = DatasetMapper(cfg, True)
dataset = MapDataset(dataset, mapper)
sampler_name = cfg.DATALOADER.SAMPLER_TRAIN
logger = logging.getLogger(__name__)
logger.info("Using training sampler {}".format(sampler_name))
if sampler_name == "TrainingSampler":
sampler = TrainingSampler(len(dataset))
elif sampler_name == "RepeatFactorTrainingSampler":
repeat_factors = (
RepeatFactorTrainingSampler.repeat_factors_from_category_frequency(
label_dicts, cfg.DATALOADER.REPEAT_THRESHOLD
)
)
sampler = RepeatFactorTrainingSampler(repeat_factors)
else:
raise ValueError("Unknown training sampler: {}".format(sampler_name))
# list num of labeled and unlabeled
logger.info("Number of training samples " + str(len(dataset)))
logger.info("Supervision percentage " + str(cfg.DATALOADER.SUP_PERCENT))
return build_batch_data_loader(
dataset,
sampler,
cfg.SOLVER.IMS_PER_BATCH,
aspect_ratio_grouping=cfg.DATALOADER.ASPECT_RATIO_GROUPING,
num_workers=cfg.DATALOADER.NUM_WORKERS,
)
# used by evaluation
def build_detection_test_loader(cfg, dataset_name, mapper=None):
dataset_dicts = get_detection_dataset_dicts(
[dataset_name],
filter_empty=False,
proposal_files=[
cfg.DATASETS.PROPOSAL_FILES_TEST[
list(cfg.DATASETS.TEST).index(dataset_name)
]
]
if cfg.MODEL.LOAD_PROPOSALS
else None,
)
dataset = DatasetFromList(dataset_dicts)
if mapper is None:
mapper = DatasetMapper(cfg, False)
dataset = MapDataset(dataset, mapper)
sampler = InferenceSampler(len(dataset))
batch_sampler = torch.utils.data.sampler.BatchSampler(sampler, 1, drop_last=False)
data_loader = torch.utils.data.DataLoader(
dataset,
num_workers=cfg.DATALOADER.NUM_WORKERS,
batch_sampler=batch_sampler,
collate_fn=trivial_batch_collator,
)
return data_loader
# used by the unbiased teacher trainer
def build_detection_semisup_train_loader_two_crops(cfg, mapper=None):
if cfg.DATASETS.CROSS_DATASET: # cross-dataset (e.g., coco-additional)
label_dicts = get_detection_dataset_dicts(
cfg.DATASETS.TRAIN_LABEL,
filter_empty=cfg.DATALOADER.FILTER_EMPTY_ANNOTATIONS,
min_keypoints=cfg.MODEL.ROI_KEYPOINT_HEAD.MIN_KEYPOINTS_PER_IMAGE
if cfg.MODEL.KEYPOINT_ON
else 0,
proposal_files=cfg.DATASETS.PROPOSAL_FILES_TRAIN
if cfg.MODEL.LOAD_PROPOSALS
else None,
)
unlabel_dicts = get_detection_dataset_dicts(
cfg.DATASETS.TRAIN_UNLABEL,
filter_empty=False,
min_keypoints=cfg.MODEL.ROI_KEYPOINT_HEAD.MIN_KEYPOINTS_PER_IMAGE
if cfg.MODEL.KEYPOINT_ON
else 0,
proposal_files=cfg.DATASETS.PROPOSAL_FILES_TRAIN
if cfg.MODEL.LOAD_PROPOSALS
else None,
)
else: # different degree of supervision (e.g., COCO-supervision)
dataset_dicts = get_detection_dataset_dicts(
cfg.DATASETS.TRAIN,
filter_empty=cfg.DATALOADER.FILTER_EMPTY_ANNOTATIONS,
min_keypoints=cfg.MODEL.ROI_KEYPOINT_HEAD.MIN_KEYPOINTS_PER_IMAGE
if cfg.MODEL.KEYPOINT_ON
else 0,
proposal_files=cfg.DATASETS.PROPOSAL_FILES_TRAIN
if cfg.MODEL.LOAD_PROPOSALS
else None,
)
# Divide into labeled and unlabeled sets according to supervision percentage
label_dicts, unlabel_dicts = divide_label_unlabel(
dataset_dicts,
cfg.DATALOADER.SUP_PERCENT,
cfg.DATALOADER.RANDOM_DATA_SEED,
cfg.DATALOADER.RANDOM_DATA_SEED_PATH,
)
label_dataset = DatasetFromList(label_dicts, copy=False)
# exclude the labeled set from unlabeled dataset
unlabel_dataset = DatasetFromList(unlabel_dicts, copy=False)
# include the labeled set in unlabel dataset
# unlabel_dataset = DatasetFromList(dataset_dicts, copy=False)
if mapper is None:
mapper = DatasetMapper(cfg, True)
label_dataset = MapDataset(label_dataset, mapper)
unlabel_dataset = MapDataset(unlabel_dataset, mapper)
sampler_name = cfg.DATALOADER.SAMPLER_TRAIN
logger = logging.getLogger(__name__)
logger.info("Using training sampler {}".format(sampler_name))
if sampler_name == "TrainingSampler":
label_sampler = TrainingSampler(len(label_dataset))
unlabel_sampler = TrainingSampler(len(unlabel_dataset))
elif sampler_name == "RepeatFactorTrainingSampler":
raise NotImplementedError("{} not yet supported.".format(sampler_name))
else:
raise ValueError("Unknown training sampler: {}".format(sampler_name))
return build_semisup_batch_data_loader_two_crop(
(label_dataset, unlabel_dataset),
(label_sampler, unlabel_sampler),
cfg.SOLVER.IMG_PER_BATCH_LABEL,
cfg.SOLVER.IMG_PER_BATCH_UNLABEL,
aspect_ratio_grouping=cfg.DATALOADER.ASPECT_RATIO_GROUPING,
num_workers=cfg.DATALOADER.NUM_WORKERS,
)
# batch data loader
def build_semisup_batch_data_loader_two_crop(
dataset,
sampler,
total_batch_size_label,
total_batch_size_unlabel,
*,
aspect_ratio_grouping=False,
num_workers=0
):
world_size = get_world_size()
assert (
total_batch_size_label > 0 and total_batch_size_label % world_size == 0
), "Total label batch size ({}) must be divisible by the number of gpus ({}).".format(
total_batch_size_label, world_size
)
assert (
total_batch_size_unlabel > 0 and total_batch_size_unlabel % world_size == 0
), "Total unlabel batch size ({}) must be divisible by the number of gpus ({}).".format(
        total_batch_size_unlabel, world_size
)
batch_size_label = total_batch_size_label // world_size
batch_size_unlabel = total_batch_size_unlabel // world_size
label_dataset, unlabel_dataset = dataset
label_sampler, unlabel_sampler = sampler
if aspect_ratio_grouping:
label_data_loader = torch.utils.data.DataLoader(
label_dataset,
sampler=label_sampler,
num_workers=num_workers,
batch_sampler=None,
collate_fn=operator.itemgetter(
0
), # don't batch, but yield individual elements
worker_init_fn=worker_init_reset_seed,
) # yield individual mapped dict
unlabel_data_loader = torch.utils.data.DataLoader(
unlabel_dataset,
sampler=unlabel_sampler,
num_workers=num_workers,
batch_sampler=None,
collate_fn=operator.itemgetter(
0
), # don't batch, but yield individual elements
worker_init_fn=worker_init_reset_seed,
) # yield individual mapped dict
return AspectRatioGroupedSemiSupDatasetTwoCrop(
(label_data_loader, unlabel_data_loader),
(batch_size_label, batch_size_unlabel),
)
else:
raise NotImplementedError("ASPECT_RATIO_GROUPING = False is not supported yet") |
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
from .build import (
build_detection_test_loader,
build_detection_semisup_train_loader,
)
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import logging
import torchvision.transforms as transforms
from adapteacher.data.transforms.augmentation_impl import (
GaussianBlur,
)
def build_strong_augmentation(cfg, is_train):
"""
    Create a strong augmentation pipeline from config.
    It includes color jittering, random grayscale, Gaussian blur and random erasing.
    Returns:
        torchvision.transforms.Compose
"""
logger = logging.getLogger(__name__)
augmentation = []
if is_train:
        # This is similar to SimCLR https://arxiv.org/abs/2002.05709
augmentation.append(
transforms.RandomApply([transforms.ColorJitter(0.4, 0.4, 0.4, 0.1)], p=0.8)
)
augmentation.append(transforms.RandomGrayscale(p=0.2))
augmentation.append(transforms.RandomApply([GaussianBlur([0.1, 2.0])], p=0.5))
randcrop_transform = transforms.Compose(
[
transforms.ToTensor(),
transforms.RandomErasing(
p=0.7, scale=(0.05, 0.2), ratio=(0.3, 3.3), value="random"
),
transforms.RandomErasing(
p=0.5, scale=(0.02, 0.2), ratio=(0.1, 6), value="random"
),
transforms.RandomErasing(
p=0.3, scale=(0.02, 0.2), ratio=(0.05, 8), value="random"
),
transforms.ToPILImage(),
]
)
augmentation.append(randcrop_transform)
logger.info("Augmentations used in training: " + str(augmentation))
return transforms.Compose(augmentation) |
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import copy
import logging
import numpy as np
from PIL import Image
import torch
import detectron2.data.detection_utils as utils
import detectron2.data.transforms as T
from detectron2.data.dataset_mapper import DatasetMapper
from adapteacher.data.detection_utils import build_strong_augmentation
class DatasetMapperTwoCropSeparate(DatasetMapper):
"""
This customized mapper produces two augmented images from a single image
instance. This mapper makes sure that the two augmented images have the same
cropping and thus the same size.
    A callable which takes a dataset dict in Detectron2 Dataset format,
    and maps it into a format used by the model.
    This is the default callable used to map a dataset dict into training data.
    You may follow it to implement your own mapper for customized logic,
    such as a different way to read or transform images.
    See :doc:`/tutorials/data_loading` for details.
    The callable currently does the following:
    1. Reads the image from "file_name"
    2. Applies cropping/geometric transforms to the image and annotations
    3. Prepares data and annotations to Tensor and :class:`Instances`
"""
def __init__(self, cfg, is_train=True):
self.augmentation = utils.build_augmentation(cfg, is_train)
# include crop into self.augmentation
if cfg.INPUT.CROP.ENABLED and is_train:
self.augmentation.insert(
0, T.RandomCrop(cfg.INPUT.CROP.TYPE, cfg.INPUT.CROP.SIZE)
)
logging.getLogger(__name__).info(
"Cropping used in training: " + str(self.augmentation[0])
)
self.compute_tight_boxes = True
else:
self.compute_tight_boxes = False
self.strong_augmentation = build_strong_augmentation(cfg, is_train)
# fmt: off
self.img_format = cfg.INPUT.FORMAT
self.mask_on = cfg.MODEL.MASK_ON
self.mask_format = cfg.INPUT.MASK_FORMAT
self.keypoint_on = cfg.MODEL.KEYPOINT_ON
self.load_proposals = cfg.MODEL.LOAD_PROPOSALS
# fmt: on
if self.keypoint_on and is_train:
self.keypoint_hflip_indices = utils.create_keypoint_hflip_indices(
cfg.DATASETS.TRAIN
)
else:
self.keypoint_hflip_indices = None
if self.load_proposals:
self.proposal_min_box_size = cfg.MODEL.PROPOSAL_GENERATOR.MIN_SIZE
self.proposal_topk = (
cfg.DATASETS.PRECOMPUTED_PROPOSAL_TOPK_TRAIN
if is_train
else cfg.DATASETS.PRECOMPUTED_PROPOSAL_TOPK_TEST
)
self.is_train = is_train
def __call__(self, dataset_dict):
"""
Args:
dataset_dict (dict): Metadata of one image, in Detectron2 Dataset format.
Returns:
dict: a format that builtin models in detectron2 accept
"""
dataset_dict = copy.deepcopy(dataset_dict) # it will be modified by code below
image = utils.read_image(dataset_dict["file_name"], format=self.img_format)
# utils.check_image_size(dataset_dict, image)
if "sem_seg_file_name" in dataset_dict:
sem_seg_gt = utils.read_image(
dataset_dict.pop("sem_seg_file_name"), "L"
).squeeze(2)
else:
sem_seg_gt = None
aug_input = T.StandardAugInput(image, sem_seg=sem_seg_gt)
transforms = aug_input.apply_augmentations(self.augmentation)
image_weak_aug, sem_seg_gt = aug_input.image, aug_input.sem_seg
image_shape = image_weak_aug.shape[:2] # h, w
if sem_seg_gt is not None:
dataset_dict["sem_seg"] = torch.as_tensor(sem_seg_gt.astype("long"))
if self.load_proposals:
utils.transform_proposals(
dataset_dict,
image_shape,
transforms,
proposal_topk=self.proposal_topk,
min_box_size=self.proposal_min_box_size,
)
if not self.is_train:
dataset_dict.pop("annotations", None)
dataset_dict.pop("sem_seg_file_name", None)
return dataset_dict
if "annotations" in dataset_dict:
for anno in dataset_dict["annotations"]:
if not self.mask_on:
anno.pop("segmentation", None)
if not self.keypoint_on:
anno.pop("keypoints", None)
annos = [
utils.transform_instance_annotations(
obj,
transforms,
image_shape,
keypoint_hflip_indices=self.keypoint_hflip_indices,
)
for obj in dataset_dict.pop("annotations")
if obj.get("iscrowd", 0) == 0
]
instances = utils.annotations_to_instances(
annos, image_shape, mask_format=self.mask_format
)
if self.compute_tight_boxes and instances.has("gt_masks"):
instances.gt_boxes = instances.gt_masks.get_bounding_boxes()
bboxes_d2_format = utils.filter_empty_instances(instances)
dataset_dict["instances"] = bboxes_d2_format
# apply strong augmentation
        # We use torchvision augmentations, which are not compatible with
        # detectron2 (it uses numpy arrays for images). Thus, we need to
        # convert to PIL format first.
image_pil = Image.fromarray(image_weak_aug.astype("uint8"), "RGB")
image_strong_aug = np.array(self.strong_augmentation(image_pil))
dataset_dict["image"] = torch.as_tensor(
np.ascontiguousarray(image_strong_aug.transpose(2, 0, 1))
)
dataset_dict_key = copy.deepcopy(dataset_dict)
dataset_dict_key["image"] = torch.as_tensor(
np.ascontiguousarray(image_weak_aug.transpose(2, 0, 1))
)
assert dataset_dict["image"].size(1) == dataset_dict_key["image"].size(1)
assert dataset_dict["image"].size(2) == dataset_dict_key["image"].size(2)
return (dataset_dict, dataset_dict_key)
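# --- Illustrative usage sketch (added; not part of the original pipeline) ----
# The mapper above returns a pair of dicts for a single dataset record: index 0
# holds the strongly augmented image, index 1 the weakly augmented one, and both
# share the same geometric transforms and image size. `cfg` and `dataset_dict`
# are assumptions supplied by the caller.
def _example_map_one_record(cfg, dataset_dict):
    mapper = DatasetMapperTwoCropSeparate(cfg, is_train=True)
    strong_dict, weak_dict = mapper(dataset_dict)
    assert strong_dict["image"].shape == weak_dict["image"].shape
    return strong_dict, weak_dict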
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import logging
from detectron2.data.common import MapDataset, AspectRatioGroupedDataset
class MapDatasetTwoCrop(MapDataset):
"""
Map a function over the elements in a dataset.
This customized MapDataset transforms an image with two augmentations
as two inputs (queue and key).
Args:
dataset: a dataset where map function is applied.
map_func: a callable which maps the element in dataset. map_func is
responsible for error handling, when error happens, it needs to
return None so the MapDataset will randomly use other
elements from the dataset.
"""
def __getitem__(self, idx):
retry_count = 0
cur_idx = int(idx)
while True:
data = self._map_func(self._dataset[cur_idx])
if data is not None:
self._fallback_candidates.add(cur_idx)
return data
# _map_func fails for this idx, use a random new index from the pool
retry_count += 1
self._fallback_candidates.discard(cur_idx)
cur_idx = self._rng.sample(self._fallback_candidates, k=1)[0]
if retry_count >= 3:
logger = logging.getLogger(__name__)
logger.warning(
"Failed to apply `_map_func` for idx: {}, retry count: {}".format(
idx, retry_count
)
)
class AspectRatioGroupedDatasetTwoCrop(AspectRatioGroupedDataset):
"""
Batch data that have similar aspect ratio together.
In this implementation, images whose aspect ratio < (or >) 1 will
be batched together.
This improves training speed because the images then need less padding
to form a batch.
It assumes the underlying dataset produces dicts with "width" and "height" keys.
It will then produce a list of original dicts with length = batch_size,
all with similar aspect ratios.
"""
def __init__(self, dataset, batch_size):
"""
Args:
dataset: an iterable. Each element must be a dict with keys
"width" and "height", which will be used to batch data.
batch_size (int):
"""
self.dataset = dataset
self.batch_size = batch_size
self._buckets = [[] for _ in range(2)]
self._buckets_key = [[] for _ in range(2)]
# Hard-coded two aspect ratio groups: w > h and w < h.
# Can add support for more aspect ratio groups, but doesn't seem useful
def __iter__(self):
for d in self.dataset:
# d is a tuple with len = 2
# It's two images (same size) from the same image instance
w, h = d[0]["width"], d[0]["height"]
bucket_id = 0 if w > h else 1
# bucket = bucket for normal images
bucket = self._buckets[bucket_id]
bucket.append(d[0])
# buckets_key = bucket for augmented images
buckets_key = self._buckets_key[bucket_id]
buckets_key.append(d[1])
if len(bucket) == self.batch_size:
yield (bucket[:], buckets_key[:])
del bucket[:]
del buckets_key[:]
class AspectRatioGroupedSemiSupDatasetTwoCrop(AspectRatioGroupedDataset):
"""
Batch data that have similar aspect ratio together.
In this implementation, images whose aspect ratio < (or >) 1 will
be batched together.
This improves training speed because the images then need less padding
to form a batch.
It assumes the underlying dataset produces dicts with "width" and "height" keys.
It will then produce a list of original dicts with length = batch_size,
all with similar aspect ratios.
"""
def __init__(self, dataset, batch_size):
"""
Args:
            dataset: a tuple of two iterables (labeled and unlabeled data).
Each element must be a dict with keys "width" and "height", which will be used
to batch data.
batch_size (int):
"""
self.label_dataset, self.unlabel_dataset = dataset
self.batch_size_label = batch_size[0]
self.batch_size_unlabel = batch_size[1]
self._label_buckets = [[] for _ in range(2)]
self._label_buckets_key = [[] for _ in range(2)]
self._unlabel_buckets = [[] for _ in range(2)]
self._unlabel_buckets_key = [[] for _ in range(2)]
# Hard-coded two aspect ratio groups: w > h and w < h.
# Can add support for more aspect ratio groups, but doesn't seem useful
def __iter__(self):
label_bucket, unlabel_bucket = [], []
for d_label, d_unlabel in zip(self.label_dataset, self.unlabel_dataset):
# d is a tuple with len = 2
# It's two images (same size) from the same image instance
# d[0] is with strong augmentation, d[1] is with weak augmentation
# because we are grouping images with their aspect ratio
            # the labeled and unlabeled buckets might not fill at the same rate,
            # i.e., one could reach batch_size while the other has not yet
if len(label_bucket) != self.batch_size_label:
w, h = d_label[0]["width"], d_label[0]["height"]
label_bucket_id = 0 if w > h else 1
label_bucket = self._label_buckets[label_bucket_id]
label_bucket.append(d_label[0])
label_buckets_key = self._label_buckets_key[label_bucket_id]
label_buckets_key.append(d_label[1])
if len(unlabel_bucket) != self.batch_size_unlabel:
w, h = d_unlabel[0]["width"], d_unlabel[0]["height"]
unlabel_bucket_id = 0 if w > h else 1
unlabel_bucket = self._unlabel_buckets[unlabel_bucket_id]
unlabel_bucket.append(d_unlabel[0])
unlabel_buckets_key = self._unlabel_buckets_key[unlabel_bucket_id]
unlabel_buckets_key.append(d_unlabel[1])
            # yield the batch once both buckets are full
if (
len(label_bucket) == self.batch_size_label
and len(unlabel_bucket) == self.batch_size_unlabel
):
                # label_strong, label_weak, unlabeled_strong, unlabeled_weak
yield (
label_bucket[:],
label_buckets_key[:],
unlabel_bucket[:],
unlabel_buckets_key[:],
)
del label_bucket[:]
del label_buckets_key[:]
del unlabel_bucket[:]
del unlabel_buckets_key[:]
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import os
import contextlib
from detectron2.data import DatasetCatalog, MetadataCatalog
from fvcore.common.timer import Timer
# from fvcore.common.file_io import PathManager
from iopath.common.file_io import PathManager
from detectron2.data.datasets.pascal_voc import register_pascal_voc
from detectron2.data.datasets.builtin_meta import _get_builtin_metadata
from .cityscapes_foggy import load_cityscapes_instances
import io
import logging
logger = logging.getLogger(__name__)
JSON_ANNOTATIONS_DIR = ""
_SPLITS_COCO_FORMAT = {}
_SPLITS_COCO_FORMAT["coco"] = {
"coco_2017_unlabel": (
"coco/unlabeled2017",
"coco/annotations/image_info_unlabeled2017.json",
),
"coco_2017_for_voc20": (
"coco",
"coco/annotations/google/instances_unlabeledtrainval20class.json",
),
}
def register_coco_unlabel(root):
for _, splits_per_dataset in _SPLITS_COCO_FORMAT.items():
for key, (image_root, json_file) in splits_per_dataset.items():
meta = {}
register_coco_unlabel_instances(
key, meta, os.path.join(root, json_file), os.path.join(root, image_root)
)
def register_coco_unlabel_instances(name, metadata, json_file, image_root):
"""
Register a dataset in COCO's json annotation format for
instance detection, instance segmentation and keypoint detection.
(i.e., Type 1 and 2 in http://cocodataset.org/#format-data.
`instances*.json` and `person_keypoints*.json` in the dataset).
This is an example of how to register a new dataset.
You can do something similar to this function, to register new datasets.
Args:
name (str): the name that identifies a dataset, e.g. "coco_2014_train".
metadata (dict): extra metadata associated with this dataset. You can
leave it as an empty dict.
json_file (str): path to the json instance annotation file.
image_root (str or path-like): directory which contains all the images.
"""
assert isinstance(name, str), name
assert isinstance(json_file, (str, os.PathLike)), json_file
assert isinstance(image_root, (str, os.PathLike)), image_root
# 1. register a function which returns dicts
DatasetCatalog.register(
name, lambda: load_coco_unlabel_json(json_file, image_root, name)
)
# 2. Optionally, add metadata about this dataset,
# since they might be useful in evaluation, visualization or logging
MetadataCatalog.get(name).set(
json_file=json_file, image_root=image_root, evaluator_type="coco", **metadata
)
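# --- Illustrative usage sketch (added; not part of the original pipeline) ----
# Example of registering a custom unlabeled split and fetching its dicts; the
# dataset name and file paths below are hypothetical placeholders.
def _example_register_my_unlabel_set():
    register_coco_unlabel_instances(
        "my_unlabel_train",
        {},
        json_file="datasets/my_dataset/annotations/image_info_unlabel.json",
        image_root="datasets/my_dataset/unlabel_images",
    )
    return DatasetCatalog.get("my_unlabel_train")  # list[dict] with file_name/height/width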
def load_coco_unlabel_json(
json_file, image_root, dataset_name=None, extra_annotation_keys=None
):
from pycocotools.coco import COCO
timer = Timer()
json_file = PathManager.get_local_path(json_file)
with contextlib.redirect_stdout(io.StringIO()):
coco_api = COCO(json_file)
if timer.seconds() > 1:
logger.info(
"Loading {} takes {:.2f} seconds.".format(json_file, timer.seconds())
)
id_map = None
# sort indices for reproducible results
img_ids = sorted(coco_api.imgs.keys())
imgs = coco_api.loadImgs(img_ids)
logger.info("Loaded {} images in COCO format from {}".format(len(imgs), json_file))
dataset_dicts = []
for img_dict in imgs:
record = {}
record["file_name"] = os.path.join(image_root, img_dict["file_name"])
record["height"] = img_dict["height"]
record["width"] = img_dict["width"]
image_id = record["image_id"] = img_dict["id"]
dataset_dicts.append(record)
return dataset_dicts
_root = os.getenv("DETECTRON2_DATASETS", "datasets")
register_coco_unlabel(_root)
# ==== Predefined splits for raw cityscapes foggy images ===========
_RAW_CITYSCAPES_SPLITS = {
# "cityscapes_foggy_{task}_train": ("cityscape_foggy/leftImg8bit/train/", "cityscape_foggy/gtFine/train/"),
# "cityscapes_foggy_{task}_val": ("cityscape_foggy/leftImg8bit/val/", "cityscape_foggy/gtFine/val/"),
# "cityscapes_foggy_{task}_test": ("cityscape_foggy/leftImg8bit/test/", "cityscape_foggy/gtFine/test/"),
"cityscapes_foggy_train": ("cityscapes_foggy/leftImg8bit/train/", "cityscapes_foggy/gtFine/train/"),
"cityscapes_foggy_val": ("cityscapes_foggy/leftImg8bit/val/", "cityscapes_foggy/gtFine/val/"),
"cityscapes_foggy_test": ("cityscapes_foggy/leftImg8bit/test/", "cityscapes_foggy/gtFine/test/"),
}
def register_all_cityscapes_foggy(root):
# root = "manifold://mobile_vision_dataset/tree/yujheli/dataset"
for key, (image_dir, gt_dir) in _RAW_CITYSCAPES_SPLITS.items():
meta = _get_builtin_metadata("cityscapes")
image_dir = os.path.join(root, image_dir)
gt_dir = os.path.join(root, gt_dir)
# inst_key = key.format(task="instance_seg")
inst_key = key
# DatasetCatalog.register(
# inst_key,
# lambda x=image_dir, y=gt_dir: load_cityscapes_instances(
# x, y, from_json=True, to_polygons=True
# ),
# )
DatasetCatalog.register(
inst_key,
lambda x=image_dir, y=gt_dir: load_cityscapes_instances(
x, y, from_json=False, to_polygons=False
),
)
# MetadataCatalog.get(inst_key).set(
# image_dir=image_dir, gt_dir=gt_dir, evaluator_type="cityscapes_instance", **meta
# )
# MetadataCatalog.get(inst_key).set(
# image_dir=image_dir, gt_dir=gt_dir, evaluator_type="pascal_voc", **meta
# )
MetadataCatalog.get(inst_key).set(
image_dir=image_dir, gt_dir=gt_dir, evaluator_type="coco", **meta
)
# ==== Predefined splits for Clipart (PASCAL VOC format) ===========
def register_all_clipart(root):
# root = "manifold://mobile_vision_dataset/tree/yujheli/dataset"
SPLITS = [
("Clipart1k_train", "clipart", "train"),
("Clipart1k_test", "clipart", "test"),
]
for name, dirname, split in SPLITS:
year = 2012
register_pascal_voc(name, os.path.join(root, dirname), split, year)
MetadataCatalog.get(name).evaluator_type = "pascal_voc"
# MetadataCatalog.get(name).evaluator_type = "coco"
# ==== Predefined splits for Watercolor (PASCAL VOC format) ===========
def register_all_water(root):
# root = "manifold://mobile_vision_dataset/tree/yujheli/dataset"
SPLITS = [
("Watercolor_train", "watercolor", "train"),
("Watercolor_test", "watercolor", "test"),
]
for name, dirname, split in SPLITS:
year = 2012
# register_pascal_voc(name, os.path.join(root, dirname), split, year, class_names=["person", "dog","bicycle", "bird", "car", "cat"])
register_pascal_voc(name, os.path.join(root, dirname), split, year)
MetadataCatalog.get(name).evaluator_type = "pascal_voc_water"
# MetadataCatalog.get(name).thing_classes = ["person", "dog","bike", "bird", "car", "cat"]
# MetadataCatalog.get(name).thing_classes = ["person", "dog","bicycle", "bird", "car", "cat"]
# MetadataCatalog.get(name).evaluator_type = "coco"
register_all_cityscapes_foggy(_root)
register_all_clipart(_root)
register_all_water(_root)
|
# Copyright (c) Facebook, Inc. and its affiliates.
import functools
import json
import logging
import multiprocessing as mp
import numpy as np
import os
from itertools import chain
import pycocotools.mask as mask_util
from PIL import Image
from detectron2.structures import BoxMode
from detectron2.utils.comm import get_world_size
from detectron2.utils.file_io import PathManager
from detectron2.utils.logger import setup_logger
try:
import cv2 # noqa
except ImportError:
# OpenCV is an optional dependency at the moment
pass
logger = logging.getLogger(__name__)
load_only_002 = False
def _get_cityscapes_files(image_dir, gt_dir):
files = []
# scan through the directory
cities = PathManager.ls(image_dir)
logger.info(f"{len(cities)} cities found in '{image_dir}'.")
for city in cities:
city_img_dir = os.path.join(image_dir, city)
city_gt_dir = os.path.join(gt_dir, city)
for basename in PathManager.ls(city_img_dir):
if load_only_002 and '0.02.png' not in basename:
continue
image_file = os.path.join(city_img_dir, basename)
# suffix = "leftImg8bit.png"
# assert basename.endswith(suffix), basename
# basename = basename[: -len(suffix)]
suffix = 'leftImg8bit_foggy'
basename = basename.split(suffix)[0]
instance_file = os.path.join(city_gt_dir, basename + "gtFine_instanceIds.png")
label_file = os.path.join(city_gt_dir, basename + "gtFine_labelIds.png")
json_file = os.path.join(city_gt_dir, basename + "gtFine_polygons.json")
files.append((image_file, instance_file, label_file, json_file))
assert len(files), "No images found in {}".format(image_dir)
for f in files[0]:
assert PathManager.isfile(f), f
return files
def load_cityscapes_instances(image_dir, gt_dir, from_json=True, to_polygons=True):
"""
Args:
image_dir (str): path to the raw dataset. e.g., "~/cityscapes/leftImg8bit/train".
gt_dir (str): path to the raw annotations. e.g., "~/cityscapes/gtFine/train".
from_json (bool): whether to read annotations from the raw json file or the png files.
to_polygons (bool): whether to represent the segmentation as polygons
(COCO's format) instead of masks (cityscapes's format).
Returns:
list[dict]: a list of dicts in Detectron2 standard format. (See
`Using Custom Datasets </tutorials/datasets.html>`_ )
"""
if from_json:
assert to_polygons, (
"Cityscapes's json annotations are in polygon format. "
"Converting to mask format is not supported now."
)
files = _get_cityscapes_files(image_dir, gt_dir)
logger.info("Preprocessing cityscapes annotations ...")
    # This is still not fast: all workers will execute duplicate work and it will
    # take up to 10 minutes on an 8-GPU server.
pool = mp.Pool(processes=max(mp.cpu_count() // get_world_size() // 2, 4))
ret = pool.map(
functools.partial(_cityscapes_files_to_dict, from_json=from_json, to_polygons=to_polygons),
files,
)
logger.info("Loaded {} images from {}".format(len(ret), image_dir))
pool.close()
# Map cityscape ids to contiguous ids
from cityscapesscripts.helpers.labels import labels
labels = [l for l in labels if l.hasInstances and not l.ignoreInEval]
dataset_id_to_contiguous_id = {l.id: idx for idx, l in enumerate(labels)}
for dict_per_image in ret:
for anno in dict_per_image["annotations"]:
anno["category_id"] = dataset_id_to_contiguous_id[anno["category_id"]]
return ret
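# --- Added descriptive note --------------------------------------------------
# Each dict returned above follows the Detectron2 standard format, roughly:
# {"file_name": ..., "image_id": ..., "height": ..., "width": ...,
#  "annotations": [{"bbox": (x0, y0, x1, y1), "bbox_mode": BoxMode.XYXY_ABS,
#                   "category_id": ..., "iscrowd": ..., "segmentation": ...}, ...]}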
def load_cityscapes_semantic(image_dir, gt_dir):
"""
Args:
image_dir (str): path to the raw dataset. e.g., "~/cityscapes/leftImg8bit/train".
gt_dir (str): path to the raw annotations. e.g., "~/cityscapes/gtFine/train".
Returns:
list[dict]: a list of dict, each has "file_name" and
"sem_seg_file_name".
"""
ret = []
    # gt_dir is small and contains many small files; it makes sense to fetch it to local storage first
gt_dir = PathManager.get_local_path(gt_dir)
for image_file, _, label_file, json_file in _get_cityscapes_files(image_dir, gt_dir):
label_file = label_file.replace("labelIds", "labelTrainIds")
with PathManager.open(json_file, "r") as f:
jsonobj = json.load(f)
ret.append(
{
"file_name": image_file,
"sem_seg_file_name": label_file,
"height": jsonobj["imgHeight"],
"width": jsonobj["imgWidth"],
}
)
assert len(ret), f"No images found in {image_dir}!"
assert PathManager.isfile(
ret[0]["sem_seg_file_name"]
), "Please generate labelTrainIds.png with cityscapesscripts/preparation/createTrainIdLabelImgs.py" # noqa
return ret
def _cityscapes_files_to_dict(files, from_json, to_polygons):
"""
    Parse cityscapes annotation files into an instance segmentation dataset dict.
Args:
files (tuple): consists of (image_file, instance_id_file, label_id_file, json_file)
from_json (bool): whether to read annotations from the raw json file or the png files.
to_polygons (bool): whether to represent the segmentation as polygons
(COCO's format) instead of masks (cityscapes's format).
Returns:
A dict in Detectron2 Dataset format.
"""
from cityscapesscripts.helpers.labels import id2label, name2label
image_file, instance_id_file, _, json_file = files
annos = []
if from_json:
from shapely.geometry import MultiPolygon, Polygon
with PathManager.open(json_file, "r") as f:
jsonobj = json.load(f)
ret = {
"file_name": image_file,
"image_id": os.path.basename(image_file),
"height": jsonobj["imgHeight"],
"width": jsonobj["imgWidth"],
}
# `polygons_union` contains the union of all valid polygons.
polygons_union = Polygon()
# CityscapesScripts draw the polygons in sequential order
# and each polygon *overwrites* existing ones. See
# (https://github.com/mcordts/cityscapesScripts/blob/master/cityscapesscripts/preparation/json2instanceImg.py) # noqa
# We use reverse order, and each polygon *avoids* early ones.
        # This will resolve the polygon overlaps in the same way as CityscapesScripts.
for obj in jsonobj["objects"][::-1]:
if "deleted" in obj: # cityscapes data format specific
continue
label_name = obj["label"]
try:
label = name2label[label_name]
except KeyError:
if label_name.endswith("group"): # crowd area
label = name2label[label_name[: -len("group")]]
else:
raise
if label.id < 0: # cityscapes data format
continue
            # Cityscapes's raw annotations use integer coordinates
# Therefore +0.5 here
poly_coord = np.asarray(obj["polygon"], dtype="f4") + 0.5
# CityscapesScript uses PIL.ImageDraw.polygon to rasterize
# polygons for evaluation. This function operates in integer space
# and draws each pixel whose center falls into the polygon.
# Therefore it draws a polygon which is 0.5 "fatter" in expectation.
# We therefore dilate the input polygon by 0.5 as our input.
poly = Polygon(poly_coord).buffer(0.5, resolution=4)
if not label.hasInstances or label.ignoreInEval:
# even if we won't store the polygon it still contributes to overlaps resolution
polygons_union = polygons_union.union(poly)
continue
# Take non-overlapping part of the polygon
poly_wo_overlaps = poly.difference(polygons_union)
if poly_wo_overlaps.is_empty:
continue
polygons_union = polygons_union.union(poly)
anno = {}
anno["iscrowd"] = label_name.endswith("group")
anno["category_id"] = label.id
if isinstance(poly_wo_overlaps, Polygon):
poly_list = [poly_wo_overlaps]
elif isinstance(poly_wo_overlaps, MultiPolygon):
poly_list = poly_wo_overlaps.geoms
else:
raise NotImplementedError("Unknown geometric structure {}".format(poly_wo_overlaps))
poly_coord = []
for poly_el in poly_list:
# COCO API can work only with exterior boundaries now, hence we store only them.
# TODO: store both exterior and interior boundaries once other parts of the
# codebase support holes in polygons.
poly_coord.append(list(chain(*poly_el.exterior.coords)))
anno["segmentation"] = poly_coord
(xmin, ymin, xmax, ymax) = poly_wo_overlaps.bounds
anno["bbox"] = (xmin, ymin, xmax, ymax)
anno["bbox_mode"] = BoxMode.XYXY_ABS
annos.append(anno)
else:
# See also the official annotation parsing scripts at
# https://github.com/mcordts/cityscapesScripts/blob/master/cityscapesscripts/evaluation/instances2dict.py # noqa
with PathManager.open(instance_id_file, "rb") as f:
inst_image = np.asarray(Image.open(f), order="F")
# ids < 24 are stuff labels (filtering them first is about 5% faster)
flattened_ids = np.unique(inst_image[inst_image >= 24])
ret = {
"file_name": image_file,
"image_id": os.path.basename(image_file),
"height": inst_image.shape[0],
"width": inst_image.shape[1],
}
for instance_id in flattened_ids:
# For non-crowd annotations, instance_id // 1000 is the label_id
# Crowd annotations have <1000 instance ids
label_id = instance_id // 1000 if instance_id >= 1000 else instance_id
label = id2label[label_id]
if not label.hasInstances or label.ignoreInEval:
continue
anno = {}
anno["iscrowd"] = instance_id < 1000
anno["category_id"] = label.id
mask = np.asarray(inst_image == instance_id, dtype=np.uint8, order="F")
inds = np.nonzero(mask)
ymin, ymax = inds[0].min(), inds[0].max()
xmin, xmax = inds[1].min(), inds[1].max()
anno["bbox"] = (xmin, ymin, xmax, ymax)
if xmax <= xmin or ymax <= ymin:
continue
anno["bbox_mode"] = BoxMode.XYXY_ABS
if to_polygons:
# This conversion comes from D4809743 and D5171122,
# when Mask-RCNN was first developed.
contours = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)[
-2
]
polygons = [c.reshape(-1).tolist() for c in contours if len(c) >= 3]
                # opencv can produce invalid polygons
if len(polygons) == 0:
continue
anno["segmentation"] = polygons
else:
anno["segmentation"] = mask_util.encode(mask[:, :, None])[0]
annos.append(anno)
ret["annotations"] = annos
return ret
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import random
from PIL import ImageFilter
class GaussianBlur:
"""
Gaussian blur augmentation in SimCLR https://arxiv.org/abs/2002.05709
Adapted from MoCo:
https://github.com/facebookresearch/moco/blob/master/moco/loader.py
Note that this implementation does not seem to be exactly the same as
described in SimCLR.
"""
def __init__(self, sigma=[0.1, 2.0]):
self.sigma = sigma
def __call__(self, x):
sigma = random.uniform(self.sigma[0], self.sigma[1])
x = x.filter(ImageFilter.GaussianBlur(radius=sigma))
return x
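# --- Illustrative usage sketch (added; not part of the original pipeline) ----
# Minimal example of plugging GaussianBlur into a torchvision pipeline, mirroring
# how build_strong_augmentation uses it; the probability and sigma range are the
# same values used elsewhere in this repo, not additional requirements.
def _example_blur_pipeline():
    import torchvision.transforms as transforms
    return transforms.Compose(
        [transforms.RandomApply([GaussianBlur([0.1, 2.0])], p=0.5)]
    )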
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
from detectron2.engine.hooks import HookBase
import detectron2.utils.comm as comm
import torch
import numpy as np
from contextlib import contextmanager
class LossEvalHook(HookBase):
def __init__(self, eval_period, model, data_loader, model_output, model_name=""):
self._model = model
self._period = eval_period
self._data_loader = data_loader
self._model_output = model_output
self._model_name = model_name
def _do_loss_eval(self):
record_acc_dict = {}
with inference_context(self._model), torch.no_grad():
for _, inputs in enumerate(self._data_loader):
record_dict = self._get_loss(inputs, self._model)
# accumulate the losses
for loss_type in record_dict.keys():
if loss_type not in record_acc_dict.keys():
record_acc_dict[loss_type] = record_dict[loss_type]
else:
record_acc_dict[loss_type] += record_dict[loss_type]
# average
for loss_type in record_acc_dict.keys():
record_acc_dict[loss_type] = record_acc_dict[loss_type] / len(
self._data_loader
)
            # separate losses from other metrics
loss_acc_dict = {}
for key in record_acc_dict.keys():
if key[:4] == "loss":
loss_acc_dict[key] = record_acc_dict[key]
        # only log results on the main process
if comm.is_main_process():
total_losses_reduced = sum(loss for loss in loss_acc_dict.values())
self.trainer.storage.put_scalar(
"val_total_loss_val" + self._model_name, total_losses_reduced
)
record_acc_dict = {
"val_" + k + self._model_name: record_acc_dict[k]
for k in record_acc_dict.keys()
}
if len(record_acc_dict) > 1:
self.trainer.storage.put_scalars(**record_acc_dict)
def _get_loss(self, data, model):
if self._model_output == "loss_only":
record_dict = model(data)
elif self._model_output == "loss_proposal":
record_dict, _, _, _ = model(data, branch="val_loss", val_mode=True)
elif self._model_output == "meanteacher":
            record_dict, _, _, _, _ = model(data)
        else:
            raise ValueError("Unknown model output type: {}".format(self._model_output))
metrics_dict = {
k: v.detach().cpu().item() if isinstance(v, torch.Tensor) else float(v)
for k, v in record_dict.items()
}
return metrics_dict
def _write_losses(self, metrics_dict):
# gather metrics among all workers for logging
# This assumes we do DDP-style training, which is currently the only
# supported method in detectron2.
comm.synchronize()
all_metrics_dict = comm.gather(metrics_dict, dst=0)
if comm.is_main_process():
            # average the remaining metrics
metrics_dict = {
"val_" + k: np.mean([x[k] for x in all_metrics_dict])
for k in all_metrics_dict[0].keys()
}
total_losses_reduced = sum(loss for loss in metrics_dict.values())
self.trainer.storage.put_scalar("val_total_loss_val", total_losses_reduced)
if len(metrics_dict) > 1:
self.trainer.storage.put_scalars(**metrics_dict)
def _detect_anomaly(self, losses, loss_dict):
if not torch.isfinite(losses).all():
raise FloatingPointError(
"Loss became infinite or NaN at iteration={}!\nloss_dict = {}".format(
self.trainer.iter, loss_dict
)
)
def after_step(self):
next_iter = self.trainer.iter + 1
is_final = next_iter == self.trainer.max_iter
if is_final or (self._period > 0 and next_iter % self._period == 0):
self._do_loss_eval()
@contextmanager
def inference_context(model):
"""
A context where the model is temporarily changed to eval mode,
and restored to previous mode afterwards.
Args:
model: a torch Module
"""
training_mode = model.training
model.eval()
yield
model.train(training_mode)
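# --- Illustrative usage sketch (added; not part of the original pipeline) ----
# Minimal example of inference_context: the model is put in eval mode only for
# the duration of the block and then restored; `model` and `data_loader` are
# assumptions supplied by the caller.
def _example_forward_in_eval_mode(model, data_loader):
    results = []
    with inference_context(model), torch.no_grad():
        for inputs in data_loader:
            results.append(model(inputs))
    return results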
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import torch
from detectron2.structures import pairwise_iou
class OpenMatchTrainerProbe:
def __init__(self, cfg):
self.BOX_AP = 0.5
self.NUM_CLASSES = cfg.MODEL.ROI_HEADS.NUM_CLASSES
# self.bbox_stat_list = ['compute_fp_gtoutlier', 'compute_num_box', 'compute_ood_acc']
def bbox_stat(self, unlabel_gt, unlabel_pseudo, name, bbox_stat_list):
stats = {}
sum_gpu_names = []
for metric in bbox_stat_list:
stats_per, sum_gpu_names_per = getattr(
self, metric)(unlabel_gt, unlabel_pseudo, name)
stats.update(stats_per)
sum_gpu_names.extend(sum_gpu_names_per)
return stats, sum_gpu_names
def compute_fp_gtoutlier(self, unlabel_gt, unlabel_pseudo, name):
num_gt_ood_object = 0
num_gt_fp_ood_object = 0
sum_iou = 0.0
sum_gpu_names = []
results = {}
if len(unlabel_gt) != 0:
for gt, pseudo in zip(unlabel_gt, unlabel_pseudo):
# import pdb; pdb. set_trace()
if name == "pred":
pp_boxes = pseudo.pred_boxes
elif name == "pseudo_conf" or name == "pseudo_ood":
# filter predicted ood box when evaluating this metric
pseudo = pseudo[pseudo.gt_classes != -1]
pp_boxes = pseudo.gt_boxes
else:
raise ValueError("Unknown name for probe roi bbox.")
if len(gt) != 0 and len(pseudo) != 0:
max_iou, max_idx = pairwise_iou(
gt.gt_boxes.to('cuda'), pp_boxes).max(1)
ood_idx = (gt.gt_classes == -1)
num_gt_ood_object += ood_idx.sum().item()
num_gt_fp_ood_object += (max_iou[ood_idx]
> self.BOX_AP).sum().item()
sum_iou += max_iou[ood_idx].sum().item()
elif len(gt) != 0 and len(pseudo) == 0:
ood_idx = (gt.gt_classes == -1)
num_gt_ood_object += ood_idx.shape[0]
results = {'Analysis_'+name+'/num_gt_ood_object': num_gt_ood_object,
'Analysis_'+name+'/num_gt_fp_ood_object': num_gt_fp_ood_object,
'Analysis_'+name+'/sum_iou': sum_iou}
sum_gpu_names.extend(list(results.keys()))
return results, sum_gpu_names
def compute_num_box(self, unlabel_gt, unlabel_pseudo, name, processed=False):
num_bbox = 0.0
size_bbox = 0.0
avg_conf = 0.0
        # count in-distribution and out-of-distribution boxes for open-set SS-OD
num_bbox_in = 0.0
num_bbox_out = 0.0
num_bg = 0.0
# when ground-truth is missing in unlabeled data
if len(unlabel_gt) == 0:
for pp_roi in unlabel_pseudo:
if name == "pred":
pp_boxes = pp_roi.pred_boxes
pp_classes = pp_roi.pred_classes
pp_scores = pp_roi.scores
elif name == "pseudo_conf" or name == "pseudo_ood":
pp_boxes = pp_roi.gt_boxes
pp_classes = pp_roi.gt_classes
pp_scores = pp_roi.scores
elif name == "gt":
pp_boxes = pp_roi.gt_boxes
pp_classes = pp_roi.gt_classes
else:
raise ValueError("Unknown name for probe roi bbox.")
# all boxes (in + out boxes)
if len(pp_roi) != 0:
# bbox number and size
num_bbox += len(pp_roi)
size_bbox += pp_boxes.area().mean().item()
# average box confidence
if name != "gt":
avg_conf += pp_scores.mean()
else:
num_bbox += 0
size_bbox += torch.tensor(0).cuda()
num_valid_img = len(unlabel_pseudo)
else:
# with ground-truth
num_valid_img = 0
for gt, pp_roi in zip(unlabel_gt, unlabel_pseudo):
if name == "pred":
pp_boxes = pp_roi.pred_boxes
pp_classes = pp_roi.pred_classes
pp_scores = pp_roi.scores
elif name == "pseudo_conf" or name == "pseudo_ood":
# filter out ood pseudo-box when doing analysis
pp_roi = pp_roi[pp_roi.gt_classes != -1]
pp_boxes = pp_roi.gt_boxes
pp_classes = pp_roi.gt_classes
pp_scores = pp_roi.scores
elif name == "gt":
pp_boxes = pp_roi.gt_boxes
pp_classes = pp_roi.gt_classes
else:
raise ValueError("Unknown name for probe roi bbox.")
# all boxes (in + out boxes)
if len(pp_roi) != 0:
# bbox number and size
num_bbox += len(pp_roi)
size_bbox += pp_boxes.area().mean().item()
# average box confidence
if name != "gt":
avg_conf += pp_scores.mean()
else:
num_bbox += 0
size_bbox += torch.tensor(0).cuda()
# in and out class
if name == "gt":
pp_roi_in = pp_roi[pp_classes != -1]
num_bbox_in += len(pp_roi_in)
pp_roi_out = pp_roi[pp_classes == -1]
num_bbox_out += len(pp_roi_out)
num_valid_img += 1
elif name == "pred" or name == "pseudo_conf" or name == "pseudo_ood":
if len(gt.gt_boxes.to('cuda'))>0 and len(pp_boxes) > 0:
max_iou, max_idx = pairwise_iou(gt.gt_boxes.to('cuda'), pp_boxes).max(0)
                        # the ground-truth label for each pseudo-box
gtclass4pseudo = gt.gt_classes[max_idx]
matchgtbox = max_iou > 0.5
# compute the number of boxes (background, inlier, outlier)
num_bg += (~matchgtbox).sum().item()
num_bbox_in += (gtclass4pseudo[matchgtbox]
!= -1).sum().item()
num_bbox_out += (gtclass4pseudo[matchgtbox]
== -1).sum().item()
num_valid_img += 1
else:
raise ValueError("Unknown name for probe roi bbox.")
box_probe = {}
        if processed:
            name = name + "processed"
if num_bbox == 0:
return box_probe, []
        if num_valid_img > 0:
box_probe["Analysis_" + name + "/Num_bbox"] = num_bbox / \
num_valid_img
box_probe["Analysis_" + name + "/Size_bbox"] = size_bbox / \
num_valid_img
box_probe["Analysis_" + name +
"/Num_bbox_inlier"] = num_bbox_in / num_valid_img
box_probe["Analysis_" + name +
"/Num_bbox_outlier"] = num_bbox_out / num_valid_img
if name != "gt": # prediciton, background number
box_probe["Analysis_" + name + "/Conf"] = avg_conf / \
num_valid_img
box_probe["Analysis_" + name +
"/Num_bbox_background"] = num_bg / num_valid_img
box_probe["Analysis_" + name +
"/background_fp_ratio"] = num_bg / num_bbox
box_probe["Analysis_" + name +
"/background_tp_ratio"] = num_bbox_in / num_bbox
else:
box_probe["Analysis_" + name + "/Num_bbox"] = 0.0
box_probe["Analysis_" + name + "/Size_bbox"] = 0.0
box_probe["Analysis_" + name +
"/Num_bbox_inlier"] = 0.0
box_probe["Analysis_" + name +
"/Num_bbox_outlier"] = 0.0
if name != "gt": # prediciton, background number
box_probe["Analysis_" + name + "/Conf"] = 0.0
box_probe["Analysis_" + name +
"/Num_bbox_background"] = 0.0
box_probe["Analysis_" + name +
"/background_fp_ratio"] = num_bg / num_bbox
box_probe["Analysis_" + name +
"/background_tp_ratio"] = num_bbox_in / num_bbox
return box_probe, []
def compute_ood_acc(self, unlabel_gt, unlabel_pseudo, name, BOX_IOU=0.5):
results = {}
sum_gpu_names = []
if len(unlabel_gt) != 0:
for metric in ['acc_outlier', 'recall_outlier']:
for samples in ['_fg', '_all']:
for fraction_part in ['_nume', '_deno']:
results[metric+samples+fraction_part] = 0.0
for gt, pred in zip(unlabel_gt, unlabel_pseudo):
if name == "pred":
pp_boxes = pred.pred_boxes
pp_ood_scores = pred.ood_scores
elif name == "pseudo_conf" or name == "pseudo_ood":
                    # assume these outliers are suppressed
pred = pred[pred.gt_classes != -1]
pp_boxes = pred.gt_boxes
pp_ood_scores = pred.ood_scores
else:
raise ValueError("Unknown name for probe roi bbox.")
if len(gt) != 0 and len(pred) != 0:
# find the most overlapped ground-truth box for each pseudo-box
max_iou, max_idx = pairwise_iou(
gt.gt_boxes.to('cuda'), pp_boxes).max(0)
# ignore background instances
find_fg_mask = max_iou > BOX_IOU
if find_fg_mask.sum() > 0:
gt_corres = gt[max_idx].gt_classes.to("cuda")
gt_outlier = (gt_corres[find_fg_mask] == -1)
pred_outlier = pp_ood_scores[find_fg_mask][:, 0] > 0.5
                        # accuracy of ood detection (foreground)
# acc_outlier_fg = (pred_outlier == gt_outlier).sum() /find_fg_mask.sum()
results['acc_outlier_fg_nume'] += (
pred_outlier == gt_outlier).sum()
results['acc_outlier_fg_deno'] += find_fg_mask.sum()
# recall of ood detection (foreground)
# recall_outlier_fg = (pred_outlier[gt_outlier] == gt_outlier[gt_outlier]).sum() /gt_outlier.sum()
results['recall_outlier_fg_nume'] += (
pred_outlier[gt_outlier] == gt_outlier[gt_outlier]).sum()
results['recall_outlier_fg_deno'] += gt_outlier.sum()
                    # Regard background gt as outlier
gt_corres = gt[max_idx].gt_classes.to("cuda")
# convert all background gt as outlier
gt_corres[~find_fg_mask] = -1
gt_outlier = gt_corres == -1
pred_outlier = pp_ood_scores[:, 0] > 0.5
                    # accuracy of ood detection (all)
# acc_outlier_all = (pred_outlier == gt_outlier).sum() /len(pred)
results['acc_outlier_all_nume'] += (
pred_outlier == gt_outlier).sum()
results['acc_outlier_all_deno'] += len(pred)
# recall of ood detection (all)
# recall_outlier_all = (pred_outlier[gt_outlier] == gt_outlier[gt_outlier]).sum() /gt_outlier.sum()
results['recall_outlier_all_nume'] += (
pred_outlier[gt_outlier] == gt_outlier[gt_outlier]).sum()
results['recall_outlier_all_deno'] += gt_outlier.sum()
results = {'Analysis_'+name+'/'+k: v for k, v in results.items()}
sum_gpu_names.extend(list(results.keys()))
return results, sum_gpu_names
def probe(
cfg,
proposals_roih_unsup_k,
unlabel_data_k,
pesudo_proposals_roih_unsup_k,
record_dict,
):
"""
Probe for research development
"""
# [probe] roi result from weak branch (before pseudo-labeling)
record_roih = probe_roih_bbox(
proposals_roih_unsup_k, cfg.MODEL.ROI_HEADS.NUM_CLASSES, "roih"
)
record_dict.update(record_roih)
# [probe] roi result after pseudo-labeling from weak branch
record_roih_pseudo = probe_roih_bbox(
pesudo_proposals_roih_unsup_k, cfg.MODEL.ROI_HEADS.NUM_CLASSES, "roih_pseudo"
)
record_dict.update(record_roih_pseudo)
return record_dict
def probe_roih_bbox(proposals_roih, num_cls, name=""):
num_bbox = 0.0
size_bbox = 0.0
avg_conf = 0.0
pred_cls_list = []
for pp_roi in proposals_roih:
if name == "roih":
pp_boxes = pp_roi.pred_boxes
pp_classes = pp_roi.pred_classes
pp_scores = pp_roi.scores
elif name == "roih_pseudo":
pp_boxes = pp_roi.gt_boxes
pp_classes = pp_roi.gt_classes
pp_scores = pp_roi.scores
elif name == "gt":
pp_boxes = pp_roi.gt_boxes
pp_classes = pp_roi.gt_classes
else:
raise ValueError(f"Unknown name for probe roi bbox '{name}'")
device = pp_classes.device
if pp_roi:
# bbox number and size
num_bbox += len(pp_roi)
size_bbox += pp_boxes.area().mean()
if name != "gt":
avg_conf += pp_scores.mean()
# ratio of majority class
all_idx, cls_count = torch.unique(pp_classes, return_counts=True)
major_cls_idx = all_idx[torch.argmax(cls_count)]
major_cls_ratio = torch.max(cls_count).float() / pp_classes.numel()
# cls_sum
pred_cls_list.append(pp_classes)
else:
num_bbox += 0
size_bbox += torch.tensor(0).to(device)
major_cls_idx = torch.tensor(0).to(device)
major_cls_ratio = torch.tensor(0).to(device)
# boxes monitor
box_probe = {}
box_probe["bbox_probe_" + name + "/Num_bbox"] = num_bbox / len(proposals_roih)
box_probe["bbox_probe_" + name + "/Size_bbox"] = size_bbox.item() / len(
proposals_roih
)
if name != "gt":
box_probe["bbox_probe_" + name + "/Conf"] = avg_conf / len(proposals_roih)
box_probe["bbox_probe_" + name + "/Ratio_major_cls_idx"] = major_cls_idx.item()
box_probe["bbox_probe_" + name + "/Ratio_major_cls"] = major_cls_ratio.item()
return box_probe |
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import os
import time
import logging
import torch
from torch.nn.parallel import DistributedDataParallel
from fvcore.nn.precise_bn import get_bn_modules
import numpy as np
from collections import OrderedDict
import detectron2.utils.comm as comm
from detectron2.checkpoint import DetectionCheckpointer
from detectron2.engine import DefaultTrainer, SimpleTrainer, TrainerBase
from detectron2.engine.train_loop import AMPTrainer
from detectron2.utils.events import EventStorage
from detectron2.evaluation import verify_results, DatasetEvaluators
# from detectron2.evaluation import COCOEvaluator, verify_results, DatasetEvaluators
from detectron2.data.dataset_mapper import DatasetMapper
from detectron2.engine import hooks
from detectron2.structures.boxes import Boxes
from detectron2.structures.instances import Instances
from detectron2.utils.env import TORCH_VERSION
from detectron2.data import MetadataCatalog
from adapteacher.data.build import (
build_detection_semisup_train_loader,
build_detection_test_loader,
build_detection_semisup_train_loader_two_crops,
)
from adapteacher.data.dataset_mapper import DatasetMapperTwoCropSeparate
from adapteacher.engine.hooks import LossEvalHook
from adapteacher.modeling.meta_arch.ts_ensemble import EnsembleTSModel
from adapteacher.checkpoint.detection_checkpoint import DetectionTSCheckpointer
from adapteacher.solver.build import build_lr_scheduler
from adapteacher.evaluation import PascalVOCDetectionEvaluator, COCOEvaluator
from .probe import OpenMatchTrainerProbe
import copy
# Supervised-only Trainer
class BaselineTrainer(DefaultTrainer):
def __init__(self, cfg):
"""
Args:
cfg (CfgNode):
Use the custom checkpointer, which loads other backbone models
with matching heuristics.
"""
cfg = DefaultTrainer.auto_scale_workers(cfg, comm.get_world_size())
model = self.build_model(cfg)
optimizer = self.build_optimizer(cfg, model)
data_loader = self.build_train_loader(cfg)
if comm.get_world_size() > 1:
model = DistributedDataParallel(
model, device_ids=[comm.get_local_rank()], broadcast_buffers=False
)
TrainerBase.__init__(self)
self._trainer = (AMPTrainer if cfg.SOLVER.AMP.ENABLED else SimpleTrainer)(
model, data_loader, optimizer
)
self.scheduler = self.build_lr_scheduler(cfg, optimizer)
self.checkpointer = DetectionCheckpointer(
model,
cfg.OUTPUT_DIR,
optimizer=optimizer,
scheduler=self.scheduler,
)
self.start_iter = 0
self.max_iter = cfg.SOLVER.MAX_ITER
self.cfg = cfg
self.register_hooks(self.build_hooks())
def resume_or_load(self, resume=True):
"""
If `resume==True` and `cfg.OUTPUT_DIR` contains the last checkpoint (defined by
a `last_checkpoint` file), resume from the file. Resuming means loading all
        available states (e.g. optimizer and scheduler) and updating the iteration counter
        from the checkpoint. ``cfg.MODEL.WEIGHTS`` will not be used.
        Otherwise, this is considered as independent training. The method will load model
weights from the file `cfg.MODEL.WEIGHTS` (but will not load other states) and start
from iteration 0.
Args:
resume (bool): whether to do resume or not
"""
checkpoint = self.checkpointer.resume_or_load(
self.cfg.MODEL.WEIGHTS, resume=resume
)
if resume and self.checkpointer.has_checkpoint():
self.start_iter = checkpoint.get("iteration", -1) + 1
# The checkpoint stores the training iteration that just finished, thus we start
# at the next iteration (or iter zero if there's no checkpoint).
if isinstance(self.model, DistributedDataParallel):
# broadcast loaded data/model from the first rank, because other
# machines may not have access to the checkpoint file
if TORCH_VERSION >= (1, 7):
self.model._sync_params_and_buffers()
self.start_iter = comm.all_gather(self.start_iter)[0]
def train_loop(self, start_iter: int, max_iter: int):
"""
Args:
start_iter, max_iter (int): See docs above
"""
logger = logging.getLogger(__name__)
logger.info("Starting training from iteration {}".format(start_iter))
self.iter = self.start_iter = start_iter
self.max_iter = max_iter
with EventStorage(start_iter) as self.storage:
try:
self.before_train()
for self.iter in range(start_iter, max_iter):
self.before_step()
self.run_step()
self.after_step()
except Exception:
logger.exception("Exception during training:")
raise
finally:
self.after_train()
def run_step(self):
self._trainer.iter = self.iter
assert self.model.training, "[SimpleTrainer] model was changed to eval mode!"
start = time.perf_counter()
data = next(self._trainer._data_loader_iter)
data_time = time.perf_counter() - start
record_dict, _, _, _ = self.model(data, branch="supervised")
num_gt_bbox = 0.0
for element in data:
num_gt_bbox += len(element["instances"])
num_gt_bbox = num_gt_bbox / len(data)
record_dict["bbox_num/gt_bboxes"] = num_gt_bbox
loss_dict = {}
for key in record_dict.keys():
if key[:4] == "loss" and key[-3:] != "val":
loss_dict[key] = record_dict[key]
losses = sum(loss_dict.values())
metrics_dict = record_dict
metrics_dict["data_time"] = data_time
self._write_metrics(metrics_dict)
self.optimizer.zero_grad()
losses.backward()
self.optimizer.step()
@classmethod
def build_evaluator(cls, cfg, dataset_name, output_folder=None):
if output_folder is None:
output_folder = os.path.join(cfg.OUTPUT_DIR, "inference")
evaluator_list = []
evaluator_type = MetadataCatalog.get(dataset_name).evaluator_type
if evaluator_type == "coco":
evaluator_list.append(COCOEvaluator(
dataset_name, output_dir=output_folder))
elif evaluator_type == "pascal_voc":
return PascalVOCDetectionEvaluator(dataset_name)
elif evaluator_type == "pascal_voc_water":
return PascalVOCDetectionEvaluator(dataset_name, target_classnames=["bicycle", "bird", "car", "cat", "dog", "person"])
if len(evaluator_list) == 0:
raise NotImplementedError(
"no Evaluator for the dataset {} with the type {}".format(
dataset_name, evaluator_type
)
)
elif len(evaluator_list) == 1:
return evaluator_list[0]
return DatasetEvaluators(evaluator_list)
@classmethod
def build_train_loader(cls, cfg):
return build_detection_semisup_train_loader(cfg, mapper=None)
@classmethod
def build_test_loader(cls, cfg, dataset_name):
"""
Returns:
iterable
"""
return build_detection_test_loader(cfg, dataset_name)
def build_hooks(self):
"""
Build a list of default hooks, including timing, evaluation,
checkpointing, lr scheduling, precise BN, writing events.
Returns:
list[HookBase]:
"""
cfg = self.cfg.clone()
cfg.defrost()
cfg.DATALOADER.NUM_WORKERS = 0
ret = [
hooks.IterationTimer(),
hooks.LRScheduler(self.optimizer, self.scheduler),
hooks.PreciseBN(
cfg.TEST.EVAL_PERIOD,
self.model,
self.build_train_loader(cfg),
cfg.TEST.PRECISE_BN.NUM_ITER,
)
if cfg.TEST.PRECISE_BN.ENABLED and get_bn_modules(self.model)
else None,
]
if comm.is_main_process():
ret.append(
hooks.PeriodicCheckpointer(
self.checkpointer, cfg.SOLVER.CHECKPOINT_PERIOD
)
)
def test_and_save_results():
self._last_eval_results = self.test(self.cfg, self.model)
return self._last_eval_results
ret.append(hooks.EvalHook(cfg.TEST.EVAL_PERIOD, test_and_save_results))
if comm.is_main_process():
ret.append(hooks.PeriodicWriter(self.build_writers(), period=20))
return ret
def _write_metrics(self, metrics_dict: dict):
"""
Args:
metrics_dict (dict): dict of scalar metrics
"""
metrics_dict = {
k: v.detach().cpu().item() if isinstance(v, torch.Tensor) else float(v)
for k, v in metrics_dict.items()
}
# gather metrics among all workers for logging
# This assumes we do DDP-style training, which is currently the only
# supported method in detectron2.
all_metrics_dict = comm.gather(metrics_dict)
if comm.is_main_process():
if "data_time" in all_metrics_dict[0]:
data_time = np.max([x.pop("data_time")
for x in all_metrics_dict])
self.storage.put_scalar("data_time", data_time)
metrics_dict = {
k: np.mean([x[k] for x in all_metrics_dict])
for k in all_metrics_dict[0].keys()
}
loss_dict = {}
for key in metrics_dict.keys():
if key[:4] == "loss":
loss_dict[key] = metrics_dict[key]
total_losses_reduced = sum(loss for loss in loss_dict.values())
self.storage.put_scalar("total_loss", total_losses_reduced)
if len(metrics_dict) > 1:
self.storage.put_scalars(**metrics_dict)
# Adaptive Teacher Trainer
class ATeacherTrainer(DefaultTrainer):
def __init__(self, cfg):
"""
Args:
cfg (CfgNode):
Use the custom checkpointer, which loads other backbone models
with matching heuristics.
"""
cfg = DefaultTrainer.auto_scale_workers(cfg, comm.get_world_size())
data_loader = self.build_train_loader(cfg)
        # create a student model
model = self.build_model(cfg)
optimizer = self.build_optimizer(cfg, model)
        # create a teacher model
model_teacher = self.build_model(cfg)
self.model_teacher = model_teacher
# For training, wrap with DDP. But don't need this for inference.
if comm.get_world_size() > 1:
model = DistributedDataParallel(
model, device_ids=[comm.get_local_rank()], broadcast_buffers=False
)
TrainerBase.__init__(self)
self._trainer = (AMPTrainer if cfg.SOLVER.AMP.ENABLED else SimpleTrainer)(
model, data_loader, optimizer
)
self.scheduler = self.build_lr_scheduler(cfg, optimizer)
# Ensemble teacher and student model is for model saving and loading
ensem_ts_model = EnsembleTSModel(model_teacher, model)
self.checkpointer = DetectionTSCheckpointer(
ensem_ts_model,
cfg.OUTPUT_DIR,
optimizer=optimizer,
scheduler=self.scheduler,
)
self.start_iter = 0
self.max_iter = cfg.SOLVER.MAX_ITER
self.cfg = cfg
self.probe = OpenMatchTrainerProbe(cfg)
self.register_hooks(self.build_hooks())
def resume_or_load(self, resume=True):
"""
If `resume==True` and `cfg.OUTPUT_DIR` contains the last checkpoint (defined by
a `last_checkpoint` file), resume from the file. Resuming means loading all
        available states (e.g. optimizer and scheduler) and updating the iteration counter
        from the checkpoint. ``cfg.MODEL.WEIGHTS`` will not be used.
        Otherwise, this is considered as independent training. The method will load model
weights from the file `cfg.MODEL.WEIGHTS` (but will not load other states) and start
from iteration 0.
Args:
resume (bool): whether to do resume or not
"""
checkpoint = self.checkpointer.resume_or_load(
self.cfg.MODEL.WEIGHTS, resume=resume
)
if resume and self.checkpointer.has_checkpoint():
self.start_iter = checkpoint.get("iteration", -1) + 1
# The checkpoint stores the training iteration that just finished, thus we start
# at the next iteration (or iter zero if there's no checkpoint).
if isinstance(self.model, DistributedDataParallel):
# broadcast loaded data/model from the first rank, because other
# machines may not have access to the checkpoint file
if TORCH_VERSION >= (1, 7):
self.model._sync_params_and_buffers()
self.start_iter = comm.all_gather(self.start_iter)[0]
@classmethod
def build_evaluator(cls, cfg, dataset_name, output_folder=None):
if output_folder is None:
output_folder = os.path.join(cfg.OUTPUT_DIR, "inference")
evaluator_list = []
evaluator_type = MetadataCatalog.get(dataset_name).evaluator_type
if evaluator_type == "coco":
evaluator_list.append(COCOEvaluator(
dataset_name, output_dir=output_folder))
elif evaluator_type == "pascal_voc":
return PascalVOCDetectionEvaluator(dataset_name)
elif evaluator_type == "pascal_voc_water":
return PascalVOCDetectionEvaluator(dataset_name, target_classnames=["bicycle", "bird", "car", "cat", "dog", "person"])
if len(evaluator_list) == 0:
raise NotImplementedError(
"no Evaluator for the dataset {} with the type {}".format(
dataset_name, evaluator_type
)
)
elif len(evaluator_list) == 1:
return evaluator_list[0]
return DatasetEvaluators(evaluator_list)
@classmethod
def build_train_loader(cls, cfg):
mapper = DatasetMapperTwoCropSeparate(cfg, True)
return build_detection_semisup_train_loader_two_crops(cfg, mapper)
@classmethod
def build_lr_scheduler(cls, cfg, optimizer):
return build_lr_scheduler(cfg, optimizer)
def train(self):
self.train_loop(self.start_iter, self.max_iter)
if hasattr(self, "_last_eval_results") and comm.is_main_process():
verify_results(self.cfg, self._last_eval_results)
return self._last_eval_results
def train_loop(self, start_iter: int, max_iter: int):
logger = logging.getLogger(__name__)
logger.info("Starting training from iteration {}".format(start_iter))
self.iter = self.start_iter = start_iter
self.max_iter = max_iter
with EventStorage(start_iter) as self.storage:
try:
self.before_train()
for self.iter in range(start_iter, max_iter):
self.before_step()
self.run_step_full_semisup()
self.after_step()
except Exception:
logger.exception("Exception during training:")
raise
finally:
self.after_train()
# =====================================================
    # ================== Pseudo-labeling ==================
# =====================================================
def threshold_bbox(self, proposal_bbox_inst, thres=0.7, proposal_type="roih"):
if proposal_type == "rpn":
valid_map = proposal_bbox_inst.objectness_logits > thres
# create instances containing boxes and gt_classes
image_shape = proposal_bbox_inst.image_size
new_proposal_inst = Instances(image_shape)
# create box
new_bbox_loc = proposal_bbox_inst.proposal_boxes.tensor[valid_map, :]
new_boxes = Boxes(new_bbox_loc)
# add boxes to instances
new_proposal_inst.gt_boxes = new_boxes
new_proposal_inst.objectness_logits = proposal_bbox_inst.objectness_logits[
valid_map
]
elif proposal_type == "roih":
valid_map = proposal_bbox_inst.scores > thres
# create instances containing boxes and gt_classes
image_shape = proposal_bbox_inst.image_size
new_proposal_inst = Instances(image_shape)
# create box
new_bbox_loc = proposal_bbox_inst.pred_boxes.tensor[valid_map, :]
new_boxes = Boxes(new_bbox_loc)
# add boxes to instances
new_proposal_inst.gt_boxes = new_boxes
new_proposal_inst.gt_classes = proposal_bbox_inst.pred_classes[valid_map]
new_proposal_inst.scores = proposal_bbox_inst.scores[valid_map]
return new_proposal_inst
def process_pseudo_label(
self, proposals_rpn_unsup_k, cur_threshold, proposal_type, psedo_label_method=""
):
list_instances = []
num_proposal_output = 0.0
for proposal_bbox_inst in proposals_rpn_unsup_k:
# thresholding
if psedo_label_method == "thresholding":
proposal_bbox_inst = self.threshold_bbox(
proposal_bbox_inst, thres=cur_threshold, proposal_type=proposal_type
)
else:
raise ValueError("Unkown pseudo label boxes methods")
num_proposal_output += len(proposal_bbox_inst)
list_instances.append(proposal_bbox_inst)
num_proposal_output = num_proposal_output / len(proposals_rpn_unsup_k)
return list_instances, num_proposal_output
def remove_label(self, label_data):
for label_datum in label_data:
if "instances" in label_datum.keys():
del label_datum["instances"]
return label_data
def add_label(self, unlabled_data, label):
for unlabel_datum, lab_inst in zip(unlabled_data, label):
unlabel_datum["instances"] = lab_inst
return unlabled_data
def get_label(self, label_data):
label_list = []
for label_datum in label_data:
if "instances" in label_datum.keys():
label_list.append(copy.deepcopy(label_datum["instances"]))
return label_list
# def get_label_test(self, label_data):
# label_list = []
# for label_datum in label_data:
# if "instances" in label_datum.keys():
# label_list.append(label_datum["instances"])
# =====================================================
# =================== Training Flow ===================
# =====================================================
def run_step_full_semisup(self):
self._trainer.iter = self.iter
assert self.model.training, "[UBTeacherTrainer] model was changed to eval mode!"
start = time.perf_counter()
data = next(self._trainer._data_loader_iter)
# data_q and data_k from different augmentations (q:strong, k:weak)
# label_strong, label_weak, unlabeled_strong, unlabeled_weak
label_data_q, label_data_k, unlabel_data_q, unlabel_data_k = data
data_time = time.perf_counter() - start
# burn-in stage (supervised training with labeled data)
if self.iter < self.cfg.SEMISUPNET.BURN_UP_STEP:
# input both strongly and weakly augmented labeled data into the model
label_data_q.extend(label_data_k)
record_dict, _, _, _ = self.model(
label_data_q, branch="supervised")
# weight losses
loss_dict = {}
for key in record_dict.keys():
if key[:4] == "loss":
loss_dict[key] = record_dict[key] * 1
losses = sum(loss_dict.values())
else:
if self.iter == self.cfg.SEMISUPNET.BURN_UP_STEP:
# end of burn-in: copy the whole student model into the teacher
self._update_teacher_model(keep_rate=0.00)
# self.model.build_discriminator()
elif (
self.iter - self.cfg.SEMISUPNET.BURN_UP_STEP
) % self.cfg.SEMISUPNET.TEACHER_UPDATE_ITER == 0:
self._update_teacher_model(
keep_rate=self.cfg.SEMISUPNET.EMA_KEEP_RATE)
record_dict = {}
######################## For probe #################################
# import pdb; pdb. set_trace()
gt_unlabel_k = self.get_label(unlabel_data_k)
# gt_unlabel_q = self.get_label_test(unlabel_data_q)
# 0. remove unlabeled data labels
unlabel_data_q = self.remove_label(unlabel_data_q)
unlabel_data_k = self.remove_label(unlabel_data_k)
# 1. generate the pseudo-label using teacher model
with torch.no_grad():
(
_,
proposals_rpn_unsup_k,
proposals_roih_unsup_k,
_,
) = self.model_teacher(unlabel_data_k, branch="unsup_data_weak")
######################## For probe #################################
# import pdb; pdb. set_trace()
# probe_metrics = ['compute_fp_gtoutlier', 'compute_num_box']
# probe_metrics = ['compute_num_box']
# analysis_pred, _ = self.probe.compute_num_box(gt_unlabel_k,proposals_roih_unsup_k,'pred')
# record_dict.update(analysis_pred)
######################## For probe END #################################
# 2. Pseudo-labeling
cur_threshold = self.cfg.SEMISUPNET.BBOX_THRESHOLD
joint_proposal_dict = {}
joint_proposal_dict["proposals_rpn"] = proposals_rpn_unsup_k
# Process pseudo-labels by thresholding
(
pesudo_proposals_rpn_unsup_k,
nun_pseudo_bbox_rpn,
) = self.process_pseudo_label(
proposals_rpn_unsup_k, cur_threshold, "rpn", "thresholding"
)
# analysis_pred, _ = self.probe.compute_num_box(gt_unlabel_k,pesudo_proposals_rpn_unsup_k,'pred',True)
# record_dict.update(analysis_pred)
joint_proposal_dict["proposals_pseudo_rpn"] = pesudo_proposals_rpn_unsup_k
# Pseudo_labeling for ROI head (bbox location/objectness)
pesudo_proposals_roih_unsup_k, _ = self.process_pseudo_label(
proposals_roih_unsup_k, cur_threshold, "roih", "thresholding"
)
joint_proposal_dict["proposals_pseudo_roih"] = pesudo_proposals_roih_unsup_k
# 3. add pseudo-label to unlabeled data
unlabel_data_q = self.add_label(
unlabel_data_q, joint_proposal_dict["proposals_pseudo_roih"]
)
unlabel_data_k = self.add_label(
unlabel_data_k, joint_proposal_dict["proposals_pseudo_roih"]
)
all_label_data = label_data_q + label_data_k
all_unlabel_data = unlabel_data_q
# 4. input both strongly and weakly augmented labeled data into student model
record_all_label_data, _, _, _ = self.model(
all_label_data, branch="supervised"
)
record_dict.update(record_all_label_data)
# 5. input strongly augmented unlabeled data into model
record_all_unlabel_data, _, _, _ = self.model(
all_unlabel_data, branch="supervised_target"
)
new_record_all_unlabel_data = {}
for key in record_all_unlabel_data.keys():
new_record_all_unlabel_data[key + "_pseudo"] = record_all_unlabel_data[
key
]
record_dict.update(new_record_all_unlabel_data)
# 6. input weakly labeled data (source) and weakly unlabeled data (target) to student model
# mark the target-domain data by adding an "_unlabeled" suffix to its keys
for i_index in range(len(unlabel_data_k)):
# unlabel_data_item = {}
for k, v in unlabel_data_k[i_index].items():
# label_data_k[i_index][k + "_unlabeled"] = v
label_data_k[i_index][k + "_unlabeled"] = v
# unlabel_data_k[i_index] = unlabel_data_item
all_domain_data = label_data_k
# all_domain_data = label_data_k + unlabel_data_k
record_all_domain_data, _, _, _ = self.model(all_domain_data, branch="domain")
record_dict.update(record_all_domain_data)
# weight losses
loss_dict = {}
for key in record_dict.keys():
if key.startswith("loss"):
if key == "loss_rpn_loc_pseudo" or key == "loss_box_reg_pseudo":
# pseudo bbox regression <- 0
loss_dict[key] = record_dict[key] * 0
elif key[-6:] == "pseudo": # unsupervised loss
loss_dict[key] = (
record_dict[key] *
self.cfg.SEMISUPNET.UNSUP_LOSS_WEIGHT
)
elif (
key == "loss_D_img_s" or key == "loss_D_img_t"
): # set weight for discriminator
# import pdb
# pdb.set_trace()
loss_dict[key] = record_dict[key] * self.cfg.SEMISUPNET.DIS_LOSS_WEIGHT #Need to modify defaults and yaml
else: # supervised loss
loss_dict[key] = record_dict[key] * 1
losses = sum(loss_dict.values())
metrics_dict = record_dict
metrics_dict["data_time"] = data_time
self._write_metrics(metrics_dict)
self.optimizer.zero_grad()
losses.backward()
self.optimizer.step()
def _write_metrics(self, metrics_dict: dict):
metrics_dict = {
k: v.detach().cpu().item() if isinstance(v, torch.Tensor) else float(v)
for k, v in metrics_dict.items()
}
# gather metrics among all workers for logging
# This assumes we do DDP-style training, which is currently the only
# supported method in detectron2.
all_metrics_dict = comm.gather(metrics_dict)
# all_hg_dict = comm.gather(hg_dict)
if comm.is_main_process():
if "data_time" in all_metrics_dict[0]:
# data_time among workers can have high variance. The actual latency
# caused by data_time is the maximum among workers.
data_time = np.max([x.pop("data_time")
for x in all_metrics_dict])
self.storage.put_scalar("data_time", data_time)
# average the rest metrics
metrics_dict = {
k: np.mean([x[k] for x in all_metrics_dict])
for k in all_metrics_dict[0].keys()
}
# append the list
loss_dict = {}
for key in metrics_dict.keys():
if key[:4] == "loss":
loss_dict[key] = metrics_dict[key]
total_losses_reduced = sum(loss for loss in loss_dict.values())
self.storage.put_scalar("total_loss", total_losses_reduced)
if len(metrics_dict) > 1:
self.storage.put_scalars(**metrics_dict)
@torch.no_grad()
def _update_teacher_model(self, keep_rate=0.9996):
if comm.get_world_size() > 1:
student_model_dict = {
key[7:]: value for key, value in self.model.state_dict().items()
}
else:
student_model_dict = self.model.state_dict()
new_teacher_dict = OrderedDict()
for key, value in self.model_teacher.state_dict().items():
if key in student_model_dict.keys():
new_teacher_dict[key] = (
student_model_dict[key] *
(1 - keep_rate) + value * keep_rate
)
else:
raise Exception("{} is not found in student model".format(key))
self.model_teacher.load_state_dict(new_teacher_dict)
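# Worked example (added for clarity): with the default keep_rate of 0.9996 the
# teacher parameters move very slowly towards the student,
#     teacher_new = 0.9996 * teacher_old + 0.0004 * student,
# i.e. roughly an exponential moving average over the last ~2500 student
# updates. Calling _update_teacher_model(keep_rate=0.00) at the end of burn-in
# instead copies the student weights into the teacher wholesale.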
@torch.no_grad()
def _copy_main_model(self):
# initialize all parameters
if comm.get_world_size() > 1:
rename_model_dict = {
key[7:]: value for key, value in self.model.state_dict().items()
}
self.model_teacher.load_state_dict(rename_model_dict)
else:
self.model_teacher.load_state_dict(self.model.state_dict())
@classmethod
def build_test_loader(cls, cfg, dataset_name):
return build_detection_test_loader(cfg, dataset_name)
def build_hooks(self):
cfg = self.cfg.clone()
cfg.defrost()
cfg.DATALOADER.NUM_WORKERS = 0 # save some memory and time for PreciseBN
ret = [
hooks.IterationTimer(),
hooks.LRScheduler(self.optimizer, self.scheduler),
hooks.PreciseBN(
# Run at the same freq as (but before) evaluation.
cfg.TEST.EVAL_PERIOD,
self.model,
# Build a new data loader to not affect training
self.build_train_loader(cfg),
cfg.TEST.PRECISE_BN.NUM_ITER,
)
if cfg.TEST.PRECISE_BN.ENABLED and get_bn_modules(self.model)
else None,
]
# Do PreciseBN before checkpointer, because it updates the model and needs to
# be saved by checkpointer.
# This is not always the best: if checkpointing has a different frequency,
# some checkpoints may have more precise statistics than others.
if comm.is_main_process():
ret.append(
hooks.PeriodicCheckpointer(
self.checkpointer, cfg.SOLVER.CHECKPOINT_PERIOD
)
)
def test_and_save_results_student():
self._last_eval_results_student = self.test(self.cfg, self.model)
_last_eval_results_student = {
k + "_student": self._last_eval_results_student[k]
for k in self._last_eval_results_student.keys()
}
return _last_eval_results_student
def test_and_save_results_teacher():
self._last_eval_results_teacher = self.test(
self.cfg, self.model_teacher)
return self._last_eval_results_teacher
ret.append(hooks.EvalHook(cfg.TEST.EVAL_PERIOD,
test_and_save_results_student))
ret.append(hooks.EvalHook(cfg.TEST.EVAL_PERIOD,
test_and_save_results_teacher))
if comm.is_main_process():
# run writers in the end, so that evaluation metrics are written
ret.append(hooks.PeriodicWriter(self.build_writers(), period=20))
return ret
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
# from d2go.config import CfgNode as CN
def add_aut_config(cfg):
"""
Add extra config options used by the unbiased-teacher based domain-adaptation runners.
"""
_C = cfg
# Newly added options for the domain discriminator
_C.UNBIASEDTEACHER.DIS_LOSS_WEIGHT = 0.1
_C.UNBIASEDTEACHER.DIS_TYPE = "concate" #["concate","p2","multi"]
_C.UNBIASEDTEACHER.ISAUG = "Yes"
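# Usage sketch (illustrative only; the runner calls add_aut_config for you):
#   cfg = runner.get_default_cfg()       # any runner deriving from UnbiasedTeacherRunner
#   cfg.UNBIASEDTEACHER.DIS_TYPE = "p2"  # discriminate on a single feature map
# DIS_TYPE selects which backbone feature map feeds the image-level domain
# discriminator; "multi" attaches one discriminator per feature map.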
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import logging
import os
from collections import OrderedDict
from functools import lru_cache
import d2go.utils.abnormal_checker as abnormal_checker
import detectron2.utils.comm as comm
from d2go.config import CONFIG_SCALING_METHOD_REGISTRY, temp_defrost
from d2go.data.dataset_mappers import D2GoDatasetMapper, build_dataset_mapper
from d2go.data.transforms.build import build_transform_gen
from d2go.data.utils import maybe_subsample_n_images
from d2go.modeling import build_model, kmeans_anchors, model_ema
from d2go.runner import GeneralizedRCNNRunner
from d2go.utils.flop_calculator import add_print_flops_callback
from d2go.utils.misc import get_tensorboard_log_dir
from d2go.utils.helper import TensorboardXWriter, D2Trainer
from detectron2.checkpoint import PeriodicCheckpointer
from detectron2.engine import hooks
from detectron2.utils.events import CommonMetricPrinter, JSONWriter, TensorboardXWriter
from torch.nn.parallel import DataParallel, DistributedDataParallel
from detectron2.evaluation import (
DatasetEvaluators,
)
from detectron2.data import (
MetadataCatalog,
)
from ..evaluation import (
COCOEvaluator,
PascalVOCDetectionEvaluator,
)
from d2go.projects.unbiased_teacher.checkpoint import EnsembleTSModel
from ..config.defaults import add_aut_config
# from ..config.defaults import add_ut_config
# from ..data.build import (
# build_detection_semisup_train_loader_two_crops,
# build_uru_detection_semisup_train_loader,
# inject_uru_dataset,
# )
from d2go.projects.unbiased_teacher.data.build import (
build_detection_semisup_train_loader_two_crops,
build_uru_detection_semisup_train_loader,
)
from d2go.projects.unbiased_teacher.runner.runner import UnbiasedTeacherRunner
from d2go.projects.unbiased_teacher.data.dataset_mapper import DatasetMapperTwoCropSeparate # noqa
from ..data import builtin # noqa; for registering COCO unlabel dataset
from d2go.projects.unbiased_teacher.engine.trainer import UnbiasedTeacherTrainer
from d2go.projects.unbiased_teacher.modeling.meta_arch.rcnn import TwoStagePseudoLabGeneralizedRCNN # noqa
from d2go.projects.unbiased_teacher.modeling.proposal_generator.rpn import PseudoLabRPN # noqa
from d2go.projects.unbiased_teacher.modeling.roi_heads.roi_heads import StandardROIHeadsPseudoLab # noqa
from d2go.projects.unbiased_teacher.solver.build import ut_build_lr_scheduler
#For DA object detection
from ..engine.trainer import DAobjTrainer
from ..modeling.meta_arch.daobj_rcnn import DAobjTwoStagePseudoLabGeneralizedRCNN # noqa
#For VGG model architecture
from ..modeling.meta_arch.vgg import build_vgg_backbone,build_vgg_fpn_backbone # noqa
ALL_TB_WRITERS = []
@lru_cache()
def _get_tbx_writer(log_dir):
ret = TensorboardXWriter(log_dir)
ALL_TB_WRITERS.append(ret)
return ret
class BaseUnbiasedTeacherRunner(UnbiasedTeacherRunner):
def get_default_cfg(self):
cfg = super().get_default_cfg()
add_aut_config(cfg)
# add_pointrend_config(cfg)
# cfg = CN(cfg) # upgrade from D2's CfgNode to D2Go's CfgNode
return cfg
@staticmethod
def get_evaluator(cfg, dataset_name, output_folder):
evaluator_type = MetadataCatalog.get(dataset_name).evaluator_type
if evaluator_type in ["coco"]:
# D2 is in the process of reducing the use of cfg.
dataset_evaluators = COCOEvaluator(
dataset_name,
output_dir=output_folder,
kpt_oks_sigmas=cfg.TEST.KEYPOINT_OKS_SIGMAS,
)
elif evaluator_type in ["pascal_voc"]:
dataset_evaluators = PascalVOCDetectionEvaluator(dataset_name)
elif evaluator_type in ["pascal_voc_water"]:
dataset_evaluators = PascalVOCDetectionEvaluator(dataset_name, target_classnames=["bicycle", "bird", "car", "cat", "dog", "person"])
else:
dataset_evaluators = D2Trainer.build_evaluator(
cfg, dataset_name, output_folder
)
if not isinstance(dataset_evaluators, DatasetEvaluators):
dataset_evaluators = DatasetEvaluators([dataset_evaluators])
return dataset_evaluators
# class DAobjUnbiasedTeacherRunner(UnbiasedTeacherRunner):
class DAobjUnbiasedTeacherRunner(BaseUnbiasedTeacherRunner):
def get_default_cfg(self):
cfg = super().get_default_cfg()
# add_aut_config(cfg)
# add_pointrend_config(cfg)
# cfg = CN(cfg) # upgrade from D2's CfgNode to D2Go's CfgNode
return cfg
def build_model(self, cfg, eval_only=False):
"""
Build both Student and Teacher models
Student: regular model
Teacher: model that is updated by EMA
"""
# build_model might modify the cfg, thus clone
cfg = cfg.clone()
model = build_model(cfg)
model_teacher = build_model(cfg)
if cfg.MODEL.FROZEN_LAYER_REG_EXP:
raise NotImplementedError()
if cfg.QUANTIZATION.QAT.ENABLED:
raise NotImplementedError()
if eval_only:
raise NotImplementedError()
return EnsembleTSModel(model_teacher, model)
def do_train(self, cfg, model, resume):
# NOTE: d2go's train_net applies DDP layer by default
# we need to strip it away and only put DDP on model_student
if isinstance(model, (DistributedDataParallel, DataParallel)):
model = model.module
model_teacher, model_student = model.model_teacher, model.model_student
if comm.get_world_size() > 1:
model_student = DistributedDataParallel(
model_student,
device_ids=None
if cfg.MODEL.DEVICE == "cpu"
else [comm.get_local_rank()],
broadcast_buffers=False,
find_unused_parameters=cfg.MODEL.DDP_FIND_UNUSED_PARAMETERS,
)
add_print_flops_callback(cfg, model_student, disable_after_callback=True)
optimizer = self.build_optimizer(cfg, model_student)
scheduler = self.build_lr_scheduler(cfg, optimizer)
checkpointer = self.build_checkpointer(
cfg,
model,
save_dir=cfg.OUTPUT_DIR,
optimizer=optimizer,
scheduler=scheduler,
)
checkpoint = checkpointer.resume_or_load(
cfg.MODEL.WEIGHTS, resume=resume or cfg.UNBIASEDTEACHER.RESUME_FROM_ANOTHER
)
start_iter = (
checkpoint.get("iteration", -1)
if resume
and checkpointer.has_checkpoint()
or cfg.UNBIASEDTEACHER.RESUME_FROM_ANOTHER
else -1
)
# The checkpoint stores the training iteration that just finished, thus we start
# at the next iteration (or iter zero if there's no checkpoint).
start_iter += 1
max_iter = cfg.SOLVER.MAX_ITER
periodic_checkpointer = PeriodicCheckpointer(
checkpointer, cfg.SOLVER.CHECKPOINT_PERIOD, max_iter=max_iter
)
# if resuming from a pre-trained checkpoint, we modify BURN_IN_STEP
# so that the weights of the Student are copied to the Teacher
# at the first iteration after training starts
if cfg.UNBIASEDTEACHER.RESUME_FROM_ANOTHER:
cfg.defrost()
cfg.UNBIASEDTEACHER.BURN_IN_STEP = start_iter
cfg.freeze()
data_loader = self.build_detection_train_loader(cfg)
def _get_model_with_abnormal_checker(model):
if not cfg.ABNORMAL_CHECKER.ENABLED:
return model
tbx_writer = _get_tbx_writer(get_tensorboard_log_dir(cfg.OUTPUT_DIR))
writers = abnormal_checker.get_writers(cfg, tbx_writer)
checker = abnormal_checker.AbnormalLossChecker(start_iter, writers)
ret = abnormal_checker.AbnormalLossCheckerWrapper(model, checker)
return ret
trainer = DAobjTrainer(
cfg,
_get_model_with_abnormal_checker(model_student),
_get_model_with_abnormal_checker(model_teacher),
data_loader,
optimizer,
)
trainer_hooks = [
hooks.IterationTimer(),
self._create_after_step_hook(
cfg, model_student, optimizer, scheduler, periodic_checkpointer
),
hooks.EvalHook(
cfg.TEST.EVAL_PERIOD,
lambda: self.do_test(cfg, model, train_iter=trainer.iter),
),
kmeans_anchors.compute_kmeans_anchors_hook(self, cfg),
self._create_qat_hook(cfg) if cfg.QUANTIZATION.QAT.ENABLED else None,
]
if comm.is_main_process():
tbx_writer = _get_tbx_writer(get_tensorboard_log_dir(cfg.OUTPUT_DIR))
writers = [
CommonMetricPrinter(max_iter),
JSONWriter(os.path.join(cfg.OUTPUT_DIR, "metrics.json")),
tbx_writer,
]
trainer_hooks.append(
hooks.PeriodicWriter(writers, period=cfg.WRITER_PERIOD)
)
trainer.register_hooks(trainer_hooks)
trainer.train(start_iter, max_iter)
trained_cfg = cfg.clone()
with temp_defrost(trained_cfg):
trained_cfg.MODEL.WEIGHTS = checkpointer.get_checkpoint_file()
return {"model_final": trained_cfg}
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
# from .runner import SemiSupSegRunner, SemiSupHandTrackingRunner # noqa
from .runner import BaseUnbiasedTeacherRunner # noqa
from .runner import DAobjUnbiasedTeacherRunner # noqa
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import torch.nn as nn
import copy
import torch
from typing import Union, List, Dict, Any, cast
from detectron2.modeling.backbone import (
ResNet,
Backbone,
build_resnet_backbone,
BACKBONE_REGISTRY
)
from detectron2.modeling.backbone.fpn import FPN, LastLevelMaxPool, LastLevelP6P7
def make_layers(cfg: List[Union[str, int]], batch_norm: bool = False) -> nn.Sequential:
layers: List[nn.Module] = []
in_channels = 3
for v in cfg:
if v == 'M':
layers += [nn.MaxPool2d(kernel_size=2, stride=2)]
else:
v = cast(int, v)
conv2d = nn.Conv2d(in_channels, v, kernel_size=3, padding=1)
if batch_norm:
layers += [conv2d, nn.BatchNorm2d(v), nn.ReLU(inplace=True)]
else:
layers += [conv2d, nn.ReLU(inplace=True)]
in_channels = v
return nn.Sequential(*layers)
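# Example (illustrative): make_layers([64, 'M', 128]) expands to
#   Conv2d(3, 64, 3, padding=1) -> ReLU -> MaxPool2d(2, 2)
#   -> Conv2d(64, 128, 3, padding=1) -> ReLU
# i.e. integers are 3x3-conv output widths and 'M' inserts a stride-2 max-pool,
# which is how the VGG configurations below are expanded into nn.Sequential stacks.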
cfgs: Dict[str, List[Union[str, int]]] = {
'vgg11': [64, 'M', 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'],
'vgg13': [64, 64, 'M', 128, 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'],
'vgg16': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 'M', 512, 512, 512, 'M', 512, 512, 512, 'M'],
'vgg19': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 256, 'M', 512, 512, 512, 512, 'M', 512, 512, 512, 512, 'M'],
}
class vgg_backbone(Backbone):
"""
VGG-16 (batch-norm) backbone, split into five sequential stages so it can be
used as the bottom-up network of an FPN.
Output features:
The outputs from each stage, named "vgg0" ... "vgg4", with channel widths
(64, 128, 256, 512, 512) and strides (2, 4, 8, 16, 32).
"""
def __init__(self, cfg):
super().__init__()
self.vgg = make_layers(cfgs['vgg16'],batch_norm=True)
self._initialize_weights()
# self.stage_names_index = {'vgg1':3, 'vgg2':8 , 'vgg3':15, 'vgg4':22, 'vgg5':29}
_out_feature_channels = [64, 128, 256, 512, 512]
_out_feature_strides = [2, 4, 8, 16, 32]
# stages, shape_specs = build_fbnet(
# cfg,
# name="trunk",
# in_channels=cfg.MODEL.FBNET_V2.STEM_IN_CHANNELS
# )
# nn.Sequential(*list(self.vgg.features._modules.values())[:14])
self.stages = [
    nn.Sequential(*list(self.vgg._modules.values())[0:7]),
    nn.Sequential(*list(self.vgg._modules.values())[7:14]),
    nn.Sequential(*list(self.vgg._modules.values())[14:24]),
    nn.Sequential(*list(self.vgg._modules.values())[24:34]),
    nn.Sequential(*list(self.vgg._modules.values())[34:]),
]
self._out_feature_channels = {}
self._out_feature_strides = {}
self._stage_names = []
for i, stage in enumerate(self.stages):
name = "vgg{}".format(i)
self.add_module(name, stage)
self._stage_names.append(name)
self._out_feature_channels[name] = _out_feature_channels[i]
self._out_feature_strides[name] = _out_feature_strides[i]
self._out_features = self._stage_names
del self.vgg
def forward(self, x):
features = {}
for name, stage in zip(self._stage_names, self.stages):
x = stage(x)
# if name in self._out_features:
# outputs[name] = x
features[name] = x
# import pdb
# pdb.set_trace()
return features
def _initialize_weights(self) -> None:
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
if m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.BatchNorm2d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.Linear):
nn.init.normal_(m.weight, 0, 0.01)
nn.init.constant_(m.bias, 0)
@BACKBONE_REGISTRY.register()  # already registered in the baseline model
def build_vgg_backbone(cfg, _):
return vgg_backbone(cfg)
@BACKBONE_REGISTRY.register()  # already registered in the baseline model
def build_vgg_fpn_backbone(cfg, _):
# backbone = FPN(
# bottom_up=build_vgg_backbone(cfg),
# in_features=cfg.MODEL.FPN.IN_FEATURES,
# out_channels=cfg.MODEL.FPN.OUT_CHANNELS,
# norm=cfg.MODEL.FPN.NORM,
# top_block=LastLevelMaxPool(),
# )
bottom_up = vgg_backbone(cfg)
in_features = cfg.MODEL.FPN.IN_FEATURES
out_channels = cfg.MODEL.FPN.OUT_CHANNELS
backbone = FPN(
bottom_up=bottom_up,
in_features=in_features,
out_channels=out_channels,
norm=cfg.MODEL.FPN.NORM,
top_block=LastLevelMaxPool(),
# fuse_type=cfg.MODEL.FPN.FUSE_TYPE,
)
# return backbone
return backbone
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import numpy as np
import torch
import torch.nn as nn
from torch.nn import functional as F
from detectron2.data.detection_utils import convert_image_to_rgb
from detectron2.modeling import META_ARCH_REGISTRY, GeneralizedRCNN
from detectron2.utils.events import get_event_storage
import logging
from typing import Dict, Tuple, List, Optional
from collections import OrderedDict
from detectron2.config import configurable
# from detectron2.modeling.meta_arch.build import META_ARCH_REGISTRY
# from detectron2.modeling.meta_arch.rcnn import GeneralizedRCNN
from detectron2.modeling.proposal_generator import build_proposal_generator
from detectron2.modeling.backbone import build_backbone, Backbone
from detectron2.modeling.roi_heads import build_roi_heads
from detectron2.utils.events import get_event_storage
from detectron2.structures import ImageList
############### Image discriminator ##############
class FCDiscriminator_img(nn.Module):
def __init__(self, num_classes, ndf1=256, ndf2=128):
super(FCDiscriminator_img, self).__init__()
self.conv1 = nn.Conv2d(num_classes, ndf1, kernel_size=3, padding=1)
self.conv2 = nn.Conv2d(ndf1, ndf2, kernel_size=3, padding=1)
self.conv3 = nn.Conv2d(ndf2, ndf2, kernel_size=3, padding=1)
self.classifier = nn.Conv2d(ndf2, 1, kernel_size=3, padding=1)
self.leaky_relu = nn.LeakyReLU(negative_slope=0.2, inplace=True)
def forward(self, x):
x = self.conv1(x)
x = self.leaky_relu(x)
x = self.conv2(x)
x = self.leaky_relu(x)
x = self.conv3(x)
x = self.leaky_relu(x)
x = self.classifier(x)
return x
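# Note (added for clarity): the discriminator preserves the spatial size of its
# input feature map and returns a one-channel logit per location. In the
# "domain" branch of the RCNN below it is trained with binary cross-entropy to
# predict 0 for source-domain features and 1 for target-domain features, while
# the gradient-reversal layer drives the backbone in the opposite direction.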
#################################
################ Gradient reverse function
class GradReverse(torch.autograd.Function):
@staticmethod
def forward(ctx, x):
return x.view_as(x)
@staticmethod
def backward(ctx, grad_output):
return grad_output.neg()
def grad_reverse(x):
return GradReverse.apply(x)
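# Minimal sketch (not used by the model) showing what grad_reverse does: the
# forward pass is the identity, but gradients flowing back through it are
# negated, which is what turns the discriminator loss into an adversarial
# signal for the backbone.
def _grad_reverse_demo():
    x = torch.ones(3, requires_grad=True)
    y = grad_reverse(x).sum()
    y.backward()
    # y equals 3.0 as if grad_reverse were absent, but the gradient is flipped:
    assert torch.allclose(x.grad, -torch.ones(3))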
#######################
@META_ARCH_REGISTRY.register()
class DAobjTwoStagePseudoLabGeneralizedRCNN(GeneralizedRCNN):
@configurable
def __init__(
self,
*,
backbone: Backbone,
proposal_generator: nn.Module,
roi_heads: nn.Module,
pixel_mean: Tuple[float],
pixel_std: Tuple[float],
input_format: Optional[str] = None,
vis_period: int = 0,
dis_type: str,
# dis_loss_weight: float = 0,
):
"""
Args:
backbone: a backbone module, must follow detectron2's backbone interface
proposal_generator: a module that generates proposals using backbone features
roi_heads: a ROI head that performs per-region computation
pixel_mean, pixel_std: list or tuple with #channels element, representing
the per-channel mean and std to be used to normalize the input image
input_format: describe the meaning of channels of input. Needed by visualization
vis_period: the period to run visualization. Set to 0 to disable.
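dis_type: name of the backbone feature map fed to the image-level domain
    discriminator, or "multi" to attach one discriminator per feature map.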
"""
super(GeneralizedRCNN, self).__init__()
self.backbone = backbone
self.proposal_generator = proposal_generator
self.roi_heads = roi_heads
self.input_format = input_format
self.vis_period = vis_period
if vis_period > 0:
assert input_format is not None, "input_format is required for visualization!"
self.register_buffer("pixel_mean", torch.tensor(pixel_mean).view(-1, 1, 1), False)
self.register_buffer("pixel_std", torch.tensor(pixel_std).view(-1, 1, 1), False)
assert (
self.pixel_mean.shape == self.pixel_std.shape
), f"{self.pixel_mean} and {self.pixel_std} have different shapes!"
# @yujheli: you may need to build your discriminator here
self.dis_type = dis_type
# self.D_img = FCDiscriminator_img(self.backbone._out_feature_channels['res4']) # Need to know the channel
if self.dis_type == "multi":
self.D_img_dict = {}
for k,v in self.backbone._out_feature_channels.items():
self.D_img_dict[k] = FCDiscriminator_img(v)
self.add_module("D_"+k, self.D_img_dict[k])
else:
self.D_img = FCDiscriminator_img(self.backbone._out_feature_channels[self.dis_type]) # Need to know the channel
# self.bceLoss_func = nn.BCEWithLogitsLoss()
@classmethod
def from_config(cls, cfg):
backbone = build_backbone(cfg)
return {
"backbone": backbone,
"proposal_generator": build_proposal_generator(cfg, backbone.output_shape()),
"roi_heads": build_roi_heads(cfg, backbone.output_shape()),
"input_format": cfg.INPUT.FORMAT,
"vis_period": cfg.VIS_PERIOD,
"pixel_mean": cfg.MODEL.PIXEL_MEAN,
"pixel_std": cfg.MODEL.PIXEL_STD,
"dis_type": cfg.UNBIASEDTEACHER.DIS_TYPE,
# "dis_loss_ratio": cfg.xxx,
}
def preprocess_image_train(self, batched_inputs: List[Dict[str, torch.Tensor]]):
"""
Normalize, pad and batch the input images.
"""
images = [x["image"].to(self.device) for x in batched_inputs]
images = [(x - self.pixel_mean) / self.pixel_std for x in images]
images = ImageList.from_tensors(images, self.backbone.size_divisibility)
images_t = [x["image_unlabeled"].to(self.device) for x in batched_inputs]
images_t = [(x - self.pixel_mean) / self.pixel_std for x in images_t]
images_t = ImageList.from_tensors(images_t, self.backbone.size_divisibility)
return images, images_t
def forward(
self, batched_inputs, branch="supervised", given_proposals=None, val_mode=False
):
"""
Args:
batched_inputs: a list, batched outputs of :class:`DatasetMapper` .
Each item in the list contains the inputs for one image.
For now, each item in the list is a dict that contains:
* image: Tensor, image in (C, H, W) format.
* instances (optional): groundtruth :class:`Instances`
* proposals (optional): :class:`Instances`, precomputed proposals.
Other information that's included in the original dicts, such as:
* "height", "width" (int): the output resolution of the model, used in inference.
See :meth:`postprocess` for details.
Returns:
list[dict]:
Each dict is the output for one input image.
The dict contains one key "instances" whose value is a :class:`Instances`.
The :class:`Instances` object has the following keys:
"pred_boxes", "pred_classes", "scores", "pred_masks", "pred_keypoints"
"""
if (not self.training) and (not val_mode): # only conduct when testing mode
return self.inference(batched_inputs)
if branch == "domain":
source_label = 0
target_label = 1
# images = self.preprocess_image(batched_inputs)
images_s, images_t = self.preprocess_image_train(batched_inputs)
features = self.backbone(images_s.tensor)
# import pdb
# pdb.set_trace()
if self.dis_type == "multi":
loss_D_img_s = 0
for k, v in features.items():
features_s = grad_reverse(v)
D_img_out_s = self.D_img_dict[k](features_s)
loss_D_img_s += F.binary_cross_entropy_with_logits(D_img_out_s, torch.FloatTensor(D_img_out_s.data.size()).fill_(source_label).to(self.device))
loss_D_img_s /= len(features)
# features_s = grad_reverse(torch.cat((features['p2'],features['p3'],features['p4'],features['p5']),dim=1))
else:
features_s = grad_reverse(features[self.dis_type])
D_img_out_s = self.D_img(features_s)
loss_D_img_s = F.binary_cross_entropy_with_logits(D_img_out_s, torch.FloatTensor(D_img_out_s.data.size()).fill_(source_label).to(self.device))
features_t = self.backbone(images_t.tensor)
if self.dis_type == "multi":
loss_D_img_t = 0
for k, v in features_t.items():
features_tt = grad_reverse(v)
D_img_out_t = self.D_img_dict[k](features_tt)
loss_D_img_t += F.binary_cross_entropy_with_logits(D_img_out_t, torch.FloatTensor(D_img_out_t.data.size()).fill_(target_label).to(self.device))
loss_D_img_t /= len(features_t)
else:
features_t = grad_reverse(features_t[self.dis_type])
# features_t = grad_reverse(features_t['p2'])
D_img_out_t = self.D_img(features_t)
loss_D_img_t = F.binary_cross_entropy_with_logits(D_img_out_t, torch.FloatTensor(D_img_out_t.data.size()).fill_(target_label).to(self.device))
# import pdb
# pdb.set_trace()
losses = {}
losses["loss_D_img_s"] = loss_D_img_s
losses["loss_D_img_t"] = loss_D_img_t
return losses, [], [], None
images = self.preprocess_image(batched_inputs)
if "instances" in batched_inputs[0]:
gt_instances = [x["instances"].to(self.device) for x in batched_inputs]
else:
gt_instances = None
features = self.backbone(images.tensor)
# TODO: remove the usage of if else here. This needs to be re-organized
if branch.startswith("supervised"):
# Region proposal network
proposals_rpn, proposal_losses = self.proposal_generator(
images, features, gt_instances
)
# roi_head lower branch
_, detector_losses = self.roi_heads(
images,
features,
proposals_rpn,
compute_loss=True,
targets=gt_instances,
branch=branch,
)
# visualization
if self.vis_period > 0:
storage = get_event_storage()
if storage.iter % self.vis_period == 0:
self.visualize_training(batched_inputs, proposals_rpn, branch)
losses = {}
losses.update(detector_losses)
losses.update(proposal_losses)
return losses, [], [], None
elif branch == "unsup_data_weak":
"""
Unsupervised weak branch: the input images carry no ground-truth labels; return the proposals from the RPN and the ROI head.
"""
# Region proposal network
proposals_rpn, _ = self.proposal_generator(
images, features, None, compute_loss=False
)
# roi_head lower branch (keep this for further production)
# notice that we do not use any target in ROI head to do inference!
proposals_roih, ROI_predictions = self.roi_heads(
images,
features,
proposals_rpn,
targets=None,
compute_loss=False,
branch=branch,
)
# if self.vis_period > 0:
# storage = get_event_storage()
# if storage.iter % self.vis_period == 0:
# self.visualize_training(batched_inputs, proposals_rpn, branch)
return {}, proposals_rpn, proposals_roih, ROI_predictions
elif branch == "unsup_data_strong":
raise NotImplementedError()
elif branch == "val_loss":
raise NotImplementedError()
def visualize_training(self, batched_inputs, proposals, branch=""):
"""
This function differs from the original one:
- it adds "branch" to the `vis_name`.
A function used to visualize images and proposals. It shows ground truth
bounding boxes on the original image and up to 20 predicted object
proposals on the original image. Users can implement different
visualization functions for different models.
Args:
batched_inputs (list): a list that contains input to the model.
proposals (list): a list that contains predicted proposals. Both
batched_inputs and proposals should have the same length.
"""
from detectron2.utils.visualizer import Visualizer
storage = get_event_storage()
max_vis_prop = 20
for input, prop in zip(batched_inputs, proposals):
img = input["image"]
img = convert_image_to_rgb(img.permute(1, 2, 0), self.input_format)
v_gt = Visualizer(img, None)
v_gt = v_gt.overlay_instances(boxes=input["instances"].gt_boxes)
anno_img = v_gt.get_image()
box_size = min(len(prop.proposal_boxes), max_vis_prop)
v_pred = Visualizer(img, None)
v_pred = v_pred.overlay_instances(
boxes=prop.proposal_boxes[0:box_size].tensor.cpu().numpy()
)
prop_img = v_pred.get_image()
vis_img = np.concatenate((anno_img, prop_img), axis=1)
vis_img = vis_img.transpose(2, 0, 1)
vis_name = (
"Left: GT bounding boxes "
+ branch
+ "; Right: Predicted proposals "
+ branch
)
storage.put_image(vis_name, vis_img)
break # only visualize one image in a batch
|
# Copyright (c) Facebook, Inc. and its affiliates.
from .coco_evaluation import COCOEvaluator
from .pascal_voc_evaluation import PascalVOCDetectionEvaluator
# __all__ = [k for k in globals().keys() if not k.startswith("_")]
__all__ = [
"COCOEvaluator",
"PascalVOCDetectionEvaluator"
]
|
# Copyright (c) Facebook, Inc. and its affiliates.
import contextlib
import copy
import io
import itertools
import json
import logging
import numpy as np
import os
import pickle
from collections import OrderedDict
import pycocotools.mask as mask_util
import torch
from pycocotools.coco import COCO
from pycocotools.cocoeval import COCOeval
from tabulate import tabulate
import detectron2.utils.comm as comm
from detectron2.config import CfgNode
from detectron2.data import MetadataCatalog
from detectron2.data.datasets.coco import convert_to_coco_dict
from detectron2.evaluation.fast_eval_api import COCOeval_opt
from detectron2.structures import Boxes, BoxMode, pairwise_iou
from detectron2.utils.file_io import PathManager
from detectron2.utils.logger import create_small_table
from detectron2.evaluation import DatasetEvaluator
from iopath.common.file_io import file_lock
logger = logging.getLogger(__name__)
def convert_to_coco_json(dataset_name, output_file, allow_cached=True):
"""
Converts dataset into COCO format and saves it to a json file.
dataset_name must be registered in DatasetCatalog and in detectron2's standard format.
Args:
dataset_name:
reference from the config file to the catalogs
must be registered in DatasetCatalog and in detectron2's standard format
output_file: path of json file that will be saved to
allow_cached: if json file is already present then skip conversion
"""
# TODO: The dataset or the conversion script *may* change,
# a checksum would be useful for validating the cached data
PathManager.mkdirs(os.path.dirname(output_file))
with file_lock(output_file):
if PathManager.exists(output_file) and allow_cached:
logger.warning(
f"Using previously cached COCO format annotations at '{output_file}'. "
"You need to clear the cache file if your dataset has been modified."
)
else:
logger.info(f"Converting annotations of dataset '{dataset_name}' to COCO format ...)")
coco_dict = convert_to_coco_dict(dataset_name)
logger.info(f"Caching COCO format annotations at '{output_file}' ...")
tmp_file = output_file #+ ".tmp"
# with PathManager.open(tmp_file, "w") as f:
# json.dump(coco_dict, f)
# shutil.move(tmp_file, output_file)
with PathManager.open(tmp_file, "w") as f:
json.dump(coco_dict, f)
class COCOEvaluator(DatasetEvaluator):
"""
Evaluate AR for object proposals, AP for instance detection/segmentation, AP
for keypoint detection outputs using COCO's metrics.
See http://cocodataset.org/#detection-eval and
http://cocodataset.org/#keypoints-eval to understand its metrics.
The metrics range from 0 to 100 (instead of 0 to 1), where a -1 or NaN means
the metric cannot be computed (e.g. due to no predictions made).
In addition to COCO, this evaluator is able to support any bounding box detection,
instance segmentation, or keypoint detection dataset.
"""
def __init__(
self,
dataset_name,
tasks=None,
distributed=True,
output_dir=None,
*,
use_fast_impl=True,
kpt_oks_sigmas=(),
):
"""
Args:
dataset_name (str): name of the dataset to be evaluated.
It must have either the following corresponding metadata:
"json_file": the path to the COCO format annotation
Or it must be in detectron2's standard dataset format
so it can be converted to COCO format automatically.
tasks (tuple[str]): tasks that can be evaluated under the given
configuration. A task is one of "bbox", "segm", "keypoints".
By default, will infer this automatically from predictions.
distributed (True): if True, will collect results from all ranks and run evaluation
in the main process.
Otherwise, will only evaluate the results in the current process.
output_dir (str): optional, an output directory to dump all
results predicted on the dataset. The dump contains two files:
1. "instances_predictions.pth" a file that can be loaded with `torch.load` and
contains all the results in the format they are produced by the model.
2. "coco_instances_results.json" a json file in COCO's result format.
use_fast_impl (bool): use a fast but **unofficial** implementation to compute AP.
Although the results should be very close to the official implementation in COCO
API, it is still recommended to compute results with the official API for use in
papers. The faster implementation also uses more RAM.
kpt_oks_sigmas (list[float]): The sigmas used to calculate keypoint OKS.
See http://cocodataset.org/#keypoints-eval
When empty, it will use the defaults in COCO.
Otherwise it should be the same length as ROI_KEYPOINT_HEAD.NUM_KEYPOINTS.
"""
self._logger = logging.getLogger(__name__)
self._distributed = distributed
self._output_dir = output_dir
self._use_fast_impl = use_fast_impl
if tasks is not None and isinstance(tasks, CfgNode):
kpt_oks_sigmas = (
tasks.TEST.KEYPOINT_OKS_SIGMAS if not kpt_oks_sigmas else kpt_oks_sigmas
)
self._logger.warn(
"COCO Evaluator instantiated using config, this is deprecated behavior."
" Please pass in explicit arguments instead."
)
self._tasks = None  # Inferring it from predictions should be better
else:
self._tasks = tasks
self._cpu_device = torch.device("cpu")
self._metadata = MetadataCatalog.get(dataset_name)
if not hasattr(self._metadata, "json_file"):
self._logger.info(
f"'{dataset_name}' is not registered by `register_coco_instances`."
" Therefore trying to convert it to COCO format ..."
)
cache_path = os.path.join(output_dir, f"{dataset_name}_coco_format.json")
self._metadata.json_file = cache_path
convert_to_coco_json(dataset_name, cache_path)
json_file = PathManager.get_local_path(self._metadata.json_file)
with contextlib.redirect_stdout(io.StringIO()):
self._coco_api = COCO(json_file)
# Test set json files do not contain annotations (evaluation must be
# performed using the COCO evaluation server).
self._do_evaluation = "annotations" in self._coco_api.dataset
if self._do_evaluation:
self._kpt_oks_sigmas = kpt_oks_sigmas
def reset(self):
self._predictions = []
def process(self, inputs, outputs):
"""
Args:
inputs: the inputs to a COCO model (e.g., GeneralizedRCNN).
It is a list of dict. Each dict corresponds to an image and
contains keys like "height", "width", "file_name", "image_id".
outputs: the outputs of a COCO model. It is a list of dicts with key
"instances" that contains :class:`Instances`.
"""
for input, output in zip(inputs, outputs):
prediction = {"image_id": input["image_id"]}
if "instances" in output:
instances = output["instances"].to(self._cpu_device)
prediction["instances"] = instances_to_coco_json(instances, input["image_id"])
if "proposals" in output:
prediction["proposals"] = output["proposals"].to(self._cpu_device)
if len(prediction) > 1:
self._predictions.append(prediction)
def evaluate(self, img_ids=None):
"""
Args:
img_ids: a list of image IDs to evaluate on. Default to None for the whole dataset
"""
if self._distributed:
comm.synchronize()
predictions = comm.gather(self._predictions, dst=0)
predictions = list(itertools.chain(*predictions))
if not comm.is_main_process():
return {}
else:
predictions = self._predictions
if len(predictions) == 0:
self._logger.warning("[COCOEvaluator] Did not receive valid predictions.")
return {}
if self._output_dir:
PathManager.mkdirs(self._output_dir)
file_path = os.path.join(self._output_dir, "instances_predictions.pth")
with PathManager.open(file_path, "wb") as f:
torch.save(predictions, f)
self._results = OrderedDict()
if "proposals" in predictions[0]:
self._eval_box_proposals(predictions)
if "instances" in predictions[0]:
self._eval_predictions(predictions, img_ids=img_ids)
# Copy so the caller can do whatever with results
return copy.deepcopy(self._results)
def _tasks_from_predictions(self, predictions):
"""
Get COCO API "tasks" (i.e. iou_type) from COCO-format predictions.
"""
tasks = {"bbox"}
for pred in predictions:
if "segmentation" in pred:
tasks.add("segm")
if "keypoints" in pred:
tasks.add("keypoints")
return sorted(tasks)
def _eval_predictions(self, predictions, img_ids=None):
"""
Evaluate predictions. Fill self._results with the metrics of the tasks.
"""
self._logger.info("Preparing results for COCO format ...")
coco_results = list(itertools.chain(*[x["instances"] for x in predictions]))
tasks = self._tasks or self._tasks_from_predictions(coco_results)
# unmap the category ids for COCO
if hasattr(self._metadata, "thing_dataset_id_to_contiguous_id"):
dataset_id_to_contiguous_id = self._metadata.thing_dataset_id_to_contiguous_id
all_contiguous_ids = list(dataset_id_to_contiguous_id.values())
num_classes = len(all_contiguous_ids)
assert min(all_contiguous_ids) == 0 and max(all_contiguous_ids) == num_classes - 1
reverse_id_mapping = {v: k for k, v in dataset_id_to_contiguous_id.items()}
for result in coco_results:
category_id = result["category_id"]
assert category_id < num_classes, (
f"A prediction has class={category_id}, "
f"but the dataset only has {num_classes} classes and "
f"predicted class id should be in [0, {num_classes - 1}]."
)
result["category_id"] = reverse_id_mapping[category_id]
if self._output_dir:
file_path = os.path.join(self._output_dir, "coco_instances_results.json")
self._logger.info("Saving results to {}".format(file_path))
with PathManager.open(file_path, "w") as f:
f.write(json.dumps(coco_results))
f.flush()
if not self._do_evaluation:
self._logger.info("Annotations are not available for evaluation.")
return
self._logger.info(
"Evaluating predictions with {} COCO API...".format(
"unofficial" if self._use_fast_impl else "official"
)
)
for task in sorted(tasks):
assert task in {"bbox", "segm", "keypoints"}, f"Got unknown task: {task}!"
coco_eval = (
_evaluate_predictions_on_coco(
self._coco_api,
coco_results,
task,
kpt_oks_sigmas=self._kpt_oks_sigmas,
use_fast_impl=self._use_fast_impl,
img_ids=img_ids,
)
if len(coco_results) > 0
else None # cocoapi does not handle empty results very well
)
res = self._derive_coco_results(
coco_eval, task, class_names=self._metadata.get("thing_classes")
)
self._results[task] = res
def _eval_box_proposals(self, predictions):
"""
Evaluate the box proposals in predictions.
Fill self._results with the metrics for "box_proposals" task.
"""
if self._output_dir:
# Saving generated box proposals to file.
# Predicted box_proposals are in XYXY_ABS mode.
bbox_mode = BoxMode.XYXY_ABS.value
ids, boxes, objectness_logits = [], [], []
for prediction in predictions:
ids.append(prediction["image_id"])
boxes.append(prediction["proposals"].proposal_boxes.tensor.numpy())
objectness_logits.append(prediction["proposals"].objectness_logits.numpy())
proposal_data = {
"boxes": boxes,
"objectness_logits": objectness_logits,
"ids": ids,
"bbox_mode": bbox_mode,
}
with PathManager.open(os.path.join(self._output_dir, "box_proposals.pkl"), "wb") as f:
pickle.dump(proposal_data, f)
if not self._do_evaluation:
self._logger.info("Annotations are not available for evaluation.")
return
self._logger.info("Evaluating bbox proposals ...")
res = {}
areas = {"all": "", "small": "s", "medium": "m", "large": "l"}
for limit in [100, 1000]:
for area, suffix in areas.items():
stats = _evaluate_box_proposals(predictions, self._coco_api, area=area, limit=limit)
key = "AR{}@{:d}".format(suffix, limit)
res[key] = float(stats["ar"].item() * 100)
self._logger.info("Proposal metrics: \n" + create_small_table(res))
self._results["box_proposals"] = res
def _derive_coco_results(self, coco_eval, iou_type, class_names=None):
"""
Derive the desired score numbers from summarized COCOeval.
Args:
coco_eval (None or COCOEval): None represents no predictions from model.
iou_type (str):
class_names (None or list[str]): if provided, will use it to predict
per-category AP.
Returns:
a dict of {metric name: score}
"""
metrics = {
"bbox": ["AP", "AP50", "AP75", "APs", "APm", "APl"],
"segm": ["AP", "AP50", "AP75", "APs", "APm", "APl"],
"keypoints": ["AP", "AP50", "AP75", "APm", "APl"],
}[iou_type]
if coco_eval is None:
self._logger.warn("No predictions from the model!")
return {metric: float("nan") for metric in metrics}
# the standard metrics
results = {
metric: float(coco_eval.stats[idx] * 100 if coco_eval.stats[idx] >= 0 else "nan")
for idx, metric in enumerate(metrics)
}
self._logger.info(
"Evaluation results for {}: \n".format(iou_type) + create_small_table(results)
)
if not np.isfinite(sum(results.values())):
self._logger.info("Some metrics cannot be computed and is shown as NaN.")
if class_names is None or len(class_names) <= 1:
return results
# Compute per-category AP
# from https://github.com/facebookresearch/Detectron/blob/a6a835f5b8208c45d0dce217ce9bbda915f44df7/detectron/datasets/json_dataset_evaluator.py#L222-L252 # noqa
precisions = coco_eval.eval["precision"]
# precision has dims (iou, recall, cls, area range, max dets)
assert len(class_names) == precisions.shape[2]
results_per_category = []
for idx, name in enumerate(class_names):
# area range index 0: all area ranges
# max dets index -1: typically 100 per image
precision = precisions[:, :, idx, 0, -1]
precision = precision[precision > -1]
ap = np.mean(precision) if precision.size else float("nan")
results_per_category.append(("{}".format(name), float(ap * 100)))
# tabulate it
N_COLS = min(6, len(results_per_category) * 2)
results_flatten = list(itertools.chain(*results_per_category))
results_2d = itertools.zip_longest(*[results_flatten[i::N_COLS] for i in range(N_COLS)])
table = tabulate(
results_2d,
tablefmt="pipe",
floatfmt=".3f",
headers=["category", "AP"] * (N_COLS // 2),
numalign="left",
)
self._logger.info("Per-category {} AP: \n".format(iou_type) + table)
# results.update({"AP-" + name: ap for name, ap in results_per_category})
results_per_category_AP50 = []
for idx, name in enumerate(class_names):
# area range index 0: all area ranges
# max dets index -1: typically 100 per image
t = np.where(.5 == coco_eval.params.iouThrs)[0]
precisions_50 = precisions[t]
precisions_50 = precisions_50[:, :, idx, 0, -1]
precisions_50 = precisions_50[precisions_50 > -1]
ap = np.mean(precisions_50) if precisions_50.size else float("nan")
results_per_category_AP50.append(("{}".format(name), float(ap * 100)))
# tabulate it
N_COLS = min(6, len(results_per_category_AP50) * 2)
results_flatten = list(itertools.chain(*results_per_category_AP50))
results_2d = itertools.zip_longest(*[results_flatten[i::N_COLS] for i in range(N_COLS)])
table = tabulate(
results_2d,
tablefmt="pipe",
floatfmt=".3f",
headers=["category", "AP50"] * (N_COLS // 2),
numalign="left",
)
self._logger.info("Per-category {} AP50: \n".format(iou_type) + table)
results.update({"AP50-" + name: ap for name, ap in results_per_category_AP50})
return results
def instances_to_coco_json(instances, img_id):
"""
Dump an "Instances" object to a COCO-format json that's used for evaluation.
Args:
instances (Instances):
img_id (int): the image id
Returns:
list[dict]: list of json annotations in COCO format.
"""
num_instance = len(instances)
if num_instance == 0:
return []
boxes = instances.pred_boxes.tensor.numpy()
boxes = BoxMode.convert(boxes, BoxMode.XYXY_ABS, BoxMode.XYWH_ABS)
boxes = boxes.tolist()
scores = instances.scores.tolist()
classes = instances.pred_classes.tolist()
has_mask = instances.has("pred_masks")
if has_mask:
# use RLE to encode the masks, because they are too large and take up memory
# since this evaluator stores outputs of the entire dataset
rles = [
mask_util.encode(np.array(mask[:, :, None], order="F", dtype="uint8"))[0]
for mask in instances.pred_masks
]
for rle in rles:
# "counts" is an array encoded by mask_util as a byte-stream. Python3's
# json writer which always produces strings cannot serialize a bytestream
# unless you decode it. Thankfully, utf-8 works out (which is also what
# the pycocotools/_mask.pyx does).
rle["counts"] = rle["counts"].decode("utf-8")
has_keypoints = instances.has("pred_keypoints")
if has_keypoints:
keypoints = instances.pred_keypoints
results = []
for k in range(num_instance):
result = {
"image_id": img_id,
"category_id": classes[k],
"bbox": boxes[k],
"score": scores[k],
}
if has_mask:
result["segmentation"] = rles[k]
if has_keypoints:
# In COCO annotations,
# keypoints coordinates are pixel indices.
# However our predictions are floating point coordinates.
# Therefore we subtract 0.5 to be consistent with the annotation format.
# This is the inverse of data loading logic in `datasets/coco.py`.
keypoints[k][:, :2] -= 0.5
result["keypoints"] = keypoints[k].flatten().tolist()
results.append(result)
return results
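# Example of one resulting entry (illustrative values):
#   {"image_id": 42, "category_id": 3, "bbox": [10.0, 20.0, 50.0, 30.0], "score": 0.87}
# Boxes are converted above from detectron2's XYXY_ABS format into COCO's
# XYWH_ABS convention before serialization.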
# inspired from Detectron:
# https://github.com/facebookresearch/Detectron/blob/a6a835f5b8208c45d0dce217ce9bbda915f44df7/detectron/datasets/json_dataset_evaluator.py#L255 # noqa
def _evaluate_box_proposals(dataset_predictions, coco_api, thresholds=None, area="all", limit=None):
"""
Evaluate detection proposal recall metrics. This function is a much
faster alternative to the official COCO API recall evaluation code. However,
it produces slightly different results.
"""
# Record max overlap value for each gt box
# Return vector of overlap values
areas = {
"all": 0,
"small": 1,
"medium": 2,
"large": 3,
"96-128": 4,
"128-256": 5,
"256-512": 6,
"512-inf": 7,
}
area_ranges = [
[0 ** 2, 1e5 ** 2], # all
[0 ** 2, 32 ** 2], # small
[32 ** 2, 96 ** 2], # medium
[96 ** 2, 1e5 ** 2], # large
[96 ** 2, 128 ** 2], # 96-128
[128 ** 2, 256 ** 2], # 128-256
[256 ** 2, 512 ** 2], # 256-512
[512 ** 2, 1e5 ** 2],
] # 512-inf
assert area in areas, "Unknown area range: {}".format(area)
area_range = area_ranges[areas[area]]
gt_overlaps = []
num_pos = 0
for prediction_dict in dataset_predictions:
predictions = prediction_dict["proposals"]
# sort predictions in descending order
# TODO maybe remove this and make it explicit in the documentation
inds = predictions.objectness_logits.sort(descending=True)[1]
predictions = predictions[inds]
ann_ids = coco_api.getAnnIds(imgIds=prediction_dict["image_id"])
anno = coco_api.loadAnns(ann_ids)
gt_boxes = [
BoxMode.convert(obj["bbox"], BoxMode.XYWH_ABS, BoxMode.XYXY_ABS)
for obj in anno
if obj["iscrowd"] == 0
]
gt_boxes = torch.as_tensor(gt_boxes).reshape(-1, 4) # guard against no boxes
gt_boxes = Boxes(gt_boxes)
gt_areas = torch.as_tensor([obj["area"] for obj in anno if obj["iscrowd"] == 0])
if len(gt_boxes) == 0 or len(predictions) == 0:
continue
valid_gt_inds = (gt_areas >= area_range[0]) & (gt_areas <= area_range[1])
gt_boxes = gt_boxes[valid_gt_inds]
num_pos += len(gt_boxes)
if len(gt_boxes) == 0:
continue
if limit is not None and len(predictions) > limit:
predictions = predictions[:limit]
overlaps = pairwise_iou(predictions.proposal_boxes, gt_boxes)
_gt_overlaps = torch.zeros(len(gt_boxes))
for j in range(min(len(predictions), len(gt_boxes))):
# find which proposal box maximally covers each gt box
# and get the iou amount of coverage for each gt box
max_overlaps, argmax_overlaps = overlaps.max(dim=0)
# find which gt box is 'best' covered (i.e. 'best' = most iou)
gt_ovr, gt_ind = max_overlaps.max(dim=0)
assert gt_ovr >= 0
# find the proposal box that covers the best covered gt box
box_ind = argmax_overlaps[gt_ind]
# record the iou coverage of this gt box
_gt_overlaps[j] = overlaps[box_ind, gt_ind]
assert _gt_overlaps[j] == gt_ovr
# mark the proposal box and the gt box as used
overlaps[box_ind, :] = -1
overlaps[:, gt_ind] = -1
# append recorded iou coverage level
gt_overlaps.append(_gt_overlaps)
gt_overlaps = (
torch.cat(gt_overlaps, dim=0) if len(gt_overlaps) else torch.zeros(0, dtype=torch.float32)
)
gt_overlaps, _ = torch.sort(gt_overlaps)
if thresholds is None:
step = 0.05
thresholds = torch.arange(0.5, 0.95 + 1e-5, step, dtype=torch.float32)
recalls = torch.zeros_like(thresholds)
# compute recall for each iou threshold
for i, t in enumerate(thresholds):
recalls[i] = (gt_overlaps >= t).float().sum() / float(num_pos)
# ar = 2 * np.trapz(recalls, thresholds)
ar = recalls.mean()
return {
"ar": ar,
"recalls": recalls,
"thresholds": thresholds,
"gt_overlaps": gt_overlaps,
"num_pos": num_pos,
}
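# The returned "ar" is average recall: for each IoU threshold in
# 0.50, 0.55, ..., 0.95 the fraction of (area-filtered) ground-truth boxes
# covered by some proposal at that IoU is computed, and these recalls are
# averaged over the thresholds.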
def _evaluate_predictions_on_coco(
coco_gt, coco_results, iou_type, kpt_oks_sigmas=None, use_fast_impl=True, img_ids=None
):
"""
Evaluate the coco results using COCOEval API.
"""
assert len(coco_results) > 0
if iou_type == "segm":
coco_results = copy.deepcopy(coco_results)
# When evaluating mask AP, if the results contain bbox, cocoapi will
# use the box area as the area of the instance, instead of the mask area.
# This leads to a different definition of small/medium/large.
# We remove the bbox field to let mask AP use mask area.
for c in coco_results:
c.pop("bbox", None)
coco_dt = coco_gt.loadRes(coco_results)
coco_eval = (COCOeval_opt if use_fast_impl else COCOeval)(coco_gt, coco_dt, iou_type)
if img_ids is not None:
coco_eval.params.imgIds = img_ids
if iou_type == "keypoints":
# Use the COCO default keypoint OKS sigmas unless overrides are specified
if kpt_oks_sigmas:
assert hasattr(coco_eval.params, "kpt_oks_sigmas"), "pycocotools is too old!"
coco_eval.params.kpt_oks_sigmas = np.array(kpt_oks_sigmas)
# COCOAPI requires every detection and every gt to have keypoints, so
# we just take the first entry from both
num_keypoints_dt = len(coco_results[0]["keypoints"]) // 3
num_keypoints_gt = len(next(iter(coco_gt.anns.values()))["keypoints"]) // 3
num_keypoints_oks = len(coco_eval.params.kpt_oks_sigmas)
assert num_keypoints_oks == num_keypoints_dt == num_keypoints_gt, (
f"[COCOEvaluator] Prediction contain {num_keypoints_dt} keypoints. "
f"Ground truth contains {num_keypoints_gt} keypoints. "
f"The length of cfg.TEST.KEYPOINT_OKS_SIGMAS is {num_keypoints_oks}. "
"They have to agree with each other. For meaning of OKS, please refer to "
"http://cocodataset.org/#keypoints-eval."
)
coco_eval.evaluate()
coco_eval.accumulate()
coco_eval.summarize()
return coco_eval
|
# -*- coding: utf-8 -*-
# Copyright (c) Facebook, Inc. and its affiliates.
import logging
import numpy as np
import os
import tempfile
import xml.etree.ElementTree as ET
from collections import OrderedDict, defaultdict
from functools import lru_cache
import torch
from detectron2.data import MetadataCatalog
from detectron2.utils import comm
from detectron2.utils.file_io import PathManager
from detectron2.evaluation import DatasetEvaluator
class PascalVOCDetectionEvaluator(DatasetEvaluator):
"""
Evaluate Pascal VOC style AP for Pascal VOC dataset.
It contains a synchronization, therefore has to be called from all ranks.
Note that the concept of AP can be implemented in different ways and may not
produce identical results. This class mimics the implementation of the official
Pascal VOC Matlab API, and should produce similar but not identical results to the
official API.
"""
def __init__(self, dataset_name, target_classnames=None):
"""
Args:
dataset_name (str): name of the dataset, e.g., "voc_2007_test"
"""
self._dataset_name = dataset_name
meta = MetadataCatalog.get(dataset_name)
# Too many tiny files, download all to local for speed.
annotation_dir_local = PathManager.get_local_path(
os.path.join(meta.dirname, "Annotations/")
)
self._anno_file_template = os.path.join(annotation_dir_local, "{}.xml")
self._image_set_path = os.path.join(meta.dirname, "ImageSets", "Main", meta.split + ".txt")
self._class_names = meta.thing_classes
assert meta.year in [2007, 2012], meta.year
self._is_2007 = meta.year == 2007
self._cpu_device = torch.device("cpu")
self._logger = logging.getLogger(__name__)
        if target_classnames is None:
            self.target_classnames = self._class_names
        else:
            self.target_classnames = target_classnames
def reset(self):
self._predictions = defaultdict(list) # class name -> list of prediction strings
def process(self, inputs, outputs):
for input, output in zip(inputs, outputs):
image_id = input["image_id"]
instances = output["instances"].to(self._cpu_device)
boxes = instances.pred_boxes.tensor.numpy()
scores = instances.scores.tolist()
classes = instances.pred_classes.tolist()
for box, score, cls in zip(boxes, scores, classes):
xmin, ymin, xmax, ymax = box
# The inverse of data loading logic in `datasets/pascal_voc.py`
xmin += 1
ymin += 1
self._predictions[cls].append(
f"{image_id} {score:.3f} {xmin:.1f} {ymin:.1f} {xmax:.1f} {ymax:.1f}"
)
def evaluate(self):
"""
Returns:
dict: has a key "segm", whose value is a dict of "AP", "AP50", and "AP75".
"""
all_predictions = comm.gather(self._predictions, dst=0)
if not comm.is_main_process():
return
predictions = defaultdict(list)
for predictions_per_rank in all_predictions:
for clsid, lines in predictions_per_rank.items():
predictions[clsid].extend(lines)
del all_predictions
self._logger.info(
"Evaluating {} using {} metric. "
"Note that results do not use the official Matlab API.".format(
self._dataset_name, 2007 if self._is_2007 else 2012
)
)
with tempfile.TemporaryDirectory(prefix="pascal_voc_eval_") as dirname:
res_file_template = os.path.join(dirname, "{}.txt")
aps = defaultdict(list) # iou -> ap per class
for cls_id, cls_name in enumerate(self._class_names):
if cls_name not in self.target_classnames:
continue
lines = predictions.get(cls_id, [""])
with open(res_file_template.format(cls_name), "w") as f:
f.write("\n".join(lines))
for thresh in range(50, 100, 5):
rec, prec, ap = voc_eval(
res_file_template,
self._anno_file_template,
self._image_set_path,
cls_name,
ovthresh=thresh / 100.0,
use_07_metric=self._is_2007,
)
aps[thresh].append(ap * 100)
ret = OrderedDict()
mAP = {iou: np.mean(x) for iou, x in aps.items()}
ret["bbox"] = {"AP": np.mean(list(mAP.values())), "AP50": mAP[50], "AP75": mAP[75]}
        # Add per-class AP50 results
for idx, name in enumerate(self.target_classnames):
ret["bbox"].update({"AP50-" + name: aps[50][idx]})
return ret
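# Illustrative usage sketch (the dataset name is a placeholder): the evaluator follows
# the standard detectron2 DatasetEvaluator protocol of reset() -> process() -> evaluate(),
# and in practice is usually driven by detectron2.evaluation.inference_on_dataset.
def _pascal_voc_evaluator_usage_sketch(model, data_loader):
    evaluator = PascalVOCDetectionEvaluator("voc_2007_test")
    evaluator.reset()
    for inputs in data_loader:
        outputs = model(inputs)
        evaluator.process(inputs, outputs)
    return evaluator.evaluate()  # e.g. {"bbox": {"AP": ..., "AP50": ..., "AP75": ..., ...}}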
##############################################################################
#
# Below code is modified from
# https://github.com/rbgirshick/py-faster-rcnn/blob/master/lib/datasets/voc_eval.py
# --------------------------------------------------------
# Fast/er R-CNN
# Licensed under The MIT License [see LICENSE for details]
# Written by Bharath Hariharan
# --------------------------------------------------------
"""Python implementation of the PASCAL VOC devkit's AP evaluation code."""
@lru_cache(maxsize=None)
def parse_rec(filename):
"""Parse a PASCAL VOC xml file."""
with PathManager.open(filename) as f:
tree = ET.parse(f)
objects = []
for obj in tree.findall("object"):
obj_struct = {}
obj_struct["name"] = obj.find("name").text
obj_struct["pose"] = obj.find("pose").text
obj_struct["truncated"] = int(obj.find("truncated").text)
obj_struct["difficult"] = int(obj.find("difficult").text)
bbox = obj.find("bndbox")
obj_struct["bbox"] = [
int(bbox.find("xmin").text),
int(bbox.find("ymin").text),
int(bbox.find("xmax").text),
int(bbox.find("ymax").text),
]
objects.append(obj_struct)
return objects
def voc_ap(rec, prec, use_07_metric=False):
"""Compute VOC AP given precision and recall. If use_07_metric is true, uses
the VOC 07 11-point method (default:False).
"""
if use_07_metric:
# 11 point metric
ap = 0.0
for t in np.arange(0.0, 1.1, 0.1):
if np.sum(rec >= t) == 0:
p = 0
else:
p = np.max(prec[rec >= t])
ap = ap + p / 11.0
else:
# correct AP calculation
# first append sentinel values at the end
mrec = np.concatenate(([0.0], rec, [1.0]))
mpre = np.concatenate(([0.0], prec, [0.0]))
# compute the precision envelope
for i in range(mpre.size - 1, 0, -1):
mpre[i - 1] = np.maximum(mpre[i - 1], mpre[i])
# to calculate area under PR curve, look for points
# where X axis (recall) changes value
i = np.where(mrec[1:] != mrec[:-1])[0]
# and sum (\Delta recall) * prec
ap = np.sum((mrec[i + 1] - mrec[i]) * mpre[i + 1])
return ap
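# Minimal sketch (toy precision/recall values, not from any dataset): the 11-point
# metric averages the interpolated precision at recall = 0.0, 0.1, ..., 1.0, while
# the default metric integrates the precision envelope over the recall change points.
def _voc_ap_demo():
    rec = np.array([0.2, 0.4, 0.4, 0.8])
    prec = np.array([1.0, 1.0, 0.67, 0.75])
    return voc_ap(rec, prec, use_07_metric=True), voc_ap(rec, prec, use_07_metric=False)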
def voc_eval(detpath, annopath, imagesetfile, classname, ovthresh=0.5, use_07_metric=False):
"""rec, prec, ap = voc_eval(detpath,
annopath,
imagesetfile,
classname,
[ovthresh],
[use_07_metric])
Top level function that does the PASCAL VOC evaluation.
detpath: Path to detections
detpath.format(classname) should produce the detection results file.
annopath: Path to annotations
annopath.format(imagename) should be the xml annotations file.
imagesetfile: Text file containing the list of images, one image per line.
    classname: Category name
[ovthresh]: Overlap threshold (default = 0.5)
[use_07_metric]: Whether to use VOC07's 11 point AP computation
(default False)
"""
# assumes detections are in detpath.format(classname)
# assumes annotations are in annopath.format(imagename)
# assumes imagesetfile is a text file with each line an image name
# first load gt
# read list of images
with PathManager.open(imagesetfile, "r") as f:
lines = f.readlines()
imagenames = [x.strip() for x in lines]
# load annots
recs = {}
for imagename in imagenames:
recs[imagename] = parse_rec(annopath.format(imagename))
# extract gt objects for this class
class_recs = {}
npos = 0
for imagename in imagenames:
R = [obj for obj in recs[imagename] if obj["name"] == classname]
bbox = np.array([x["bbox"] for x in R])
        difficult = np.array([x["difficult"] for x in R]).astype(bool)
        # difficult = np.array([False for x in R]).astype(bool)  # treat all "difficult" as GT
det = [False] * len(R)
npos = npos + sum(~difficult)
class_recs[imagename] = {"bbox": bbox, "difficult": difficult, "det": det}
# read dets
detfile = detpath.format(classname)
with open(detfile, "r") as f:
lines = f.readlines()
splitlines = [x.strip().split(" ") for x in lines]
image_ids = [x[0] for x in splitlines]
confidence = np.array([float(x[1]) for x in splitlines])
BB = np.array([[float(z) for z in x[2:]] for x in splitlines]).reshape(-1, 4)
# sort by confidence
sorted_ind = np.argsort(-confidence)
BB = BB[sorted_ind, :]
image_ids = [image_ids[x] for x in sorted_ind]
# go down dets and mark TPs and FPs
nd = len(image_ids)
tp = np.zeros(nd)
fp = np.zeros(nd)
for d in range(nd):
R = class_recs[image_ids[d]]
bb = BB[d, :].astype(float)
ovmax = -np.inf
BBGT = R["bbox"].astype(float)
if BBGT.size > 0:
# compute overlaps
# intersection
ixmin = np.maximum(BBGT[:, 0], bb[0])
iymin = np.maximum(BBGT[:, 1], bb[1])
ixmax = np.minimum(BBGT[:, 2], bb[2])
iymax = np.minimum(BBGT[:, 3], bb[3])
iw = np.maximum(ixmax - ixmin + 1.0, 0.0)
ih = np.maximum(iymax - iymin + 1.0, 0.0)
inters = iw * ih
# union
uni = (
(bb[2] - bb[0] + 1.0) * (bb[3] - bb[1] + 1.0)
+ (BBGT[:, 2] - BBGT[:, 0] + 1.0) * (BBGT[:, 3] - BBGT[:, 1] + 1.0)
- inters
)
overlaps = inters / uni
ovmax = np.max(overlaps)
jmax = np.argmax(overlaps)
if ovmax > ovthresh:
if not R["difficult"][jmax]:
if not R["det"][jmax]:
tp[d] = 1.0
R["det"][jmax] = 1
else:
fp[d] = 1.0
else:
fp[d] = 1.0
# compute precision recall
fp = np.cumsum(fp)
tp = np.cumsum(tp)
rec = tp / float(npos)
# avoid divide by zero in case the first detection matches a difficult
# ground truth
prec = tp / np.maximum(tp + fp, np.finfo(np.float64).eps)
ap = voc_ap(rec, prec, use_07_metric)
return rec, prec, ap
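# Illustrative call sketch (all paths are hypothetical placeholders): detpath and
# annopath are *templates*; detpath.format(classname) must point at the per-class
# detection results file and annopath.format(image_id) at the per-image XML file.
def _voc_eval_call_sketch():
    return voc_eval(
        "/tmp/voc_results/{}.txt",
        "/data/VOC2007/Annotations/{}.xml",
        "/data/VOC2007/ImageSets/Main/test.txt",
        "car",
        ovthresh=0.5,
        use_07_metric=True,
    )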
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import contextlib
import io
import logging
import os
import json
from detectron2.data import DatasetCatalog, MetadataCatalog
from d2go.data.utils import CallFuncWithJsonFile
from detectron2.utils.file_io import PathManager
from fvcore.common.timer import Timer
from detectron2.data.datasets.pascal_voc import register_pascal_voc
from detectron2.data.datasets.builtin_meta import _get_builtin_metadata
from .cityscapes_foggy import load_cityscapes_instances
logger = logging.getLogger(__name__)
_SPLITS_COCO_FORMAT = {}
_SPLITS_COCO_FORMAT["coco"] = {
"coco_2017_unlabel": (
"memcache_manifold://mobile_vision_dataset/tree/coco_unlabel2017",
"memcache_manifold://mobile_vision_dataset/tree/coco_unlabel2017/coco_jsons/image_info_unlabeled2017.json",
),
"goi_v5_unlabel": (
"memcache_manifold://portal_ai_data/tree/goi_v5/train",
"memcache_manifold://mobile_vision_dataset/tree/goi/v5/coco_jsons/openimages_v5_train_unlabel.json",
),
}
def register_coco_unlabel():
for _, splits_per_dataset in _SPLITS_COCO_FORMAT.items():
for key, (image_root, json_file) in splits_per_dataset.items():
meta = {}
register_coco_unlabel_instances(key, meta, json_file, image_root)
def register_coco_unlabel_instances(name, metadata, json_file, image_root):
"""
Register a dataset in COCO's json annotation format for
instance detection, instance segmentation and keypoint detection.
(i.e., Type 1 and 2 in http://cocodataset.org/#format-data.
`instances*.json` and `person_keypoints*.json` in the dataset).
This is an example of how to register a new dataset.
You can do something similar to this function, to register new datasets.
Args:
name (str): the name that identifies a dataset, e.g. "coco_2014_train".
metadata (dict): extra metadata associated with this dataset. You can
leave it as an empty dict.
json_file (str): path to the json instance annotation file.
image_root (str or path-like): directory which contains all the images.
"""
assert isinstance(name, str), name
assert isinstance(json_file, (str, os.PathLike)), json_file
assert isinstance(image_root, (str, os.PathLike)), image_root
# 1. register a function which returns dicts
DatasetCatalog.register(
name, lambda: load_coco_unlabel_json(json_file, image_root, name)
)
# 2. Optionally, add metadata about this dataset,
# since they might be useful in evaluation, visualization or logging
MetadataCatalog.get(name).set(
json_file=json_file, image_root=image_root, evaluator_type="coco", **metadata
)
def load_coco_unlabel_json(
json_file, image_root, dataset_name=None, extra_annotation_keys=None
):
"""
Load a json file with COCO's instances annotation format.
Currently supports instance detection, instance segmentation,
and person keypoints annotations.
Args:
json_file (str): full path to the json file in COCO instances annotation format.
image_root (str or path-like): the directory where the images in this json file exists.
dataset_name (str): the name of the dataset (e.g., coco_2017_train).
If provided, this function will also put "thing_classes" into
the metadata associated with this dataset.
extra_annotation_keys (list[str]): list of per-annotation keys that should also be
loaded into the dataset dict (besides "iscrowd", "bbox", "keypoints",
"category_id", "segmentation"). The values for these keys will be returned as-is.
For example, the densepose annotations are loaded in this way.
Returns:
list[dict]: a list of dicts in Detectron2 standard dataset dicts format. (See
`Using Custom Datasets </tutorials/datasets.html>`_ )
Notes:
1. This function does not read the image files.
The results do not have the "image" field.
"""
from pycocotools.coco import COCO
timer = Timer()
json_file = PathManager.get_local_path(json_file)
with contextlib.redirect_stdout(io.StringIO()):
coco_api = COCO(json_file)
if timer.seconds() > 1:
logger.info(
"Loading {} takes {:.2f} seconds.".format(json_file, timer.seconds())
)
# sort indices for reproducible results
img_ids = sorted(coco_api.imgs.keys())
imgs = coco_api.loadImgs(img_ids)
logger.info(
"Loaded {} unlabeled images in COCO format from {}".format(len(imgs), json_file)
)
dataset_dicts = []
for img_dict in imgs:
record = {}
record["file_name"] = os.path.join(image_root, img_dict["file_name"])
record["height"] = img_dict["height"]
record["width"] = img_dict["width"]
record["image_id"] = img_dict["id"]
dataset_dicts.append(record)
return dataset_dicts
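# Illustrative sketch: each record produced by load_coco_unlabel_json is a minimal
# detectron2 dataset dict for an unlabeled image (there is no "annotations" field).
# The field values below are made up for demonstration.
def _example_unlabeled_record():
    return {
        "file_name": "coco_unlabel2017/000000000001.jpg",  # hypothetical path
        "height": 480,
        "width": 640,
        "image_id": 1,
    }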
_UNLABELED_DATASETS = {
# 1-2 people images extracted from UGC ig images or ig profiles using fetch_image flow
"UGC_unlabel_ig_1M_20210514_1or2people": "manifold://pai_mobile/tree/datasets/semi_supervised/unlabeled_UGC/sweep_4m_20210514_20210515_1or2people.json",
# hand non-UGC long range frames extracted from collected videos
"hand_nonUGC_long_range_384K_20210521": "manifold://pai_mobile/tree/datasets/hand_unlabeled_nonUGC/long_range.json",
# hand non-UGC short range images cropped from the annotated bounding boxes in long-range videos
"hand_nonUGC_short_range_183K_20210521": "manifold://pai_mobile/tree/datasets/hand_unlabeled_nonUGC/short_range.json",
}
def load_json(json_file):
"""
Simply load and return the json_file
"""
with PathManager.open(json_file, "r") as f:
json_data = json.load(f)
return json_data
def register_unlabeled():
"""
Register the unlabeled datasets
The json_file needs to be in D2's format
"""
for name, json_file in _UNLABELED_DATASETS.items():
# 1. register a function which returns dicts
DatasetCatalog.register(
name,
CallFuncWithJsonFile(
func=load_json,
json_file=json_file
)
)
# 2. Optionally, add metadata about this dataset,
# since they might be useful in evaluation, visualization or logging
MetadataCatalog.get(name).set(
json_file=json_file, image_root="", evaluator_type="coco"
)
# ==== Predefined splits for raw cityscapes foggy images ===========
_RAW_CITYSCAPES_SPLITS = {
# "cityscapes_foggy_{task}_train": ("cityscape_foggy/leftImg8bit/train/", "cityscape_foggy/gtFine/train/"),
# "cityscapes_foggy_{task}_val": ("cityscape_foggy/leftImg8bit/val/", "cityscape_foggy/gtFine/val/"),
# "cityscapes_foggy_{task}_test": ("cityscape_foggy/leftImg8bit/test/", "cityscape_foggy/gtFine/test/"),
"cityscapes_foggy_train": ("cityscape_foggy/leftImg8bit/train/", "cityscape_foggy/gtFine/train/"),
"cityscapes_foggy_val": ("cityscape_foggy/leftImg8bit/val/", "cityscape_foggy/gtFine/val/"),
"cityscapes_foggy_test": ("cityscape_foggy/leftImg8bit/test/", "cityscape_foggy/gtFine/test/"),
}
def register_all_cityscapes_foggy():
root = "manifold://mobile_vision_dataset/tree/yujheli/dataset"
for key, (image_dir, gt_dir) in _RAW_CITYSCAPES_SPLITS.items():
meta = _get_builtin_metadata("cityscapes")
image_dir = os.path.join(root, image_dir)
gt_dir = os.path.join(root, gt_dir)
# inst_key = key.format(task="instance_seg")
inst_key = key
# DatasetCatalog.register(
# inst_key,
# lambda x=image_dir, y=gt_dir: load_cityscapes_instances(
# x, y, from_json=True, to_polygons=True
# ),
# )
DatasetCatalog.register(
inst_key,
lambda x=image_dir, y=gt_dir: load_cityscapes_instances(
x, y, from_json=False, to_polygons=False
),
)
# MetadataCatalog.get(inst_key).set(
# image_dir=image_dir, gt_dir=gt_dir, evaluator_type="cityscapes_instance", **meta
# )
# MetadataCatalog.get(inst_key).set(
# image_dir=image_dir, gt_dir=gt_dir, evaluator_type="pascal_voc", **meta
# )
MetadataCatalog.get(inst_key).set(
image_dir=image_dir, gt_dir=gt_dir, evaluator_type="coco", **meta
)
# ==== Predefined splits for Clipart (PASCAL VOC format) ===========
def register_all_clipart():
root = "manifold://mobile_vision_dataset/tree/yujheli/dataset"
SPLITS = [
("Clipart1k_train", "clipart", "train"),
("Clipart1k_test", "clipart", "test"),
]
for name, dirname, split in SPLITS:
year = 2012
register_pascal_voc(name, os.path.join(root, dirname), split, year)
MetadataCatalog.get(name).evaluator_type = "pascal_voc"
# MetadataCatalog.get(name).evaluator_type = "coco"
register_all_cityscapes_foggy()
register_all_clipart()
# register_coco_unlabel()
# register_unlabeled()
def register_all_water():
root = "manifold://mobile_vision_dataset/tree/yujheli/dataset" #Need to modify to the correct folder containing the dataset.
SPLITS = [
("Watercolor_train", "watercolor", "train"),
("Watercolor_test", "watercolor", "test"),
]
for name, dirname, split in SPLITS:
year = 2012
# register_pascal_voc(name, os.path.join(root, dirname), split, year, class_names=["person", "dog","bicycle", "bird", "car", "cat"])
register_pascal_voc(name, os.path.join(root, dirname), split, year)
MetadataCatalog.get(name).evaluator_type = "pascal_voc_water"
register_all_water()
def register_all_clipart_ws():
root = "manifold://mobile_vision_dataset/tree/yujheli/dataset"
SPLITS = [
("Clipart1k_train_w", "clipart", "train"),
("Clipart1k_test_w", "clipart", "test"),
]
for name, dirname, split in SPLITS:
year = 2012
register_pascal_voc(name, os.path.join(root, dirname), split, year)
MetadataCatalog.get(name).evaluator_type = "pascal_voc_water"
# MetadataCatalog.get(name).evaluator_type = "coco"
register_all_clipart_ws() |
# Copyright (c) Facebook, Inc. and its affiliates.
import functools
import json
import logging
import multiprocessing as mp
import numpy as np
import os
from itertools import chain
import pycocotools.mask as mask_util
from PIL import Image
from detectron2.structures import BoxMode
from detectron2.utils.comm import get_world_size
from detectron2.utils.file_io import PathManager
from detectron2.utils.logger import setup_logger
try:
import cv2 # noqa
except ImportError:
# OpenCV is an optional dependency at the moment
pass
logger = logging.getLogger(__name__)
def _get_cityscapes_files(image_dir, gt_dir):
files = []
# scan through the directory
cities = PathManager.ls(image_dir)
logger.info(f"{len(cities)} cities found in '{image_dir}'.")
for city in cities:
city_img_dir = os.path.join(image_dir, city)
city_gt_dir = os.path.join(gt_dir, city)
for basename in PathManager.ls(city_img_dir):
image_file = os.path.join(city_img_dir, basename)
# suffix = "leftImg8bit.png"
# assert basename.endswith(suffix), basename
# basename = basename[: -len(suffix)]
suffix = 'leftImg8bit_foggy'
basename = basename.split(suffix)[0]
instance_file = os.path.join(city_gt_dir, basename + "gtFine_instanceIds.png")
label_file = os.path.join(city_gt_dir, basename + "gtFine_labelIds.png")
json_file = os.path.join(city_gt_dir, basename + "gtFine_polygons.json")
files.append((image_file, instance_file, label_file, json_file))
assert len(files), "No images found in {}".format(image_dir)
for f in files[0]:
assert PathManager.isfile(f), f
return files
def load_cityscapes_instances(image_dir, gt_dir, from_json=True, to_polygons=True):
"""
Args:
image_dir (str): path to the raw dataset. e.g., "~/cityscapes/leftImg8bit/train".
gt_dir (str): path to the raw annotations. e.g., "~/cityscapes/gtFine/train".
from_json (bool): whether to read annotations from the raw json file or the png files.
to_polygons (bool): whether to represent the segmentation as polygons
(COCO's format) instead of masks (cityscapes's format).
Returns:
list[dict]: a list of dicts in Detectron2 standard format. (See
`Using Custom Datasets </tutorials/datasets.html>`_ )
"""
if from_json:
assert to_polygons, (
"Cityscapes's json annotations are in polygon format. "
"Converting to mask format is not supported now."
)
files = _get_cityscapes_files(image_dir, gt_dir)
logger.info("Preprocessing cityscapes annotations ...")
    # This is still not fast: all workers will execute duplicate work, and it can
    # take up to 10 minutes on an 8-GPU server.
pool = mp.Pool(processes=max(mp.cpu_count() // get_world_size() // 2, 4))
ret = pool.map(
functools.partial(_cityscapes_files_to_dict, from_json=from_json, to_polygons=to_polygons),
files,
)
logger.info("Loaded {} images from {}".format(len(ret), image_dir))
# Map cityscape ids to contiguous ids
from cityscapesscripts.helpers.labels import labels
labels = [l for l in labels if l.hasInstances and not l.ignoreInEval]
dataset_id_to_contiguous_id = {l.id: idx for idx, l in enumerate(labels)}
for dict_per_image in ret:
for anno in dict_per_image["annotations"]:
anno["category_id"] = dataset_id_to_contiguous_id[anno["category_id"]]
return ret
def load_cityscapes_semantic(image_dir, gt_dir):
"""
Args:
image_dir (str): path to the raw dataset. e.g., "~/cityscapes/leftImg8bit/train".
gt_dir (str): path to the raw annotations. e.g., "~/cityscapes/gtFine/train".
Returns:
list[dict]: a list of dict, each has "file_name" and
"sem_seg_file_name".
"""
ret = []
    # gt_dir is small and contains many small files; it makes sense to fetch it to local storage first
gt_dir = PathManager.get_local_path(gt_dir)
for image_file, _, label_file, json_file in _get_cityscapes_files(image_dir, gt_dir):
label_file = label_file.replace("labelIds", "labelTrainIds")
with PathManager.open(json_file, "r") as f:
jsonobj = json.load(f)
ret.append(
{
"file_name": image_file,
"sem_seg_file_name": label_file,
"height": jsonobj["imgHeight"],
"width": jsonobj["imgWidth"],
}
)
assert len(ret), f"No images found in {image_dir}!"
assert PathManager.isfile(
ret[0]["sem_seg_file_name"]
), "Please generate labelTrainIds.png with cityscapesscripts/preparation/createTrainIdLabelImgs.py" # noqa
return ret
def _cityscapes_files_to_dict(files, from_json, to_polygons):
"""
    Parse cityscapes annotation files to an instance segmentation dataset dict.
Args:
files (tuple): consists of (image_file, instance_id_file, label_id_file, json_file)
from_json (bool): whether to read annotations from the raw json file or the png files.
to_polygons (bool): whether to represent the segmentation as polygons
(COCO's format) instead of masks (cityscapes's format).
Returns:
A dict in Detectron2 Dataset format.
"""
from cityscapesscripts.helpers.labels import id2label, name2label
image_file, instance_id_file, _, json_file = files
annos = []
if from_json:
from shapely.geometry import MultiPolygon, Polygon
with PathManager.open(json_file, "r") as f:
jsonobj = json.load(f)
ret = {
"file_name": image_file,
"image_id": os.path.basename(image_file),
"height": jsonobj["imgHeight"],
"width": jsonobj["imgWidth"],
}
# `polygons_union` contains the union of all valid polygons.
polygons_union = Polygon()
# CityscapesScripts draw the polygons in sequential order
# and each polygon *overwrites* existing ones. See
# (https://github.com/mcordts/cityscapesScripts/blob/master/cityscapesscripts/preparation/json2instanceImg.py) # noqa
# We use reverse order, and each polygon *avoids* early ones.
        # This will resolve the polygon overlaps in the same way as CityscapesScripts.
for obj in jsonobj["objects"][::-1]:
if "deleted" in obj: # cityscapes data format specific
continue
label_name = obj["label"]
try:
label = name2label[label_name]
except KeyError:
if label_name.endswith("group"): # crowd area
label = name2label[label_name[: -len("group")]]
else:
raise
if label.id < 0: # cityscapes data format
continue
            # Cityscapes's raw annotations use integer coordinates,
            # therefore +0.5 here
poly_coord = np.asarray(obj["polygon"], dtype="f4") + 0.5
# CityscapesScript uses PIL.ImageDraw.polygon to rasterize
# polygons for evaluation. This function operates in integer space
# and draws each pixel whose center falls into the polygon.
# Therefore it draws a polygon which is 0.5 "fatter" in expectation.
# We therefore dilate the input polygon by 0.5 as our input.
poly = Polygon(poly_coord).buffer(0.5, resolution=4)
if not label.hasInstances or label.ignoreInEval:
# even if we won't store the polygon it still contributes to overlaps resolution
polygons_union = polygons_union.union(poly)
continue
# Take non-overlapping part of the polygon
poly_wo_overlaps = poly.difference(polygons_union)
if poly_wo_overlaps.is_empty:
continue
polygons_union = polygons_union.union(poly)
anno = {}
anno["iscrowd"] = label_name.endswith("group")
anno["category_id"] = label.id
if isinstance(poly_wo_overlaps, Polygon):
poly_list = [poly_wo_overlaps]
elif isinstance(poly_wo_overlaps, MultiPolygon):
poly_list = poly_wo_overlaps.geoms
else:
raise NotImplementedError("Unknown geometric structure {}".format(poly_wo_overlaps))
poly_coord = []
for poly_el in poly_list:
# COCO API can work only with exterior boundaries now, hence we store only them.
# TODO: store both exterior and interior boundaries once other parts of the
# codebase support holes in polygons.
poly_coord.append(list(chain(*poly_el.exterior.coords)))
anno["segmentation"] = poly_coord
(xmin, ymin, xmax, ymax) = poly_wo_overlaps.bounds
anno["bbox"] = (xmin, ymin, xmax, ymax)
anno["bbox_mode"] = BoxMode.XYXY_ABS
annos.append(anno)
else:
# See also the official annotation parsing scripts at
# https://github.com/mcordts/cityscapesScripts/blob/master/cityscapesscripts/evaluation/instances2dict.py # noqa
with PathManager.open(instance_id_file, "rb") as f:
inst_image = np.asarray(Image.open(f), order="F")
# ids < 24 are stuff labels (filtering them first is about 5% faster)
flattened_ids = np.unique(inst_image[inst_image >= 24])
ret = {
"file_name": image_file,
"image_id": os.path.basename(image_file),
"height": inst_image.shape[0],
"width": inst_image.shape[1],
}
for instance_id in flattened_ids:
# For non-crowd annotations, instance_id // 1000 is the label_id
# Crowd annotations have <1000 instance ids
label_id = instance_id // 1000 if instance_id >= 1000 else instance_id
label = id2label[label_id]
if not label.hasInstances or label.ignoreInEval:
continue
anno = {}
anno["iscrowd"] = instance_id < 1000
anno["category_id"] = label.id
mask = np.asarray(inst_image == instance_id, dtype=np.uint8, order="F")
inds = np.nonzero(mask)
ymin, ymax = inds[0].min(), inds[0].max()
xmin, xmax = inds[1].min(), inds[1].max()
anno["bbox"] = (xmin, ymin, xmax, ymax)
if xmax <= xmin or ymax <= ymin:
continue
anno["bbox_mode"] = BoxMode.XYXY_ABS
if to_polygons:
# This conversion comes from D4809743 and D5171122,
# when Mask-RCNN was first developed.
contours = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)[
-2
]
polygons = [c.reshape(-1).tolist() for c in contours if len(c) >= 3]
                # OpenCV can produce invalid polygons
if len(polygons) == 0:
continue
anno["segmentation"] = polygons
else:
anno["segmentation"] = mask_util.encode(mask[:, :, None])[0]
annos.append(anno)
ret["annotations"] = annos
return ret
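# Minimal sketch of the instance-id convention handled above (the id is made up):
# ids >= 1000 encode label_id * 1000 + per-image instance index, while ids < 1000
# mark crowd regions labelled directly with the label id.
def _decode_cityscapes_instance_id_demo(instance_id=26002):
    label_id = instance_id // 1000 if instance_id >= 1000 else instance_id
    iscrowd = instance_id < 1000
    return label_id, iscrowd  # 26002 -> (26, False); id 26 is "car" in cityscapesscripts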
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import torch
from detectron2.structures import pairwise_iou
class OpenMatchTrainerProbe:
def __init__(self, cfg):
self.BOX_AP = 0.5
self.NUM_CLASSES = cfg.MODEL.ROI_HEADS.NUM_CLASSES
# self.bbox_stat_list = ['compute_fp_gtoutlier', 'compute_num_box', 'compute_ood_acc']
def bbox_stat(self, unlabel_gt, unlabel_pseudo, name, bbox_stat_list):
stats = {}
sum_gpu_names = []
for metric in bbox_stat_list:
stats_per, sum_gpu_names_per = getattr(
self, metric)(unlabel_gt, unlabel_pseudo, name)
stats.update(stats_per)
sum_gpu_names.extend(sum_gpu_names_per)
return stats, sum_gpu_names
def compute_fp_gtoutlier(self, unlabel_gt, unlabel_pseudo, name):
num_gt_ood_object = 0
num_gt_fp_ood_object = 0
sum_iou = 0.0
sum_gpu_names = []
results = {}
if len(unlabel_gt) != 0:
for gt, pseudo in zip(unlabel_gt, unlabel_pseudo):
if name == "pred":
pp_boxes = pseudo.pred_boxes
elif name == "pseudo_conf" or name == "pseudo_ood":
# filter predicted ood box when evaluating this metric
pseudo = pseudo[pseudo.gt_classes != -1]
pp_boxes = pseudo.gt_boxes
else:
raise ValueError("Unknown name for probe roi bbox.")
if len(gt) != 0 and len(pseudo) != 0:
max_iou, max_idx = pairwise_iou(
gt.gt_boxes.to('cuda'), pp_boxes).max(1)
ood_idx = (gt.gt_classes == -1)
num_gt_ood_object += ood_idx.sum().item()
num_gt_fp_ood_object += (max_iou[ood_idx]
> self.BOX_AP).sum().item()
sum_iou += max_iou[ood_idx].sum().item()
elif len(gt) != 0 and len(pseudo) == 0:
ood_idx = (gt.gt_classes == -1)
num_gt_ood_object += ood_idx.shape[0]
results = {'Analysis_'+name+'/num_gt_ood_object': num_gt_ood_object,
'Analysis_'+name+'/num_gt_fp_ood_object': num_gt_fp_ood_object,
'Analysis_'+name+'/sum_iou': sum_iou}
sum_gpu_names.extend(list(results.keys()))
return results, sum_gpu_names
def compute_num_box(self, unlabel_gt, unlabel_pseudo, name):
num_bbox = 0.0
size_bbox = 0.0
avg_conf = 0.0
# measure in and out box for openset SS-OD
num_bbox_in = 0.0
num_bbox_out = 0.0
num_bg = 0.0
# when ground-truth is missing in unlabeled data
if len(unlabel_gt) == 0:
for pp_roi in unlabel_pseudo:
if name == "pred":
pp_boxes = pp_roi.pred_boxes
pp_classes = pp_roi.pred_classes
pp_scores = pp_roi.scores
elif name == "pseudo_conf" or name == "pseudo_ood":
pp_boxes = pp_roi.gt_boxes
pp_classes = pp_roi.gt_classes
pp_scores = pp_roi.scores
elif name == "gt":
pp_boxes = pp_roi.gt_boxes
pp_classes = pp_roi.gt_classes
else:
raise ValueError("Unknown name for probe roi bbox.")
# all boxes (in + out boxes)
if len(pp_roi) != 0:
# bbox number and size
num_bbox += len(pp_roi)
size_bbox += pp_boxes.area().mean().item()
# average box confidence
if name != "gt":
avg_conf += pp_scores.mean()
else:
num_bbox += 0
size_bbox += torch.tensor(0).cuda()
num_valid_img = len(unlabel_pseudo)
else:
# with ground-truth
num_valid_img = 0
for gt, pp_roi in zip(unlabel_gt, unlabel_pseudo):
if name == "pred":
pp_boxes = pp_roi.pred_boxes
pp_classes = pp_roi.pred_classes
pp_scores = pp_roi.scores
elif name == "pseudo_conf" or name == "pseudo_ood":
# filter out ood pseudo-box when doing analysis
pp_roi = pp_roi[pp_roi.gt_classes != -1]
pp_boxes = pp_roi.gt_boxes
pp_classes = pp_roi.gt_classes
pp_scores = pp_roi.scores
elif name == "gt":
pp_boxes = pp_roi.gt_boxes
pp_classes = pp_roi.gt_classes
else:
raise ValueError("Unknown name for probe roi bbox.")
# all boxes (in + out boxes)
if len(pp_roi) != 0:
# bbox number and size
num_bbox += len(pp_roi)
size_bbox += pp_boxes.area().mean().item()
# average box confidence
if name != "gt":
avg_conf += pp_scores.mean()
else:
num_bbox += 0
size_bbox += torch.tensor(0).cuda()
# in and out class
if name == "gt":
pp_roi_in = pp_roi[pp_classes != -1]
num_bbox_in += len(pp_roi_in)
pp_roi_out = pp_roi[pp_classes == -1]
num_bbox_out += len(pp_roi_out)
num_valid_img += 1
elif name == "pred" or name == "pseudo_conf" or name == "pseudo_ood":
if len(gt.gt_boxes.to('cuda'))>0 and len(pp_boxes) > 0:
max_iou, max_idx = pairwise_iou(gt.gt_boxes.to('cuda'), pp_boxes).max(0)
# for the ground-truth label for each pseudo-box
gtclass4pseudo = gt.gt_classes[max_idx]
matchgtbox = max_iou > 0.5
# compute the number of boxes (background, inlier, outlier)
num_bg += (~matchgtbox).sum().item()
num_bbox_in += (gtclass4pseudo[matchgtbox]
!= -1).sum().item()
num_bbox_out += (gtclass4pseudo[matchgtbox]
== -1).sum().item()
num_valid_img += 1
else:
raise ValueError("Unknown name for probe roi bbox.")
box_probe = {}
if num_valid_img >0 :
box_probe["Analysis_" + name + "/Num_bbox"] = num_bbox / \
num_valid_img
box_probe["Analysis_" + name + "/Size_bbox"] = size_bbox / \
num_valid_img
box_probe["Analysis_" + name +
"/Num_bbox_inlier"] = num_bbox_in / num_valid_img
box_probe["Analysis_" + name +
"/Num_bbox_outlier"] = num_bbox_out / num_valid_img
if name != "gt": # prediciton, background number
box_probe["Analysis_" + name + "/Conf"] = avg_conf / \
num_valid_img
box_probe["Analysis_" + name +
"/Num_bbox_background"] = num_bg / num_valid_img
box_probe["Analysis_" + name +
"/background_fp_ratio"] = num_bg / num_bbox
box_probe["Analysis_" + name +
"/background_tp_ratio"] = num_bbox_in / num_bbox
else:
box_probe["Analysis_" + name + "/Num_bbox"] = 0.0
box_probe["Analysis_" + name + "/Size_bbox"] = 0.0
box_probe["Analysis_" + name +
"/Num_bbox_inlier"] = 0.0
box_probe["Analysis_" + name +
"/Num_bbox_outlier"] = 0.0
if name != "gt": # prediciton, background number
box_probe["Analysis_" + name + "/Conf"] = 0.0
box_probe["Analysis_" + name +
"/Num_bbox_background"] = 0.0
box_probe["Analysis_" + name +
"/background_fp_ratio"] = num_bg / num_bbox
box_probe["Analysis_" + name +
"/background_tp_ratio"] = num_bbox_in / num_bbox
return box_probe, []
def compute_ood_acc(self, unlabel_gt, unlabel_pseudo, name, BOX_IOU=0.5):
results = {}
sum_gpu_names = []
if len(unlabel_gt) != 0:
for metric in ['acc_outlier', 'recall_outlier']:
for samples in ['_fg', '_all']:
for fraction_part in ['_nume', '_deno']:
results[metric+samples+fraction_part] = 0.0
for gt, pred in zip(unlabel_gt, unlabel_pseudo):
if name == "pred":
pp_boxes = pred.pred_boxes
pp_ood_scores = pred.ood_scores
elif name == "pseudo_conf" or name == "pseudo_ood":
                    # assume these outliers are suppressed
pred = pred[pred.gt_classes != -1]
pp_boxes = pred.gt_boxes
pp_ood_scores = pred.ood_scores
else:
raise ValueError("Unknown name for probe roi bbox.")
if len(gt) != 0 and len(pred) != 0:
# find the most overlapped ground-truth box for each pseudo-box
max_iou, max_idx = pairwise_iou(
gt.gt_boxes.to('cuda'), pp_boxes).max(0)
# ignore background instances
find_fg_mask = max_iou > BOX_IOU
if find_fg_mask.sum() > 0:
gt_corres = gt[max_idx].gt_classes.to("cuda")
gt_outlier = (gt_corres[find_fg_mask] == -1)
pred_outlier = pp_ood_scores[find_fg_mask][:, 0] > 0.5
                        # accuracy of ood detection (foreground)
# acc_outlier_fg = (pred_outlier == gt_outlier).sum() /find_fg_mask.sum()
results['acc_outlier_fg_nume'] += (
pred_outlier == gt_outlier).sum()
results['acc_outlier_fg_deno'] += find_fg_mask.sum()
# recall of ood detection (foreground)
# recall_outlier_fg = (pred_outlier[gt_outlier] == gt_outlier[gt_outlier]).sum() /gt_outlier.sum()
results['recall_outlier_fg_nume'] += (
pred_outlier[gt_outlier] == gt_outlier[gt_outlier]).sum()
results['recall_outlier_fg_deno'] += gt_outlier.sum()
                    # Regard background gt as outlier
gt_corres = gt[max_idx].gt_classes.to("cuda")
# convert all background gt as outlier
gt_corres[~find_fg_mask] = -1
gt_outlier = gt_corres == -1
pred_outlier = pp_ood_scores[:, 0] > 0.5
                    # accuracy of ood detection (all)
# acc_outlier_all = (pred_outlier == gt_outlier).sum() /len(pred)
results['acc_outlier_all_nume'] += (
pred_outlier == gt_outlier).sum()
results['acc_outlier_all_deno'] += len(pred)
# recall of ood detection (all)
# recall_outlier_all = (pred_outlier[gt_outlier] == gt_outlier[gt_outlier]).sum() /gt_outlier.sum()
results['recall_outlier_all_nume'] += (
pred_outlier[gt_outlier] == gt_outlier[gt_outlier]).sum()
results['recall_outlier_all_deno'] += gt_outlier.sum()
results = {'Analysis_'+name+'/'+k: v for k, v in results.items()}
sum_gpu_names.extend(list(results.keys()))
return results, sum_gpu_names
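# Illustrative sketch (not used by the trainer): compute_ood_acc only accumulates
# numerator/denominator pairs, presumably so they can be summed across GPUs before
# dividing; the final ratios would be recovered along these lines.
def _finalize_outlier_metrics_sketch(results, name="pred"):
    prefix = "Analysis_" + name + "/"
    finalized = {}
    for metric in ["acc_outlier", "recall_outlier"]:
        for samples in ["_fg", "_all"]:
            nume = float(results.get(prefix + metric + samples + "_nume", 0.0))
            deno = float(results.get(prefix + metric + samples + "_deno", 0.0))
            finalized[prefix + metric + samples] = nume / max(deno, 1.0)
    return finalized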
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import logging
import time
from collections import OrderedDict
from typing import Dict
import detectron2.utils.comm as comm
import numpy as np
import torch
from detectron2.engine import SimpleTrainer
from detectron2.structures import BitMasks, Boxes, Instances, Keypoints
from detectron2.utils.events import get_event_storage
from d2go.projects.unbiased_teacher.engine.trainer import UnbiasedTeacherTrainer
from d2go.projects.unbiased_teacher.utils.probe import probe
import copy
logger = logging.getLogger(__name__)
class DAobjTrainer(UnbiasedTeacherTrainer):
"""
A trainer for Teacher-Student mutual learning following this paper:
"Unbiased Teacher for Semi-Supervised Object Detection"
    It assumes that in every step you:
    For the Teacher:
    1. Perform a forward pass on weakly augmented unlabeled data from the data_loader.
    2. Generate pseudo-labels on that weakly augmented unlabeled data.
    For the Student:
    1. Perform a forward pass on strongly augmented unlabeled data from the data_loader.
    2. Perform a forward pass on labeled data from the data_loader.
    3. Use the pseudo-labels generated by the Teacher as targets and compute the
       loss on the strongly augmented unlabeled data.
    4. Compute the gradients from the above losses on labeled and unlabeled data.
    5. Update the Student model with the optimizer.
    6. EMA-update the Teacher model.
# def __init__(self, cfg, model, model_teacher, data_loader, optimizer):
# """
# Args:
# model: a torch Module. Takes a data from data_loader and returns a
# dict of losses.
# data_loader: an iterable. Contains data to be used to call model.
# optimizer: a torch optimizer.
# """
# super().__init__(model, data_loader, optimizer)
# self.cfg = cfg
# self.model_teacher = model_teacher
def run_step(self):
assert (
self.model.training
), "Student model was changed to eval mode during training"
start = time.perf_counter()
data = next(self._data_loader_iter)
# q (queue): strongly augmented, k (key): weakly augmented
        # TODO: further use the weak samples for domain adaptation
label_data_q, label_data_k, unlabel_data_q, unlabel_data_k = data
data_time = time.perf_counter() - start
if (
self.cfg.UNBIASEDTEACHER.BURN_IN_STEP != 0
and self.iter < self.cfg.UNBIASEDTEACHER.BURN_IN_STEP
):
            # Burn-in stage: train the Student model with supervision only.
losses, loss_dict, record_dict = self.burn_in(label_data_q, label_data_k)
else:
# Copy the Student model to the Teacher (using keep_rate = 0)
if self.iter == self.cfg.UNBIASEDTEACHER.BURN_IN_STEP:
logger.info("Copying Student weights to the Teacher .....")
self._update_teacher_model(keep_rate=0.0)
elif (
self.iter - self.cfg.UNBIASEDTEACHER.BURN_IN_STEP
) % self.cfg.UNBIASEDTEACHER.TEACHER_UPDATE_ITER == 0:
self._update_teacher_model(
keep_rate=self.cfg.UNBIASEDTEACHER.EMA.KEEP_RATE
)
# Teacher-Student Mutual Learning
losses, loss_dict, record_dict = self.teacher_student_learning(
label_data_q, label_data_k, unlabel_data_q, unlabel_data_k
)
self.optimizer.zero_grad()
losses.backward()
self._write_metrics(record_dict, data_time)
"""
If you need gradient clipping/scaling or other processing, you can
wrap the optimizer with your custom `step()` method. But it is
suboptimal as explained in https://arxiv.org/abs/2006.15704 Sec 3.2.4
"""
self.optimizer.step()
def burn_in(self, label_data_q, label_data_k):
"""
Perform Burn-In stage with labeled data
"""
# combine label_data_q + label_data_k
label_data_q.extend(label_data_k)
record_dict, _, _, _ = self.model(label_data_q, branch="supervised")
# weight losses
loss_dict = self.weight_losses(record_dict)
losses = sum(loss_dict.values())
return losses, loss_dict, record_dict
def teacher_student_learning(
self, label_data_q, label_data_k, unlabel_data_q, unlabel_data_k
):
"""
Perform Teacher-Student Mutual Learning with labeled and unlabeled data
"""
# q (queue): strongly augmented, k (key): weakly augmented
record_dict = {}
######################## For probe #################################
gt_unlabel_k = self.get_label(unlabel_data_k)
# 0. remove potential ground-truth labels in the unlabeled data
unlabel_data_q = self.remove_label(unlabel_data_q)
unlabel_data_k = self.remove_label(unlabel_data_k)
# 1. generate the pseudo-label using teacher model
# TODO: why is the Teacher not in .eval() mode?
with torch.no_grad():
(
_,
proposals_rpn_unsup_k,
proposals_roih_unsup_k,
_,
) = self.model_teacher(unlabel_data_k, branch="unsup_data_weak")
######################## For probe #################################
# analysis_pred, _ = self.probe.compute_num_box(gt_unlabel_k,proposals_roih_unsup_k,'pred')
# record_dict.update(analysis_pred)
# 2. Pseudo-labeling
# Pseudo-labeling for RPN head (bbox location/objectness)
joint_proposal_dict = {}
        ## The RPN pseudo-labels below are not used for training; they are kept for probing/analysis.
joint_proposal_dict["proposals_rpn"] = proposals_rpn_unsup_k
(
pesudo_proposals_rpn_unsup_k,
nun_pseudo_bbox_rpn,
) = self.process_pseudo_label(
proposals_rpn_unsup_k,
self.cfg.UNBIASEDTEACHER.BBOX_THRESHOLD,
self.cfg.UNBIASEDTEACHER.MASK_THRESHOLD,
self.cfg.UNBIASEDTEACHER.KEYPOINT_THRESHOLD,
"rpn",
"thresholding",
)
joint_proposal_dict["proposals_pseudo_rpn"] = pesudo_proposals_rpn_unsup_k
        ## End of the (unused) RPN pseudo-label block.
# Pseudo-labeling for ROI head (bbox location/objectness)
pesudo_proposals_roih_unsup_k, _ = self.process_pseudo_label(
proposals_roih_unsup_k,
self.cfg.UNBIASEDTEACHER.BBOX_THRESHOLD,
self.cfg.UNBIASEDTEACHER.MASK_THRESHOLD,
self.cfg.UNBIASEDTEACHER.KEYPOINT_THRESHOLD,
"roih",
"thresholding",
)
joint_proposal_dict["proposals_pseudo_roih"] = pesudo_proposals_roih_unsup_k
######################## For probe #################################
analysis_pred, _ = self.probe.compute_num_box(gt_unlabel_k,pesudo_proposals_roih_unsup_k,'pred')
record_dict.update(analysis_pred)
# Probe for analysis (usually for research development)
if self.cfg.UNBIASEDTEACHER.PROBE:
record_dict = probe(
self.cfg,
proposals_roih_unsup_k,
unlabel_data_k,
pesudo_proposals_roih_unsup_k,
record_dict,
)
# 3. add pseudo-label to unlabeled data
unlabel_data_q = self.add_label(
unlabel_data_q, joint_proposal_dict["proposals_pseudo_roih"]
)
unlabel_data_k = self.add_label(
unlabel_data_k, joint_proposal_dict["proposals_pseudo_roih"]
)
# all_label_data = label_data_q + label_data_k
if self.cfg.UNBIASEDTEACHER.ISAUG == "No":
all_label_data = label_data_k
all_unlabel_data = unlabel_data_k
else:
all_label_data = label_data_q + label_data_k
all_unlabel_data = unlabel_data_q
# 4. input both strongly and weakly augmented labeled data into student model
# all_unlabel_data = unlabel_data_q
record_all_label_data, _, _, _ = self.model(all_label_data, branch="supervised")
record_dict.update(record_all_label_data)
# 5. input strongly augmented unlabeled data into model
record_all_unlabel_data, _, _, _ = self.model(
all_unlabel_data, branch="supervised-pseudo"
)
# rename unsupervised loss
# NOTE: names of the recorded output from model are hard-coded
# we rename them accordingly for unlabeled data
new_record_all_unlabel_data = {}
for key in record_all_unlabel_data.keys():
new_record_all_unlabel_data[key + "_pseudo"] = record_all_unlabel_data[key]
record_dict.update(new_record_all_unlabel_data)
# 6. input weakly labeled data (source) and weakly unlabeled data (target) to student model
        # mark the target-domain data by appending "_unlabeled" to its keys
for i_index in range(len(unlabel_data_k)):
# unlabel_data_item = {}
for k, v in unlabel_data_k[i_index].items():
# label_data_k[i_index][k + "_unlabeled"] = v
label_data_k[i_index][k + "_unlabeled"] = v
# unlabel_data_k[i_index] = unlabel_data_item
all_domain_data = label_data_k
# all_domain_data = label_data_k + unlabel_data_k
record_all_domain_data, _, _, _ = self.model(all_domain_data, branch="domain")
record_dict.update(record_all_domain_data)
# 7. distill teacher
# for distill back to teacher
with torch.no_grad():
(
_,
proposals_rpn_unsup_dis,
proposals_roih_unsup_dis,
_,
) = self.model(unlabel_data_k, branch="unsup_data_weak")
pesudo_proposals_roih_unsup_k, _ = self.process_pseudo_label(
proposals_roih_unsup_dis,
self.cfg.UNBIASEDTEACHER.BBOX_THRESHOLD,
self.cfg.UNBIASEDTEACHER.MASK_THRESHOLD,
self.cfg.UNBIASEDTEACHER.KEYPOINT_THRESHOLD,
"roih",
"thresholding",
)
unlabel_data_k = self.remove_label(unlabel_data_k)
unlabel_data_k = self.add_label(
unlabel_data_k, pesudo_proposals_roih_unsup_k
)
record_distill_data, _, _, _ = self.model_teacher(
unlabel_data_k, branch="supervised-pseudo"
)
new_record_all_distill_data = {}
for key in record_distill_data.keys():
new_record_all_distill_data[key + "_distill"] = record_distill_data[key]
record_dict.update(new_record_all_distill_data)
# weighting losses
loss_dict = self.weight_losses(record_dict)
#Add discriminator loss here
#loss_dict.update(...)
losses = sum(loss_dict.values())
return losses, loss_dict, record_dict
def weight_losses(self, record_dict):
loss_dict = {}
REGRESSION_LOSS_WEIGHT = 0
for key in record_dict.keys():
if key.startswith("loss"):
if key == "loss_rpn_cls_pseudo":
loss_dict[key] = (
record_dict[key]
* self.cfg.UNBIASEDTEACHER.UNSUP_LOSS_WEIGHT_RPN_CLS
)
elif (
key == "loss_rpn_loc_pseudo" or key == "loss_box_reg_pseudo"
): # set pseudo bbox regression to 0
loss_dict[key] = record_dict[key] * REGRESSION_LOSS_WEIGHT
elif (
key == "loss_rpn_loc_distill" or key == "loss_box_reg_distill"
): # set pseudo bbox regression to 0
loss_dict[key] = record_dict[key] * REGRESSION_LOSS_WEIGHT
elif key.endswith("mask_pseudo"): # unsupervised loss for segmentation
loss_dict[key] = (
record_dict[key]
* self.cfg.UNBIASEDTEACHER.UNSUP_LOSS_WEIGHT_MASK
)
elif key.endswith("keypoint_pseudo"): # unsupervised loss for keypoint
loss_dict[key] = (
record_dict[key]
* self.cfg.UNBIASEDTEACHER.UNSUP_LOSS_WEIGHT_KEYPOINT
)
elif key.endswith("pseudo"): # unsupervised loss
loss_dict[key] = (
record_dict[key] * self.cfg.UNBIASEDTEACHER.UNSUP_LOSS_WEIGHT
)
elif (
key == "loss_D_img_s" or key == "loss_D_img_t"
): # set weight for discriminator
loss_dict[key] = record_dict[key] * self.cfg.UNBIASEDTEACHER.DIS_LOSS_WEIGHT #Need to modify defaults and yaml
else: # supervised loss
loss_dict[key] = record_dict[key] * 1
return loss_dict
def threshold_bbox(
self,
proposal_bbox_inst,
thres=0.7,
mask_thres=0.5,
keypoint_thres=0.5,
proposal_type="roih",
):
if proposal_type == "rpn":
valid_map = proposal_bbox_inst.objectness_logits > thres
# create instances containing boxes and gt_classes
image_shape = proposal_bbox_inst.image_size
new_proposal_inst = Instances(image_shape)
# create box
new_bbox_loc = proposal_bbox_inst.proposal_boxes.tensor[valid_map, :]
new_boxes = Boxes(new_bbox_loc)
# add boxes to instances
new_proposal_inst.gt_boxes = new_boxes
new_proposal_inst.pred_boxes = new_boxes
new_proposal_inst.objectness_logits = proposal_bbox_inst.objectness_logits[
valid_map
]
elif proposal_type == "roih":
valid_map = proposal_bbox_inst.scores > thres
# create instances containing boxes and gt_classes
image_shape = proposal_bbox_inst.image_size
new_proposal_inst = Instances(image_shape)
# create box
new_bbox_loc = proposal_bbox_inst.pred_boxes.tensor[valid_map, :]
new_boxes = Boxes(new_bbox_loc)
# add boxes to instances
new_proposal_inst.gt_boxes = new_boxes
new_proposal_inst.pred_boxes = new_boxes
new_proposal_inst.gt_classes = proposal_bbox_inst.pred_classes[valid_map]
new_proposal_inst.pred_classes = proposal_bbox_inst.pred_classes[valid_map]
new_proposal_inst.scores = proposal_bbox_inst.scores[valid_map]
if self.cfg.MODEL.MASK_ON and new_boxes:
# put predicted output into gt_masks with thresholding
new_masks = proposal_bbox_inst.pred_masks[valid_map].squeeze(1)
new_masks = new_masks >= mask_thres
new_proposal_inst.gt_masks = BitMasks(new_masks)
if self.cfg.MODEL.KEYPOINT_ON and new_boxes:
# we use the keypoint score as the basis for thresholding
new_keypoints = proposal_bbox_inst.pred_keypoints[valid_map, :]
invalid_keypoints = new_keypoints[:, :, 2] < keypoint_thres
# (x, y, visibility): visibility flag = 0 -> not labeled (in which case x=y=0)
new_keypoints[invalid_keypoints] = torch.FloatTensor([0, 0, 0]).to(
new_keypoints.device
)
new_proposal_inst.gt_keypoints = Keypoints(new_keypoints)
return new_proposal_inst
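# Minimal sketch (for reference only; the real update lives in
# UnbiasedTeacherTrainer._update_teacher_model): the EMA step referenced in run_step()
# blends teacher and student weights as
#   teacher = keep_rate * teacher + (1 - keep_rate) * student,
# so keep_rate = 0.0 copies the student into the teacher. Parameter names are assumed
# to match between the two models.
def _ema_update_sketch(teacher_model, student_model, keep_rate=0.9996):
    student_params = dict(student_model.named_parameters())
    for name, param in teacher_model.named_parameters():
        param.data.mul_(keep_rate).add_(student_params[name].data, alpha=1.0 - keep_rate)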
|
# Copyright (c) Facebook, Inc. and its affiliates.
import torch
import datetime
import logging
import math
import time
import sys
from torch.distributed.distributed_c10d import reduce
from utils.ap_calculator import APCalculator
from utils.misc import SmoothedValue
from utils.dist import (
all_gather_dict,
all_reduce_average,
is_primary,
reduce_dict,
barrier,
)
def compute_learning_rate(args, curr_epoch_normalized):
assert curr_epoch_normalized <= 1.0 and curr_epoch_normalized >= 0.0
if (
curr_epoch_normalized <= (args.warm_lr_epochs / args.max_epoch)
and args.warm_lr_epochs > 0
):
# Linear Warmup
curr_lr = args.warm_lr + curr_epoch_normalized * args.max_epoch * (
(args.base_lr - args.warm_lr) / args.warm_lr_epochs
)
else:
# Cosine Learning Rate Schedule
curr_lr = args.final_lr + 0.5 * (args.base_lr - args.final_lr) * (
1 + math.cos(math.pi * curr_epoch_normalized)
)
return curr_lr
def adjust_learning_rate(args, optimizer, curr_epoch):
curr_lr = compute_learning_rate(args, curr_epoch)
for param_group in optimizer.param_groups:
param_group["lr"] = curr_lr
return curr_lr
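# Illustrative sketch (hyper-parameters are hypothetical): with warm_lr_epochs of
# linear warmup followed by a cosine decay from base_lr to final_lr, the schedule
# above can be sampled like this.
def _lr_schedule_demo():
    from argparse import Namespace
    args = Namespace(
        warm_lr=1e-6, base_lr=5e-4, final_lr=1e-6, warm_lr_epochs=9, max_epoch=90
    )
    return [
        compute_learning_rate(args, epoch / args.max_epoch)
        for epoch in range(0, args.max_epoch, 10)
    ]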
def train_one_epoch(
args,
curr_epoch,
model,
optimizer,
criterion,
dataset_config,
dataset_loader,
logger,
):
ap_calculator = APCalculator(
dataset_config=dataset_config,
ap_iou_thresh=[0.25, 0.5],
class2type_map=dataset_config.class2type,
exact_eval=False,
)
curr_iter = curr_epoch * len(dataset_loader)
max_iters = args.max_epoch * len(dataset_loader)
net_device = next(model.parameters()).device
time_delta = SmoothedValue(window_size=10)
loss_avg = SmoothedValue(window_size=10)
model.train()
barrier()
for batch_idx, batch_data_label in enumerate(dataset_loader):
curr_time = time.time()
curr_lr = adjust_learning_rate(args, optimizer, curr_iter / max_iters)
for key in batch_data_label:
batch_data_label[key] = batch_data_label[key].to(net_device)
# Forward pass
optimizer.zero_grad()
inputs = {
"point_clouds": batch_data_label["point_clouds"],
"point_cloud_dims_min": batch_data_label["point_cloud_dims_min"],
"point_cloud_dims_max": batch_data_label["point_cloud_dims_max"],
}
outputs = model(inputs)
# Compute loss
loss, loss_dict = criterion(outputs, batch_data_label)
loss_reduced = all_reduce_average(loss)
loss_dict_reduced = reduce_dict(loss_dict)
if not math.isfinite(loss_reduced.item()):
logging.info(f"Loss in not finite. Training will be stopped.")
sys.exit(1)
loss.backward()
if args.clip_gradient > 0:
torch.nn.utils.clip_grad_norm_(model.parameters(), args.clip_gradient)
optimizer.step()
if curr_iter % args.log_metrics_every == 0:
# This step is slow. AP is computed approximately and locally during training.
# It will gather outputs and ground truth across all ranks.
# It is memory intensive as point_cloud ground truth is a large tensor.
# If GPU memory is not an issue, uncomment the following lines.
# outputs["outputs"] = all_gather_dict(outputs["outputs"])
# batch_data_label = all_gather_dict(batch_data_label)
ap_calculator.step_meter(outputs, batch_data_label)
time_delta.update(time.time() - curr_time)
loss_avg.update(loss_reduced.item())
# logging
if is_primary() and curr_iter % args.log_every == 0:
mem_mb = torch.cuda.max_memory_allocated() / (1024 ** 2)
eta_seconds = (max_iters - curr_iter) * time_delta.avg
eta_str = str(datetime.timedelta(seconds=int(eta_seconds)))
print(
f"Epoch [{curr_epoch}/{args.max_epoch}]; Iter [{curr_iter}/{max_iters}]; Loss {loss_avg.avg:0.2f}; LR {curr_lr:0.2e}; Iter time {time_delta.avg:0.2f}; ETA {eta_str}; Mem {mem_mb:0.2f}MB"
)
logger.log_scalars(loss_dict_reduced, curr_iter, prefix="Train_details/")
train_dict = {}
train_dict["lr"] = curr_lr
train_dict["memory"] = mem_mb
train_dict["loss"] = loss_avg.avg
train_dict["batch_time"] = time_delta.avg
logger.log_scalars(train_dict, curr_iter, prefix="Train/")
curr_iter += 1
barrier()
return ap_calculator
@torch.no_grad()
def evaluate(
args,
curr_epoch,
model,
criterion,
dataset_config,
dataset_loader,
logger,
curr_train_iter,
):
# ap calculator is exact for evaluation. This is slower than the ap calculator used during training.
ap_calculator = APCalculator(
dataset_config=dataset_config,
ap_iou_thresh=[0.25, 0.5],
class2type_map=dataset_config.class2type,
exact_eval=True,
)
curr_iter = 0
net_device = next(model.parameters()).device
num_batches = len(dataset_loader)
time_delta = SmoothedValue(window_size=10)
loss_avg = SmoothedValue(window_size=10)
model.eval()
barrier()
epoch_str = f"[{curr_epoch}/{args.max_epoch}]" if curr_epoch > 0 else ""
for batch_idx, batch_data_label in enumerate(dataset_loader):
curr_time = time.time()
for key in batch_data_label:
batch_data_label[key] = batch_data_label[key].to(net_device)
inputs = {
"point_clouds": batch_data_label["point_clouds"],
"point_cloud_dims_min": batch_data_label["point_cloud_dims_min"],
"point_cloud_dims_max": batch_data_label["point_cloud_dims_max"],
}
outputs = model(inputs)
# Compute loss
loss_str = ""
if criterion is not None:
loss, loss_dict = criterion(outputs, batch_data_label)
loss_reduced = all_reduce_average(loss)
loss_dict_reduced = reduce_dict(loss_dict)
loss_avg.update(loss_reduced.item())
loss_str = f"Loss {loss_avg.avg:0.2f};"
# Memory intensive as it gathers point cloud GT tensor across all ranks
outputs["outputs"] = all_gather_dict(outputs["outputs"])
batch_data_label = all_gather_dict(batch_data_label)
ap_calculator.step_meter(outputs, batch_data_label)
time_delta.update(time.time() - curr_time)
if is_primary() and curr_iter % args.log_every == 0:
mem_mb = torch.cuda.max_memory_allocated() / (1024 ** 2)
print(
f"Evaluate {epoch_str}; Batch [{curr_iter}/{num_batches}]; {loss_str} Iter time {time_delta.avg:0.2f}; Mem {mem_mb:0.2f}MB"
)
test_dict = {}
test_dict["memory"] = mem_mb
test_dict["batch_time"] = time_delta.avg
if criterion is not None:
test_dict["loss"] = loss_avg.avg
curr_iter += 1
barrier()
if is_primary():
if criterion is not None:
logger.log_scalars(
loss_dict_reduced, curr_train_iter, prefix="Test_details/"
)
logger.log_scalars(test_dict, curr_train_iter, prefix="Test/")
return ap_calculator
|
# Copyright (c) Facebook, Inc. and its affiliates.
import torch
def build_optimizer(args, model):
params_with_decay = []
params_without_decay = []
for name, param in model.named_parameters():
if param.requires_grad is False:
continue
if args.filter_biases_wd and (len(param.shape) == 1 or name.endswith("bias")):
params_without_decay.append(param)
else:
params_with_decay.append(param)
if args.filter_biases_wd:
param_groups = [
{"params": params_without_decay, "weight_decay": 0.0},
{"params": params_with_decay, "weight_decay": args.weight_decay},
]
else:
param_groups = [
{"params": params_with_decay, "weight_decay": args.weight_decay},
]
optimizer = torch.optim.AdamW(param_groups, lr=args.base_lr)
return optimizer
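# Illustrative usage sketch (hyper-parameters are hypothetical): with filter_biases_wd
# set, biases and 1-D parameters (e.g. norm weights) are placed in a group with zero
# weight decay.
def _build_optimizer_demo():
    from argparse import Namespace
    model = torch.nn.Sequential(torch.nn.Linear(8, 8), torch.nn.LayerNorm(8))
    args = Namespace(filter_biases_wd=True, weight_decay=0.1, base_lr=5e-4)
    return build_optimizer(args, model)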
|
# Copyright (c) Facebook, Inc. and its affiliates.
import torch
import torch.nn as nn
import numpy as np
import torch.nn.functional as F
from utils.box_util import generalized_box3d_iou
from utils.dist import all_reduce_average
from utils.misc import huber_loss
from scipy.optimize import linear_sum_assignment
class Matcher(nn.Module):
def __init__(self, cost_class, cost_objectness, cost_giou, cost_center):
"""
Parameters:
cost_class:
Returns:
"""
super().__init__()
self.cost_class = cost_class
self.cost_objectness = cost_objectness
self.cost_giou = cost_giou
self.cost_center = cost_center
@torch.no_grad()
def forward(self, outputs, targets):
batchsize = outputs["sem_cls_prob"].shape[0]
nqueries = outputs["sem_cls_prob"].shape[1]
ngt = targets["gt_box_sem_cls_label"].shape[1]
nactual_gt = targets["nactual_gt"]
# classification cost: batch x nqueries x ngt matrix
pred_cls_prob = outputs["sem_cls_prob"]
gt_box_sem_cls_labels = (
targets["gt_box_sem_cls_label"]
.unsqueeze(1)
.expand(batchsize, nqueries, ngt)
)
class_mat = -torch.gather(pred_cls_prob, 2, gt_box_sem_cls_labels)
# objectness cost: batch x nqueries x 1
objectness_mat = -outputs["objectness_prob"].unsqueeze(-1)
# center cost: batch x nqueries x ngt
center_mat = outputs["center_dist"].detach()
# giou cost: batch x nqueries x ngt
giou_mat = -outputs["gious"].detach()
final_cost = (
self.cost_class * class_mat
+ self.cost_objectness * objectness_mat
+ self.cost_center * center_mat
+ self.cost_giou * giou_mat
)
final_cost = final_cost.detach().cpu().numpy()
assignments = []
# auxiliary variables useful for batched loss computation
batch_size, nprop = final_cost.shape[0], final_cost.shape[1]
per_prop_gt_inds = torch.zeros(
[batch_size, nprop], dtype=torch.int64, device=pred_cls_prob.device
)
proposal_matched_mask = torch.zeros(
[batch_size, nprop], dtype=torch.float32, device=pred_cls_prob.device
)
for b in range(batchsize):
assign = []
if nactual_gt[b] > 0:
assign = linear_sum_assignment(final_cost[b, :, : nactual_gt[b]])
assign = [
torch.from_numpy(x).long().to(device=pred_cls_prob.device)
for x in assign
]
per_prop_gt_inds[b, assign[0]] = assign[1]
proposal_matched_mask[b, assign[0]] = 1
assignments.append(assign)
return {
"assignments": assignments,
"per_prop_gt_inds": per_prop_gt_inds,
"proposal_matched_mask": proposal_matched_mask,
}
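# Minimal sketch (toy numbers): forward() above solves one Hungarian assignment per
# sample over the first nactual_gt columns of the cost matrix; for a single example
# with 3 proposals and 2 ground-truth boxes it reduces to the call below.
def _hungarian_matching_demo():
    cost = np.array([[0.9, 0.1],
                     [0.2, 0.8],
                     [0.5, 0.5]])  # rows: proposals, columns: ground-truth boxes
    prop_inds, gt_inds = linear_sum_assignment(cost)
    return prop_inds, gt_inds  # proposals 0 and 1 are matched to GTs 1 and 0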
class SetCriterion(nn.Module):
def __init__(self, matcher, dataset_config, loss_weight_dict):
super().__init__()
self.dataset_config = dataset_config
self.matcher = matcher
self.loss_weight_dict = loss_weight_dict
semcls_percls_weights = torch.ones(dataset_config.num_semcls + 1)
semcls_percls_weights[-1] = loss_weight_dict["loss_no_object_weight"]
del loss_weight_dict["loss_no_object_weight"]
self.register_buffer("semcls_percls_weights", semcls_percls_weights)
self.loss_functions = {
"loss_sem_cls": self.loss_sem_cls,
"loss_angle": self.loss_angle,
"loss_center": self.loss_center,
"loss_size": self.loss_size,
"loss_giou": self.loss_giou,
# this isn't used during training and is logged for debugging.
# thus, this loss does not have a loss_weight associated with it.
"loss_cardinality": self.loss_cardinality,
}
@torch.no_grad()
def loss_cardinality(self, outputs, targets, assignments):
# Count the number of predictions that are objects
# Cardinality is the error between predicted #objects and ground truth objects
pred_logits = outputs["sem_cls_logits"]
# Count the number of predictions that are NOT "no-object" (which is the last class)
pred_objects = (pred_logits.argmax(-1) != pred_logits.shape[-1] - 1).sum(1)
card_err = F.l1_loss(pred_objects.float(), targets["nactual_gt"])
return {"loss_cardinality": card_err}
def loss_sem_cls(self, outputs, targets, assignments):
# # Not vectorized version
# pred_logits = outputs["sem_cls_logits"]
# assign = assignments["assignments"]
# sem_cls_targets = torch.ones((pred_logits.shape[0], pred_logits.shape[1]),
# dtype=torch.int64, device=pred_logits.device)
# # initialize to background/no-object class
# sem_cls_targets *= (pred_logits.shape[-1] - 1)
# # use assignments to compute labels for matched boxes
# for b in range(pred_logits.shape[0]):
# if len(assign[b]) > 0:
# sem_cls_targets[b, assign[b][0]] = targets["gt_box_sem_cls_label"][b, assign[b][1]]
# sem_cls_targets = sem_cls_targets.view(-1)
# pred_logits = pred_logits.reshape(sem_cls_targets.shape[0], -1)
# loss = F.cross_entropy(pred_logits, sem_cls_targets, self.semcls_percls_weights, reduction="mean")
pred_logits = outputs["sem_cls_logits"]
gt_box_label = torch.gather(
targets["gt_box_sem_cls_label"], 1, assignments["per_prop_gt_inds"]
)
gt_box_label[assignments["proposal_matched_mask"].int() == 0] = (
pred_logits.shape[-1] - 1
)
loss = F.cross_entropy(
pred_logits.transpose(2, 1),
gt_box_label,
self.semcls_percls_weights,
reduction="mean",
)
return {"loss_sem_cls": loss}
def loss_angle(self, outputs, targets, assignments):
angle_logits = outputs["angle_logits"]
angle_residual = outputs["angle_residual_normalized"]
if targets["num_boxes_replica"] > 0:
gt_angle_label = targets["gt_angle_class_label"]
gt_angle_residual = targets["gt_angle_residual_label"]
gt_angle_residual_normalized = gt_angle_residual / (
np.pi / self.dataset_config.num_angle_bin
)
# # Non vectorized version
# assignments = assignments["assignments"]
# p_angle_logits = []
# p_angle_resid = []
# t_angle_labels = []
# t_angle_resid = []
# for b in range(angle_logits.shape[0]):
# if len(assignments[b]) > 0:
# p_angle_logits.append(angle_logits[b, assignments[b][0]])
# p_angle_resid.append(angle_residual[b, assignments[b][0], gt_angle_label[b][assignments[b][1]]])
# t_angle_labels.append(gt_angle_label[b, assignments[b][1]])
# t_angle_resid.append(gt_angle_residual_normalized[b, assignments[b][1]])
# p_angle_logits = torch.cat(p_angle_logits)
# p_angle_resid = torch.cat(p_angle_resid)
# t_angle_labels = torch.cat(t_angle_labels)
# t_angle_resid = torch.cat(t_angle_resid)
# angle_cls_loss = F.cross_entropy(p_angle_logits, t_angle_labels, reduction="sum")
# angle_reg_loss = huber_loss(p_angle_resid.flatten() - t_angle_resid.flatten()).sum()
gt_angle_label = torch.gather(
gt_angle_label, 1, assignments["per_prop_gt_inds"]
)
angle_cls_loss = F.cross_entropy(
angle_logits.transpose(2, 1), gt_angle_label, reduction="none"
)
angle_cls_loss = (
angle_cls_loss * assignments["proposal_matched_mask"]
).sum()
gt_angle_residual_normalized = torch.gather(
gt_angle_residual_normalized, 1, assignments["per_prop_gt_inds"]
)
gt_angle_label_one_hot = torch.zeros_like(
angle_residual, dtype=torch.float32
)
gt_angle_label_one_hot.scatter_(2, gt_angle_label.unsqueeze(-1), 1)
angle_residual_for_gt_class = torch.sum(
angle_residual * gt_angle_label_one_hot, -1
)
angle_reg_loss = huber_loss(
angle_residual_for_gt_class - gt_angle_residual_normalized, delta=1.0
)
angle_reg_loss = (
angle_reg_loss * assignments["proposal_matched_mask"]
).sum()
angle_cls_loss /= targets["num_boxes"]
angle_reg_loss /= targets["num_boxes"]
else:
angle_cls_loss = torch.zeros(1, device=angle_logits.device).squeeze()
angle_reg_loss = torch.zeros(1, device=angle_logits.device).squeeze()
return {"loss_angle_cls": angle_cls_loss, "loss_angle_reg": angle_reg_loss}
def loss_center(self, outputs, targets, assignments):
center_dist = outputs["center_dist"]
if targets["num_boxes_replica"] > 0:
# # Non vectorized version
# assign = assignments["assignments"]
# center_loss = torch.zeros(1, device=center_dist.device).squeeze()
# for b in range(center_dist.shape[0]):
# if len(assign[b]) > 0:
# center_loss += center_dist[b, assign[b][0], assign[b][1]].sum()
# select appropriate distances by using proposal to gt matching
center_loss = torch.gather(
center_dist, 2, assignments["per_prop_gt_inds"].unsqueeze(-1)
).squeeze(-1)
# zero-out non-matched proposals
center_loss = center_loss * assignments["proposal_matched_mask"]
center_loss = center_loss.sum()
if targets["num_boxes"] > 0:
center_loss /= targets["num_boxes"]
else:
center_loss = torch.zeros(1, device=center_dist.device).squeeze()
return {"loss_center": center_loss}
def loss_giou(self, outputs, targets, assignments):
gious_dist = 1 - outputs["gious"]
# # Non vectorized version
# giou_loss = torch.zeros(1, device=gious_dist.device).squeeze()
# assign = assignments["assignments"]
# for b in range(gious_dist.shape[0]):
# if len(assign[b]) > 0:
# giou_loss += gious_dist[b, assign[b][0], assign[b][1]].sum()
# select appropriate gious by using proposal to gt matching
giou_loss = torch.gather(
gious_dist, 2, assignments["per_prop_gt_inds"].unsqueeze(-1)
).squeeze(-1)
# zero-out non-matched proposals
giou_loss = giou_loss * assignments["proposal_matched_mask"]
giou_loss = giou_loss.sum()
if targets["num_boxes"] > 0:
giou_loss /= targets["num_boxes"]
return {"loss_giou": giou_loss}
def loss_size(self, outputs, targets, assignments):
gt_box_sizes = targets["gt_box_sizes_normalized"]
pred_box_sizes = outputs["size_normalized"]
if targets["num_boxes_replica"] > 0:
# # Non vectorized version
# p_sizes = []
# t_sizes = []
# assign = assignments["assignments"]
# for b in range(pred_box_sizes.shape[0]):
# if len(assign[b]) > 0:
# p_sizes.append(pred_box_sizes[b, assign[b][0]])
# t_sizes.append(gt_box_sizes[b, assign[b][1]])
# p_sizes = torch.cat(p_sizes)
# t_sizes = torch.cat(t_sizes)
# size_loss = F.l1_loss(p_sizes, t_sizes, reduction="sum")
# construct gt_box_sizes as [batch x nprop x 3] matrix by using proposal to gt matching
gt_box_sizes = torch.stack(
[
torch.gather(
gt_box_sizes[:, :, x], 1, assignments["per_prop_gt_inds"]
)
for x in range(gt_box_sizes.shape[-1])
],
dim=-1,
)
size_loss = F.l1_loss(pred_box_sizes, gt_box_sizes, reduction="none").sum(
dim=-1
)
# zero-out non-matched proposals
size_loss *= assignments["proposal_matched_mask"]
size_loss = size_loss.sum()
size_loss /= targets["num_boxes"]
else:
size_loss = torch.zeros(1, device=pred_box_sizes.device).squeeze()
return {"loss_size": size_loss}
def single_output_forward(self, outputs, targets):
gious = generalized_box3d_iou(
outputs["box_corners"],
targets["gt_box_corners"],
targets["nactual_gt"],
rotated_boxes=torch.any(targets["gt_box_angles"] > 0).item(),
needs_grad=(self.loss_weight_dict["loss_giou_weight"] > 0),
)
outputs["gious"] = gious
center_dist = torch.cdist(
outputs["center_normalized"], targets["gt_box_centers_normalized"], p=1
)
outputs["center_dist"] = center_dist
assignments = self.matcher(outputs, targets)
losses = {}
for k in self.loss_functions:
loss_wt_key = k + "_weight"
if (
loss_wt_key in self.loss_weight_dict
and self.loss_weight_dict[loss_wt_key] > 0
) or loss_wt_key not in self.loss_weight_dict:
# only compute losses with loss_wt > 0
# certain losses like cardinality are only logged and have no loss weight
curr_loss = self.loss_functions[k](outputs, targets, assignments)
losses.update(curr_loss)
final_loss = 0
for k in self.loss_weight_dict:
if self.loss_weight_dict[k] > 0:
losses[k.replace("_weight", "")] *= self.loss_weight_dict[k]
final_loss += losses[k.replace("_weight", "")]
return final_loss, losses
def forward(self, outputs, targets):
nactual_gt = targets["gt_box_present"].sum(axis=1).long()
num_boxes = torch.clamp(all_reduce_average(nactual_gt.sum()), min=1).item()
targets["nactual_gt"] = nactual_gt
targets["num_boxes"] = num_boxes
targets[
"num_boxes_replica"
] = nactual_gt.sum().item() # number of boxes on this worker for dist training
loss, loss_dict = self.single_output_forward(outputs["outputs"], targets)
if "aux_outputs" in outputs:
for k in range(len(outputs["aux_outputs"])):
interm_loss, interm_loss_dict = self.single_output_forward(
outputs["aux_outputs"][k], targets
)
loss += interm_loss
for interm_key in interm_loss_dict:
loss_dict[f"{interm_key}_{k}"] = interm_loss_dict[interm_key]
return loss, loss_dict
def build_criterion(args, dataset_config):
matcher = Matcher(
cost_class=args.matcher_cls_cost,
cost_giou=args.matcher_giou_cost,
cost_center=args.matcher_center_cost,
cost_objectness=args.matcher_objectness_cost,
)
loss_weight_dict = {
"loss_giou_weight": args.loss_giou_weight,
"loss_sem_cls_weight": args.loss_sem_cls_weight,
"loss_no_object_weight": args.loss_no_object_weight,
"loss_angle_cls_weight": args.loss_angle_cls_weight,
"loss_angle_reg_weight": args.loss_angle_reg_weight,
"loss_center_weight": args.loss_center_weight,
"loss_size_weight": args.loss_size_weight,
}
criterion = SetCriterion(matcher, dataset_config, loss_weight_dict)
return criterion
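# Usage sketch: with an `args` namespace exposing the matcher costs and loss
# weights defined in the training script's argument parser,
#
#   criterion = build_criterion(args, dataset_config)
#   loss, loss_dict = criterion(outputs, targets)
#
# where `outputs` is the detector's dict (an "outputs" entry plus optional
# "aux_outputs") and `targets` comes from the dataloader batch.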
|
# Copyright (c) Facebook, Inc. and its affiliates.
import argparse
import os
import sys
import pickle
import numpy as np
import torch
from torch.multiprocessing import set_start_method
from torch.utils.data import DataLoader, DistributedSampler
# 3DETR codebase specific imports
from datasets import build_dataset
from engine import evaluate, train_one_epoch
from models import build_model
from optimizer import build_optimizer
from criterion import build_criterion
from utils.dist import init_distributed, is_distributed, is_primary, get_rank, barrier
from utils.misc import my_worker_init_fn
from utils.io import save_checkpoint, resume_if_possible
from utils.logger import Logger
def make_args_parser():
parser = argparse.ArgumentParser("3D Detection Using Transformers", add_help=False)
##### Optimizer #####
parser.add_argument("--base_lr", default=5e-4, type=float)
parser.add_argument("--warm_lr", default=1e-6, type=float)
parser.add_argument("--warm_lr_epochs", default=9, type=int)
parser.add_argument("--final_lr", default=1e-6, type=float)
parser.add_argument("--lr_scheduler", default="cosine", type=str)
parser.add_argument("--weight_decay", default=0.1, type=float)
parser.add_argument("--filter_biases_wd", default=False, action="store_true")
parser.add_argument(
"--clip_gradient", default=0.1, type=float, help="Max L2 norm of the gradient"
)
##### Model #####
parser.add_argument(
"--model_name",
default="3detr",
type=str,
help="Name of the model",
choices=["3detr"],
)
### Encoder
parser.add_argument(
"--enc_type", default="vanilla", choices=["masked", "maskedv2", "vanilla"]
)
# Below options are only valid for vanilla encoder
parser.add_argument("--enc_nlayers", default=3, type=int)
parser.add_argument("--enc_dim", default=256, type=int)
parser.add_argument("--enc_ffn_dim", default=128, type=int)
parser.add_argument("--enc_dropout", default=0.1, type=float)
parser.add_argument("--enc_nhead", default=4, type=int)
parser.add_argument("--enc_pos_embed", default=None, type=str)
parser.add_argument("--enc_activation", default="relu", type=str)
### Decoder
parser.add_argument("--dec_nlayers", default=8, type=int)
parser.add_argument("--dec_dim", default=256, type=int)
parser.add_argument("--dec_ffn_dim", default=256, type=int)
parser.add_argument("--dec_dropout", default=0.1, type=float)
parser.add_argument("--dec_nhead", default=4, type=int)
### MLP heads for predicting bounding boxes
parser.add_argument("--mlp_dropout", default=0.3, type=float)
parser.add_argument(
"--nsemcls",
default=-1,
type=int,
help="Number of semantic object classes. Can be inferred from dataset",
)
### Other model params
parser.add_argument("--preenc_npoints", default=2048, type=int)
parser.add_argument(
"--pos_embed", default="fourier", type=str, choices=["fourier", "sine"]
)
parser.add_argument("--nqueries", default=256, type=int)
parser.add_argument("--use_color", default=False, action="store_true")
##### Set Loss #####
### Matcher
parser.add_argument("--matcher_giou_cost", default=2, type=float)
parser.add_argument("--matcher_cls_cost", default=1, type=float)
parser.add_argument("--matcher_center_cost", default=0, type=float)
parser.add_argument("--matcher_objectness_cost", default=0, type=float)
### Loss Weights
parser.add_argument("--loss_giou_weight", default=0, type=float)
parser.add_argument("--loss_sem_cls_weight", default=1, type=float)
parser.add_argument(
"--loss_no_object_weight", default=0.2, type=float
) # "no object" or "background" class for detection
parser.add_argument("--loss_angle_cls_weight", default=0.1, type=float)
parser.add_argument("--loss_angle_reg_weight", default=0.5, type=float)
parser.add_argument("--loss_center_weight", default=5.0, type=float)
parser.add_argument("--loss_size_weight", default=1.0, type=float)
##### Dataset #####
parser.add_argument(
"--dataset_name", required=True, type=str, choices=["scannet", "sunrgbd"]
)
parser.add_argument(
"--dataset_root_dir",
type=str,
default=None,
help="Root directory containing the dataset files. \
If None, default values from scannet.py/sunrgbd.py are used",
)
parser.add_argument(
"--meta_data_dir",
type=str,
default=None,
help="Root directory containing the metadata files. \
If None, default values from scannet.py/sunrgbd.py are used",
)
parser.add_argument("--dataset_num_workers", default=4, type=int)
parser.add_argument("--batchsize_per_gpu", default=8, type=int)
##### Training #####
parser.add_argument("--start_epoch", default=-1, type=int)
parser.add_argument("--max_epoch", default=720, type=int)
parser.add_argument("--eval_every_epoch", default=10, type=int)
parser.add_argument("--seed", default=0, type=int)
##### Testing #####
parser.add_argument("--test_only", default=False, action="store_true")
parser.add_argument("--test_ckpt", default=None, type=str)
##### I/O #####
parser.add_argument("--checkpoint_dir", default=None, type=str)
parser.add_argument("--log_every", default=10, type=int)
parser.add_argument("--log_metrics_every", default=20, type=int)
parser.add_argument("--save_separate_checkpoint_every_epoch", default=100, type=int)
##### Distributed Training #####
parser.add_argument("--ngpus", default=1, type=int)
parser.add_argument("--dist_url", default="tcp://localhost:12345", type=str)
return parser
def do_train(
args,
model,
model_no_ddp,
optimizer,
criterion,
dataset_config,
dataloaders,
best_val_metrics,
):
"""
Main training loop.
This trains the model for `args.max_epoch` epochs and tests the model after every `args.eval_every_epoch`.
We always evaluate the final checkpoint and report both the final AP and best AP on the val set.
"""
num_iters_per_epoch = len(dataloaders["train"])
num_iters_per_eval_epoch = len(dataloaders["test"])
print(f"Model is {model}")
print(f"Training started at epoch {args.start_epoch} until {args.max_epoch}.")
print(f"One training epoch = {num_iters_per_epoch} iters.")
print(f"One eval epoch = {num_iters_per_eval_epoch} iters.")
final_eval = os.path.join(args.checkpoint_dir, "final_eval.txt")
final_eval_pkl = os.path.join(args.checkpoint_dir, "final_eval.pkl")
if os.path.isfile(final_eval):
print(f"Found final eval file {final_eval}. Skipping training.")
return
logger = Logger(args.checkpoint_dir)
for epoch in range(args.start_epoch, args.max_epoch):
if is_distributed():
dataloaders["train_sampler"].set_epoch(epoch)
aps = train_one_epoch(
args,
epoch,
model,
optimizer,
criterion,
dataset_config,
dataloaders["train"],
logger,
)
# latest checkpoint is always stored in checkpoint.pth
save_checkpoint(
args.checkpoint_dir,
model_no_ddp,
optimizer,
epoch,
args,
best_val_metrics,
filename="checkpoint.pth",
)
metrics = aps.compute_metrics()
metric_str = aps.metrics_to_str(metrics, per_class=False)
metrics_dict = aps.metrics_to_dict(metrics)
curr_iter = epoch * len(dataloaders["train"])
if is_primary():
print("==" * 10)
print(f"Epoch [{epoch}/{args.max_epoch}]; Metrics {metric_str}")
print("==" * 10)
logger.log_scalars(metrics_dict, curr_iter, prefix="Train/")
if (
epoch > 0
and args.save_separate_checkpoint_every_epoch > 0
and epoch % args.save_separate_checkpoint_every_epoch == 0
):
# separate checkpoints are stored as checkpoint_{epoch}.pth
save_checkpoint(
args.checkpoint_dir,
model_no_ddp,
optimizer,
epoch,
args,
best_val_metrics,
)
if epoch % args.eval_every_epoch == 0 or epoch == (args.max_epoch - 1):
ap_calculator = evaluate(
args,
epoch,
model,
criterion,
dataset_config,
dataloaders["test"],
logger,
curr_iter,
)
metrics = ap_calculator.compute_metrics()
ap25 = metrics[0.25]["mAP"]
metric_str = ap_calculator.metrics_to_str(metrics, per_class=True)
metrics_dict = ap_calculator.metrics_to_dict(metrics)
if is_primary():
print("==" * 10)
print(f"Evaluate Epoch [{epoch}/{args.max_epoch}]; Metrics {metric_str}")
print("==" * 10)
logger.log_scalars(metrics_dict, curr_iter, prefix="Test/")
if is_primary() and (
len(best_val_metrics) == 0 or best_val_metrics[0.25]["mAP"] < ap25
):
best_val_metrics = metrics
filename = "checkpoint_best.pth"
save_checkpoint(
args.checkpoint_dir,
model_no_ddp,
optimizer,
epoch,
args,
best_val_metrics,
filename=filename,
)
print(
f"Epoch [{epoch}/{args.max_epoch}] saved current best val checkpoint at {filename}; ap25 {ap25}"
)
# always evaluate last checkpoint
epoch = args.max_epoch - 1
curr_iter = epoch * len(dataloaders["train"])
ap_calculator = evaluate(
args,
epoch,
model,
criterion,
dataset_config,
dataloaders["test"],
logger,
curr_iter,
)
metrics = ap_calculator.compute_metrics()
metric_str = ap_calculator.metrics_to_str(metrics)
if is_primary():
print("==" * 10)
print(f"Evaluate Final [{epoch}/{args.max_epoch}]; Metrics {metric_str}")
print("==" * 10)
with open(final_eval, "w") as fh:
fh.write("Training Finished.\n")
fh.write("==" * 10)
fh.write("Final Eval Numbers.\n")
fh.write(metric_str)
fh.write("\n")
fh.write("==" * 10)
fh.write("Best Eval Numbers.\n")
fh.write(ap_calculator.metrics_to_str(best_val_metrics))
fh.write("\n")
with open(final_eval_pkl, "wb") as fh:
pickle.dump(metrics, fh)
def test_model(args, model, model_no_ddp, criterion, dataset_config, dataloaders):
if args.test_ckpt is None or not os.path.isfile(args.test_ckpt):
f"Please specify a test checkpoint using --test_ckpt. Found invalid value {args.test_ckpt}"
sys.exit(1)
sd = torch.load(args.test_ckpt, map_location=torch.device("cpu"))
model_no_ddp.load_state_dict(sd["model"])
logger = Logger()
criterion = None # do not compute loss for speed-up; Comment out to see test loss
epoch = -1
curr_iter = 0
ap_calculator = evaluate(
args,
epoch,
model,
criterion,
dataset_config,
dataloaders["test"],
logger,
curr_iter,
)
metrics = ap_calculator.compute_metrics()
metric_str = ap_calculator.metrics_to_str(metrics)
if is_primary():
print("==" * 10)
print(f"Test model; Metrics {metric_str}")
print("==" * 10)
def main(local_rank, args):
if args.ngpus > 1:
print(
"Initializing Distributed Training. This is in BETA mode and hasn't been tested thoroughly. Use at your own risk :)"
)
print("To get the maximum speed-up consider reducing evaluations on val set by setting --eval_every_epoch to greater than 50")
init_distributed(
local_rank,
global_rank=local_rank,
world_size=args.ngpus,
dist_url=args.dist_url,
dist_backend="nccl",
)
print(f"Called with args: {args}")
torch.cuda.set_device(local_rank)
np.random.seed(args.seed + get_rank())
torch.manual_seed(args.seed + get_rank())
if torch.cuda.is_available():
torch.cuda.manual_seed_all(args.seed + get_rank())
datasets, dataset_config = build_dataset(args)
model, _ = build_model(args, dataset_config)
model = model.cuda(local_rank)
model_no_ddp = model
if is_distributed():
model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model)
model = torch.nn.parallel.DistributedDataParallel(
model, device_ids=[local_rank]
)
criterion = build_criterion(args, dataset_config)
criterion = criterion.cuda(local_rank)
dataloaders = {}
if args.test_only:
dataset_splits = ["test"]
else:
dataset_splits = ["train", "test"]
for split in dataset_splits:
if split == "train":
shuffle = True
else:
shuffle = False
if is_distributed():
sampler = DistributedSampler(datasets[split], shuffle=shuffle)
elif shuffle:
sampler = torch.utils.data.RandomSampler(datasets[split])
else:
sampler = torch.utils.data.SequentialSampler(datasets[split])
dataloaders[split] = DataLoader(
datasets[split],
sampler=sampler,
batch_size=args.batchsize_per_gpu,
num_workers=args.dataset_num_workers,
worker_init_fn=my_worker_init_fn,
)
dataloaders[split + "_sampler"] = sampler
if args.test_only:
criterion = None # faster evaluation
test_model(args, model, model_no_ddp, criterion, dataset_config, dataloaders)
else:
assert (
args.checkpoint_dir is not None
), f"Please specify a checkpoint dir using --checkpoint_dir"
if is_primary() and not os.path.isdir(args.checkpoint_dir):
os.makedirs(args.checkpoint_dir, exist_ok=True)
optimizer = build_optimizer(args, model_no_ddp)
loaded_epoch, best_val_metrics = resume_if_possible(
args.checkpoint_dir, model_no_ddp, optimizer
)
args.start_epoch = loaded_epoch + 1
do_train(
args,
model,
model_no_ddp,
optimizer,
criterion,
dataset_config,
dataloaders,
best_val_metrics,
)
def launch_distributed(args):
world_size = args.ngpus
if world_size == 1:
main(local_rank=0, args=args)
else:
torch.multiprocessing.spawn(main, nprocs=world_size, args=(args,))
if __name__ == "__main__":
parser = make_args_parser()
args = parser.parse_args()
try:
set_start_method("spawn")
except RuntimeError:
pass
launch_distributed(args)
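# Example invocations (sketch; assumes this script is saved as main.py and the
# dataset paths in scannet.py / sunrgbd.py point to preprocessed data):
#
#   python main.py --dataset_name scannet --checkpoint_dir outputs/scannet_run
#   python main.py --dataset_name scannet --test_only --test_ckpt outputs/scannet_run/checkpoint_best.pth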
|
# Copyright (c) Facebook, Inc. and its affiliates.
"""
Modified from https://github.com/facebookresearch/votenet
Dataset for 3D object detection on SUN RGB-D (with support of vote supervision).
A sunrgbd oriented bounding box is parameterized by (cx,cy,cz), (l,w,h) -- (dx,dy,dz) in upright depth coord
(Z is up, Y is forward, X is rightward), heading angle (from +X rotating to -Y) and semantic class.
Point clouds are in **upright depth coordinate (X right, Y forward, Z upward)**.
Returns heading class, heading residual, size class and size residual for 3D bounding boxes.
An oriented bounding box is parameterized by (cx,cy,cz), (l,w,h), heading_angle and semantic class label.
(cx,cy,cz) is in upright depth coordinate.
(l,w,h) are the *half lengths* of the object sizes.
The heading angle is a rotation in radians from +X towards -Y (+X is 0, -Y is pi/2).
Author: Charles R. Qi
Date: 2019
"""
import os
import sys
import numpy as np
from torch.utils.data import Dataset
import scipy.io as sio # to load .mat files for depth points
import utils.pc_util as pc_util
from utils.random_cuboid import RandomCuboid
from utils.pc_util import shift_scale_points, scale_points
from utils.box_util import (
flip_axis_to_camera_tensor,
get_3d_box_batch_tensor,
flip_axis_to_camera_np,
get_3d_box_batch_np,
)
MEAN_COLOR_RGB = np.array([0.5, 0.5, 0.5]) # sunrgbd color is in 0~1
DATA_PATH_V1 = "" ## Replace with path to dataset
DATA_PATH_V2 = "" ## Not used in the codebase.
class SunrgbdDatasetConfig(object):
def __init__(self):
self.num_semcls = 10
self.num_angle_bin = 12
self.max_num_obj = 64
self.type2class = {
"bed": 0,
"table": 1,
"sofa": 2,
"chair": 3,
"toilet": 4,
"desk": 5,
"dresser": 6,
"night_stand": 7,
"bookshelf": 8,
"bathtub": 9,
}
self.class2type = {self.type2class[t]: t for t in self.type2class}
self.type2onehotclass = {
"bed": 0,
"table": 1,
"sofa": 2,
"chair": 3,
"toilet": 4,
"desk": 5,
"dresser": 6,
"night_stand": 7,
"bookshelf": 8,
"bathtub": 9,
}
def angle2class(self, angle):
"""Convert continuous angle to discrete class
[optinal] also small regression number from
class center angle to current angle.
angle is from 0-2pi (or -pi~pi), class center at 0, 1*(2pi/N), 2*(2pi/N) ... (N-1)*(2pi/N)
returns class [0,1,...,N-1] and a residual number such that
class*(2pi/N) + number = angle
"""
num_class = self.num_angle_bin
angle = angle % (2 * np.pi)
assert angle >= 0 and angle <= 2 * np.pi
angle_per_class = 2 * np.pi / float(num_class)
shifted_angle = (angle + angle_per_class / 2) % (2 * np.pi)
class_id = int(shifted_angle / angle_per_class)
residual_angle = shifted_angle - (
class_id * angle_per_class + angle_per_class / 2
)
return class_id, residual_angle
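    # Worked example (sketch), assuming num_angle_bin = 12 so each bin spans
    # 2*pi/12 ~= 0.5236 rad: angle2class(0.3) shifts by half a bin to ~0.5618,
    # giving class_id = 1 and residual ~= -0.2236; class2angle(1, -0.2236)
    # below recovers the original 0.3 rad.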
def class2angle(self, pred_cls, residual, to_label_format=True):
"""Inverse function to angle2class"""
num_class = self.num_angle_bin
angle_per_class = 2 * np.pi / float(num_class)
angle_center = pred_cls * angle_per_class
angle = angle_center + residual
if to_label_format and angle > np.pi:
angle = angle - 2 * np.pi
return angle
def class2angle_batch(self, pred_cls, residual, to_label_format=True):
num_class = self.num_angle_bin
angle_per_class = 2 * np.pi / float(num_class)
angle_center = pred_cls * angle_per_class
angle = angle_center + residual
if to_label_format:
mask = angle > np.pi
angle[mask] = angle[mask] - 2 * np.pi
return angle
def class2anglebatch_tensor(self, pred_cls, residual, to_label_format=True):
return self.class2angle_batch(pred_cls, residual, to_label_format)
def box_parametrization_to_corners(self, box_center_unnorm, box_size, box_angle):
box_center_upright = flip_axis_to_camera_tensor(box_center_unnorm)
boxes = get_3d_box_batch_tensor(box_size, box_angle, box_center_upright)
return boxes
def box_parametrization_to_corners_np(self, box_center_unnorm, box_size, box_angle):
box_center_upright = flip_axis_to_camera_np(box_center_unnorm)
boxes = get_3d_box_batch_np(box_size, box_angle, box_center_upright)
return boxes
def my_compute_box_3d(self, center, size, heading_angle):
R = pc_util.rotz(-1 * heading_angle)
l, w, h = size
x_corners = [-l, l, l, -l, -l, l, l, -l]
y_corners = [w, w, -w, -w, w, w, -w, -w]
z_corners = [h, h, h, h, -h, -h, -h, -h]
corners_3d = np.dot(R, np.vstack([x_corners, y_corners, z_corners]))
corners_3d[0, :] += center[0]
corners_3d[1, :] += center[1]
corners_3d[2, :] += center[2]
return np.transpose(corners_3d)
class SunrgbdDetectionDataset(Dataset):
def __init__(
self,
dataset_config,
split_set="train",
        root_dir=None,
        meta_data_dir=None,  # unused by SUN RGB-D; accepted so build_dataset can pass it uniformly
num_points=20000,
use_color=False,
use_height=False,
use_v1=True,
augment=False,
use_random_cuboid=True,
random_cuboid_min_points=30000,
):
assert num_points <= 50000
assert split_set in ["train", "val", "trainval"]
self.dataset_config = dataset_config
self.use_v1 = use_v1
if root_dir is None:
root_dir = DATA_PATH_V1 if use_v1 else DATA_PATH_V2
self.data_path = root_dir + "_%s" % (split_set)
if split_set in ["train", "val"]:
self.scan_names = sorted(
list(
set([os.path.basename(x)[0:6] for x in os.listdir(self.data_path)])
)
)
elif split_set in ["trainval"]:
# combine names from both
sub_splits = ["train", "val"]
all_paths = []
for sub_split in sub_splits:
data_path = self.data_path.replace("trainval", sub_split)
basenames = sorted(
list(set([os.path.basename(x)[0:6] for x in os.listdir(data_path)]))
)
basenames = [os.path.join(data_path, x) for x in basenames]
all_paths.extend(basenames)
all_paths.sort()
self.scan_names = all_paths
self.num_points = num_points
self.augment = augment
self.use_color = use_color
self.use_height = use_height
self.use_random_cuboid = use_random_cuboid
self.random_cuboid_augmentor = RandomCuboid(
min_points=random_cuboid_min_points,
aspect=0.75,
min_crop=0.75,
max_crop=1.0,
)
self.center_normalizing_range = [
np.zeros((1, 3), dtype=np.float32),
np.ones((1, 3), dtype=np.float32),
]
self.max_num_obj = 64
def __len__(self):
return len(self.scan_names)
def __getitem__(self, idx):
scan_name = self.scan_names[idx]
if scan_name.startswith("/"):
scan_path = scan_name
else:
scan_path = os.path.join(self.data_path, scan_name)
point_cloud = np.load(scan_path + "_pc.npz")["pc"] # Nx6
bboxes = np.load(scan_path + "_bbox.npy") # K,8
if not self.use_color:
point_cloud = point_cloud[:, 0:3]
else:
assert point_cloud.shape[1] == 6
point_cloud = point_cloud[:, 0:6]
point_cloud[:, 3:] = point_cloud[:, 3:] - MEAN_COLOR_RGB
if self.use_height:
floor_height = np.percentile(point_cloud[:, 2], 0.99)
height = point_cloud[:, 2] - floor_height
point_cloud = np.concatenate(
[point_cloud, np.expand_dims(height, 1)], 1
) # (N,4) or (N,7)
# ------------------------------- DATA AUGMENTATION ------------------------------
if self.augment:
if np.random.random() > 0.5:
# Flipping along the YZ plane
point_cloud[:, 0] = -1 * point_cloud[:, 0]
bboxes[:, 0] = -1 * bboxes[:, 0]
bboxes[:, 6] = np.pi - bboxes[:, 6]
# Rotation along up-axis/Z-axis
rot_angle = (np.random.random() * np.pi / 3) - np.pi / 6 # -30 ~ +30 degree
rot_mat = pc_util.rotz(rot_angle)
point_cloud[:, 0:3] = np.dot(point_cloud[:, 0:3], np.transpose(rot_mat))
bboxes[:, 0:3] = np.dot(bboxes[:, 0:3], np.transpose(rot_mat))
bboxes[:, 6] -= rot_angle
# Augment RGB color
if self.use_color:
rgb_color = point_cloud[:, 3:6] + MEAN_COLOR_RGB
rgb_color *= (
1 + 0.4 * np.random.random(3) - 0.2
) # brightness change for each channel
rgb_color += (
0.1 * np.random.random(3) - 0.05
) # color shift for each channel
rgb_color += np.expand_dims(
(0.05 * np.random.random(point_cloud.shape[0]) - 0.025), -1
) # jittering on each pixel
rgb_color = np.clip(rgb_color, 0, 1)
# randomly drop out 30% of the points' colors
rgb_color *= np.expand_dims(
np.random.random(point_cloud.shape[0]) > 0.3, -1
)
point_cloud[:, 3:6] = rgb_color - MEAN_COLOR_RGB
# Augment point cloud scale: 0.85x-1.15x
scale_ratio = np.random.random() * 0.3 + 0.85
scale_ratio = np.expand_dims(np.tile(scale_ratio, 3), 0)
point_cloud[:, 0:3] *= scale_ratio
bboxes[:, 0:3] *= scale_ratio
bboxes[:, 3:6] *= scale_ratio
if self.use_height:
point_cloud[:, -1] *= scale_ratio[0, 0]
if self.use_random_cuboid:
point_cloud, bboxes, _ = self.random_cuboid_augmentor(
point_cloud, bboxes
)
# ------------------------------- LABELS ------------------------------
angle_classes = np.zeros((self.max_num_obj,), dtype=np.float32)
angle_residuals = np.zeros((self.max_num_obj,), dtype=np.float32)
raw_angles = np.zeros((self.max_num_obj,), dtype=np.float32)
raw_sizes = np.zeros((self.max_num_obj, 3), dtype=np.float32)
label_mask = np.zeros((self.max_num_obj))
label_mask[0 : bboxes.shape[0]] = 1
max_bboxes = np.zeros((self.max_num_obj, 8))
max_bboxes[0 : bboxes.shape[0], :] = bboxes
target_bboxes_mask = label_mask
target_bboxes = np.zeros((self.max_num_obj, 6))
for i in range(bboxes.shape[0]):
bbox = bboxes[i]
semantic_class = bbox[7]
            raw_angles[i] = bbox[6] % (2 * np.pi)  # wrap the heading angle to [0, 2*pi)
box3d_size = bbox[3:6] * 2
raw_sizes[i, :] = box3d_size
angle_class, angle_residual = self.dataset_config.angle2class(bbox[6])
angle_classes[i] = angle_class
angle_residuals[i] = angle_residual
corners_3d = self.dataset_config.my_compute_box_3d(
bbox[0:3], bbox[3:6], bbox[6]
)
# compute axis aligned box
xmin = np.min(corners_3d[:, 0])
ymin = np.min(corners_3d[:, 1])
zmin = np.min(corners_3d[:, 2])
xmax = np.max(corners_3d[:, 0])
ymax = np.max(corners_3d[:, 1])
zmax = np.max(corners_3d[:, 2])
target_bbox = np.array(
[
(xmin + xmax) / 2,
(ymin + ymax) / 2,
(zmin + zmax) / 2,
xmax - xmin,
ymax - ymin,
zmax - zmin,
]
)
target_bboxes[i, :] = target_bbox
point_cloud, choices = pc_util.random_sampling(
point_cloud, self.num_points, return_choices=True
)
point_cloud_dims_min = point_cloud.min(axis=0)
point_cloud_dims_max = point_cloud.max(axis=0)
mult_factor = point_cloud_dims_max - point_cloud_dims_min
box_sizes_normalized = scale_points(
raw_sizes.astype(np.float32)[None, ...],
mult_factor=1.0 / mult_factor[None, ...],
)
box_sizes_normalized = box_sizes_normalized.squeeze(0)
box_centers = target_bboxes.astype(np.float32)[:, 0:3]
box_centers_normalized = shift_scale_points(
box_centers[None, ...],
src_range=[
point_cloud_dims_min[None, ...],
point_cloud_dims_max[None, ...],
],
dst_range=self.center_normalizing_range,
)
box_centers_normalized = box_centers_normalized.squeeze(0)
box_centers_normalized = box_centers_normalized * target_bboxes_mask[..., None]
# re-encode angles to be consistent with VoteNet eval
angle_classes = angle_classes.astype(np.int64)
angle_residuals = angle_residuals.astype(np.float32)
raw_angles = self.dataset_config.class2angle_batch(
angle_classes, angle_residuals
)
box_corners = self.dataset_config.box_parametrization_to_corners_np(
box_centers[None, ...],
raw_sizes.astype(np.float32)[None, ...],
raw_angles.astype(np.float32)[None, ...],
)
box_corners = box_corners.squeeze(0)
ret_dict = {}
ret_dict["point_clouds"] = point_cloud.astype(np.float32)
ret_dict["gt_box_corners"] = box_corners.astype(np.float32)
ret_dict["gt_box_centers"] = box_centers.astype(np.float32)
ret_dict["gt_box_centers_normalized"] = box_centers_normalized.astype(
np.float32
)
target_bboxes_semcls = np.zeros((self.max_num_obj))
target_bboxes_semcls[0 : bboxes.shape[0]] = bboxes[:, -1] # from 0 to 9
ret_dict["gt_box_sem_cls_label"] = target_bboxes_semcls.astype(np.int64)
ret_dict["gt_box_present"] = target_bboxes_mask.astype(np.float32)
ret_dict["scan_idx"] = np.array(idx).astype(np.int64)
ret_dict["gt_box_sizes"] = raw_sizes.astype(np.float32)
ret_dict["gt_box_sizes_normalized"] = box_sizes_normalized.astype(np.float32)
ret_dict["gt_box_angles"] = raw_angles.astype(np.float32)
ret_dict["gt_angle_class_label"] = angle_classes
ret_dict["gt_angle_residual_label"] = angle_residuals
ret_dict["point_cloud_dims_min"] = point_cloud_dims_min
ret_dict["point_cloud_dims_max"] = point_cloud_dims_max
return ret_dict
|
# Copyright (c) Facebook, Inc. and its affiliates.
from .scannet import ScannetDetectionDataset, ScannetDatasetConfig
from .sunrgbd import SunrgbdDetectionDataset, SunrgbdDatasetConfig
DATASET_FUNCTIONS = {
"scannet": [ScannetDetectionDataset, ScannetDatasetConfig],
"sunrgbd": [SunrgbdDetectionDataset, SunrgbdDatasetConfig],
}
def build_dataset(args):
dataset_builder = DATASET_FUNCTIONS[args.dataset_name][0]
dataset_config = DATASET_FUNCTIONS[args.dataset_name][1]()
dataset_dict = {
"train": dataset_builder(
dataset_config,
split_set="train",
root_dir=args.dataset_root_dir,
meta_data_dir=args.meta_data_dir,
use_color=args.use_color,
augment=True
),
"test": dataset_builder(
dataset_config,
split_set="val",
root_dir=args.dataset_root_dir,
use_color=args.use_color,
augment=False
),
}
return dataset_dict, dataset_config
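# Sketch of the returned values: `dataset_dict` maps "train"/"test" to Dataset
# instances (the "test" entry wraps the val split), and `dataset_config` holds
# dataset constants such as num_semcls and num_angle_bin that the model and
# criterion read.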
|
# Copyright (c) Facebook, Inc. and its affiliates.
"""
Modified from https://github.com/facebookresearch/votenet
Dataset for object bounding box regression.
An axis aligned bounding box is parameterized by (cx,cy,cz) and (dx,dy,dz)
where (cx,cy,cz) is the center point of the box, dx is the x-axis length of the box.
"""
import os
import sys
import numpy as np
import torch
import utils.pc_util as pc_util
from torch.utils.data import Dataset
from utils.box_util import (flip_axis_to_camera_np, flip_axis_to_camera_tensor,
get_3d_box_batch_np, get_3d_box_batch_tensor)
from utils.pc_util import scale_points, shift_scale_points
from utils.random_cuboid import RandomCuboid
IGNORE_LABEL = -100
MEAN_COLOR_RGB = np.array([109.8, 97.2, 83.8])
DATASET_ROOT_DIR = "" ## Replace with path to dataset
DATASET_METADATA_DIR = "" ## Replace with path to dataset
class ScannetDatasetConfig(object):
def __init__(self):
self.num_semcls = 18
self.num_angle_bin = 1
self.max_num_obj = 64
self.type2class = {
"cabinet": 0,
"bed": 1,
"chair": 2,
"sofa": 3,
"table": 4,
"door": 5,
"window": 6,
"bookshelf": 7,
"picture": 8,
"counter": 9,
"desk": 10,
"curtain": 11,
"refrigerator": 12,
"showercurtrain": 13,
"toilet": 14,
"sink": 15,
"bathtub": 16,
"garbagebin": 17,
}
self.class2type = {self.type2class[t]: t for t in self.type2class}
self.nyu40ids = np.array(
[3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 14, 16, 24, 28, 33, 34, 36, 39]
)
self.nyu40id2class = {
nyu40id: i for i, nyu40id in enumerate(list(self.nyu40ids))
}
# Semantic Segmentation Classes. Not used in 3DETR
self.num_class_semseg = 20
self.type2class_semseg = {
"wall": 0,
"floor": 1,
"cabinet": 2,
"bed": 3,
"chair": 4,
"sofa": 5,
"table": 6,
"door": 7,
"window": 8,
"bookshelf": 9,
"picture": 10,
"counter": 11,
"desk": 12,
"curtain": 13,
"refrigerator": 14,
"showercurtrain": 15,
"toilet": 16,
"sink": 17,
"bathtub": 18,
"garbagebin": 19,
}
self.class2type_semseg = {
self.type2class_semseg[t]: t for t in self.type2class_semseg
}
self.nyu40ids_semseg = np.array(
[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 14, 16, 24, 28, 33, 34, 36, 39]
)
self.nyu40id2class_semseg = {
nyu40id: i for i, nyu40id in enumerate(list(self.nyu40ids_semseg))
}
def angle2class(self, angle):
raise ValueError("ScanNet does not have rotated bounding boxes.")
def class2anglebatch_tensor(self, pred_cls, residual, to_label_format=True):
zero_angle = torch.zeros(
(pred_cls.shape[0], pred_cls.shape[1]),
dtype=torch.float32,
device=pred_cls.device,
)
return zero_angle
def class2anglebatch(self, pred_cls, residual, to_label_format=True):
zero_angle = np.zeros(pred_cls.shape[0], dtype=np.float32)
return zero_angle
def param2obb(
self,
center,
heading_class,
heading_residual,
size_class,
size_residual,
box_size=None,
):
heading_angle = self.class2angle(heading_class, heading_residual)
if box_size is None:
box_size = self.class2size(int(size_class), size_residual)
obb = np.zeros((7,))
obb[0:3] = center
obb[3:6] = box_size
obb[6] = heading_angle * -1
return obb
def box_parametrization_to_corners(self, box_center_unnorm, box_size, box_angle):
box_center_upright = flip_axis_to_camera_tensor(box_center_unnorm)
boxes = get_3d_box_batch_tensor(box_size, box_angle, box_center_upright)
return boxes
def box_parametrization_to_corners_np(self, box_center_unnorm, box_size, box_angle):
box_center_upright = flip_axis_to_camera_np(box_center_unnorm)
boxes = get_3d_box_batch_np(box_size, box_angle, box_center_upright)
return boxes
@staticmethod
def rotate_aligned_boxes(input_boxes, rot_mat):
centers, lengths = input_boxes[:, 0:3], input_boxes[:, 3:6]
new_centers = np.dot(centers, np.transpose(rot_mat))
dx, dy = lengths[:, 0] / 2.0, lengths[:, 1] / 2.0
new_x = np.zeros((dx.shape[0], 4))
new_y = np.zeros((dx.shape[0], 4))
for i, crnr in enumerate([(-1, -1), (1, -1), (1, 1), (-1, 1)]):
crnrs = np.zeros((dx.shape[0], 3))
crnrs[:, 0] = crnr[0] * dx
crnrs[:, 1] = crnr[1] * dy
crnrs = np.dot(crnrs, np.transpose(rot_mat))
new_x[:, i] = crnrs[:, 0]
new_y[:, i] = crnrs[:, 1]
new_dx = 2.0 * np.max(new_x, 1)
new_dy = 2.0 * np.max(new_y, 1)
new_lengths = np.stack((new_dx, new_dy, lengths[:, 2]), axis=1)
return np.concatenate([new_centers, new_lengths], axis=1)
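# Note (sketch): since ScanNet boxes are axis aligned, rotate_aligned_boxes
# re-fits an axis-aligned box around the rotated corners. For a 90-degree
# rotation this simply swaps the x/y extents, e.g. lengths (2, 4, 1) become
# (4, 2, 1); intermediate angles enlarge the extents to cover the tilted box.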
class ScannetDetectionDataset(Dataset):
def __init__(
self,
dataset_config,
split_set="train",
root_dir=None,
meta_data_dir=None,
num_points=40000,
use_color=False,
use_height=False,
augment=False,
use_random_cuboid=True,
random_cuboid_min_points=30000,
):
self.dataset_config = dataset_config
assert split_set in ["train", "val"]
if root_dir is None:
root_dir = DATASET_ROOT_DIR
if meta_data_dir is None:
meta_data_dir = DATASET_METADATA_DIR
self.data_path = root_dir
all_scan_names = list(
set(
[
os.path.basename(x)[0:12]
for x in os.listdir(self.data_path)
if x.startswith("scene")
]
)
)
if split_set == "all":
self.scan_names = all_scan_names
elif split_set in ["train", "val", "test"]:
split_filenames = os.path.join(meta_data_dir, f"scannetv2_{split_set}.txt")
with open(split_filenames, "r") as f:
self.scan_names = f.read().splitlines()
            # remove unavailable scans
num_scans = len(self.scan_names)
self.scan_names = [
sname for sname in self.scan_names if sname in all_scan_names
]
print(f"kept {len(self.scan_names)} scans out of {num_scans}")
else:
raise ValueError(f"Unknown split name {split_set}")
self.num_points = num_points
self.use_color = use_color
self.use_height = use_height
self.augment = augment
self.use_random_cuboid = use_random_cuboid
self.random_cuboid_augmentor = RandomCuboid(min_points=random_cuboid_min_points)
self.center_normalizing_range = [
np.zeros((1, 3), dtype=np.float32),
np.ones((1, 3), dtype=np.float32),
]
def __len__(self):
return len(self.scan_names)
def __getitem__(self, idx):
scan_name = self.scan_names[idx]
mesh_vertices = np.load(os.path.join(self.data_path, scan_name) + "_vert.npy")
instance_labels = np.load(
os.path.join(self.data_path, scan_name) + "_ins_label.npy"
)
semantic_labels = np.load(
os.path.join(self.data_path, scan_name) + "_sem_label.npy"
)
instance_bboxes = np.load(os.path.join(self.data_path, scan_name) + "_bbox.npy")
if not self.use_color:
point_cloud = mesh_vertices[:, 0:3] # do not use color for now
pcl_color = mesh_vertices[:, 3:6]
else:
point_cloud = mesh_vertices[:, 0:6]
point_cloud[:, 3:] = (point_cloud[:, 3:] - MEAN_COLOR_RGB) / 256.0
pcl_color = point_cloud[:, 3:]
if self.use_height:
floor_height = np.percentile(point_cloud[:, 2], 0.99)
height = point_cloud[:, 2] - floor_height
point_cloud = np.concatenate([point_cloud, np.expand_dims(height, 1)], 1)
# ------------------------------- LABELS ------------------------------
MAX_NUM_OBJ = self.dataset_config.max_num_obj
target_bboxes = np.zeros((MAX_NUM_OBJ, 6), dtype=np.float32)
target_bboxes_mask = np.zeros((MAX_NUM_OBJ), dtype=np.float32)
angle_classes = np.zeros((MAX_NUM_OBJ,), dtype=np.int64)
angle_residuals = np.zeros((MAX_NUM_OBJ,), dtype=np.float32)
raw_sizes = np.zeros((MAX_NUM_OBJ, 3), dtype=np.float32)
raw_angles = np.zeros((MAX_NUM_OBJ,), dtype=np.float32)
if self.augment and self.use_random_cuboid:
(
point_cloud,
instance_bboxes,
per_point_labels,
) = self.random_cuboid_augmentor(
point_cloud, instance_bboxes, [instance_labels, semantic_labels]
)
instance_labels = per_point_labels[0]
semantic_labels = per_point_labels[1]
point_cloud, choices = pc_util.random_sampling(
point_cloud, self.num_points, return_choices=True
)
instance_labels = instance_labels[choices]
semantic_labels = semantic_labels[choices]
sem_seg_labels = np.ones_like(semantic_labels) * IGNORE_LABEL
for _c in self.dataset_config.nyu40ids_semseg:
sem_seg_labels[
semantic_labels == _c
] = self.dataset_config.nyu40id2class_semseg[_c]
pcl_color = pcl_color[choices]
target_bboxes_mask[0 : instance_bboxes.shape[0]] = 1
target_bboxes[0 : instance_bboxes.shape[0], :] = instance_bboxes[:, 0:6]
# ------------------------------- DATA AUGMENTATION ------------------------------
if self.augment:
if np.random.random() > 0.5:
# Flipping along the YZ plane
point_cloud[:, 0] = -1 * point_cloud[:, 0]
target_bboxes[:, 0] = -1 * target_bboxes[:, 0]
if np.random.random() > 0.5:
# Flipping along the XZ plane
point_cloud[:, 1] = -1 * point_cloud[:, 1]
target_bboxes[:, 1] = -1 * target_bboxes[:, 1]
# Rotation along up-axis/Z-axis
rot_angle = (np.random.random() * np.pi / 18) - np.pi / 36 # -5 ~ +5 degree
rot_mat = pc_util.rotz(rot_angle)
point_cloud[:, 0:3] = np.dot(point_cloud[:, 0:3], np.transpose(rot_mat))
target_bboxes = self.dataset_config.rotate_aligned_boxes(
target_bboxes, rot_mat
)
raw_sizes = target_bboxes[:, 3:6]
point_cloud_dims_min = point_cloud.min(axis=0)[:3]
point_cloud_dims_max = point_cloud.max(axis=0)[:3]
box_centers = target_bboxes.astype(np.float32)[:, 0:3]
box_centers_normalized = shift_scale_points(
box_centers[None, ...],
src_range=[
point_cloud_dims_min[None, ...],
point_cloud_dims_max[None, ...],
],
dst_range=self.center_normalizing_range,
)
box_centers_normalized = box_centers_normalized.squeeze(0)
box_centers_normalized = box_centers_normalized * target_bboxes_mask[..., None]
mult_factor = point_cloud_dims_max - point_cloud_dims_min
box_sizes_normalized = scale_points(
raw_sizes.astype(np.float32)[None, ...],
mult_factor=1.0 / mult_factor[None, ...],
)
box_sizes_normalized = box_sizes_normalized.squeeze(0)
box_corners = self.dataset_config.box_parametrization_to_corners_np(
box_centers[None, ...],
raw_sizes.astype(np.float32)[None, ...],
raw_angles.astype(np.float32)[None, ...],
)
box_corners = box_corners.squeeze(0)
ret_dict = {}
ret_dict["point_clouds"] = point_cloud.astype(np.float32)
ret_dict["gt_box_corners"] = box_corners.astype(np.float32)
ret_dict["gt_box_centers"] = box_centers.astype(np.float32)
ret_dict["gt_box_centers_normalized"] = box_centers_normalized.astype(
np.float32
)
ret_dict["gt_angle_class_label"] = angle_classes.astype(np.int64)
ret_dict["gt_angle_residual_label"] = angle_residuals.astype(np.float32)
target_bboxes_semcls = np.zeros((MAX_NUM_OBJ))
target_bboxes_semcls[0 : instance_bboxes.shape[0]] = [
self.dataset_config.nyu40id2class[int(x)]
for x in instance_bboxes[:, -1][0 : instance_bboxes.shape[0]]
]
ret_dict["gt_box_sem_cls_label"] = target_bboxes_semcls.astype(np.int64)
ret_dict["gt_box_present"] = target_bboxes_mask.astype(np.float32)
ret_dict["scan_idx"] = np.array(idx).astype(np.int64)
ret_dict["pcl_color"] = pcl_color
ret_dict["gt_box_sizes"] = raw_sizes.astype(np.float32)
ret_dict["gt_box_sizes_normalized"] = box_sizes_normalized.astype(np.float32)
ret_dict["gt_box_angles"] = raw_angles.astype(np.float32)
ret_dict["point_cloud_dims_min"] = point_cloud_dims_min.astype(np.float32)
ret_dict["point_cloud_dims_max"] = point_cloud_dims_max.astype(np.float32)
return ret_dict
|
# Copyright (c) Facebook, Inc. and its affiliates.
import torch
import numpy as np
from collections import deque
from typing import List
from utils.dist import is_distributed, barrier, all_reduce_sum
def my_worker_init_fn(worker_id):
np.random.seed(np.random.get_state()[1][0] + worker_id)
@torch.jit.ignore
def to_list_1d(arr) -> List[float]:
arr = arr.detach().cpu().numpy().tolist()
return arr
@torch.jit.ignore
def to_list_3d(arr) -> List[List[List[float]]]:
arr = arr.detach().cpu().numpy().tolist()
return arr
def huber_loss(error, delta=1.0):
"""
Ref: https://github.com/charlesq34/frustum-pointnets/blob/master/models/model_util.py
x = error = pred - gt or dist(pred,gt)
0.5 * |x|^2 if |x|<=d
0.5 * d^2 + d * (|x|-d) if |x|>d
"""
abs_error = torch.abs(error)
quadratic = torch.clamp(abs_error, max=delta)
linear = abs_error - quadratic
loss = 0.5 * quadratic ** 2 + delta * linear
return loss
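# Worked example (sketch) with delta = 1.0: an error of 0.5 stays quadratic,
# 0.5 * 0.5**2 = 0.125, while an error of 2.0 is linearized beyond the delta,
# 0.5 * 1.0**2 + 1.0 * (2.0 - 1.0) = 1.5.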
# From https://github.com/facebookresearch/detr/blob/master/util/misc.py
class SmoothedValue(object):
"""Track a series of values and provide access to smoothed values over a
window or the global series average.
"""
def __init__(self, window_size=20, fmt=None):
if fmt is None:
fmt = "{median:.4f} ({global_avg:.4f})"
self.deque = deque(maxlen=window_size)
self.total = 0.0
self.count = 0
self.fmt = fmt
def update(self, value, n=1):
self.deque.append(value)
self.count += n
self.total += value * n
def synchronize_between_processes(self):
"""
Warning: does not synchronize the deque!
"""
if not is_distributed():
return
t = torch.tensor([self.count, self.total], dtype=torch.float64, device="cuda")
barrier()
all_reduce_sum(t)
t = t.tolist()
self.count = int(t[0])
self.total = t[1]
@property
def median(self):
d = torch.tensor(list(self.deque))
return d.median().item()
@property
def avg(self):
d = torch.tensor(list(self.deque), dtype=torch.float32)
return d.mean().item()
@property
def global_avg(self):
return self.total / self.count
@property
def max(self):
return max(self.deque)
@property
def value(self):
return self.deque[-1]
def __str__(self):
return self.fmt.format(
median=self.median,
avg=self.avg,
global_avg=self.global_avg,
max=self.max,
value=self.value,
)
|
# Copyright (c) Facebook, Inc. and its affiliates.
from setuptools import setup, Extension
from Cython.Build import cythonize
import numpy as np
# locate the numpy C headers via numpy's official helper
INCLUDE_PATH = [np.get_include()]
setup(
ext_modules = cythonize(
Extension(
"box_intersection",
sources=["box_intersection.pyx"],
include_dirs=INCLUDE_PATH
)),
)
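# Build the Cython extension in place with the standard setuptools workflow:
#
#   python setup.py build_ext --inplace
#
# which compiles box_intersection.pyx into an importable `box_intersection` module.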
|
# Copyright (c) Facebook, Inc. and its affiliates.
""" Generic Code for Object Detection Evaluation
Input:
For each class:
For each image:
Predictions: box, score
Groundtruths: box
Output:
For each class:
        precision-recall and average precision
Author: Charles R. Qi
Ref: https://raw.githubusercontent.com/rbgirshick/py-faster-rcnn/master/lib/datasets/voc_eval.py
"""
import numpy as np
from utils.box_util import box3d_iou
def voc_ap(rec, prec, use_07_metric=False):
"""ap = voc_ap(rec, prec, [use_07_metric])
Compute VOC AP given precision and recall.
If use_07_metric is true, uses the
VOC 07 11 point method (default:False).
"""
if use_07_metric:
# 11 point metric
ap = 0.0
for t in np.arange(0.0, 1.1, 0.1):
if np.sum(rec >= t) == 0:
p = 0
else:
p = np.max(prec[rec >= t])
ap = ap + p / 11.0
else:
# correct AP calculation
# first append sentinel values at the end
mrec = np.concatenate(([0.0], rec, [1.0]))
mpre = np.concatenate(([0.0], prec, [0.0]))
# compute the precision envelope
for i in range(mpre.size - 1, 0, -1):
mpre[i - 1] = np.maximum(mpre[i - 1], mpre[i])
# to calculate area under PR curve, look for points
# where X axis (recall) changes value
i = np.where(mrec[1:] != mrec[:-1])[0]
# and sum (\Delta recall) * prec
ap = np.sum((mrec[i + 1] - mrec[i]) * mpre[i + 1])
return ap
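# Worked example (sketch): two detections with rec = [0.5, 1.0] and
# prec = [1.0, 0.5] give (non-07 metric) the precision envelope [1.0, 1.0, 0.5]
# over recall points [0, 0.5, 1.0], so ap = 0.5 * 1.0 + 0.5 * 0.5 = 0.75.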
def get_iou_obb(bb1, bb2):
iou3d, iou2d = box3d_iou(bb1, bb2)
return iou3d
def get_iou_main(get_iou_func, args):
return get_iou_func(*args)
def eval_det_cls(
pred, gt, ovthresh=0.25, use_07_metric=False, get_iou_func=get_iou_obb
):
"""Generic functions to compute precision/recall for object detection
for a single class.
Input:
pred: map of {img_id: [(bbox, score)]} where bbox is numpy array
gt: map of {img_id: [bbox]}
ovthresh: scalar, iou threshold
use_07_metric: bool, if True use VOC07 11 point method
Output:
rec: numpy array of length nd
prec: numpy array of length nd
ap: scalar, average precision
"""
# construct gt objects
class_recs = {} # {img_id: {'bbox': bbox list, 'det': matched list}}
npos = 0
for img_id in gt.keys():
bbox = np.array(gt[img_id])
det = [False] * len(bbox)
npos += len(bbox)
class_recs[img_id] = {"bbox": bbox, "det": det}
# pad empty list to all other imgids
for img_id in pred.keys():
if img_id not in gt:
class_recs[img_id] = {"bbox": np.array([]), "det": []}
# construct dets
image_ids = []
confidence = []
BB = []
for img_id in pred.keys():
for box, score in pred[img_id]:
image_ids.append(img_id)
confidence.append(score)
BB.append(box)
confidence = np.array(confidence)
BB = np.array(BB) # (nd,4 or 8,3 or 6)
# sort by confidence
sorted_ind = np.argsort(-confidence)
sorted_scores = np.sort(-confidence)
BB = BB[sorted_ind, ...]
image_ids = [image_ids[x] for x in sorted_ind]
# go down dets and mark TPs and FPs
nd = len(image_ids)
tp = np.zeros(nd)
fp = np.zeros(nd)
for d in range(nd):
# if d%100==0: print(d)
R = class_recs[image_ids[d]]
bb = BB[d, ...].astype(float)
ovmax = -np.inf
BBGT = R["bbox"].astype(float)
if BBGT.size > 0:
# compute overlaps
for j in range(BBGT.shape[0]):
iou = get_iou_main(get_iou_func, (bb, BBGT[j, ...]))
if iou > ovmax:
ovmax = iou
jmax = j
# print d, ovmax
if ovmax > ovthresh:
if not R["det"][jmax]:
tp[d] = 1.0
R["det"][jmax] = 1
else:
fp[d] = 1.0
else:
fp[d] = 1.0
# compute precision recall
fp = np.cumsum(fp)
tp = np.cumsum(tp)
if npos == 0:
rec = np.zeros_like(tp)
else:
rec = tp / float(npos)
# print('NPOS: ', npos)
# avoid divide by zero in case the first detection matches a difficult
# ground truth
prec = tp / np.maximum(tp + fp, np.finfo(np.float64).eps)
ap = voc_ap(rec, prec, use_07_metric)
return rec, prec, ap
def eval_det_cls_wrapper(arguments):
pred, gt, ovthresh, use_07_metric, get_iou_func = arguments
rec, prec, ap = eval_det_cls(pred, gt, ovthresh, use_07_metric, get_iou_func)
return (rec, prec, ap)
def eval_det(pred_all, gt_all, ovthresh=0.25, use_07_metric=False, get_iou_func=get_iou_obb):
"""Generic functions to compute precision/recall for object detection
for multiple classes.
Input:
pred_all: map of {img_id: [(classname, bbox, score)]}
gt_all: map of {img_id: [(classname, bbox)]}
ovthresh: scalar, iou threshold
use_07_metric: bool, if true use VOC07 11 point method
Output:
rec: {classname: rec}
prec: {classname: prec_all}
ap: {classname: scalar}
"""
pred = {} # map {classname: pred}
gt = {} # map {classname: gt}
for img_id in pred_all.keys():
for classname, bbox, score in pred_all[img_id]:
if classname not in pred:
pred[classname] = {}
if img_id not in pred[classname]:
pred[classname][img_id] = []
if classname not in gt:
gt[classname] = {}
if img_id not in gt[classname]:
gt[classname][img_id] = []
pred[classname][img_id].append((bbox, score))
for img_id in gt_all.keys():
for classname, bbox in gt_all[img_id]:
if classname not in gt:
gt[classname] = {}
if img_id not in gt[classname]:
gt[classname][img_id] = []
gt[classname][img_id].append(bbox)
rec = {}
prec = {}
ap = {}
for classname in gt.keys():
# print('Computing AP for class: ', classname)
rec[classname], prec[classname], ap[classname] = eval_det_cls(
pred[classname], gt[classname], ovthresh, use_07_metric, get_iou_func
)
# print(classname, ap[classname])
return rec, prec, ap
from multiprocessing import Pool
def eval_det_multiprocessing(
pred_all, gt_all, ovthresh=0.25, use_07_metric=False, get_iou_func=get_iou_obb
):
"""Generic functions to compute precision/recall for object detection
for multiple classes.
Input:
pred_all: map of {img_id: [(classname, bbox, score)]}
gt_all: map of {img_id: [(classname, bbox)]}
ovthresh: scalar, iou threshold
use_07_metric: bool, if true use VOC07 11 point method
Output:
rec: {classname: rec}
prec: {classname: prec_all}
ap: {classname: scalar}
"""
pred = {} # map {classname: pred}
gt = {} # map {classname: gt}
for img_id in pred_all.keys():
for classname, bbox, score in pred_all[img_id]:
if classname not in pred:
pred[classname] = {}
if img_id not in pred[classname]:
pred[classname][img_id] = []
if classname not in gt:
gt[classname] = {}
if img_id not in gt[classname]:
gt[classname][img_id] = []
pred[classname][img_id].append((bbox, score))
for img_id in gt_all.keys():
for classname, bbox in gt_all[img_id]:
if classname not in gt:
gt[classname] = {}
if img_id not in gt[classname]:
gt[classname][img_id] = []
gt[classname][img_id].append(bbox)
rec = {}
prec = {}
ap = {}
p = Pool(processes=10)
ret_values = p.map(
eval_det_cls_wrapper,
[
(pred[classname], gt[classname], ovthresh, use_07_metric, get_iou_func)
for classname in gt.keys()
if classname in pred
],
)
p.close()
for i, classname in enumerate(gt.keys()):
if classname in pred:
rec[classname], prec[classname], ap[classname] = ret_values[i]
else:
rec[classname] = 0
prec[classname] = 0
ap[classname] = 0
# print(classname, ap[classname])
return rec, prec, ap
|
# Copyright (c) Facebook, Inc. and its affiliates.
""" Utility functions for processing point clouds.
Author: Charles R. Qi and Or Litany
"""
import os
import sys
import torch
# Point cloud IO
import numpy as np
from plyfile import PlyData, PlyElement
# Mesh IO
import trimesh
# ----------------------------------------
# Point Cloud Sampling
# ----------------------------------------
def random_sampling(pc, num_sample, replace=None, return_choices=False):
"""Input is NxC, output is num_samplexC"""
if replace is None:
replace = pc.shape[0] < num_sample
choices = np.random.choice(pc.shape[0], num_sample, replace=replace)
if return_choices:
return pc[choices], choices
else:
return pc[choices]
# ----------------------------------------
# Simple Point manipulations
# ----------------------------------------
def shift_scale_points(pred_xyz, src_range, dst_range=None):
"""
pred_xyz: B x N x 3
src_range: [[B x 3], [B x 3]] - min and max XYZ coords
dst_range: [[B x 3], [B x 3]] - min and max XYZ coords
"""
if dst_range is None:
dst_range = [
torch.zeros((src_range[0].shape[0], 3), device=src_range[0].device),
torch.ones((src_range[0].shape[0], 3), device=src_range[0].device),
]
if pred_xyz.ndim == 4:
src_range = [x[:, None] for x in src_range]
dst_range = [x[:, None] for x in dst_range]
assert src_range[0].shape[0] == pred_xyz.shape[0]
assert dst_range[0].shape[0] == pred_xyz.shape[0]
assert src_range[0].shape[-1] == pred_xyz.shape[-1]
assert src_range[0].shape == src_range[1].shape
assert dst_range[0].shape == dst_range[1].shape
assert src_range[0].shape == dst_range[1].shape
src_diff = src_range[1][:, None, :] - src_range[0][:, None, :]
dst_diff = dst_range[1][:, None, :] - dst_range[0][:, None, :]
prop_xyz = (
((pred_xyz - src_range[0][:, None, :]) * dst_diff) / src_diff
) + dst_range[0][:, None, :]
return prop_xyz
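# Worked example (sketch): a point at (2.5, 5.0, 7.5) in a scene whose per-axis
# XYZ range is [0, 10] maps to (0.25, 0.5, 0.75) with the default unit-cube
# dst_range; this is how the dataset loaders normalize box centers.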
def scale_points(pred_xyz, mult_factor):
if pred_xyz.ndim == 4:
mult_factor = mult_factor[:, None]
scaled_xyz = pred_xyz * mult_factor[:, None, :]
return scaled_xyz
def rotate_point_cloud(points, rotation_matrix=None):
"""Input: (n,3), Output: (n,3)"""
# Rotate in-place around Z axis.
if rotation_matrix is None:
rotation_angle = np.random.uniform() * 2 * np.pi
sinval, cosval = np.sin(rotation_angle), np.cos(rotation_angle)
rotation_matrix = np.array(
[[cosval, sinval, 0], [-sinval, cosval, 0], [0, 0, 1]]
)
ctr = points.mean(axis=0)
rotated_data = np.dot(points - ctr, rotation_matrix) + ctr
return rotated_data, rotation_matrix
def rotate_pc_along_y(pc, rot_angle):
"""Input ps is NxC points with first 3 channels as XYZ
z is facing forward, x is left ward, y is downward
"""
cosval = np.cos(rot_angle)
sinval = np.sin(rot_angle)
rotmat = np.array([[cosval, -sinval], [sinval, cosval]])
pc[:, [0, 2]] = np.dot(pc[:, [0, 2]], np.transpose(rotmat))
return pc
def roty(t):
"""Rotation about the y-axis."""
c = np.cos(t)
s = np.sin(t)
return np.array([[c, 0, s], [0, 1, 0], [-s, 0, c]])
def roty_batch(t):
"""Rotation about the y-axis.
t: (x1,x2,...xn)
return: (x1,x2,...,xn,3,3)
"""
input_shape = t.shape
output = np.zeros(tuple(list(input_shape) + [3, 3]))
c = np.cos(t)
s = np.sin(t)
output[..., 0, 0] = c
output[..., 0, 2] = s
output[..., 1, 1] = 1
output[..., 2, 0] = -s
output[..., 2, 2] = c
return output
def rotz(t):
"""Rotation about the z-axis."""
c = np.cos(t)
s = np.sin(t)
return np.array([[c, -s, 0], [s, c, 0], [0, 0, 1]])
def point_cloud_to_bbox(points):
"""Extract the axis aligned box from a pcl or batch of pcls
Args:
points: Nx3 points or BxNx3
output is 6 dim: xyz pos of center and 3 lengths
"""
which_dim = len(points.shape) - 2 # first dim if a single cloud and second if batch
mn, mx = points.min(which_dim), points.max(which_dim)
lengths = mx - mn
cntr = 0.5 * (mn + mx)
return np.concatenate([cntr, lengths], axis=which_dim)
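# Illustrative usage sketch (editor addition; the `_demo_*` name is hypothetical):
# the returned 6-vector is the box center followed by the box lengths along each axis.
def _demo_point_cloud_to_bbox():
    pts = np.array([[0.0, 0.0, 0.0], [2.0, 4.0, 6.0]])
    bbox = point_cloud_to_bbox(pts)
    assert np.allclose(bbox, [1.0, 2.0, 3.0, 2.0, 4.0, 6.0])  # center (1,2,3), lengths (2,4,6)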
def write_bbox(scene_bbox, out_filename):
"""Export scene bbox to meshes
Args:
scene_bbox: (N x 6 numpy array): xyz pos of center and 3 lengths
out_filename: (string) filename
Note:
To visualize the boxes in MeshLab.
1. Select the objects (the boxes)
2. Filters -> Polygon and Quad Mesh -> Turn into Quad-Dominant Mesh
3. Select Wireframe view.
"""
def convert_box_to_trimesh_fmt(box):
ctr = box[:3]
lengths = box[3:]
trns = np.eye(4)
trns[0:3, 3] = ctr
trns[3, 3] = 1.0
box_trimesh_fmt = trimesh.creation.box(lengths, trns)
return box_trimesh_fmt
scene = trimesh.scene.Scene()
for box in scene_bbox:
scene.add_geometry(convert_box_to_trimesh_fmt(box))
mesh_list = trimesh.util.concatenate(scene.dump())
# save to ply file
trimesh.io.export.export_mesh(mesh_list, out_filename, file_type="ply")
return
def write_oriented_bbox(scene_bbox, out_filename, colors=None):
"""Export oriented (around Z axis) scene bbox to meshes
Args:
scene_bbox: (N x 7 numpy array): xyz pos of center and 3 lengths (dx,dy,dz)
and heading angle around Z axis.
Y forward, X right, Z upward. heading angle of positive X is 0,
heading angle of positive Y is 90 degrees.
out_filename: (string) filename
"""
def heading2rotmat(heading_angle):
rotmat = np.zeros((3, 3))
rotmat[2, 2] = 1
cosval = np.cos(heading_angle)
sinval = np.sin(heading_angle)
rotmat[0:2, 0:2] = np.array([[cosval, -sinval], [sinval, cosval]])
return rotmat
def convert_oriented_box_to_trimesh_fmt(box):
ctr = box[:3]
lengths = box[3:6]
trns = np.eye(4)
trns[0:3, 3] = ctr
trns[3, 3] = 1.0
trns[0:3, 0:3] = heading2rotmat(box[6])
box_trimesh_fmt = trimesh.creation.box(lengths, trns)
return box_trimesh_fmt
if colors is not None:
if colors.shape[0] != len(scene_bbox):
colors = [colors for _ in range(len(scene_bbox))]
colors = np.array(colors).astype(np.uint8)
assert colors.shape[0] == len(scene_bbox)
assert colors.shape[1] == 4
scene = trimesh.scene.Scene()
for idx, box in enumerate(scene_bbox):
box_tr = convert_oriented_box_to_trimesh_fmt(box)
if colors is not None:
box_tr.visual.main_color[:] = colors[idx]
box_tr.visual.vertex_colors[:] = colors[idx]
for facet in box_tr.facets:
box_tr.visual.face_colors[facet] = colors[idx]
scene.add_geometry(box_tr)
mesh_list = trimesh.util.concatenate(scene.dump())
# save to ply file
trimesh.io.export.export_mesh(mesh_list, out_filename, file_type="ply")
return
def write_oriented_bbox_camera_coord(scene_bbox, out_filename):
"""Export oriented (around Y axis) scene bbox to meshes
Args:
scene_bbox: (N x 7 numpy array): xyz pos of center and 3 lengths (dx,dy,dz)
and heading angle around Y axis.
Z forward, X rightward, Y downward. heading angle of positive X is 0,
heading angle of negative Z is 90 degrees.
out_filename: (string) filename
"""
def heading2rotmat(heading_angle):
rotmat = np.zeros((3, 3))
rotmat[1, 1] = 1
cosval = np.cos(heading_angle)
sinval = np.sin(heading_angle)
rotmat[0, :] = np.array([cosval, 0, sinval])
rotmat[2, :] = np.array([-sinval, 0, cosval])
return rotmat
def convert_oriented_box_to_trimesh_fmt(box):
ctr = box[:3]
lengths = box[3:6]
trns = np.eye(4)
trns[0:3, 3] = ctr
trns[3, 3] = 1.0
trns[0:3, 0:3] = heading2rotmat(box[6])
box_trimesh_fmt = trimesh.creation.box(lengths, trns)
return box_trimesh_fmt
scene = trimesh.scene.Scene()
for box in scene_bbox:
scene.add_geometry(convert_oriented_box_to_trimesh_fmt(box))
mesh_list = trimesh.util.concatenate(scene.dump())
# save to ply file
trimesh.io.export.export_mesh(mesh_list, out_filename, file_type="ply")
return
def write_lines_as_cylinders(pcl, filename, rad=0.005, res=64):
"""Create lines represented as cylinders connecting pairs of 3D points
Args:
pcl: (N x 2 x 3 numpy array): N pairs of xyz pos
filename: (string) filename for the output mesh (ply) file
rad: radius for the cylinder
res: number of sections used to create the cylinder
"""
scene = trimesh.scene.Scene()
for src, tgt in pcl:
# compute line
vec = tgt - src
M = trimesh.geometry.align_vectors([0, 0, 1], vec, False)
vec = tgt - src # compute again since align_vectors modifies vec in-place!
M[:3, 3] = 0.5 * src + 0.5 * tgt
height = np.sqrt(np.dot(vec, vec))
scene.add_geometry(
trimesh.creation.cylinder(
radius=rad, height=height, sections=res, transform=M
)
)
mesh_list = trimesh.util.concatenate(scene.dump())
trimesh.io.export.export_mesh(mesh_list, "%s.ply" % (filename), file_type="ply")
|
# Copyright (c) Facebook, Inc. and its affiliates.
import torch
import os
from utils.dist import is_primary
def save_checkpoint(
checkpoint_dir,
model_no_ddp,
optimizer,
epoch,
args,
best_val_metrics,
filename=None,
):
if not is_primary():
return
if filename is None:
filename = f"checkpoint_{epoch:04d}.pth"
checkpoint_name = os.path.join(checkpoint_dir, filename)
sd = {
"model": model_no_ddp.state_dict(),
"optimizer": optimizer.state_dict(),
"epoch": epoch,
"args": args,
"best_val_metrics": best_val_metrics,
}
torch.save(sd, checkpoint_name)
def resume_if_possible(checkpoint_dir, model_no_ddp, optimizer):
"""
Resume if checkpoint is available.
Return
- epoch of loaded checkpoint.
"""
epoch = -1
best_val_metrics = {}
if not os.path.isdir(checkpoint_dir):
return epoch, best_val_metrics
last_checkpoint = os.path.join(checkpoint_dir, "checkpoint.pth")
if not os.path.isfile(last_checkpoint):
return epoch, best_val_metrics
sd = torch.load(last_checkpoint, map_location=torch.device("cpu"))
epoch = sd["epoch"]
best_val_metrics = sd["best_val_metrics"]
print(f"Found checkpoint at {epoch}. Resuming.")
model_no_ddp.load_state_dict(sd["model"])
optimizer.load_state_dict(sd["optimizer"])
print(
f"Loaded model and optimizer state at {epoch}. Loaded best val metrics so far."
)
return epoch, best_val_metrics
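# Illustrative usage sketch (editor addition; the `_demo_*` name is hypothetical):
# save under the name "checkpoint.pth" so that resume_if_possible, which only
# looks for that filename, can pick the checkpoint up again. Assumes
# utils.dist.is_primary() returns True when not running a distributed job.
def _demo_checkpoint_roundtrip():
    import tempfile
    import torch.nn as nn
    checkpoint_dir = tempfile.mkdtemp()
    model = nn.Linear(4, 2)
    optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
    save_checkpoint(
        checkpoint_dir, model, optimizer, epoch=3, args=None,
        best_val_metrics={}, filename="checkpoint.pth",
    )
    epoch, best_val_metrics = resume_if_possible(checkpoint_dir, model, optimizer)
    assert epoch == 3 and best_val_metrics == {}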
|
# Copyright (c) Facebook, Inc. and its affiliates.
import os
from urllib import request
import torch
import pickle
## Define the weights you want and where to store them
dataset = "scannet"
encoder = "_masked" # or ""
epoch = 1080
base_url = "https://dl.fbaipublicfiles.com/3detr/checkpoints"
local_dir = "/tmp/"
### Downloading the weights
weights_file = f"{dataset}{encoder}_ep{epoch}.pth"
metrics_file = f"{dataset}{encoder}_ep{epoch}_metrics.pkl"
local_weights = os.path.join(local_dir, weights_file)
local_metrics = os.path.join(local_dir, metrics_file)
url = os.path.join(base_url, weights_file)
request.urlretrieve(url, local_weights)
print(f"Downloaded weights from {url} to {local_weights}")
url = os.path.join(base_url, metrics_file)
request.urlretrieve(url, local_metrics)
print(f"Downloaded metrics from {url} to {local_metrics}")
# the weights can be loaded directly with PyTorch
weights = torch.load(local_weights, map_location=torch.device("cpu"))
print("Weights loaded successfully.")
# metrics can be loaded with pickle
with open(local_metrics, "rb") as fh:
metrics = pickle.load(fh)
print("Metrics loaded successfully.") |
# Copyright (c) Facebook, Inc. and its affiliates.
import numpy as np
# boxes are axis aligned 2D boxes of shape (n,5) in FLOAT numbers with (x1,y1,x2,y2,score)
""" Ref: https://www.pyimagesearch.com/2015/02/16/faster-non-maximum-suppression-python/
Ref: https://github.com/vickyboy47/nms-python/blob/master/nms.py
"""
def nms_2d(boxes, overlap_threshold):
x1 = boxes[:, 0]
y1 = boxes[:, 1]
x2 = boxes[:, 2]
y2 = boxes[:, 3]
score = boxes[:, 4]
area = (x2 - x1) * (y2 - y1)
I = np.argsort(score)
pick = []
while I.size != 0:
last = I.size
i = I[-1]
pick.append(i)
suppress = [last - 1]
for pos in range(last - 1):
j = I[pos]
xx1 = max(x1[i], x1[j])
yy1 = max(y1[i], y1[j])
xx2 = min(x2[i], x2[j])
yy2 = min(y2[i], y2[j])
w = xx2 - xx1
h = yy2 - yy1
if w > 0 and h > 0:
o = w * h / area[j]
print("Overlap is", o)
if o > overlap_threshold:
suppress.append(pos)
I = np.delete(I, suppress)
return pick
def nms_2d_faster(boxes, overlap_threshold, old_type=False):
x1 = boxes[:, 0]
y1 = boxes[:, 1]
x2 = boxes[:, 2]
y2 = boxes[:, 3]
score = boxes[:, 4]
area = (x2 - x1) * (y2 - y1)
I = np.argsort(score)
pick = []
while I.size != 0:
last = I.size
i = I[-1]
pick.append(i)
xx1 = np.maximum(x1[i], x1[I[: last - 1]])
yy1 = np.maximum(y1[i], y1[I[: last - 1]])
xx2 = np.minimum(x2[i], x2[I[: last - 1]])
yy2 = np.minimum(y2[i], y2[I[: last - 1]])
w = np.maximum(0, xx2 - xx1)
h = np.maximum(0, yy2 - yy1)
if old_type:
o = (w * h) / area[I[: last - 1]]
else:
inter = w * h
o = inter / (area[i] + area[I[: last - 1]] - inter)
I = np.delete(
I, np.concatenate(([last - 1], np.where(o > overlap_threshold)[0]))
)
return pick
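# Illustrative usage sketch (editor addition; the `_demo_*` name is hypothetical):
# two heavily overlapping boxes and one disjoint box; the lower-scoring
# overlapping box is suppressed and the indices of the kept boxes are returned.
def _demo_nms_2d_faster():
    boxes = np.array(
        [
            [0.0, 0.0, 10.0, 10.0, 0.9],    # kept (highest score)
            [1.0, 1.0, 11.0, 11.0, 0.8],    # suppressed (IoU ~ 0.68 with box 0)
            [20.0, 20.0, 30.0, 30.0, 0.7],  # kept (no overlap)
        ]
    )
    keep = nms_2d_faster(boxes, overlap_threshold=0.5)
    assert sorted(keep) == [0, 2]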
def nms_3d_faster(boxes, overlap_threshold, old_type=False):
x1 = boxes[:, 0]
y1 = boxes[:, 1]
z1 = boxes[:, 2]
x2 = boxes[:, 3]
y2 = boxes[:, 4]
z2 = boxes[:, 5]
score = boxes[:, 6]
area = (x2 - x1) * (y2 - y1) * (z2 - z1)
I = np.argsort(score)
pick = []
while I.size != 0:
last = I.size
i = I[-1]
pick.append(i)
xx1 = np.maximum(x1[i], x1[I[: last - 1]])
yy1 = np.maximum(y1[i], y1[I[: last - 1]])
zz1 = np.maximum(z1[i], z1[I[: last - 1]])
xx2 = np.minimum(x2[i], x2[I[: last - 1]])
yy2 = np.minimum(y2[i], y2[I[: last - 1]])
zz2 = np.minimum(z2[i], z2[I[: last - 1]])
l = np.maximum(0, xx2 - xx1)
w = np.maximum(0, yy2 - yy1)
h = np.maximum(0, zz2 - zz1)
if old_type:
o = (l * w * h) / area[I[: last - 1]]
else:
inter = l * w * h
o = inter / (area[i] + area[I[: last - 1]] - inter)
I = np.delete(
I, np.concatenate(([last - 1], np.where(o > overlap_threshold)[0]))
)
return pick
def nms_3d_faster_samecls(boxes, overlap_threshold, old_type=False):
x1 = boxes[:, 0]
y1 = boxes[:, 1]
z1 = boxes[:, 2]
x2 = boxes[:, 3]
y2 = boxes[:, 4]
z2 = boxes[:, 5]
score = boxes[:, 6]
cls = boxes[:, 7]
area = (x2 - x1) * (y2 - y1) * (z2 - z1)
I = np.argsort(score)
pick = []
while I.size != 0:
last = I.size
i = I[-1]
pick.append(i)
xx1 = np.maximum(x1[i], x1[I[: last - 1]])
yy1 = np.maximum(y1[i], y1[I[: last - 1]])
zz1 = np.maximum(z1[i], z1[I[: last - 1]])
xx2 = np.minimum(x2[i], x2[I[: last - 1]])
yy2 = np.minimum(y2[i], y2[I[: last - 1]])
zz2 = np.minimum(z2[i], z2[I[: last - 1]])
cls1 = cls[i]
cls2 = cls[I[: last - 1]]
l = np.maximum(0, xx2 - xx1)
w = np.maximum(0, yy2 - yy1)
h = np.maximum(0, zz2 - zz1)
if old_type:
o = (l * w * h) / area[I[: last - 1]]
else:
inter = l * w * h
o = inter / (area[i] + area[I[: last - 1]] - inter)
o = o * (cls1 == cls2)
I = np.delete(
I, np.concatenate(([last - 1], np.where(o > overlap_threshold)[0]))
)
return pick
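# Illustrative usage sketch (editor addition; the `_demo_*` name is hypothetical):
# the class-aware variant only suppresses overlapping boxes of the *same* class,
# so a box with identical geometry but a different class label survives.
def _demo_nms_3d_faster_samecls():
    boxes = np.array(
        [
            [0.0, 0.0, 0.0, 10.0, 10.0, 10.0, 0.9, 0],  # kept (highest score, class 0)
            [1.0, 1.0, 1.0, 11.0, 11.0, 11.0, 0.8, 0],  # suppressed (same class, 3D IoU ~ 0.57)
            [1.0, 1.0, 1.0, 11.0, 11.0, 11.0, 0.7, 1],  # kept (different class)
        ]
    )
    keep = nms_3d_faster_samecls(boxes, overlap_threshold=0.5)
    assert sorted(keep) == [0, 2]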
|
# Copyright (c) Facebook, Inc. and its affiliates.
import torch
try:
from tensorboardX import SummaryWriter
except ImportError:
print("Cannot import tensorboard. Will log to txt files only.")
SummaryWriter = None
from utils.dist import is_primary
class Logger(object):
def __init__(self, log_dir=None) -> None:
self.log_dir = log_dir
if SummaryWriter is not None and is_primary():
self.writer = SummaryWriter(self.log_dir)
else:
self.writer = None
def log_scalars(self, scalar_dict, step, prefix=None):
if self.writer is None:
return
for k in scalar_dict:
v = scalar_dict[k]
if isinstance(v, torch.Tensor):
v = v.detach().cpu().item()
if prefix is not None:
k = prefix + k
self.writer.add_scalar(k, v, step)
|
# Copyright (c) Facebook, Inc. and its affiliates.
import numpy as np
def check_aspect(crop_range, aspect_min):
xy_aspect = np.min(crop_range[:2]) / np.max(crop_range[:2])
xz_aspect = np.min(crop_range[[0, 2]]) / np.max(crop_range[[0, 2]])
yz_aspect = np.min(crop_range[1:]) / np.max(crop_range[1:])
return (
(xy_aspect >= aspect_min)
or (xz_aspect >= aspect_min)
or (yz_aspect >= aspect_min)
)
class RandomCuboid(object):
"""
RandomCuboid augmentation from DepthContrast [https://arxiv.org/abs/2101.02691]
We slightly modify this operation to account for object detection.
This augmentation randomly crops a cuboid from the input and
ensures that the cropped cuboid contains at least one bounding box
"""
def __init__(
self,
min_points,
aspect=0.8,
min_crop=0.5,
max_crop=1.0,
box_filter_policy="center",
):
self.aspect = aspect
self.min_crop = min_crop
self.max_crop = max_crop
self.min_points = min_points
self.box_filter_policy = box_filter_policy
def __call__(self, point_cloud, target_boxes, per_point_labels=None):
range_xyz = np.max(point_cloud[:, 0:3], axis=0) - np.min(
point_cloud[:, 0:3], axis=0
)
for _ in range(100):
crop_range = self.min_crop + np.random.rand(3) * (
self.max_crop - self.min_crop
)
if not check_aspect(crop_range, self.aspect):
continue
sample_center = point_cloud[np.random.choice(len(point_cloud)), 0:3]
new_range = range_xyz * crop_range / 2.0
max_xyz = sample_center + new_range
min_xyz = sample_center - new_range
upper_idx = (
np.sum((point_cloud[:, 0:3] <= max_xyz).astype(np.int32), 1) == 3
)
lower_idx = (
np.sum((point_cloud[:, 0:3] >= min_xyz).astype(np.int32), 1) == 3
)
new_pointidx = (upper_idx) & (lower_idx)
if np.sum(new_pointidx) < self.min_points:
continue
new_point_cloud = point_cloud[new_pointidx, :]
# filtering policy is the only modification from DepthContrast
if self.box_filter_policy == "center":
# remove boxes whose center does not lie within the new_point_cloud
new_boxes = target_boxes
if (
target_boxes.sum() > 0
                ):  # only filter when ground truth has boxes; empty GT is common in SUNRGBD
box_centers = target_boxes[:, 0:3]
new_pc_min_max = np.min(new_point_cloud[:, 0:3], axis=0), np.max(
new_point_cloud[:, 0:3], axis=0
)
keep_boxes = np.logical_and(
np.all(box_centers >= new_pc_min_max[0], axis=1),
np.all(box_centers <= new_pc_min_max[1], axis=1),
)
if keep_boxes.sum() == 0:
# current data augmentation removes all boxes in the pointcloud. fail!
continue
new_boxes = target_boxes[keep_boxes]
if per_point_labels is not None:
new_per_point_labels = [x[new_pointidx] for x in per_point_labels]
else:
new_per_point_labels = None
# if we are here, all conditions are met. return boxes
return new_point_cloud, new_boxes, new_per_point_labels
# fallback
return point_cloud, target_boxes, per_point_labels
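# Illustrative usage sketch (editor addition; the `_demo_*` name is hypothetical):
# cropping a random cuboid from a synthetic scene with a single ground-truth box.
# If no valid crop is found within 100 attempts, the original point cloud and
# boxes are returned unchanged.
def _demo_random_cuboid():
    point_cloud = np.random.rand(2048, 3) * 10.0
    # one box: center (5,5,5), sizes (1,1,1), heading 0
    target_boxes = np.array([[5.0, 5.0, 5.0, 1.0, 1.0, 1.0, 0.0]])
    aug = RandomCuboid(min_points=100)
    new_pc, new_boxes, _ = aug(point_cloud, target_boxes)
    assert new_pc.shape[0] >= 100 and new_boxes.shape[0] >= 1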
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
"""
Utilities for bounding box manipulation and GIoU.
"""
import torch
from torchvision.ops.boxes import box_area
from typing import List
try:
from box_intersection import batch_intersect
except ImportError:
print("Could not import cythonized batch_intersection")
batch_intersect = None
import numpy as np
from scipy.spatial import ConvexHull
def polygon_clip(subjectPolygon, clipPolygon):
""" Clip a polygon with another polygon.
Ref: https://rosettacode.org/wiki/Sutherland-Hodgman_polygon_clipping#Python
Args:
subjectPolygon: a list of (x,y) 2d points, any polygon.
clipPolygon: a list of (x,y) 2d points, has to be *convex*
Note:
**points have to be counter-clockwise ordered**
Return:
a list of (x,y) vertex point for the intersection polygon.
"""
def inside(p):
return(cp2[0]-cp1[0])*(p[1]-cp1[1]) > (cp2[1]-cp1[1])*(p[0]-cp1[0])
# diff_cp = cp2 - cp1
# diff_p = p - cp1
# diff_p = diff_p[[1, 0]]
# mult = diff_cp * diff_p
# return mult[0] > mult[1]
def computeIntersection():
dc = [ cp1[0] - cp2[0], cp1[1] - cp2[1] ]
dp = [ s[0] - e[0], s[1] - e[1] ]
n1 = cp1[0] * cp2[1] - cp1[1] * cp2[0]
n2 = s[0] * e[1] - s[1] * e[0]
n3 = 1.0 / (dc[0] * dp[1] - dc[1] * dp[0])
return [(n1*dp[0] - n2*dc[0]) * n3, (n1*dp[1] - n2*dc[1]) * n3]
# dc = cp1 - cp2
# dp = s - e
# n1 = cp1[0] * cp2[1] - cp1[1] * cp2[0]
# n2 = s[0] * e[1] - s[1] * e[0]
# n3 = 1.0 / (dc[0] * dp[1] - dc[1] * dp[0])
# return (n1 * dp - n2 * dc) * n3
outputList = subjectPolygon
cp1 = clipPolygon[-1]
for clipVertex in clipPolygon:
cp2 = clipVertex
inputList = outputList
outputList = []
s = inputList[-1]
for subjectVertex in inputList:
e = subjectVertex
if inside(e):
if not inside(s):
outputList.append(computeIntersection())
outputList.append(e)
elif inside(s):
outputList.append(computeIntersection())
s = e
cp1 = cp2
if len(outputList) == 0:
return None
return(outputList)
def helper_computeIntersection(cp1: torch.Tensor, cp2: torch.Tensor, s: torch.Tensor, e: torch.Tensor):
dc = [ cp1[0] - cp2[0], cp1[1] - cp2[1] ]
dp = [ s[0] - e[0], s[1] - e[1] ]
n1 = cp1[0] * cp2[1] - cp1[1] * cp2[0]
n2 = s[0] * e[1] - s[1] * e[0]
n3 = 1.0 / (dc[0] * dp[1] - dc[1] * dp[0])
# return [(n1*dp[0] - n2*dc[0]) * n3, (n1*dp[1] - n2*dc[1]) * n3]
return torch.stack([(n1*dp[0] - n2*dc[0]) * n3, (n1*dp[1] - n2*dc[1]) * n3])
def helper_inside(cp1: torch.Tensor, cp2: torch.Tensor, p: torch.Tensor):
ineq = (cp2[0]-cp1[0])*(p[1]-cp1[1]) > (cp2[1]-cp1[1])*(p[0]-cp1[0])
return ineq.item()
def polygon_clip_unnest(subjectPolygon: torch.Tensor, clipPolygon: torch.Tensor):
""" Clip a polygon with another polygon.
Ref: https://rosettacode.org/wiki/Sutherland-Hodgman_polygon_clipping#Python
Args:
subjectPolygon: a list of (x,y) 2d points, any polygon.
clipPolygon: a list of (x,y) 2d points, has to be *convex*
Note:
**points have to be counter-clockwise ordered**
Return:
a list of (x,y) vertex point for the intersection polygon.
"""
outputList = [subjectPolygon[x] for x in range(subjectPolygon.shape[0])]
cp1 = clipPolygon[-1]
for clipVertex in clipPolygon:
cp2 = clipVertex
inputList = outputList.copy()
outputList.clear()
s = inputList[-1]
for subjectVertex in inputList:
e = subjectVertex
if helper_inside(cp1, cp2, e):
if not helper_inside(cp1, cp2, s):
outputList.append(helper_computeIntersection(cp1, cp2, s, e))
outputList.append(e)
elif helper_inside(cp1, cp2, s):
outputList.append(helper_computeIntersection(cp1, cp2, s, e))
s = e
cp1 = cp2
if len(outputList) == 0:
# return None
break
return outputList
def poly_area(x,y):
""" Ref: http://stackoverflow.com/questions/24467972/calculate-area-of-polygon-given-x-y-coordinates """
return 0.5*np.abs(np.dot(x,np.roll(y,1))-np.dot(y,np.roll(x,1)))
def poly_area_tensor(x, y):
return 0.5*torch.abs(torch.dot(x,torch.roll(y,1))-torch.dot(y,torch.roll(x,1)))
def box3d_vol_tensor(corners):
EPS = 1e-6
reshape = False
B, K = corners.shape[0], corners.shape[1]
if len(corners.shape) == 4:
# batch x prop x 8 x 3
reshape = True
corners = corners.view(-1, 8, 3)
a = torch.sqrt((corners[:, 0, :] - corners[:, 1, :]).pow(2).sum(dim=1).clamp(min=EPS))
b = torch.sqrt((corners[:, 1, :] - corners[:, 2, :]).pow(2).sum(dim=1).clamp(min=EPS))
c = torch.sqrt((corners[:, 0, :] - corners[:, 4, :]).pow(2).sum(dim=1).clamp(min=EPS))
vols = a * b * c
if reshape:
vols = vols.view(B, K)
return vols
def convex_hull_intersection(p1, p2):
""" Compute area of two convex hull's intersection area.
p1,p2 are a list of (x,y) tuples of hull vertices.
return a list of (x,y) for the intersection and its volume
"""
inter_p = polygon_clip(p1,p2)
if inter_p is not None:
hull_inter = ConvexHull(inter_p)
return inter_p, hull_inter.volume
else:
return None, 0.0
def box3d_vol(corners):
''' corners: (8,3) no assumption on axis direction '''
a = np.sqrt(np.sum((corners[0,:] - corners[1,:])**2))
b = np.sqrt(np.sum((corners[1,:] - corners[2,:])**2))
c = np.sqrt(np.sum((corners[0,:] - corners[4,:])**2))
return a*b*c
def enclosing_box3d_vol(corners1, corners2):
"""
volume of enclosing axis-aligned box
"""
assert len(corners1.shape) == 4
assert len(corners2.shape)== 4
assert corners1.shape[0] == corners2.shape[0]
assert corners1.shape[2] == 8
assert corners1.shape[3] == 3
assert corners2.shape[2] == 8
assert corners2.shape[3] == 3
EPS = 1e-6
corners1 = corners1.clone()
corners2 = corners2.clone()
# flip Y axis, since it is negative
corners1[:, :, :, 1] *= -1
corners2[:, :, :, 1] *= -1
# min_a = torch.min(corners1[:, :, 0, :][:, :, None, :] , corners2[:, :, 0, :][:, None, :, :])
# max_a = torch.max(corners1[:, :, 1, :][:, :, None, :] , corners2[:, :, 1, :][:, None, :, :])
# a = (max_a - min_a).pow(2).sum(dim=3).clamp(min=EPS).sqrt()
# min_b = torch.min(corners1[:, :, 1, :][:, :, None, :] , corners2[:, :, 1, :][:, None, :, :])
# max_b = torch.max(corners1[:, :, 2, :][:, :, None, :] , corners2[:, :, 2, :][:, None, :, :])
# b = (max_b - min_b).pow(2).sum(dim=3).clamp(min=EPS).sqrt()
# min_c = torch.min(corners1[:, :, 0, :][:, :, None, :] , corners2[:, :, 0, :][:, None, :, :])
# max_c = torch.max(corners1[:, :, 4, :][:, :, None, :] , corners2[:, :, 4, :][:, None, :, :])
# c = (max_c - min_c).pow(2).sum(dim=3).clamp(min=EPS).sqrt()
# vol = a * b * c
al_xmin = torch.min( torch.min(corners1[:, :, :, 0], dim=2).values[:, :, None], torch.min(corners2[:, :, :, 0], dim=2).values[:, None, :])
al_ymin = torch.max( torch.max(corners1[:, :, :, 1], dim=2).values[:, :, None], torch.max(corners2[:, :, :, 1], dim=2).values[:, None, :])
al_zmin = torch.min( torch.min(corners1[:, :, :, 2], dim=2).values[:, :, None], torch.min(corners2[:, :, :, 2], dim=2).values[:, None, :])
al_xmax = torch.max( torch.max(corners1[:, :, :, 0], dim=2).values[:, :, None], torch.max(corners2[:, :, :, 0], dim=2).values[:, None, :])
al_ymax = torch.min( torch.min(corners1[:, :, :, 1], dim=2).values[:, :, None], torch.min(corners2[:, :, :, 1], dim=2).values[:, None, :])
al_zmax = torch.max( torch.max(corners1[:, :, :, 2], dim=2).values[:, :, None], torch.max(corners2[:, :, :, 2], dim=2).values[:, None, :])
diff_x = torch.abs(al_xmax - al_xmin)
diff_y = torch.abs(al_ymax - al_ymin)
diff_z = torch.abs(al_zmax - al_zmin)
vol = diff_x * diff_y * diff_z
return vol
def is_clockwise(p):
x = p[:,0]
y = p[:,1]
return np.dot(x,np.roll(y,1))-np.dot(y,np.roll(x,1)) > 0
def box3d_iou(corners1, corners2):
    ''' Compute 3D bounding box IoU.
    Input:
        corners1: numpy array (8,3), assume up direction is negative Y
        corners2: numpy array (8,3), assume up direction is negative Y
    Output:
        iou: 3D bounding box IoU
        union: union volume of the two boxes (vol1 + vol2 - inter_vol)
    todo (rqi): add more description on corner points' orders.
    '''
rect1 = [(corners1[i,0], corners1[i,2]) for i in range(3,-1,-1)]
rect2 = [(corners2[i,0], corners2[i,2]) for i in range(3,-1,-1)]
inter, inter_area = convex_hull_intersection(rect1, rect2)
# corner points are in counter clockwise order
# area1 = poly_area(np.array(rect1)[:,0], np.array(rect1)[:,1])
# area2 = poly_area(np.array(rect2)[:,0], np.array(rect2)[:,1])
# iou_2d = inter_area/(area1+area2-inter_area)
ymax = min(corners1[0,1], corners2[0,1])
ymin = max(corners1[4,1], corners2[4,1])
inter_vol = inter_area * max(0.0, ymax-ymin)
vol1 = box3d_vol(corners1)
vol2 = box3d_vol(corners2)
union = (vol1 + vol2 - inter_vol)
iou = inter_vol / union
return iou, union
@torch.jit.ignore
def to_list_1d(arr) -> List[float]:
arr = arr.detach().cpu().numpy().tolist()
return arr
@torch.jit.ignore
def to_list_3d(arr) -> List[List[List[float]]]:
arr = arr.detach().cpu().numpy().tolist()
return arr
def generalized_box3d_iou_tensor_non_diff(corners1: torch.Tensor, corners2: torch.Tensor, nums_k2: torch.Tensor, rotated_boxes: bool = True,
return_inter_vols_only: bool = False,
approximate: bool = True):
if batch_intersect is None:
return generalized_box3d_iou_tensor_jit(corners1, corners2, nums_k2, rotated_boxes, return_inter_vols_only)
else:
assert len(corners1.shape) == 4
assert len(corners2.shape)== 4
assert corners1.shape[2] == 8
assert corners1.shape[3] == 3
assert corners1.shape[0] == corners2.shape[0]
assert corners1.shape[2] == corners2.shape[2]
assert corners1.shape[3] == corners2.shape[3]
B, K1 = corners1.shape[0], corners1.shape[1]
_, K2 = corners2.shape[0], corners2.shape[1]
# # box height. Y is negative, so max is torch.min
ymax = torch.min(corners1[:, :, 0,1][:, :, None], corners2[:, :, 0,1][:, None, :])
ymin = torch.max(corners1[:, :, 4,1][:, :, None], corners2[:, :, 4,1][:, None, :])
height = (ymax - ymin).clamp(min=0)
EPS = 1e-8
idx = torch.arange(start=3, end=-1, step=-1, device=corners1.device)
idx2 = torch.tensor([0,2], dtype=torch.int64, device=corners1.device)
rect1 = corners1[:, :, idx, :]
rect2 = corners2[:, :, idx, :]
rect1 = rect1[:, :, :, idx2]
rect2 = rect2[:, :, :, idx2]
lt = torch.max(rect1[:, :, 1][:, :, None, :], rect2[:, :, 1][:, None, : ,:])
rb = torch.min(rect1[:, :, 3][:, :, None, :], rect2[:, :, 3][:, None, : ,:])
wh = (rb - lt).clamp(min=0)
non_rot_inter_areas = wh[:, :, :, 0] * wh[:, :, :, 1]
non_rot_inter_areas = non_rot_inter_areas.view(B, K1, K2)
if nums_k2 is not None:
for b in range(B):
non_rot_inter_areas[b, :, nums_k2[b]:] = 0
enclosing_vols = enclosing_box3d_vol(corners1, corners2)
# vols of boxes
vols1 = box3d_vol_tensor(corners1).clamp(min=EPS)
vols2 = box3d_vol_tensor(corners2).clamp(min=EPS)
sum_vols = vols1[:, :, None] + vols2[:, None, :]
# filter malformed boxes
good_boxes = (enclosing_vols > 2*EPS) * (sum_vols > 4*EPS)
if rotated_boxes:
inter_areas = np.zeros((B, K1, K2), dtype=np.float32)
rect1 = rect1.cpu().detach().numpy()
rect2 = rect2.cpu().detach().numpy()
nums_k2_np = nums_k2.cpu().numpy()
non_rot_inter_areas_np = non_rot_inter_areas.cpu().detach().numpy()
batch_intersect(rect1, rect2, non_rot_inter_areas_np, nums_k2_np, inter_areas, approximate)
inter_areas = torch.from_numpy(inter_areas)
else:
inter_areas = non_rot_inter_areas
inter_areas = inter_areas.to(corners1.device)
        ### gIoU = IoU - (1 - union_vols / enclosing_vols)
inter_vols = inter_areas * height
if return_inter_vols_only:
return inter_vols
union_vols = (sum_vols - inter_vols).clamp(min=EPS)
ious = inter_vols / union_vols
giou_second_term = - (1 - union_vols / enclosing_vols)
gious = ious + giou_second_term
gious *= good_boxes
if nums_k2 is not None:
mask = torch.zeros((B, K1, K2), device=height.device, dtype=torch.float32)
for b in range(B):
mask[b,:,:nums_k2[b]] = 1
gious *= mask
return gious
def generalized_box3d_iou_tensor(corners1: torch.Tensor, corners2: torch.Tensor, nums_k2: torch.Tensor, rotated_boxes: bool = True,
return_inter_vols_only: bool = False, no_grad: bool = False):
"""
Input:
corners1: torch Tensor (B, K1, 8, 3), assume up direction is negative Y
corners2: torch Tensor (B, K2, 8, 3), assume up direction is negative Y
Assumes that the box is only rotated along Z direction
Returns:
B x K1 x K2 matrix of generalized IOU by approximating the boxes to be axis aligned
The return IOU is differentiable
"""
assert len(corners1.shape) == 4
assert len(corners2.shape)== 4
assert corners1.shape[2] == 8
assert corners1.shape[3] == 3
assert corners1.shape[0] == corners2.shape[0]
assert corners1.shape[2] == corners2.shape[2]
assert corners1.shape[3] == corners2.shape[3]
B, K1 = corners1.shape[0], corners1.shape[1]
_, K2 = corners2.shape[0], corners2.shape[1]
# # box height. Y is negative, so max is torch.min
ymax = torch.min(corners1[:, :, 0,1][:, :, None], corners2[:, :, 0,1][:, None, :])
ymin = torch.max(corners1[:, :, 4,1][:, :, None], corners2[:, :, 4,1][:, None, :])
height = (ymax - ymin).clamp(min=0)
EPS = 1e-8
idx = torch.arange(start=3, end=-1, step=-1, device=corners1.device)
idx2 = torch.tensor([0,2], dtype=torch.int64, device=corners1.device)
rect1 = corners1[:, :, idx, :]
rect2 = corners2[:, :, idx, :]
rect1 = rect1[:, :, :, idx2]
rect2 = rect2[:, :, :, idx2]
lt = torch.max(rect1[:, :, 1][:, :, None, :], rect2[:, :, 1][:, None, : ,:])
rb = torch.min(rect1[:, :, 3][:, :, None, :], rect2[:, :, 3][:, None, : ,:])
wh = (rb - lt).clamp(min=0)
non_rot_inter_areas = wh[:, :, :, 0] * wh[:, :, :, 1]
non_rot_inter_areas = non_rot_inter_areas.view(B, K1, K2)
if nums_k2 is not None:
for b in range(B):
non_rot_inter_areas[b, :, nums_k2[b]:] = 0
enclosing_vols = enclosing_box3d_vol(corners1, corners2)
# vols of boxes
vols1 = box3d_vol_tensor(corners1).clamp(min=EPS)
vols2 = box3d_vol_tensor(corners2).clamp(min=EPS)
sum_vols = vols1[:, :, None] + vols2[:, None, :]
# filter malformed boxes
good_boxes = (enclosing_vols > 2*EPS) * (sum_vols > 4*EPS)
if rotated_boxes:
inter_areas = torch.zeros((B, K1, K2), dtype=torch.float32)
rect1 = rect1.cpu()
rect2 = rect2.cpu()
nums_k2_np = to_list_1d(nums_k2)
non_rot_inter_areas_np = to_list_3d(non_rot_inter_areas)
for b in range(B):
for k1 in range(K1):
for k2 in range(K2):
if nums_k2 is not None and k2 >= nums_k2_np[b]:
break
if non_rot_inter_areas_np[b][k1][k2] == 0:
continue
##### compute volume of intersection
# inter = polygon_clip(rect1[b, k1], rect2[b, k2])
inter = polygon_clip_unnest(rect1[b, k1], rect2[b, k2])
# if inter is None:
# if len(inter) == 0:
# # area = torch.zeros(1, dtype=torch.float32, device=inter_areas.device).squeeze()
# # area = 0
# continue
# else:
if len(inter) > 0:
# inter = torch.stack(inter)
# xs = inter[:, 0]
# ys = inter[:, 1]
xs = torch.stack([x[0] for x in inter])
ys = torch.stack([x[1] for x in inter])
# area = poly_area_tensor(xs, ys)
inter_areas[b,k1,k2] = torch.abs(torch.dot(xs,torch.roll(ys,1))-torch.dot(ys,torch.roll(xs,1)))
inter_areas.mul_(0.5)
else:
inter_areas = non_rot_inter_areas
inter_areas = inter_areas.to(corners1.device)
    ### gIoU = IoU - (1 - union_vols / enclosing_vols)
inter_vols = inter_areas * height
if return_inter_vols_only:
return inter_vols
union_vols = (sum_vols - inter_vols).clamp(min=EPS)
ious = inter_vols / union_vols
giou_second_term = - (1 - union_vols / enclosing_vols)
gious = ious + giou_second_term
gious *= good_boxes
if nums_k2 is not None:
mask = torch.zeros((B, K1, K2), device=height.device, dtype=torch.float32)
for b in range(B):
mask[b,:,:nums_k2[b]] = 1
gious *= mask
return gious
generalized_box3d_iou_tensor_jit = torch.jit.script(generalized_box3d_iou_tensor)
def enclosing_box3d_convex_hull(corners1, corners2, nums_k2, mask, enclosing_vols=None):
B, K1 = corners1.shape[0], corners1.shape[1]
_, K2 = corners2.shape[0], corners2.shape[1]
if enclosing_vols is None:
enclosing_vols = np.zeros((B, K1, K2)).astype(np.float32)
for b in range(B):
for k1 in range(K1):
for k2 in range(K2):
if nums_k2 is not None and k2 >= nums_k2[b]:
break
if mask is not None and mask[b,k1,k2] <= 0:
continue
hull = ConvexHull(np.vstack([corners1[b, k1], corners2[b, k2]]))
enclosing_vols[b, k1, k2] = hull.volume
return enclosing_vols
try:
    # `autojit` is not imported above and has been removed from recent numba
    # releases; wrap the pure-Python implementation with an object-mode jit when
    # numba is installed, otherwise fall back to the plain function.
    from numba import jit
    enclosing_box3d_convex_hull_numba = jit(enclosing_box3d_convex_hull, forceobj=True)
except ImportError:
    enclosing_box3d_convex_hull_numba = enclosing_box3d_convex_hull
def generalized_box3d_iou_convex_hull_nondiff_tensor(corners1: torch.Tensor, corners2: torch.Tensor, nums_k2: torch.Tensor, rotated_boxes: bool = True):
"""
Input:
corners1: torch Tensor (B, K1, 8, 3), assume up direction is negative Y
corners2: torch Tensor (B, K2, 8, 3), assume up direction is negative Y
Assumes that the box is only rotated along Z direction
Returns:
B x K1 x K2 matrix of generalized IOU by approximating the boxes to be axis aligned
The return IOU is differentiable
"""
assert len(corners1.shape) == 4
assert len(corners2.shape)== 4
assert corners1.shape[2] == 8
assert corners1.shape[3] == 3
assert corners1.shape[0] == corners2.shape[0]
assert corners1.shape[2] == corners2.shape[2]
assert corners1.shape[3] == corners2.shape[3]
B, K1 = corners1.shape[0], corners1.shape[1]
_, K2 = corners2.shape[0], corners2.shape[1]
EPS = 1e-8
# vols of boxes
vols1 = box3d_vol_tensor(corners1).clamp(min=EPS)
vols2 = box3d_vol_tensor(corners2).clamp(min=EPS)
sum_vols = vols1[:, :, None] + vols2[:, None, :]
inter_vols = generalized_box3d_iou_tensor_jit(corners1, corners2, nums_k2, rotated_boxes, return_inter_vols_only=True)
enclosing_vols = enclosing_box3d_vol(corners1, corners2)
if rotated_boxes:
corners1_np = corners1.detach().cpu().numpy()
corners2_np = corners2.detach().cpu().numpy()
mask = inter_vols.detach().cpu().numpy()
nums_k2 = nums_k2.cpu().numpy()
enclosing_vols_np = enclosing_vols.detach().cpu().numpy()
enclosing_vols = enclosing_box3d_convex_hull_numba(corners1_np, corners2_np, nums_k2, mask, enclosing_vols_np)
enclosing_vols = torch.from_numpy(enclosing_vols).to(corners1.device)
union_vols = (sum_vols - inter_vols).clamp(min=EPS)
ious = inter_vols / union_vols
giou_second_term = - (1 - union_vols / enclosing_vols)
gious = ious + giou_second_term
good_boxes = (enclosing_vols > 2*EPS) * (sum_vols > 4*EPS)
gious *= good_boxes
if nums_k2 is not None:
mask = torch.zeros((B, K1, K2), device=corners1.device, dtype=torch.float32)
for b in range(B):
mask[b,:,:nums_k2[b]] = 1
gious *= mask
return gious
def generalized_box3d_iou(corners1, corners2, nums_k2=None):
"""
Input:
corners1: torch Tensor (B, K1, 8, 3), assume up direction is negative Y
corners2: torch Tensor (B, K2, 8, 3), assume up direction is negative Y
mask:
Returns:
B x K1 x K2 matrix of generalized IOU
"""
    # GIoU = IoU - (C - U) / C
    # where C = vol of the convex hull containing both boxes and U = union volume
# degenerate boxes gives inf / nan results
# so do an early check
#TODO:
assert corners1.ndim == 4
assert corners2.ndim == 4
assert corners1.shape[0] == corners2.shape[0]
B, K1, _ , _ = corners1.shape
_, K2, _, _ = corners2.shape
gious = torch.zeros((B, K1, K2), dtype=torch.float32)
corners1_np = corners1.detach().cpu().numpy()
corners2_np = corners2.detach().cpu().numpy()
for b in range(B):
for i in range(K1):
for j in range(K2):
if nums_k2 is not None and j >= nums_k2[b]:
break
                iou, union_vol = box3d_iou(corners1_np[b, i], corners2_np[b, j])
                hull = ConvexHull(np.vstack([corners1_np[b, i], corners2_np[b, j]]))
                C = hull.volume
                giou = iou - (C - union_vol) / C
gious[b, i, j] = giou
return gious
# -----------------------------------------------------------
# Convert from box parameters to
# -----------------------------------------------------------
def roty(t):
"""Rotation about the y-axis."""
c = np.cos(t)
s = np.sin(t)
return np.array([[c, 0, s],
[0, 1, 0],
[-s, 0, c]])
def roty_batch(t):
"""Rotation about the y-axis.
t: (x1,x2,...xn)
return: (x1,x2,...,xn,3,3)
"""
input_shape = t.shape
output = np.zeros(tuple(list(input_shape)+[3,3]))
c = np.cos(t)
s = np.sin(t)
output[...,0,0] = c
output[...,0,2] = s
output[...,1,1] = 1
output[...,2,0] = -s
output[...,2,2] = c
return output
def get_3d_box(box_size, heading_angle, center):
    ''' box_size is array(l,w,h), heading_angle is in radians, clockwise from the positive x axis, center is xyz of box center
    output (8,3) array for 3D box corners
    Similar to utils/compute_orientation_3d
    '''
R = roty(heading_angle)
l,w,h = box_size
    x_corners = [l/2,l/2,-l/2,-l/2,l/2,l/2,-l/2,-l/2]
    y_corners = [h/2,h/2,h/2,h/2,-h/2,-h/2,-h/2,-h/2]
    z_corners = [w/2,-w/2,-w/2,w/2,w/2,-w/2,-w/2,w/2]
    corners_3d = np.dot(R, np.vstack([x_corners,y_corners,z_corners]))
    corners_3d[0,:] = corners_3d[0,:] + center[0]
    corners_3d[1,:] = corners_3d[1,:] + center[1]
    corners_3d[2,:] = corners_3d[2,:] + center[2]
corners_3d = np.transpose(corners_3d)
return corners_3d
def get_3d_box_batch(box_size, heading_angle, center):
''' box_size: [x1,x2,...,xn,3] -- box dimensions without flipping [X, Y, Z] -- l, w, h
heading_angle: [x1,x2,...,xn] -- theta in radians
center: [x1,x2,...,xn,3] -- center point has been flipped to camera axis [X, -Z, Y]
Return:
[x1,x3,...,xn,8,3]
'''
input_shape = heading_angle.shape
R = roty_batch(heading_angle)
l = np.expand_dims(box_size[...,0], -1) # [x1,...,xn,1]
w = np.expand_dims(box_size[...,1], -1)
h = np.expand_dims(box_size[...,2], -1)
corners_3d = np.zeros(tuple(list(input_shape)+[8,3]))
corners_3d[...,:,0] = np.concatenate((l/2,l/2,-l/2,-l/2,l/2,l/2,-l/2,-l/2), -1)
corners_3d[...,:,1] = np.concatenate((h/2,h/2,h/2,h/2,-h/2,-h/2,-h/2,-h/2), -1)
corners_3d[...,:,2] = np.concatenate((w/2,-w/2,-w/2,w/2,w/2,-w/2,-w/2,w/2), -1)
tlist = [i for i in range(len(input_shape))]
tlist += [len(input_shape)+1, len(input_shape)]
corners_3d = np.matmul(corners_3d, np.transpose(R, tuple(tlist)))
corners_3d += np.expand_dims(center, -2)
return corners_3d
def roty_batch_tensor(t):
input_shape = t.shape
output = torch.zeros(tuple(list(input_shape)+[3,3]), dtype=torch.float32, device=t.device)
c = torch.cos(t)
s = torch.sin(t)
output[...,0,0] = c
output[...,0,2] = s
output[...,1,1] = 1
output[...,2,0] = -s
output[...,2,2] = c
return output
def flip_axis_to_camera_tensor(pc):
''' Flip X-right,Y-forward,Z-up to X-right,Y-down,Z-forward
Input and output are both (N,3) array
'''
pc2 = torch.clone(pc)
pc2[...,[0,1,2]] = pc2[...,[0,2,1]] # cam X,Y,Z = depth X,-Z,Y
pc2[...,1] *= -1
return pc2
def get_3d_box_batch_tensor(box_size, heading_angle, center):
assert isinstance(box_size, torch.Tensor)
assert isinstance(heading_angle, torch.Tensor)
assert isinstance(center, torch.Tensor)
reshape_final = False
if heading_angle.ndim == 2:
assert box_size.ndim == 3
assert center.ndim == 3
bsize = box_size.shape[0]
nprop = box_size.shape[1]
box_size = box_size.view(-1, box_size.shape[-1])
heading_angle = heading_angle.view(-1)
center = center.reshape(-1, 3)
reshape_final = True
input_shape = heading_angle.shape
R = roty_batch_tensor(heading_angle)
l = torch.unsqueeze(box_size[...,0], -1) # [x1,...,xn,1]
w = torch.unsqueeze(box_size[...,1], -1)
h = torch.unsqueeze(box_size[...,2], -1)
corners_3d = torch.zeros(tuple(list(input_shape)+[8,3]), device=box_size.device, dtype=torch.float32)
corners_3d[...,:,0] = torch.cat((l/2,l/2,-l/2,-l/2,l/2,l/2,-l/2,-l/2), -1)
corners_3d[...,:,1] = torch.cat((h/2,h/2,h/2,h/2,-h/2,-h/2,-h/2,-h/2), -1)
corners_3d[...,:,2] = torch.cat((w/2,-w/2,-w/2,w/2,w/2,-w/2,-w/2,w/2), -1)
tlist = [i for i in range(len(input_shape))]
tlist += [len(input_shape)+1, len(input_shape)]
corners_3d = torch.matmul(corners_3d, R.permute(tlist))
corners_3d += torch.unsqueeze(center, -2)
if reshape_final:
corners_3d = corners_3d.reshape(bsize, nprop, 8, 3)
return corners_3d
def box_cxcywh_to_xyxy(x):
x_c, y_c, w, h = x.unbind(-1)
b = [(x_c - 0.5 * w), (y_c - 0.5 * h),
(x_c + 0.5 * w), (y_c + 0.5 * h)]
return torch.stack(b, dim=-1)
def box_xyxy_to_cxcywh(x):
x0, y0, x1, y1 = x.unbind(-1)
b = [(x0 + x1) / 2, (y0 + y1) / 2,
(x1 - x0), (y1 - y0)]
return torch.stack(b, dim=-1)
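# Illustrative usage sketch (editor addition; the `_demo_*` name is hypothetical):
# converting a (cx, cy, w, h) box to corner format and back recovers the original
# parameters.
def _demo_box_format_roundtrip():
    boxes_cxcywh = torch.tensor([[1.0, 2.0, 4.0, 6.0]])
    boxes_xyxy = box_cxcywh_to_xyxy(boxes_cxcywh)
    assert torch.allclose(boxes_xyxy, torch.tensor([[-1.0, -1.0, 3.0, 5.0]]))
    assert torch.allclose(box_xyxy_to_cxcywh(boxes_xyxy), boxes_cxcywh)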
if __name__=='__main__':
    # Function for polygon plotting
import matplotlib
from matplotlib.patches import Polygon
from matplotlib.collections import PatchCollection
import matplotlib.pyplot as plt
def plot_polys(plist,scale=500.0):
fig, ax = plt.subplots()
patches = []
for p in plist:
poly = Polygon(np.array(p)/scale, True)
patches.append(poly)
pc = PatchCollection(patches, cmap=matplotlib.cm.jet, alpha=0.5)
colors = 100*np.random.rand(len(patches))
pc.set_array(np.array(colors))
ax.add_collection(pc)
plt.show()
# Demo on ConvexHull
points = np.random.rand(30, 2) # 30 random points in 2-D
hull = ConvexHull(points)
# **In 2D "volume" is is area, "area" is perimeter
print(('Hull area: ', hull.volume))
for simplex in hull.simplices:
print(simplex)
# Demo on convex hull overlaps
sub_poly = [(0,0),(300,0),(300,300),(0,300)]
clip_poly = [(150,150),(300,300),(150,450),(0,300)]
inter_poly = polygon_clip(sub_poly, clip_poly)
print(poly_area(np.array(inter_poly)[:,0], np.array(inter_poly)[:,1]))
# Test convex hull interaction function
rect1 = [(50,0),(50,300),(300,300),(300,0)]
rect2 = [(150,150),(300,300),(150,450),(0,300)]
plot_polys([rect1, rect2])
inter, area = convex_hull_intersection(rect1, rect2)
print((inter, area))
if inter is not None:
print(poly_area(np.array(inter)[:,0], np.array(inter)[:,1]))
print('------------------')
rect1 = [(0.30026005199835404, 8.9408694211408424), \
(-1.1571105364358421, 9.4686676477075533), \
(0.1777082043006144, 13.154404877812102), \
(1.6350787927348105, 12.626606651245391)]
rect1 = [rect1[0], rect1[3], rect1[2], rect1[1]]
rect2 = [(0.23908745901608636, 8.8551095691132886), \
(-1.2771419487733995, 9.4269062966181956), \
(0.13138836963152717, 13.161896351296868), \
(1.647617777421013, 12.590099623791961)]
rect2 = [rect2[0], rect2[3], rect2[2], rect2[1]]
plot_polys([rect1, rect2])
inter, area = convex_hull_intersection(rect1, rect2)
print((inter, area))
|
# Copyright (c) Facebook, Inc. and its affiliates.
""" Helper functions for calculating 2D and 3D bounding box IoU.
Collected and written by Charles R. Qi
Last modified: Apr 2021 by Ishan Misra
"""
import torch
import numpy as np
from scipy.spatial import ConvexHull, Delaunay
from utils.misc import to_list_1d, to_list_3d
try:
from utils.box_intersection import box_intersection
except ImportError:
print(
"Could not import cythonized box intersection. Consider compiling box_intersection.pyx for faster training."
)
box_intersection = None
def in_hull(p, hull):
if not isinstance(hull, Delaunay):
hull = Delaunay(hull)
return hull.find_simplex(p) >= 0
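# Illustrative usage sketch (editor addition; the `_demo_*` name is hypothetical):
# membership test against the convex hull of a unit square; a point inside
# returns True, a point outside returns False.
def _demo_in_hull():
    square = np.array([[0.0, 0.0], [1.0, 0.0], [1.0, 1.0], [0.0, 1.0]])
    queries = np.array([[0.5, 0.5], [2.0, 2.0]])
    inside = in_hull(queries, square)
    assert inside.tolist() == [True, False]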
def extract_pc_in_box3d(pc, box3d):
"""pc: (N,3), box3d: (8,3)"""
box3d_roi_inds = in_hull(pc[:, 0:3], box3d)
return pc[box3d_roi_inds, :], box3d_roi_inds
def polygon_clip(subjectPolygon, clipPolygon):
"""Clip a polygon with another polygon.
Ref: https://rosettacode.org/wiki/Sutherland-Hodgman_polygon_clipping#Python
Args:
subjectPolygon: a list of (x,y) 2d points, any polygon.
clipPolygon: a list of (x,y) 2d points, has to be *convex*
Note:
**points have to be counter-clockwise ordered**
Return:
a list of (x,y) vertex point for the intersection polygon.
"""
def inside(p):
return (cp2[0] - cp1[0]) * (p[1] - cp1[1]) > (cp2[1] - cp1[1]) * (p[0] - cp1[0])
def computeIntersection():
dc = [cp1[0] - cp2[0], cp1[1] - cp2[1]]
dp = [s[0] - e[0], s[1] - e[1]]
n1 = cp1[0] * cp2[1] - cp1[1] * cp2[0]
n2 = s[0] * e[1] - s[1] * e[0]
n3 = 1.0 / (dc[0] * dp[1] - dc[1] * dp[0])
return [(n1 * dp[0] - n2 * dc[0]) * n3, (n1 * dp[1] - n2 * dc[1]) * n3]
outputList = subjectPolygon
cp1 = clipPolygon[-1]
for clipVertex in clipPolygon:
cp2 = clipVertex
inputList = outputList
outputList = []
s = inputList[-1]
for subjectVertex in inputList:
e = subjectVertex
if inside(e):
if not inside(s):
outputList.append(computeIntersection())
outputList.append(e)
elif inside(s):
outputList.append(computeIntersection())
s = e
cp1 = cp2
if len(outputList) == 0:
return None
return outputList
def poly_area(x, y):
"""Ref: http://stackoverflow.com/questions/24467972/calculate-area-of-polygon-given-x-y-coordinates"""
return 0.5 * np.abs(np.dot(x, np.roll(y, 1)) - np.dot(y, np.roll(x, 1)))
def convex_hull_intersection(p1, p2):
"""Compute area of two convex hull's intersection area.
p1,p2 are a list of (x,y) tuples of hull vertices.
return a list of (x,y) for the intersection and its volume
"""
inter_p = polygon_clip(p1, p2)
if inter_p is not None:
hull_inter = ConvexHull(inter_p)
return inter_p, hull_inter.volume
else:
return None, 0.0
def box3d_vol(corners):
"""corners: (8,3) no assumption on axis direction"""
a = np.sqrt(np.sum((corners[0, :] - corners[1, :]) ** 2))
b = np.sqrt(np.sum((corners[1, :] - corners[2, :]) ** 2))
c = np.sqrt(np.sum((corners[0, :] - corners[4, :]) ** 2))
return a * b * c
def is_clockwise(p):
x = p[:, 0]
y = p[:, 1]
return np.dot(x, np.roll(y, 1)) - np.dot(y, np.roll(x, 1)) > 0
def box3d_iou(corners1, corners2):
"""Compute 3D bounding box IoU.
Input:
corners1: numpy array (8,3), assume up direction is negative Y
corners2: numpy array (8,3), assume up direction is negative Y
Output:
iou: 3D bounding box IoU
iou_2d: bird's eye view 2D bounding box IoU
todo (rqi): add more description on corner points' orders.
"""
# corner points are in counter clockwise order
rect1 = [(corners1[i, 0], corners1[i, 2]) for i in range(3, -1, -1)]
rect2 = [(corners2[i, 0], corners2[i, 2]) for i in range(3, -1, -1)]
area1 = poly_area(np.array(rect1)[:, 0], np.array(rect1)[:, 1])
area2 = poly_area(np.array(rect2)[:, 0], np.array(rect2)[:, 1])
inter, inter_area = convex_hull_intersection(rect1, rect2)
iou_2d = inter_area / (area1 + area2 - inter_area)
ymax = min(corners1[0, 1], corners2[0, 1])
ymin = max(corners1[4, 1], corners2[4, 1])
inter_vol = inter_area * max(0.0, ymax - ymin)
vol1 = box3d_vol(corners1)
vol2 = box3d_vol(corners2)
iou = inter_vol / (vol1 + vol2 - inter_vol)
return iou, iou_2d
def get_iou(bb1, bb2):
"""
Calculate the Intersection over Union (IoU) of two 2D bounding boxes.
Parameters
----------
bb1 : dict
Keys: {'x1', 'x2', 'y1', 'y2'}
The (x1, y1) position is at the top left corner,
the (x2, y2) position is at the bottom right corner
bb2 : dict
Keys: {'x1', 'x2', 'y1', 'y2'}
        The (x1, y1) position is at the top left corner,
the (x2, y2) position is at the bottom right corner
Returns
-------
float
in [0, 1]
"""
assert bb1["x1"] < bb1["x2"]
assert bb1["y1"] < bb1["y2"]
assert bb2["x1"] < bb2["x2"]
assert bb2["y1"] < bb2["y2"]
# determine the coordinates of the intersection rectangle
x_left = max(bb1["x1"], bb2["x1"])
y_top = max(bb1["y1"], bb2["y1"])
x_right = min(bb1["x2"], bb2["x2"])
y_bottom = min(bb1["y2"], bb2["y2"])
if x_right < x_left or y_bottom < y_top:
return 0.0
# The intersection of two axis-aligned bounding boxes is always an
# axis-aligned bounding box
intersection_area = (x_right - x_left) * (y_bottom - y_top)
# compute the area of both AABBs
bb1_area = (bb1["x2"] - bb1["x1"]) * (bb1["y2"] - bb1["y1"])
bb2_area = (bb2["x2"] - bb2["x1"]) * (bb2["y2"] - bb2["y1"])
# compute the intersection over union by taking the intersection
# area and dividing it by the sum of prediction + ground-truth
# areas - the interesection area
iou = intersection_area / float(bb1_area + bb2_area - intersection_area)
assert iou >= 0.0
assert iou <= 1.0
return iou
def box2d_iou(box1, box2):
"""Compute 2D bounding box IoU.
Input:
box1: tuple of (xmin,ymin,xmax,ymax)
box2: tuple of (xmin,ymin,xmax,ymax)
Output:
iou: 2D IoU scalar
"""
return get_iou(
{"x1": box1[0], "y1": box1[1], "x2": box1[2], "y2": box1[3]},
{"x1": box2[0], "y1": box2[1], "x2": box2[2], "y2": box2[3]},
)
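# Illustrative worked example (editor addition; the `_demo_*` name is hypothetical):
# boxes (0,0,2,2) and (1,1,3,3) intersect in a 1x1 square, so
# IoU = 1 / (4 + 4 - 1) = 1/7.
def _demo_box2d_iou():
    iou = box2d_iou((0.0, 0.0, 2.0, 2.0), (1.0, 1.0, 3.0, 3.0))
    assert abs(iou - 1.0 / 7.0) < 1e-6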
# -----------------------------------------------------------
# Convert from box parameters to
# -----------------------------------------------------------
def roty(t):
"""Rotation about the y-axis."""
c = np.cos(t)
s = np.sin(t)
return np.array([[c, 0, s], [0, 1, 0], [-s, 0, c]])
def roty_batch(t):
"""Rotation about the y-axis.
t: (x1,x2,...xn)
return: (x1,x2,...,xn,3,3)
"""
input_shape = t.shape
output = np.zeros(tuple(list(input_shape) + [3, 3]))
c = np.cos(t)
s = np.sin(t)
output[..., 0, 0] = c
output[..., 0, 2] = s
output[..., 1, 1] = 1
output[..., 2, 0] = -s
output[..., 2, 2] = c
return output
def get_3d_box(box_size, heading_angle, center):
"""box_size is array(l,w,h), heading_angle is radius clockwise from pos x axis, center is xyz of box center
output (8,3) array for 3D box cornders
Similar to utils/compute_orientation_3d
"""
R = roty(heading_angle)
l, w, h = box_size
x_corners = [l / 2, l / 2, -l / 2, -l / 2, l / 2, l / 2, -l / 2, -l / 2]
y_corners = [h / 2, h / 2, h / 2, h / 2, -h / 2, -h / 2, -h / 2, -h / 2]
z_corners = [w / 2, -w / 2, -w / 2, w / 2, w / 2, -w / 2, -w / 2, w / 2]
corners_3d = np.dot(R, np.vstack([x_corners, y_corners, z_corners]))
corners_3d[0, :] = corners_3d[0, :] + center[0]
corners_3d[1, :] = corners_3d[1, :] + center[1]
corners_3d[2, :] = corners_3d[2, :] + center[2]
corners_3d = np.transpose(corners_3d)
return corners_3d
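# Illustrative usage sketch (editor addition; the `_demo_*` name is hypothetical):
# with heading angle 0 the corners span l, w and h along X, Z and Y respectively,
# and box3d_vol recovers l*w*h.
def _demo_get_3d_box():
    corners = get_3d_box(np.array([2.0, 4.0, 6.0]), 0.0, np.array([0.0, 0.0, 0.0]))
    assert corners.shape == (8, 3)
    assert np.allclose(corners.max(axis=0) - corners.min(axis=0), [2.0, 6.0, 4.0])  # l, h, w
    assert np.isclose(box3d_vol(corners), 2.0 * 4.0 * 6.0)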
def flip_axis_to_camera_np(pc):
"""Flip X-right,Y-forward,Z-up to X-right,Y-down,Z-forward
Input and output are both (N,3) array
"""
pc2 = pc.copy()
pc2[..., [0, 1, 2]] = pc2[..., [0, 2, 1]] # cam X,Y,Z = depth X,-Z,Y
pc2[..., 1] *= -1
return pc2
def get_3d_box_batch_np(box_size, angle, center):
input_shape = angle.shape
R = roty_batch(angle)
l = np.expand_dims(box_size[..., 0], -1) # [x1,...,xn,1]
w = np.expand_dims(box_size[..., 1], -1)
h = np.expand_dims(box_size[..., 2], -1)
corners_3d = np.zeros(tuple(list(input_shape) + [8, 3]))
corners_3d[..., :, 0] = np.concatenate(
(l / 2, l / 2, -l / 2, -l / 2, l / 2, l / 2, -l / 2, -l / 2), -1
)
corners_3d[..., :, 1] = np.concatenate(
(h / 2, h / 2, h / 2, h / 2, -h / 2, -h / 2, -h / 2, -h / 2), -1
)
corners_3d[..., :, 2] = np.concatenate(
(w / 2, -w / 2, -w / 2, w / 2, w / 2, -w / 2, -w / 2, w / 2), -1
)
tlist = [i for i in range(len(input_shape))]
tlist += [len(input_shape) + 1, len(input_shape)]
corners_3d = np.matmul(corners_3d, np.transpose(R, tuple(tlist)))
corners_3d += np.expand_dims(center, -2)
return corners_3d
def flip_axis_to_camera_tensor(pc):
"""Flip X-right,Y-forward,Z-up to X-right,Y-down,Z-forward
Input and output are both (N,3) array
"""
pc2 = torch.clone(pc)
pc2[..., [0, 1, 2]] = pc2[..., [0, 2, 1]] # cam X,Y,Z = depth X,-Z,Y
pc2[..., 1] *= -1
return pc2
def roty_batch_tensor(t):
input_shape = t.shape
output = torch.zeros(
tuple(list(input_shape) + [3, 3]), dtype=torch.float32, device=t.device
)
c = torch.cos(t)
s = torch.sin(t)
output[..., 0, 0] = c
output[..., 0, 2] = s
output[..., 1, 1] = 1
output[..., 2, 0] = -s
output[..., 2, 2] = c
return output
def get_3d_box_batch_tensor(box_size, angle, center):
assert isinstance(box_size, torch.Tensor)
assert isinstance(angle, torch.Tensor)
assert isinstance(center, torch.Tensor)
reshape_final = False
if angle.ndim == 2:
assert box_size.ndim == 3
assert center.ndim == 3
bsize = box_size.shape[0]
nprop = box_size.shape[1]
box_size = box_size.reshape(-1, box_size.shape[-1])
angle = angle.reshape(-1)
center = center.reshape(-1, 3)
reshape_final = True
input_shape = angle.shape
R = roty_batch_tensor(angle)
l = torch.unsqueeze(box_size[..., 0], -1) # [x1,...,xn,1]
w = torch.unsqueeze(box_size[..., 1], -1)
h = torch.unsqueeze(box_size[..., 2], -1)
corners_3d = torch.zeros(
tuple(list(input_shape) + [8, 3]), device=box_size.device, dtype=torch.float32
)
corners_3d[..., :, 0] = torch.cat(
(l / 2, l / 2, -l / 2, -l / 2, l / 2, l / 2, -l / 2, -l / 2), -1
)
corners_3d[..., :, 1] = torch.cat(
(h / 2, h / 2, h / 2, h / 2, -h / 2, -h / 2, -h / 2, -h / 2), -1
)
corners_3d[..., :, 2] = torch.cat(
(w / 2, -w / 2, -w / 2, w / 2, w / 2, -w / 2, -w / 2, w / 2), -1
)
tlist = [i for i in range(len(input_shape))]
tlist += [len(input_shape) + 1, len(input_shape)]
corners_3d = torch.matmul(corners_3d, R.permute(tlist))
corners_3d += torch.unsqueeze(center, -2)
if reshape_final:
corners_3d = corners_3d.reshape(bsize, nprop, 8, 3)
return corners_3d
def get_3d_box_batch(box_size, angle, center):
"""box_size: [x1,x2,...,xn,3]
angle: [x1,x2,...,xn]
center: [x1,x2,...,xn,3]
Return:
[x1,x3,...,xn,8,3]
"""
input_shape = angle.shape
R = roty_batch(angle)
l = np.expand_dims(box_size[..., 0], -1) # [x1,...,xn,1]
w = np.expand_dims(box_size[..., 1], -1)
h = np.expand_dims(box_size[..., 2], -1)
corners_3d = np.zeros(tuple(list(input_shape) + [8, 3]))
corners_3d[..., :, 0] = np.concatenate(
(l / 2, l / 2, -l / 2, -l / 2, l / 2, l / 2, -l / 2, -l / 2), -1
)
corners_3d[..., :, 1] = np.concatenate(
(h / 2, h / 2, h / 2, h / 2, -h / 2, -h / 2, -h / 2, -h / 2), -1
)
corners_3d[..., :, 2] = np.concatenate(
(w / 2, -w / 2, -w / 2, w / 2, w / 2, -w / 2, -w / 2, w / 2), -1
)
tlist = [i for i in range(len(input_shape))]
tlist += [len(input_shape) + 1, len(input_shape)]
corners_3d = np.matmul(corners_3d, np.transpose(R, tuple(tlist)))
corners_3d += np.expand_dims(center, -2)
return corners_3d
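# Illustrative usage sketch (editor addition; the `_demo_*` name is hypothetical):
# the batched corner computation agrees with the single-box version for each
# element of the batch.
def _demo_get_3d_box_batch():
    sizes = np.array([[2.0, 4.0, 6.0], [1.0, 1.0, 1.0]])
    angles = np.array([0.3, -1.2])
    centers = np.array([[0.0, 0.0, 0.0], [5.0, 5.0, 5.0]])
    batched = get_3d_box_batch(sizes, angles, centers)  # (2, 8, 3)
    for i in range(2):
        single = get_3d_box(sizes[i], angles[i], centers[i])
        assert np.allclose(batched[i], single)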
####### GIoU related operations. Differentiable #############
def helper_computeIntersection(
cp1: torch.Tensor, cp2: torch.Tensor, s: torch.Tensor, e: torch.Tensor
):
dc = [cp1[0] - cp2[0], cp1[1] - cp2[1]]
dp = [s[0] - e[0], s[1] - e[1]]
n1 = cp1[0] * cp2[1] - cp1[1] * cp2[0]
n2 = s[0] * e[1] - s[1] * e[0]
n3 = 1.0 / (dc[0] * dp[1] - dc[1] * dp[0])
# return [(n1*dp[0] - n2*dc[0]) * n3, (n1*dp[1] - n2*dc[1]) * n3]
return torch.stack([(n1 * dp[0] - n2 * dc[0]) * n3, (n1 * dp[1] - n2 * dc[1]) * n3])
def helper_inside(cp1: torch.Tensor, cp2: torch.Tensor, p: torch.Tensor):
ineq = (cp2[0] - cp1[0]) * (p[1] - cp1[1]) > (cp2[1] - cp1[1]) * (p[0] - cp1[0])
return ineq.item()
def polygon_clip_unnest(subjectPolygon: torch.Tensor, clipPolygon: torch.Tensor):
"""Clip a polygon with another polygon.
Ref: https://rosettacode.org/wiki/Sutherland-Hodgman_polygon_clipping#Python
Args:
subjectPolygon: a list of (x,y) 2d points, any polygon.
clipPolygon: a list of (x,y) 2d points, has to be *convex*
Note:
**points have to be counter-clockwise ordered**
Return:
a list of (x,y) vertex point for the intersection polygon.
"""
outputList = [subjectPolygon[x] for x in range(subjectPolygon.shape[0])]
cp1 = clipPolygon[-1]
for clipVertex in clipPolygon:
cp2 = clipVertex
inputList = outputList.copy()
outputList.clear()
s = inputList[-1]
for subjectVertex in inputList:
e = subjectVertex
if helper_inside(cp1, cp2, e):
if not helper_inside(cp1, cp2, s):
outputList.append(helper_computeIntersection(cp1, cp2, s, e))
outputList.append(e)
elif helper_inside(cp1, cp2, s):
outputList.append(helper_computeIntersection(cp1, cp2, s, e))
s = e
cp1 = cp2
if len(outputList) == 0:
# return None
break
return outputList
def box3d_vol_tensor(corners):
EPS = 1e-6
reshape = False
B, K = corners.shape[0], corners.shape[1]
if len(corners.shape) == 4:
# batch x prop x 8 x 3
reshape = True
corners = corners.view(-1, 8, 3)
a = torch.sqrt(
(corners[:, 0, :] - corners[:, 1, :]).pow(2).sum(dim=1).clamp(min=EPS)
)
b = torch.sqrt(
(corners[:, 1, :] - corners[:, 2, :]).pow(2).sum(dim=1).clamp(min=EPS)
)
c = torch.sqrt(
(corners[:, 0, :] - corners[:, 4, :]).pow(2).sum(dim=1).clamp(min=EPS)
)
vols = a * b * c
if reshape:
vols = vols.view(B, K)
return vols
def enclosing_box3d_vol(corners1, corners2):
"""
volume of enclosing axis-aligned box
"""
assert len(corners1.shape) == 4
assert len(corners2.shape) == 4
assert corners1.shape[0] == corners2.shape[0]
assert corners1.shape[2] == 8
assert corners1.shape[3] == 3
assert corners2.shape[2] == 8
assert corners2.shape[3] == 3
EPS = 1e-6
corners1 = corners1.clone()
corners2 = corners2.clone()
# flip Y axis, since it is negative
corners1[:, :, :, 1] *= -1
corners2[:, :, :, 1] *= -1
al_xmin = torch.min(
torch.min(corners1[:, :, :, 0], dim=2).values[:, :, None],
torch.min(corners2[:, :, :, 0], dim=2).values[:, None, :],
)
al_ymin = torch.max(
torch.max(corners1[:, :, :, 1], dim=2).values[:, :, None],
torch.max(corners2[:, :, :, 1], dim=2).values[:, None, :],
)
al_zmin = torch.min(
torch.min(corners1[:, :, :, 2], dim=2).values[:, :, None],
torch.min(corners2[:, :, :, 2], dim=2).values[:, None, :],
)
al_xmax = torch.max(
torch.max(corners1[:, :, :, 0], dim=2).values[:, :, None],
torch.max(corners2[:, :, :, 0], dim=2).values[:, None, :],
)
al_ymax = torch.min(
torch.min(corners1[:, :, :, 1], dim=2).values[:, :, None],
torch.min(corners2[:, :, :, 1], dim=2).values[:, None, :],
)
al_zmax = torch.max(
torch.max(corners1[:, :, :, 2], dim=2).values[:, :, None],
torch.max(corners2[:, :, :, 2], dim=2).values[:, None, :],
)
diff_x = torch.abs(al_xmax - al_xmin)
diff_y = torch.abs(al_ymax - al_ymin)
diff_z = torch.abs(al_zmax - al_zmin)
vol = diff_x * diff_y * diff_z
return vol
def generalized_box3d_iou_tensor(
corners1: torch.Tensor,
corners2: torch.Tensor,
nums_k2: torch.Tensor,
rotated_boxes: bool = True,
return_inter_vols_only: bool = False,
):
"""
Input:
corners1: torch Tensor (B, K1, 8, 3), assume up direction is negative Y
corners2: torch Tensor (B, K2, 8, 3), assume up direction is negative Y
Assumes that the box is only rotated along Z direction
Returns:
B x K1 x K2 matrix of generalized IOU by approximating the boxes to be axis aligned
"""
assert len(corners1.shape) == 4
assert len(corners2.shape) == 4
assert corners1.shape[2] == 8
assert corners1.shape[3] == 3
assert corners1.shape[0] == corners2.shape[0]
assert corners1.shape[2] == corners2.shape[2]
assert corners1.shape[3] == corners2.shape[3]
B, K1 = corners1.shape[0], corners1.shape[1]
_, K2 = corners2.shape[0], corners2.shape[1]
# # box height. Y is negative, so max is torch.min
ymax = torch.min(corners1[:, :, 0, 1][:, :, None], corners2[:, :, 0, 1][:, None, :])
ymin = torch.max(corners1[:, :, 4, 1][:, :, None], corners2[:, :, 4, 1][:, None, :])
height = (ymax - ymin).clamp(min=0)
EPS = 1e-8
idx = torch.arange(start=3, end=-1, step=-1, device=corners1.device)
idx2 = torch.tensor([0, 2], dtype=torch.int64, device=corners1.device)
rect1 = corners1[:, :, idx, :]
rect2 = corners2[:, :, idx, :]
rect1 = rect1[:, :, :, idx2]
rect2 = rect2[:, :, :, idx2]
lt = torch.max(rect1[:, :, 1][:, :, None, :], rect2[:, :, 1][:, None, :, :])
rb = torch.min(rect1[:, :, 3][:, :, None, :], rect2[:, :, 3][:, None, :, :])
wh = (rb - lt).clamp(min=0)
non_rot_inter_areas = wh[:, :, :, 0] * wh[:, :, :, 1]
non_rot_inter_areas = non_rot_inter_areas.view(B, K1, K2)
if nums_k2 is not None:
for b in range(B):
non_rot_inter_areas[b, :, nums_k2[b] :] = 0
enclosing_vols = enclosing_box3d_vol(corners1, corners2)
# vols of boxes
vols1 = box3d_vol_tensor(corners1).clamp(min=EPS)
vols2 = box3d_vol_tensor(corners2).clamp(min=EPS)
sum_vols = vols1[:, :, None] + vols2[:, None, :]
# filter malformed boxes
good_boxes = (enclosing_vols > 2 * EPS) * (sum_vols > 4 * EPS)
if rotated_boxes:
inter_areas = torch.zeros((B, K1, K2), dtype=torch.float32)
rect1 = rect1.cpu()
rect2 = rect2.cpu()
nums_k2_np = to_list_1d(nums_k2)
non_rot_inter_areas_np = to_list_3d(non_rot_inter_areas)
for b in range(B):
for k1 in range(K1):
for k2 in range(K2):
if nums_k2 is not None and k2 >= nums_k2_np[b]:
break
if non_rot_inter_areas_np[b][k1][k2] == 0:
continue
##### compute area of the intersection polygon (shoelace formula); multiplied by height below to get the intersection volume
inter = polygon_clip_unnest(rect1[b, k1], rect2[b, k2])
if len(inter) > 0:
xs = torch.stack([x[0] for x in inter])
ys = torch.stack([x[1] for x in inter])
inter_areas[b, k1, k2] = torch.abs(
torch.dot(xs, torch.roll(ys, 1))
- torch.dot(ys, torch.roll(xs, 1))
)
inter_areas.mul_(0.5)
else:
inter_areas = non_rot_inter_areas
inter_areas = inter_areas.to(corners1.device)
### gIoU = IoU - (enclosing_vol - union_vol) / enclosing_vol
inter_vols = inter_areas * height
if return_inter_vols_only:
return inter_vols
union_vols = (sum_vols - inter_vols).clamp(min=EPS)
ious = inter_vols / union_vols
giou_second_term = -(1 - union_vols / enclosing_vols)
gious = ious + giou_second_term
gious *= good_boxes
if nums_k2 is not None:
mask = torch.zeros((B, K1, K2), device=height.device, dtype=torch.float32)
for b in range(B):
mask[b, :, : nums_k2[b]] = 1
gious *= mask
return gious
generalized_box3d_iou_tensor_jit = torch.jit.script(generalized_box3d_iou_tensor)
def generalized_box3d_iou_cython(
corners1: torch.Tensor,
corners2: torch.Tensor,
nums_k2: torch.Tensor,
rotated_boxes: bool = True,
return_inter_vols_only: bool = False,
):
"""
Input:
corners1: torch Tensor (B, K1, 8, 3), assume up direction is negative Y
corners2: torch Tensor (B, K2, 8, 3), assume up direction is negative Y
Assumes that the box is only rotated along Z direction
Returns:
B x K1 x K2 matrix of generalized IoU (the enclosing volume is approximated by an axis-aligned box)
"""
assert len(corners1.shape) == 4
assert len(corners2.shape) == 4
assert corners1.shape[2] == 8
assert corners1.shape[3] == 3
assert corners1.shape[0] == corners2.shape[0]
assert corners1.shape[2] == corners2.shape[2]
assert corners1.shape[3] == corners2.shape[3]
B, K1 = corners1.shape[0], corners1.shape[1]
_, K2 = corners2.shape[0], corners2.shape[1]
# box height of the intersection. Up is negative Y: corner 0 lies on the bottom face and corner 4 on the top face, so the Y-overlap is min(bottom Ys) - max(top Ys)
ymax = torch.min(corners1[:, :, 0, 1][:, :, None], corners2[:, :, 0, 1][:, None, :])
ymin = torch.max(corners1[:, :, 4, 1][:, :, None], corners2[:, :, 4, 1][:, None, :])
height = (ymax - ymin).clamp(min=0)
EPS = 1e-8
idx = torch.arange(start=3, end=-1, step=-1, device=corners1.device)
idx2 = torch.tensor([0, 2], dtype=torch.int64, device=corners1.device)
rect1 = corners1[:, :, idx, :]
rect2 = corners2[:, :, idx, :]
rect1 = rect1[:, :, :, idx2]
rect2 = rect2[:, :, :, idx2]
lt = torch.max(rect1[:, :, 1][:, :, None, :], rect2[:, :, 1][:, None, :, :])
rb = torch.min(rect1[:, :, 3][:, :, None, :], rect2[:, :, 3][:, None, :, :])
wh = (rb - lt).clamp(min=0)
non_rot_inter_areas = wh[:, :, :, 0] * wh[:, :, :, 1]
non_rot_inter_areas = non_rot_inter_areas.view(B, K1, K2)
if nums_k2 is not None:
for b in range(B):
non_rot_inter_areas[b, :, nums_k2[b] :] = 0
enclosing_vols = enclosing_box3d_vol(corners1, corners2)
# vols of boxes
vols1 = box3d_vol_tensor(corners1).clamp(min=EPS)
vols2 = box3d_vol_tensor(corners2).clamp(min=EPS)
sum_vols = vols1[:, :, None] + vols2[:, None, :]
# filter malformed boxes
good_boxes = (enclosing_vols > 2 * EPS) * (sum_vols > 4 * EPS)
if rotated_boxes:
inter_areas = np.zeros((B, K1, K2), dtype=np.float32)
rect1 = rect1.cpu().numpy().astype(np.float32)
rect2 = rect2.cpu().numpy().astype(np.float32)
nums_k2_np = nums_k2.cpu().detach().numpy().astype(np.int32)
non_rot_inter_areas_np = (
non_rot_inter_areas.cpu().detach().numpy().astype(np.float32)
)
box_intersection(
rect1, rect2, non_rot_inter_areas_np, nums_k2_np, inter_areas, True
)
inter_areas = torch.from_numpy(inter_areas)
else:
inter_areas = non_rot_inter_areas
inter_areas = inter_areas.to(corners1.device)
### gIoU = IoU - (enclosing_vol - union_vol) / enclosing_vol
inter_vols = inter_areas * height
if return_inter_vols_only:
return inter_vols
union_vols = (sum_vols - inter_vols).clamp(min=EPS)
ious = inter_vols / union_vols
giou_second_term = -(1 - union_vols / enclosing_vols)
gious = ious + giou_second_term
gious *= good_boxes
if nums_k2 is not None:
mask = torch.zeros((B, K1, K2), device=height.device, dtype=torch.float32)
for b in range(B):
mask[b, :, : nums_k2[b]] = 1
gious *= mask
return gious
def generalized_box3d_iou(
corners1: torch.Tensor,
corners2: torch.Tensor,
nums_k2: torch.Tensor,
rotated_boxes: bool = True,
return_inter_vols_only: bool = False,
needs_grad: bool = False,
):
if needs_grad is True or box_intersection is None:
context = torch.enable_grad if needs_grad else torch.no_grad
with context():
return generalized_box3d_iou_tensor_jit(
corners1, corners2, nums_k2, rotated_boxes, return_inter_vols_only
)
else:
# Cythonized implementation of GIoU
with torch.no_grad():
return generalized_box3d_iou_cython(
corners1, corners2, nums_k2, rotated_boxes, return_inter_vols_only
)
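# Usage sketch for the dispatcher above (the tensors below are placeholders;
# shapes follow the docstrings). With needs_grad=True the pure-PyTorch JIT path
# is used so the matcher/loss can backpropagate; otherwise the Cython path (if
# built) runs under no_grad for speed.
def _demo_generalized_box3d_iou(pred_corners, gt_corners, gt_counts):
    # pred_corners: (B, K1, 8, 3), gt_corners: (B, K2, 8, 3), gt_counts: (B,)
    gious_train = generalized_box3d_iou(pred_corners, gt_corners, gt_counts, needs_grad=True)
    gious_eval = generalized_box3d_iou(pred_corners, gt_corners, gt_counts, needs_grad=False)
    return gious_train, gious_eval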
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
""" Helper functions and class to calculate Average Precisions for 3D object detection.
"""
import logging
import os
import sys
from collections import OrderedDict
import numpy as np
import scipy.special as scipy_special
import torch
from utils.box_util import (extract_pc_in_box3d, flip_axis_to_camera_np,
get_3d_box, get_3d_box_batch)
from utils.eval_det import eval_det_multiprocessing, get_iou_obb
from utils.nms import nms_2d_faster, nms_3d_faster, nms_3d_faster_samecls
def flip_axis_to_depth(pc):
pc2 = np.copy(pc)
pc2[..., [0, 1, 2]] = pc2[..., [0, 2, 1]] # depth X,Y,Z = cam X,Z,-Y
pc2[..., 2] *= -1
return pc2
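# Tiny worked example of the axis convention above (illustrative only): a
# camera-frame point (x, y, z) maps to the depth frame as (x, z, -y).
def _demo_flip_axis_to_depth():
    pt = np.array([[1.0, 2.0, 3.0]])
    assert np.allclose(flip_axis_to_depth(pt), [[1.0, 3.0, -2.0]])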
def softmax(x):
"""Numpy function for softmax"""
shape = x.shape
probs = np.exp(x - np.max(x, axis=len(shape) - 1, keepdims=True))
probs /= np.sum(probs, axis=len(shape) - 1, keepdims=True)
return probs
# This is exactly the same as VoteNet so that we can compare evaluations.
def parse_predictions(
predicted_boxes, sem_cls_probs, objectness_probs, point_cloud, config_dict
):
"""Parse predictions to OBB parameters and suppress overlapping boxes
Args:
predicted_boxes: tensor (B, num_proposal, 8, 3) of predicted box corners
sem_cls_probs: tensor (B, num_proposal, num_semcls) of semantic class probabilities
objectness_probs: tensor (B, num_proposal) of objectness probabilities
point_cloud: tensor (B, N, 3+) of input points (xyz and optional features)
config_dict: dict
{dataset_config, remove_empty_box, use_3d_nms, nms_iou,
use_old_type_nms, conf_thresh, per_class_proposal}
Returns:
batch_pred_map_cls: a list of len == batch size (BS)
[pred_list_i], i = 0, 1, ..., BS-1
where pred_list_i = [(pred_sem_cls, box_params, box_score)_j]
where j = 0, ..., num of valid detections - 1 from sample input i
"""
sem_cls_probs = sem_cls_probs.detach().cpu().numpy() # B,num_proposal,10
pred_sem_cls_prob = np.max(sem_cls_probs, -1) # B,num_proposal
pred_sem_cls = np.argmax(sem_cls_probs, -1)
obj_prob = objectness_probs.detach().cpu().numpy()
pred_corners_3d_upright_camera = predicted_boxes.detach().cpu().numpy()
K = pred_corners_3d_upright_camera.shape[1] # K==num_proposal
bsize = pred_corners_3d_upright_camera.shape[0]
nonempty_box_mask = np.ones((bsize, K))
if config_dict["remove_empty_box"]:
# -------------------------------------
# Remove predicted boxes without any point within them.
batch_pc = point_cloud.cpu().numpy()[:, :, 0:3] # B,N,3
for i in range(bsize):
pc = batch_pc[i, :, :] # (N,3)
for j in range(K):
box3d = pred_corners_3d_upright_camera[i, j, :, :] # (8,3)
box3d = flip_axis_to_depth(box3d)
pc_in_box, inds = extract_pc_in_box3d(pc, box3d)
if len(pc_in_box) < 5:
nonempty_box_mask[i, j] = 0
if nonempty_box_mask[i].sum() == 0:
nonempty_box_mask[i, obj_prob[i].argmax()] = 1
# -------------------------------------
if "no_nms" in config_dict and config_dict["no_nms"]:
# pred_mask = np.ones((bsize, K))
pred_mask = nonempty_box_mask
elif not config_dict["use_3d_nms"]:
# ---------- NMS input: pred_with_prob in (B,K,7) -----------
pred_mask = np.zeros((bsize, K))
for i in range(bsize):
boxes_2d_with_prob = np.zeros((K, 5))
for j in range(K):
boxes_2d_with_prob[j, 0] = np.min(
pred_corners_3d_upright_camera[i, j, :, 0]
)
boxes_2d_with_prob[j, 2] = np.max(
pred_corners_3d_upright_camera[i, j, :, 0]
)
boxes_2d_with_prob[j, 1] = np.min(
pred_corners_3d_upright_camera[i, j, :, 2]
)
boxes_2d_with_prob[j, 3] = np.max(
pred_corners_3d_upright_camera[i, j, :, 2]
)
boxes_2d_with_prob[j, 4] = obj_prob[i, j]
nonempty_box_inds = np.where(nonempty_box_mask[i, :] == 1)[0]
assert len(nonempty_box_inds) > 0
pick = nms_2d_faster(
boxes_2d_with_prob[nonempty_box_mask[i, :] == 1, :],
config_dict["nms_iou"],
config_dict["use_old_type_nms"],
)
assert len(pick) > 0
pred_mask[i, nonempty_box_inds[pick]] = 1
# ---------- NMS output: pred_mask in (B,K) -----------
elif config_dict["use_3d_nms"] and (not config_dict["cls_nms"]):
# ---------- NMS input: pred_with_prob in (B,K,7) -----------
pred_mask = np.zeros((bsize, K))
for i in range(bsize):
boxes_3d_with_prob = np.zeros((K, 7))
for j in range(K):
boxes_3d_with_prob[j, 0] = np.min(
pred_corners_3d_upright_camera[i, j, :, 0]
)
boxes_3d_with_prob[j, 1] = np.min(
pred_corners_3d_upright_camera[i, j, :, 1]
)
boxes_3d_with_prob[j, 2] = np.min(
pred_corners_3d_upright_camera[i, j, :, 2]
)
boxes_3d_with_prob[j, 3] = np.max(
pred_corners_3d_upright_camera[i, j, :, 0]
)
boxes_3d_with_prob[j, 4] = np.max(
pred_corners_3d_upright_camera[i, j, :, 1]
)
boxes_3d_with_prob[j, 5] = np.max(
pred_corners_3d_upright_camera[i, j, :, 2]
)
boxes_3d_with_prob[j, 6] = obj_prob[i, j]
nonempty_box_inds = np.where(nonempty_box_mask[i, :] == 1)[0]
assert len(nonempty_box_inds) > 0
pick = nms_3d_faster(
boxes_3d_with_prob[nonempty_box_mask[i, :] == 1, :],
config_dict["nms_iou"],
config_dict["use_old_type_nms"],
)
assert len(pick) > 0
pred_mask[i, nonempty_box_inds[pick]] = 1
# ---------- NMS output: pred_mask in (B,K) -----------
elif config_dict["use_3d_nms"] and config_dict["cls_nms"]:
# ---------- NMS input: pred_with_prob in (B,K,8) -----------
pred_mask = np.zeros((bsize, K))
for i in range(bsize):
boxes_3d_with_prob = np.zeros((K, 8))
for j in range(K):
boxes_3d_with_prob[j, 0] = np.min(
pred_corners_3d_upright_camera[i, j, :, 0]
)
boxes_3d_with_prob[j, 1] = np.min(
pred_corners_3d_upright_camera[i, j, :, 1]
)
boxes_3d_with_prob[j, 2] = np.min(
pred_corners_3d_upright_camera[i, j, :, 2]
)
boxes_3d_with_prob[j, 3] = np.max(
pred_corners_3d_upright_camera[i, j, :, 0]
)
boxes_3d_with_prob[j, 4] = np.max(
pred_corners_3d_upright_camera[i, j, :, 1]
)
boxes_3d_with_prob[j, 5] = np.max(
pred_corners_3d_upright_camera[i, j, :, 2]
)
boxes_3d_with_prob[j, 6] = obj_prob[i, j]
boxes_3d_with_prob[j, 7] = pred_sem_cls[
i, j
] # only suppress if the two boxes are of the same class!!
nonempty_box_inds = np.where(nonempty_box_mask[i, :] == 1)[0]
assert len(nonempty_box_inds) > 0
pick = nms_3d_faster_samecls(
boxes_3d_with_prob[nonempty_box_mask[i, :] == 1, :],
config_dict["nms_iou"],
config_dict["use_old_type_nms"],
)
assert len(pick) > 0
pred_mask[i, nonempty_box_inds[pick]] = 1
# ---------- NMS output: pred_mask in (B,K) -----------
batch_pred_map_cls = (
[]
) # a list (len: batch_size) of list (len: num of predictions per sample) of tuples of pred_cls, pred_box and conf (0-1)
for i in range(bsize):
if config_dict["per_class_proposal"]:
assert config_dict["use_cls_confidence_only"] is False
cur_list = []
for ii in range(config_dict["dataset_config"].num_semcls):
cur_list += [
(
ii,
pred_corners_3d_upright_camera[i, j],
sem_cls_probs[i, j, ii] * obj_prob[i, j],
)
for j in range(pred_corners_3d_upright_camera.shape[1])
if pred_mask[i, j] == 1
and obj_prob[i, j] > config_dict["conf_thresh"]
]
batch_pred_map_cls.append(cur_list)
elif config_dict["use_cls_confidence_only"]:
batch_pred_map_cls.append(
[
(
pred_sem_cls[i, j].item(),
pred_corners_3d_upright_camera[i, j],
sem_cls_probs[i, j, pred_sem_cls[i, j].item()],
)
for j in range(pred_corners_3d_upright_camera.shape[1])
if pred_mask[i, j] == 1
and obj_prob[i, j] > config_dict["conf_thresh"]
]
)
else:
batch_pred_map_cls.append(
[
(
pred_sem_cls[i, j].item(),
pred_corners_3d_upright_camera[i, j],
obj_prob[i, j],
)
for j in range(pred_corners_3d_upright_camera.shape[1])
if pred_mask[i, j] == 1
and obj_prob[i, j] > config_dict["conf_thresh"]
]
)
return batch_pred_map_cls
def get_ap_config_dict(
remove_empty_box=True,
use_3d_nms=True,
nms_iou=0.25,
use_old_type_nms=False,
cls_nms=True,
per_class_proposal=True,
use_cls_confidence_only=False,
conf_thresh=0.05,
no_nms=False,
dataset_config=None,
):
"""
Default mAP evaluation settings for VoteNet
"""
config_dict = {
"remove_empty_box": remove_empty_box,
"use_3d_nms": use_3d_nms,
"nms_iou": nms_iou,
"use_old_type_nms": use_old_type_nms,
"cls_nms": cls_nms,
"per_class_proposal": per_class_proposal,
"use_cls_confidence_only": use_cls_confidence_only,
"conf_thresh": conf_thresh,
"no_nms": no_nms,
"dataset_config": dataset_config,
}
return config_dict
class APCalculator(object):
"""Calculating Average Precision"""
def __init__(
self,
dataset_config,
ap_iou_thresh=[0.25, 0.5],
class2type_map=None,
exact_eval=True,
ap_config_dict=None,
):
"""
Args:
ap_iou_thresh: List of float between 0 and 1.0
IoU threshold to judge whether a prediction is positive.
class2type_map: [optional] dict {class_int:class_name}
"""
self.ap_iou_thresh = ap_iou_thresh
if ap_config_dict is None:
ap_config_dict = get_ap_config_dict(
dataset_config=dataset_config, remove_empty_box=exact_eval
)
self.ap_config_dict = ap_config_dict
self.class2type_map = class2type_map
self.reset()
def make_gt_list(self, gt_box_corners, gt_box_sem_cls_labels, gt_box_present):
batch_gt_map_cls = []
bsize = gt_box_corners.shape[0]
for i in range(bsize):
batch_gt_map_cls.append(
[
(gt_box_sem_cls_labels[i, j].item(), gt_box_corners[i, j])
for j in range(gt_box_corners.shape[1])
if gt_box_present[i, j] == 1
]
)
return batch_gt_map_cls
def step_meter(self, outputs, targets):
if "outputs" in outputs:
outputs = outputs["outputs"]
self.step(
predicted_box_corners=outputs["box_corners"],
sem_cls_probs=outputs["sem_cls_prob"],
objectness_probs=outputs["objectness_prob"],
point_cloud=targets["point_clouds"],
gt_box_corners=targets["gt_box_corners"],
gt_box_sem_cls_labels=targets["gt_box_sem_cls_label"],
gt_box_present=targets["gt_box_present"],
)
def step(
self,
predicted_box_corners,
sem_cls_probs,
objectness_probs,
point_cloud,
gt_box_corners,
gt_box_sem_cls_labels,
gt_box_present,
):
"""
Perform NMS on predicted boxes and threshold them according to score.
Convert GT boxes
"""
gt_box_corners = gt_box_corners.cpu().detach().numpy()
gt_box_sem_cls_labels = gt_box_sem_cls_labels.cpu().detach().numpy()
gt_box_present = gt_box_present.cpu().detach().numpy()
batch_gt_map_cls = self.make_gt_list(
gt_box_corners, gt_box_sem_cls_labels, gt_box_present
)
batch_pred_map_cls = parse_predictions(
predicted_box_corners,
sem_cls_probs,
objectness_probs,
point_cloud,
self.ap_config_dict,
)
self.accumulate(batch_pred_map_cls, batch_gt_map_cls)
def accumulate(self, batch_pred_map_cls, batch_gt_map_cls):
"""Accumulate one batch of prediction and groundtruth.
Args:
batch_pred_map_cls: a list of lists [[(pred_cls, pred_box_params, score),...],...]
batch_gt_map_cls: a list of lists [[(gt_cls, gt_box_params),...],...]
should have the same length with batch_pred_map_cls (batch_size)
"""
bsize = len(batch_pred_map_cls)
assert bsize == len(batch_gt_map_cls)
for i in range(bsize):
self.gt_map_cls[self.scan_cnt] = batch_gt_map_cls[i]
self.pred_map_cls[self.scan_cnt] = batch_pred_map_cls[i]
self.scan_cnt += 1
def compute_metrics(self):
"""Use accumulated predictions and groundtruths to compute Average Precision."""
overall_ret = OrderedDict()
for ap_iou_thresh in self.ap_iou_thresh:
ret_dict = OrderedDict()
rec, prec, ap = eval_det_multiprocessing(
self.pred_map_cls, self.gt_map_cls, ovthresh=ap_iou_thresh
)
for key in sorted(ap.keys()):
clsname = self.class2type_map[key] if self.class2type_map else str(key)
ret_dict["%s Average Precision" % (clsname)] = ap[key]
ap_vals = np.array(list(ap.values()), dtype=np.float32)
ap_vals[np.isnan(ap_vals)] = 0
ret_dict["mAP"] = ap_vals.mean()
rec_list = []
for key in sorted(ap.keys()):
clsname = self.class2type_map[key] if self.class2type_map else str(key)
try:
ret_dict["%s Recall" % (clsname)] = rec[key][-1]
rec_list.append(rec[key][-1])
except (KeyError, IndexError):
ret_dict["%s Recall" % (clsname)] = 0
rec_list.append(0)
ret_dict["AR"] = np.mean(rec_list)
overall_ret[ap_iou_thresh] = ret_dict
return overall_ret
def __str__(self):
overall_ret = self.compute_metrics()
return self.metrics_to_str(overall_ret)
def metrics_to_str(self, overall_ret, per_class=True):
mAP_strs = []
AR_strs = []
per_class_metrics = []
for ap_iou_thresh in self.ap_iou_thresh:
mAP = overall_ret[ap_iou_thresh]["mAP"] * 100
mAP_strs.append(f"{mAP:.2f}")
ar = overall_ret[ap_iou_thresh]["AR"] * 100
AR_strs.append(f"{ar:.2f}")
if per_class:
# per-class metrics
per_class_metrics.append("-" * 5)
per_class_metrics.append(f"IOU Thresh={ap_iou_thresh}")
for x in list(overall_ret[ap_iou_thresh].keys()):
if x == "mAP" or x == "AR":
pass
else:
met_str = f"{x}: {overall_ret[ap_iou_thresh][x]*100:.2f}"
per_class_metrics.append(met_str)
ap_header = [f"mAP{x:.2f}" for x in self.ap_iou_thresh]
ap_str = ", ".join(ap_header)
ap_str += ": " + ", ".join(mAP_strs)
ap_str += "\n"
ar_header = [f"AR{x:.2f}" for x in self.ap_iou_thresh]
ap_str += ", ".join(ar_header)
ap_str += ": " + ", ".join(AR_strs)
if per_class:
per_class_metrics = "\n".join(per_class_metrics)
ap_str += "\n"
ap_str += per_class_metrics
return ap_str
def metrics_to_dict(self, overall_ret):
metrics_dict = {}
for ap_iou_thresh in self.ap_iou_thresh:
metrics_dict[f"mAP_{ap_iou_thresh}"] = (
overall_ret[ap_iou_thresh]["mAP"] * 100
)
metrics_dict[f"AR_{ap_iou_thresh}"] = overall_ret[ap_iou_thresh]["AR"] * 100
return metrics_dict
def reset(self):
self.gt_map_cls = {} # {scan_id: [(classname, bbox)]}
self.pred_map_cls = {} # {scan_id: [(classname, bbox, score)]}
self.scan_cnt = 0
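# Sketch of a typical evaluation loop with APCalculator. Names such as
# `dataset_config`, `model` and `loader` are placeholders, not defined in this
# module; the batch dict is assumed to carry the keys used by step_meter above.
def _demo_ap_calculator(dataset_config, model, loader, class2type_map=None):
    ap_calculator = APCalculator(dataset_config, ap_iou_thresh=[0.25, 0.5],
                                 class2type_map=class2type_map)
    with torch.no_grad():
        for batch in loader:
            outputs = model(batch)  # dict with "outputs" -> box predictions
            ap_calculator.step_meter(outputs, batch)
    metrics = ap_calculator.compute_metrics()  # {iou_thresh: OrderedDict of AP/AR}
    print(ap_calculator.metrics_to_str(metrics))
    return ap_calculator.metrics_to_dict(metrics)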
|
# Copyright (c) Facebook, Inc. and its affiliates.
import pickle
import torch
import torch.distributed as dist
def is_distributed():
if not dist.is_available() or not dist.is_initialized():
return False
return True
def get_rank():
if not is_distributed():
return 0
return dist.get_rank()
def is_primary():
return get_rank() == 0
def get_world_size():
if not is_distributed():
return 1
return dist.get_world_size()
def barrier():
if not is_distributed():
return
torch.distributed.barrier()
def setup_print_for_distributed(is_primary):
"""
This function disables printing when not in primary process
"""
import builtins as __builtin__
builtin_print = __builtin__.print
def print(*args, **kwargs):
force = kwargs.pop('force', False)
if is_primary or force:
builtin_print(*args, **kwargs)
__builtin__.print = print
def init_distributed(gpu_id, global_rank, world_size, dist_url, dist_backend):
torch.cuda.set_device(gpu_id)
print(
f"| distributed init (rank {global_rank}) (world {world_size}): {dist_url}",
flush=True,
)
torch.distributed.init_process_group(
backend=dist_backend,
init_method=dist_url,
world_size=world_size,
rank=global_rank,
)
torch.distributed.barrier()
setup_print_for_distributed(is_primary())
def all_reduce_sum(tensor):
if not is_distributed():
return tensor
dim_squeeze = False
if tensor.ndim == 0:
tensor = tensor[None, ...]
dim_squeeze = True
torch.distributed.all_reduce(tensor)
if dim_squeeze:
tensor = tensor.squeeze(0)
return tensor
def all_reduce_average(tensor):
val = all_reduce_sum(tensor)
return val / get_world_size()
# Function from DETR - https://github.com/facebookresearch/detr/blob/master/util/misc.py
def reduce_dict(input_dict, average=True):
"""
Args:
input_dict (dict): all the values will be reduced
average (bool): whether to do average or sum
Reduce the values in the dictionary from all processes so that all processes
have the averaged results. Returns a dict with the same fields as
input_dict, after reduction.
"""
world_size = get_world_size()
if world_size < 2:
return input_dict
with torch.no_grad():
names = []
values = []
# sort the keys so that they are consistent across processes
for k in sorted(input_dict.keys()):
names.append(k)
values.append(input_dict[k])
values = torch.stack(values, dim=0)
torch.distributed.all_reduce(values)
if average:
values /= world_size
reduced_dict = {k: v for k, v in zip(names, values)}
return reduced_dict
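# Minimal sketch of how reduce_dict is typically used for logging (the loss
# names below are placeholders): every rank ends up with the same averaged values.
def _demo_reduce_losses(loss_dict):
    # loss_dict: {"loss_cls": tensor, "loss_box": tensor, ...} on the local rank
    reduced = reduce_dict(loss_dict, average=True)
    if is_primary():
        print({k: v.item() for k, v in reduced.items()})
    return reduced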
# Function from https://github.com/facebookresearch/detr/blob/master/util/misc.py
def all_gather_pickle(data, device):
"""
Run all_gather on arbitrary picklable data (not necessarily tensors)
Args:
data: any picklable object
Returns:
list[data]: list of data gathered from each rank
"""
world_size = get_world_size()
if world_size == 1:
return [data]
# serialized to a Tensor
buffer = pickle.dumps(data)
storage = torch.ByteStorage.from_buffer(buffer)
tensor = torch.ByteTensor(storage).to(device)
# obtain Tensor size of each rank
local_size = torch.tensor([tensor.numel()], device=device)
size_list = [torch.tensor([0], device=device) for _ in range(world_size)]
dist.all_gather(size_list, local_size)
size_list = [int(size.item()) for size in size_list]
max_size = max(size_list)
# receiving Tensor from all ranks
# we pad the tensor because torch all_gather does not support
# gathering tensors of different shapes
tensor_list = []
for _ in size_list:
tensor_list.append(torch.empty((max_size,), dtype=torch.uint8, device=device))
if local_size != max_size:
padding = torch.empty(
size=(max_size - local_size,), dtype=torch.uint8, device=device
)
tensor = torch.cat((tensor, padding), dim=0)
dist.all_gather(tensor_list, tensor)
data_list = []
for size, tensor in zip(size_list, tensor_list):
buffer = tensor.cpu().numpy().tobytes()[:size]
data_list.append(pickle.loads(buffer))
return data_list
def all_gather_dict(data):
"""
Run all_gather on data which is a dictionary of Tensors
"""
assert isinstance(data, dict)
gathered_dict = {}
for item_key in data:
if isinstance(data[item_key], torch.Tensor):
if is_distributed():
data[item_key] = data[item_key].contiguous()
tensor_list = [torch.empty_like(data[item_key]) for _ in range(get_world_size())]
dist.all_gather(tensor_list, data[item_key])
gathered_tensor = torch.cat(tensor_list, dim=0)
else:
gathered_tensor = data[item_key]
gathered_dict[item_key] = gathered_tensor
return gathered_dict
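# Sketch: gathering per-rank prediction tensors before evaluation (assumes all
# ranks hold tensors of identical shape, which is what all_gather_dict expects).
def _demo_gather_predictions(outputs):
    # outputs: dict of (batch, ...) tensors produced on the local rank
    gathered = all_gather_dict(outputs)
    # each tensor is now concatenated along dim 0 across the world size
    return gathered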
|
# Copyright (c) Facebook, Inc. and its affiliates.
import math
from functools import partial
import numpy as np
import torch
import torch.nn as nn
from third_party.pointnet2.pointnet2_modules import PointnetSAModuleVotes
from third_party.pointnet2.pointnet2_utils import furthest_point_sample
from utils.pc_util import scale_points, shift_scale_points
from models.helpers import GenericMLP
from models.position_embedding import PositionEmbeddingCoordsSine
from models.transformer import (MaskedTransformerEncoder, TransformerDecoder,
TransformerDecoderLayer, TransformerEncoder,
TransformerEncoderLayer)
class BoxProcessor(object):
"""
Class to convert 3DETR MLP head outputs into bounding boxes
"""
def __init__(self, dataset_config):
self.dataset_config = dataset_config
def compute_predicted_center(self, center_offset, query_xyz, point_cloud_dims):
center_unnormalized = query_xyz + center_offset
center_normalized = shift_scale_points(
center_unnormalized, src_range=point_cloud_dims
)
return center_normalized, center_unnormalized
def compute_predicted_size(self, size_normalized, point_cloud_dims):
scene_scale = point_cloud_dims[1] - point_cloud_dims[0]
scene_scale = torch.clamp(scene_scale, min=1e-1)
size_unnormalized = scale_points(size_normalized, mult_factor=scene_scale)
return size_unnormalized
def compute_predicted_angle(self, angle_logits, angle_residual):
if angle_logits.shape[-1] == 1:
# special case for datasets with no rotation angle
# we still use the predictions so that model outputs are used
# in the backwards pass (DDP may complain otherwise)
angle = angle_logits * 0 + angle_residual * 0
angle = angle.squeeze(-1).clamp(min=0)
else:
angle_per_cls = 2 * np.pi / self.dataset_config.num_angle_bin
pred_angle_class = angle_logits.argmax(dim=-1).detach()
angle_center = angle_per_cls * pred_angle_class
angle = angle_center + angle_residual.gather(
2, pred_angle_class.unsqueeze(-1)
).squeeze(-1)
mask = angle > np.pi
angle[mask] = angle[mask] - 2 * np.pi
return angle
def compute_objectness_and_cls_prob(self, cls_logits):
assert cls_logits.shape[-1] == self.dataset_config.num_semcls + 1
cls_prob = torch.nn.functional.softmax(cls_logits, dim=-1)
objectness_prob = 1 - cls_prob[..., -1]
return cls_prob[..., :-1], objectness_prob
def box_parametrization_to_corners(
self, box_center_unnorm, box_size_unnorm, box_angle
):
return self.dataset_config.box_parametrization_to_corners(
box_center_unnorm, box_size_unnorm, box_angle
)
class Model3DETR(nn.Module):
"""
Main 3DETR model. Consists of the following learnable sub-models
- pre_encoder: takes raw point cloud, subsamples it and projects into "D" dimensions
Input is an N x 3 matrix of N point coordinates
Output is an N' x D matrix of N' point features
- encoder: series of self-attention blocks to extract point features
Input is an N' x D matrix of N' point features
Output is an N'' x D matrix of N'' point features.
N'' = N' for regular encoder; N'' = N'//2 for masked encoder
- query computation: samples a set of B coordinates from the N'' points
and outputs a BxD matrix of query features.
- decoder: series of self-attention and cross-attention blocks to produce BxD box features
Takes N''xD features from the encoder and BxD query features.
- mlp_heads: Predicts bounding box parameters and classes from the BxD box features
"""
def __init__(
self,
pre_encoder,
encoder,
decoder,
dataset_config,
encoder_dim=256,
decoder_dim=256,
position_embedding="fourier",
mlp_dropout=0.3,
num_queries=256,
):
super().__init__()
self.pre_encoder = pre_encoder
self.encoder = encoder
if hasattr(self.encoder, "masking_radius"):
hidden_dims = [encoder_dim]
else:
hidden_dims = [encoder_dim, encoder_dim]
self.encoder_to_decoder_projection = GenericMLP(
input_dim=encoder_dim,
hidden_dims=hidden_dims,
output_dim=decoder_dim,
norm_fn_name="bn1d",
activation="relu",
use_conv=True,
output_use_activation=True,
output_use_norm=True,
output_use_bias=False,
)
self.pos_embedding = PositionEmbeddingCoordsSine(
d_pos=decoder_dim, pos_type=position_embedding, normalize=True
)
self.query_projection = GenericMLP(
input_dim=decoder_dim,
hidden_dims=[decoder_dim],
output_dim=decoder_dim,
use_conv=True,
output_use_activation=True,
hidden_use_bias=True,
)
self.decoder = decoder
self.build_mlp_heads(dataset_config, decoder_dim, mlp_dropout)
self.num_queries = num_queries
self.box_processor = BoxProcessor(dataset_config)
def build_mlp_heads(self, dataset_config, decoder_dim, mlp_dropout):
mlp_func = partial(
GenericMLP,
norm_fn_name="bn1d",
activation="relu",
use_conv=True,
hidden_dims=[decoder_dim, decoder_dim],
dropout=mlp_dropout,
input_dim=decoder_dim,
)
# Semantic class of the box
# add 1 for background/not-an-object class
semcls_head = mlp_func(output_dim=dataset_config.num_semcls + 1)
# geometry of the box
center_head = mlp_func(output_dim=3)
size_head = mlp_func(output_dim=3)
angle_cls_head = mlp_func(output_dim=dataset_config.num_angle_bin)
angle_reg_head = mlp_func(output_dim=dataset_config.num_angle_bin)
mlp_heads = [
("sem_cls_head", semcls_head),
("center_head", center_head),
("size_head", size_head),
("angle_cls_head", angle_cls_head),
("angle_residual_head", angle_reg_head),
]
self.mlp_heads = nn.ModuleDict(mlp_heads)
def get_query_embeddings(self, encoder_xyz, point_cloud_dims):
query_inds = furthest_point_sample(encoder_xyz, self.num_queries)
query_inds = query_inds.long()
query_xyz = [torch.gather(encoder_xyz[..., x], 1, query_inds) for x in range(3)]
query_xyz = torch.stack(query_xyz)
query_xyz = query_xyz.permute(1, 2, 0)
# The gather op above can be replaced by the three lines below from the pointnet2 codebase
# xyz_flipped = encoder_xyz.transpose(1, 2).contiguous()
# query_xyz = gather_operation(xyz_flipped, query_inds.int())
# query_xyz = query_xyz.transpose(1, 2)
pos_embed = self.pos_embedding(query_xyz, input_range=point_cloud_dims)
query_embed = self.query_projection(pos_embed)
return query_xyz, query_embed
def _break_up_pc(self, pc):
# pc may contain color/normals.
xyz = pc[..., 0:3].contiguous()
features = pc[..., 3:].transpose(1, 2).contiguous() if pc.size(-1) > 3 else None
return xyz, features
def run_encoder(self, point_clouds):
xyz, features = self._break_up_pc(point_clouds)
pre_enc_xyz, pre_enc_features, pre_enc_inds = self.pre_encoder(xyz, features)
# xyz: batch x npoints x 3
# features: batch x channel x npoints
# inds: batch x npoints
# nn.MultiheadAttention in the encoder expects npoints x batch x channel features
pre_enc_features = pre_enc_features.permute(2, 0, 1)
# xyz points are in batch x npoint x channel order
enc_xyz, enc_features, enc_inds = self.encoder(
pre_enc_features, xyz=pre_enc_xyz
)
if enc_inds is None:
# encoder does not perform any downsampling
enc_inds = pre_enc_inds
else:
# use gather here to ensure that it works for both FPS and random sampling
enc_inds = torch.gather(pre_enc_inds, 1, enc_inds.type(torch.int64))
return enc_xyz, enc_features, enc_inds
def get_box_predictions(self, query_xyz, point_cloud_dims, box_features):
"""
Parameters:
query_xyz: batch x nqueries x 3 tensor of query XYZ coords
point_cloud_dims: List of [min, max] dims of point cloud
min: batch x 3 tensor of min XYZ coords
max: batch x 3 tensor of max XYZ coords
box_features: num_layers x num_queries x batch x channel
"""
# box_features change to (num_layers x batch) x channel x num_queries
box_features = box_features.permute(0, 2, 3, 1)
num_layers, batch, channel, num_queries = (
box_features.shape[0],
box_features.shape[1],
box_features.shape[2],
box_features.shape[3],
)
box_features = box_features.reshape(num_layers * batch, channel, num_queries)
# mlp head outputs are (num_layers x batch) x noutput x nqueries, so transpose last two dims
cls_logits = self.mlp_heads["sem_cls_head"](box_features).transpose(1, 2)
center_offset = (
self.mlp_heads["center_head"](box_features).sigmoid().transpose(1, 2) - 0.5
)
size_normalized = (
self.mlp_heads["size_head"](box_features).sigmoid().transpose(1, 2)
)
angle_logits = self.mlp_heads["angle_cls_head"](box_features).transpose(1, 2)
angle_residual_normalized = self.mlp_heads["angle_residual_head"](
box_features
).transpose(1, 2)
# reshape outputs to num_layers x batch x nqueries x noutput
cls_logits = cls_logits.reshape(num_layers, batch, num_queries, -1)
center_offset = center_offset.reshape(num_layers, batch, num_queries, -1)
size_normalized = size_normalized.reshape(num_layers, batch, num_queries, -1)
angle_logits = angle_logits.reshape(num_layers, batch, num_queries, -1)
angle_residual_normalized = angle_residual_normalized.reshape(
num_layers, batch, num_queries, -1
)
angle_residual = angle_residual_normalized * (
np.pi / angle_residual_normalized.shape[-1]
)
outputs = []
for l in range(num_layers):
# box processor converts outputs so we can get a 3D bounding box
(
center_normalized,
center_unnormalized,
) = self.box_processor.compute_predicted_center(
center_offset[l], query_xyz, point_cloud_dims
)
angle_continuous = self.box_processor.compute_predicted_angle(
angle_logits[l], angle_residual[l]
)
size_unnormalized = self.box_processor.compute_predicted_size(
size_normalized[l], point_cloud_dims
)
box_corners = self.box_processor.box_parametrization_to_corners(
center_unnormalized, size_unnormalized, angle_continuous
)
# below are not used in computing loss (only for matching/mAP eval)
# we compute them with no_grad() so that distributed training does not complain about unused variables
with torch.no_grad():
(
semcls_prob,
objectness_prob,
) = self.box_processor.compute_objectness_and_cls_prob(cls_logits[l])
box_prediction = {
"sem_cls_logits": cls_logits[l],
"center_normalized": center_normalized.contiguous(),
"center_unnormalized": center_unnormalized,
"size_normalized": size_normalized[l],
"size_unnormalized": size_unnormalized,
"angle_logits": angle_logits[l],
"angle_residual": angle_residual[l],
"angle_residual_normalized": angle_residual_normalized[l],
"angle_continuous": angle_continuous,
"objectness_prob": objectness_prob,
"sem_cls_prob": semcls_prob,
"box_corners": box_corners,
}
outputs.append(box_prediction)
# intermediate decoder layer outputs are only used during training
aux_outputs = outputs[:-1]
outputs = outputs[-1]
return {
"outputs": outputs, # output from last layer of decoder
"aux_outputs": aux_outputs, # output from intermediate layers of decoder
}
def forward(self, inputs, encoder_only=False):
point_clouds = inputs["point_clouds"]
enc_xyz, enc_features, enc_inds = self.run_encoder(point_clouds)
enc_features = self.encoder_to_decoder_projection(
enc_features.permute(1, 2, 0)
).permute(2, 0, 1)
# encoder features: npoints x batch x channel
# encoder xyz: npoints x batch x 3
if encoder_only:
# return: batch x npoints x channels
return enc_xyz, enc_features.transpose(0, 1)
point_cloud_dims = [
inputs["point_cloud_dims_min"],
inputs["point_cloud_dims_max"],
]
query_xyz, query_embed = self.get_query_embeddings(enc_xyz, point_cloud_dims)
# query_embed: batch x channel x npoint
enc_pos = self.pos_embedding(enc_xyz, input_range=point_cloud_dims)
# decoder expects: npoints x batch x channel
enc_pos = enc_pos.permute(2, 0, 1)
query_embed = query_embed.permute(2, 0, 1)
tgt = torch.zeros_like(query_embed)
box_features = self.decoder(
tgt, enc_features, query_pos=query_embed, pos=enc_pos
)[0]
box_predictions = self.get_box_predictions(
query_xyz, point_cloud_dims, box_features
)
return box_predictions
def build_preencoder(args):
mlp_dims = [3 * int(args.use_color), 64, 128, args.enc_dim]
preencoder = PointnetSAModuleVotes(
radius=0.2,
nsample=64,
npoint=args.preenc_npoints,
mlp=mlp_dims,
normalize_xyz=True,
)
return preencoder
def build_encoder(args):
if args.enc_type == "vanilla":
encoder_layer = TransformerEncoderLayer(
d_model=args.enc_dim,
nhead=args.enc_nhead,
dim_feedforward=args.enc_ffn_dim,
dropout=args.enc_dropout,
activation=args.enc_activation,
)
encoder = TransformerEncoder(
encoder_layer=encoder_layer, num_layers=args.enc_nlayers
)
elif args.enc_type in ["masked"]:
encoder_layer = TransformerEncoderLayer(
d_model=args.enc_dim,
nhead=args.enc_nhead,
dim_feedforward=args.enc_ffn_dim,
dropout=args.enc_dropout,
activation=args.enc_activation,
)
interim_downsampling = PointnetSAModuleVotes(
radius=0.4,
nsample=32,
npoint=args.preenc_npoints // 2,
mlp=[args.enc_dim, 256, 256, args.enc_dim],
normalize_xyz=True,
)
masking_radius = [math.pow(x, 2) for x in [0.4, 0.8, 1.2]]
encoder = MaskedTransformerEncoder(
encoder_layer=encoder_layer,
num_layers=3,
interim_downsampling=interim_downsampling,
masking_radius=masking_radius,
)
else:
raise ValueError(f"Unknown encoder type {args.enc_type}")
return encoder
def build_decoder(args):
decoder_layer = TransformerDecoderLayer(
d_model=args.dec_dim,
nhead=args.dec_nhead,
dim_feedforward=args.dec_ffn_dim,
dropout=args.dec_dropout,
)
decoder = TransformerDecoder(
decoder_layer, num_layers=args.dec_nlayers, return_intermediate=True
)
return decoder
def build_3detr(args, dataset_config):
pre_encoder = build_preencoder(args)
encoder = build_encoder(args)
decoder = build_decoder(args)
model = Model3DETR(
pre_encoder,
encoder,
decoder,
dataset_config,
encoder_dim=args.enc_dim,
decoder_dim=args.dec_dim,
mlp_dropout=args.mlp_dropout,
num_queries=args.nqueries,
)
output_processor = BoxProcessor(dataset_config)
return model, output_processor
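# Construction sketch for the builders above. The argparse.Namespace below is a
# stand-in for the real training args (values are illustrative defaults), and
# `dataset_config` is assumed to expose num_semcls, num_angle_bin and
# box_parametrization_to_corners as used by BoxProcessor. The pointnet2 ops
# require the compiled CUDA extension, so the model and points are moved to the GPU.
def _demo_build_and_run(dataset_config):
    from argparse import Namespace
    args = Namespace(
        enc_type="vanilla", enc_dim=256, enc_nhead=4, enc_ffn_dim=128,
        enc_dropout=0.1, enc_activation="relu", enc_nlayers=3,
        preenc_npoints=2048, use_color=False,
        dec_dim=256, dec_nhead=4, dec_ffn_dim=256, dec_dropout=0.1,
        dec_nlayers=8, mlp_dropout=0.3, nqueries=256,
    )
    model, _ = build_3detr(args, dataset_config)
    model = model.cuda().eval()
    pc = torch.rand(2, 20000, 3).cuda()
    inputs = {
        "point_clouds": pc,
        "point_cloud_dims_min": pc.min(dim=1).values,
        "point_cloud_dims_max": pc.max(dim=1).values,
    }
    with torch.no_grad():
        return model(inputs)  # dict with "outputs" (last layer) and "aux_outputs"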
|
# Copyright (c) Facebook, Inc. and its affiliates.
from .model_3detr import build_3detr
MODEL_FUNCS = {
"3detr": build_3detr,
}
def build_model(args, dataset_config):
model, processor = MODEL_FUNCS[args.model_name](args, dataset_config)
return model, processor |
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
"""
Modified from DETR Transformer class.
Copy-paste from torch.nn.Transformer with modifications:
* positional encodings are passed in MHattention
* extra LN at the end of encoder is removed
* decoder returns a stack of activations from all decoding layers
"""
from typing import Optional
import torch
from torch import Tensor, nn
from models.helpers import (ACTIVATION_DICT, NORM_DICT, WEIGHT_INIT_DICT,
get_clones)
class TransformerEncoder(nn.Module):
def __init__(self, encoder_layer, num_layers,
norm=None, weight_init_name="xavier_uniform"):
super().__init__()
self.layers = get_clones(encoder_layer, num_layers)
self.num_layers = num_layers
self.norm = norm
self._reset_parameters(weight_init_name)
def _reset_parameters(self, weight_init_name):
func = WEIGHT_INIT_DICT[weight_init_name]
for p in self.parameters():
if p.dim() > 1:
func(p)
def forward(self, src,
mask: Optional[Tensor] = None,
src_key_padding_mask: Optional[Tensor] = None,
pos: Optional[Tensor] = None,
xyz: Optional [Tensor] = None,
transpose_swap: Optional[bool] = False,
):
if transpose_swap:
bs, c, h, w = src.shape
src = src.flatten(2).permute(2, 0, 1)
if pos is not None:
pos = pos.flatten(2).permute(2, 0, 1)
output = src
orig_mask = mask
if orig_mask is not None and isinstance(orig_mask, list):
assert len(orig_mask) == len(self.layers)
elif orig_mask is not None:
orig_mask = [mask for _ in range(len(self.layers))]
for idx, layer in enumerate(self.layers):
if orig_mask is not None:
mask = orig_mask[idx]
# mask must be tiled to num_heads of the transformer
bsz, n, n = mask.shape
nhead = layer.nhead
mask = mask.unsqueeze(1)
mask = mask.repeat(1, nhead, 1, 1)
mask = mask.view(bsz * nhead, n, n)
output = layer(output, src_mask=mask,
src_key_padding_mask=src_key_padding_mask, pos=pos)
if self.norm is not None:
output = self.norm(output)
if transpose_swap:
output = output.permute(1, 2, 0).view(bs, c, h, w).contiguous()
xyz_inds = None
return xyz, output, xyz_inds
class TransformerDecoder(nn.Module):
def __init__(self, decoder_layer, num_layers, norm_fn_name="ln",
return_intermediate=False,
weight_init_name="xavier_uniform"):
super().__init__()
self.layers = get_clones(decoder_layer, num_layers)
self.num_layers = num_layers
self.norm = None
if norm_fn_name is not None:
self.norm = NORM_DICT[norm_fn_name](self.layers[0].linear2.out_features)
self.return_intermediate = return_intermediate
self._reset_parameters(weight_init_name)
def _reset_parameters(self, weight_init_name):
func = WEIGHT_INIT_DICT[weight_init_name]
for p in self.parameters():
if p.dim() > 1:
func(p)
def forward(self, tgt, memory,
tgt_mask: Optional[Tensor] = None,
memory_mask: Optional[Tensor] = None,
tgt_key_padding_mask: Optional[Tensor] = None,
memory_key_padding_mask: Optional[Tensor] = None,
pos: Optional[Tensor] = None,
query_pos: Optional[Tensor] = None,
transpose_swap: Optional [bool] = False,
return_attn_weights: Optional [bool] = False,
):
if transpose_swap:
bs, c, h, w = memory.shape
memory = memory.flatten(2).permute(2, 0, 1) # memory: bs, c, t -> t, b, c
if pos is not None:
pos = pos.flatten(2).permute(2, 0, 1)
output = tgt
intermediate = []
attns = []
for layer in self.layers:
output, attn = layer(output, memory, tgt_mask=tgt_mask,
memory_mask=memory_mask,
tgt_key_padding_mask=tgt_key_padding_mask,
memory_key_padding_mask=memory_key_padding_mask,
pos=pos, query_pos=query_pos,
return_attn_weights=return_attn_weights)
if self.return_intermediate:
intermediate.append(self.norm(output))
if return_attn_weights:
attns.append(attn)
if self.norm is not None:
output = self.norm(output)
if self.return_intermediate:
intermediate.pop()
intermediate.append(output)
if return_attn_weights:
attns = torch.stack(attns)
if self.return_intermediate:
return torch.stack(intermediate), attns
return output, attns
class MaskedTransformerEncoder(TransformerEncoder):
def __init__(self, encoder_layer, num_layers, masking_radius, interim_downsampling,
norm=None, weight_init_name="xavier_uniform"):
super().__init__(encoder_layer, num_layers, norm=norm, weight_init_name=weight_init_name)
assert len(masking_radius) == num_layers
self.masking_radius = masking_radius
self.interim_downsampling = interim_downsampling
def compute_mask(self, xyz, radius, dist=None):
with torch.no_grad():
if dist is None or dist.shape[1] != xyz.shape[1]:
dist = torch.cdist(xyz, xyz, p=2)
# entries that are True in the mask do not contribute to self-attention
# so points outside the radius are not considered
mask = dist >= radius
return mask, dist
def forward(self, src,
mask: Optional[Tensor] = None,
src_key_padding_mask: Optional[Tensor] = None,
pos: Optional[Tensor] = None,
xyz: Optional [Tensor] = None,
transpose_swap: Optional[bool] = False,
):
if transpose_swap:
bs, c, h, w = src.shape
src = src.flatten(2).permute(2, 0, 1)
if pos is not None:
pos = pos.flatten(2).permute(2, 0, 1)
output = src
xyz_dist = None
xyz_inds = None
for idx, layer in enumerate(self.layers):
mask = None
if self.masking_radius[idx] > 0:
mask, xyz_dist = self.compute_mask(xyz, self.masking_radius[idx], xyz_dist)
# mask must be tiled to num_heads of the transformer
bsz, n, n = mask.shape
nhead = layer.nhead
mask = mask.unsqueeze(1)
mask = mask.repeat(1, nhead, 1, 1)
mask = mask.view(bsz * nhead, n, n)
output = layer(output, src_mask=mask, src_key_padding_mask=src_key_padding_mask, pos=pos)
if idx == 0 and self.interim_downsampling:
# output is npoints x batch x channel. make batch x channel x npoints
output = output.permute(1, 2, 0)
xyz, output, xyz_inds = self.interim_downsampling(xyz, output)
# swap back
output = output.permute(2, 0, 1)
if self.norm is not None:
output = self.norm(output)
if transpose_swap:
output = output.permute(1, 2, 0).view(bs, c, h, w).contiguous()
return xyz, output, xyz_inds
def extra_repr(self):
radius_str = ", ".join(["%.2f"%(x) for x in self.masking_radius])
return f"masking_radius={radius_str}"
class TransformerEncoderLayer(nn.Module):
def __init__(self, d_model, nhead=4, dim_feedforward=128,
dropout=0.1, dropout_attn=None,
activation="relu", normalize_before=True, norm_name="ln",
use_ffn=True,
ffn_use_bias=True):
super().__init__()
if dropout_attn is None:
dropout_attn = dropout
self.self_attn = nn.MultiheadAttention(d_model, nhead, dropout=dropout_attn)
self.use_ffn = use_ffn
if self.use_ffn:
# Implementation of Feedforward model
self.linear1 = nn.Linear(d_model, dim_feedforward, bias=ffn_use_bias)
self.dropout = nn.Dropout(dropout, inplace=True)
self.linear2 = nn.Linear(dim_feedforward, d_model, bias=ffn_use_bias)
self.norm2 = NORM_DICT[norm_name](d_model)
self.dropout2 = nn.Dropout(dropout, inplace=True)
self.norm1 = NORM_DICT[norm_name](d_model)
self.use_norm_fn_on_input = True  # norm1 is applied after self-attention in the post-norm path
self.dropout1 = nn.Dropout(dropout, inplace=True)
self.activation = ACTIVATION_DICT[activation]()
self.normalize_before = normalize_before
self.nhead = nhead
def with_pos_embed(self, tensor, pos: Optional[Tensor]):
return tensor if pos is None else tensor + pos
def forward_post(self,
src,
src_mask: Optional[Tensor] = None,
src_key_padding_mask: Optional[Tensor] = None,
pos: Optional[Tensor] = None):
q = k = self.with_pos_embed(src, pos)
value = src
src2 = self.self_attn(q, k, value=value, attn_mask=src_mask,
key_padding_mask=src_key_padding_mask)[0]
src = src + self.dropout1(src2)
if self.use_norm_fn_on_input:
src = self.norm1(src)
if self.use_ffn:
src2 = self.linear2(self.dropout(self.activation(self.linear1(src))))
src = src + self.dropout2(src2)
src = self.norm2(src)
return src
def forward_pre(self, src,
src_mask: Optional[Tensor] = None,
src_key_padding_mask: Optional[Tensor] = None,
pos: Optional[Tensor] = None,
return_attn_weights: bool = False):
src2 = self.norm1(src)
value = src2
q = k = self.with_pos_embed(src2, pos)
src2, attn_weights = self.self_attn(q, k, value=value, attn_mask=src_mask,
key_padding_mask=src_key_padding_mask)
src = src + self.dropout1(src2)
if self.use_ffn:
src2 = self.norm2(src)
src2 = self.linear2(self.dropout(self.activation(self.linear1(src2))))
src = src + self.dropout2(src2)
if return_attn_weights:
return src, attn_weights
return src
def forward(self, src,
src_mask: Optional[Tensor] = None,
src_key_padding_mask: Optional[Tensor] = None,
pos: Optional[Tensor] = None,
return_attn_weights: bool = False):
if self.normalize_before:
return self.forward_pre(src, src_mask, src_key_padding_mask, pos, return_attn_weights)
return self.forward_post(src, src_mask, src_key_padding_mask, pos)
def extra_repr(self):
st = ""
if hasattr(self.self_attn, "dropout"):
st += f"attn_dr={self.self_attn.dropout}"
return st
class TransformerDecoderLayer(nn.Module):
def __init__(self, d_model, nhead=4, dim_feedforward=256,
dropout=0.1, dropout_attn=None,
activation="relu", normalize_before=True,
norm_fn_name="ln"):
super().__init__()
if dropout_attn is None:
dropout_attn = dropout
self.self_attn = nn.MultiheadAttention(d_model, nhead, dropout=dropout)
self.multihead_attn = nn.MultiheadAttention(d_model, nhead, dropout=dropout)
self.norm1 = NORM_DICT[norm_fn_name](d_model)
self.norm2 = NORM_DICT[norm_fn_name](d_model)
self.norm3 = NORM_DICT[norm_fn_name](d_model)
self.dropout1 = nn.Dropout(dropout, inplace=True)
self.dropout2 = nn.Dropout(dropout, inplace=True)
self.dropout3 = nn.Dropout(dropout, inplace=True)
# Implementation of Feedforward model
self.linear1 = nn.Linear(d_model, dim_feedforward)
self.dropout = nn.Dropout(dropout, inplace=True)
self.linear2 = nn.Linear(dim_feedforward, d_model)
self.activation = ACTIVATION_DICT[activation]()
self.normalize_before = normalize_before
def with_pos_embed(self, tensor, pos: Optional[Tensor]):
return tensor if pos is None else tensor + pos
def forward_post(self, tgt, memory,
tgt_mask: Optional[Tensor] = None,
memory_mask: Optional[Tensor] = None,
tgt_key_padding_mask: Optional[Tensor] = None,
memory_key_padding_mask: Optional[Tensor] = None,
pos: Optional[Tensor] = None,
query_pos: Optional[Tensor] = None,
return_attn_weights: Optional [bool] = False):
q = k = self.with_pos_embed(tgt, query_pos)
tgt2 = self.self_attn(q, k, value=tgt, attn_mask=tgt_mask,
key_padding_mask=tgt_key_padding_mask)[0]
tgt = tgt + self.dropout1(tgt2)
tgt = self.norm1(tgt)
tgt2, attn = self.multihead_attn(query=self.with_pos_embed(tgt, query_pos),
key=self.with_pos_embed(memory, pos),
value=memory, attn_mask=memory_mask,
key_padding_mask=memory_key_padding_mask)
tgt = tgt + self.dropout2(tgt2)
tgt = self.norm2(tgt)
tgt2 = self.linear2(self.dropout(self.activation(self.linear1(tgt))))
tgt = tgt + self.dropout3(tgt2)
tgt = self.norm3(tgt)
if return_attn_weights:
return tgt, attn
return tgt, None
def forward_pre(self, tgt, memory,
tgt_mask: Optional[Tensor] = None,
memory_mask: Optional[Tensor] = None,
tgt_key_padding_mask: Optional[Tensor] = None,
memory_key_padding_mask: Optional[Tensor] = None,
pos: Optional[Tensor] = None,
query_pos: Optional[Tensor] = None,
return_attn_weights: Optional [bool] = False):
tgt2 = self.norm1(tgt)
q = k = self.with_pos_embed(tgt2, query_pos)
tgt2 = self.self_attn(q, k, value=tgt2, attn_mask=tgt_mask,
key_padding_mask=tgt_key_padding_mask)[0]
tgt = tgt + self.dropout1(tgt2)
tgt2 = self.norm2(tgt)
tgt2, attn = self.multihead_attn(query=self.with_pos_embed(tgt2, query_pos),
key=self.with_pos_embed(memory, pos),
value=memory, attn_mask=memory_mask,
key_padding_mask=memory_key_padding_mask)
tgt = tgt + self.dropout2(tgt2)
tgt2 = self.norm3(tgt)
tgt2 = self.linear2(self.dropout(self.activation(self.linear1(tgt2))))
tgt = tgt + self.dropout3(tgt2)
if return_attn_weights:
return tgt, attn
return tgt, None
def forward(self, tgt, memory,
tgt_mask: Optional[Tensor] = None,
memory_mask: Optional[Tensor] = None,
tgt_key_padding_mask: Optional[Tensor] = None,
memory_key_padding_mask: Optional[Tensor] = None,
pos: Optional[Tensor] = None,
query_pos: Optional[Tensor] = None,
return_attn_weights: Optional [bool] = False):
if self.normalize_before:
return self.forward_pre(tgt, memory, tgt_mask, memory_mask,
tgt_key_padding_mask, memory_key_padding_mask, pos, query_pos, return_attn_weights)
return self.forward_post(tgt, memory, tgt_mask, memory_mask,
tgt_key_padding_mask, memory_key_padding_mask, pos, query_pos, return_attn_weights)
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
"""
Various positional encodings for the transformer.
"""
import math
import torch
from torch import nn
import numpy as np
from utils.pc_util import shift_scale_points
class PositionEmbeddingCoordsSine(nn.Module):
def __init__(
self,
temperature=10000,
normalize=False,
scale=None,
pos_type="fourier",
d_pos=None,
d_in=3,
gauss_scale=1.0,
):
super().__init__()
self.temperature = temperature
self.normalize = normalize
if scale is not None and normalize is False:
raise ValueError("normalize should be True if scale is passed")
if scale is None:
scale = 2 * math.pi
assert pos_type in ["sine", "fourier"]
self.pos_type = pos_type
self.scale = scale
if pos_type == "fourier":
assert d_pos is not None
assert d_pos % 2 == 0
# define a gaussian matrix input_ch -> output_ch
B = torch.empty((d_in, d_pos // 2)).normal_()
B *= gauss_scale
self.register_buffer("gauss_B", B)
self.d_pos = d_pos
def get_sine_embeddings(self, xyz, num_channels, input_range):
# clone coords so that shift/scale operations do not affect original tensor
orig_xyz = xyz
xyz = orig_xyz.clone()
ncoords = xyz.shape[1]
if self.normalize:
xyz = shift_scale_points(xyz, src_range=input_range)
ndim = num_channels // xyz.shape[2]
if ndim % 2 != 0:
ndim -= 1
# automatically handle remainder by assigning it to the first dim
rems = num_channels - (ndim * xyz.shape[2])
assert (
ndim % 2 == 0
), f"Cannot handle odd sized ndim={ndim} where num_channels={num_channels} and xyz={xyz.shape}"
final_embeds = []
prev_dim = 0
for d in range(xyz.shape[2]):
cdim = ndim
if rems > 0:
# add remainder in increments of two to maintain even size
cdim += 2
rems -= 2
if cdim != prev_dim:
dim_t = torch.arange(cdim, dtype=torch.float32, device=xyz.device)
dim_t = self.temperature ** (2 * (dim_t // 2) / cdim)
# create batch x ncoords x cdim embedding for this coordinate
raw_pos = xyz[:, :, d]
if self.scale:
raw_pos *= self.scale
pos = raw_pos[:, :, None] / dim_t
pos = torch.stack(
(pos[:, :, 0::2].sin(), pos[:, :, 1::2].cos()), dim=3
).flatten(2)
final_embeds.append(pos)
prev_dim = cdim
final_embeds = torch.cat(final_embeds, dim=2).permute(0, 2, 1)
return final_embeds
def get_fourier_embeddings(self, xyz, num_channels=None, input_range=None):
# Follows - https://people.eecs.berkeley.edu/~bmild/fourfeat/index.html
if num_channels is None:
num_channels = self.gauss_B.shape[1] * 2
bsize, npoints = xyz.shape[0], xyz.shape[1]
assert num_channels > 0 and num_channels % 2 == 0
d_in, max_d_out = self.gauss_B.shape[0], self.gauss_B.shape[1]
d_out = num_channels // 2
assert d_out <= max_d_out
assert d_in == xyz.shape[-1]
# clone coords so that shift/scale operations do not affect original tensor
orig_xyz = xyz
xyz = orig_xyz.clone()
ncoords = xyz.shape[1]
if self.normalize:
xyz = shift_scale_points(xyz, src_range=input_range)
xyz *= 2 * np.pi
xyz_proj = torch.mm(xyz.view(-1, d_in), self.gauss_B[:, :d_out]).view(
bsize, npoints, d_out
)
final_embeds = [xyz_proj.sin(), xyz_proj.cos()]
# return batch x d_pos x npoints embedding
final_embeds = torch.cat(final_embeds, dim=2).permute(0, 2, 1)
return final_embeds
def forward(self, xyz, num_channels=None, input_range=None):
assert isinstance(xyz, torch.Tensor)
assert xyz.ndim == 3
# xyz is batch x npoints x 3
if self.pos_type == "sine":
with torch.no_grad():
return self.get_sine_embeddings(xyz, num_channels, input_range)
elif self.pos_type == "fourier":
with torch.no_grad():
return self.get_fourier_embeddings(xyz, num_channels, input_range)
else:
raise ValueError(f"Unknown {self.pos_type}")
def extra_repr(self):
st = f"type={self.pos_type}, scale={self.scale}, normalize={self.normalize}"
if hasattr(self, "gauss_B"):
st += (
f", gaussB={self.gauss_B.shape}, gaussBsum={self.gauss_B.sum().item()}"
)
return st
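# Usage sketch for the positional embedding above (shapes follow the class):
# the returned embedding is batch x d_pos x npoints, matching what Model3DETR
# feeds into the transformer. The sizes below are illustrative, and input_range
# is assumed to be a [min, max] pair of batch x 3 tensors as used elsewhere.
def _demo_position_embedding():
    pos_embed = PositionEmbeddingCoordsSine(d_pos=256, pos_type="fourier", normalize=True)
    xyz = torch.rand(2, 1024, 3)
    input_range = [xyz.min(dim=1).values, xyz.max(dim=1).values]
    emb = pos_embed(xyz, input_range=input_range)  # (2, 256, 1024)
    return emb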
|
# Copyright (c) Facebook, Inc. and its affiliates.
import torch.nn as nn
from functools import partial
import copy
class BatchNormDim1Swap(nn.BatchNorm1d):
"""
Used with nn.Transformer modules that use an HW x N x C representation
"""
def forward(self, x):
"""
x: HW x N x C
permute to N x C x HW
Apply BN on C
permute back
"""
hw, n, c = x.shape
x = x.permute(1, 2, 0)
x = super(BatchNormDim1Swap, self).forward(x)
# x: n x c x hw -> hw x n x c
x = x.permute(2, 0, 1)
return x
NORM_DICT = {
"bn": BatchNormDim1Swap,
"bn1d": nn.BatchNorm1d,
"id": nn.Identity,
"ln": nn.LayerNorm,
}
ACTIVATION_DICT = {
"relu": nn.ReLU,
"gelu": nn.GELU,
"leakyrelu": partial(nn.LeakyReLU, negative_slope=0.1),
}
WEIGHT_INIT_DICT = {
"xavier_uniform": nn.init.xavier_uniform_,
}
class GenericMLP(nn.Module):
def __init__(
self,
input_dim,
hidden_dims,
output_dim,
norm_fn_name=None,
activation="relu",
use_conv=False,
dropout=None,
hidden_use_bias=False,
output_use_bias=True,
output_use_activation=False,
output_use_norm=False,
weight_init_name=None,
):
super().__init__()
activation = ACTIVATION_DICT[activation]
norm = None
if norm_fn_name is not None:
norm = NORM_DICT[norm_fn_name]
if norm_fn_name == "ln" and use_conv:
norm = lambda x: nn.GroupNorm(1, x) # easier way to use LayerNorm
if dropout is not None:
if not isinstance(dropout, list):
dropout = [dropout for _ in range(len(hidden_dims))]
layers = []
prev_dim = input_dim
for idx, x in enumerate(hidden_dims):
if use_conv:
layer = nn.Conv1d(prev_dim, x, 1, bias=hidden_use_bias)
else:
layer = nn.Linear(prev_dim, x, bias=hidden_use_bias)
layers.append(layer)
if norm:
layers.append(norm(x))
layers.append(activation())
if dropout is not None:
layers.append(nn.Dropout(p=dropout[idx]))
prev_dim = x
if use_conv:
layer = nn.Conv1d(prev_dim, output_dim, 1, bias=output_use_bias)
else:
layer = nn.Linear(prev_dim, output_dim, bias=output_use_bias)
layers.append(layer)
if output_use_norm:
layers.append(norm(output_dim))
if output_use_activation:
layers.append(activation())
self.layers = nn.Sequential(*layers)
if weight_init_name is not None:
self.do_weight_init(weight_init_name)
def do_weight_init(self, weight_init_name):
func = WEIGHT_INIT_DICT[weight_init_name]
for (_, param) in self.named_parameters():
if param.dim() > 1: # skips batchnorm/layernorm
func(param)
def forward(self, x):
output = self.layers(x)
return output
def get_clones(module, N):
return nn.ModuleList([copy.deepcopy(module) for i in range(N)])
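# Small usage sketch for GenericMLP (mirrors how the 3DETR heads are built):
# with use_conv=True the MLP operates on batch x channel x npoints tensors.
# The dimensions below are illustrative only.
def _demo_generic_mlp():
    import torch
    head = GenericMLP(input_dim=256, hidden_dims=[256, 256], output_dim=12,
                      norm_fn_name="bn1d", activation="relu", use_conv=True,
                      dropout=0.3)
    x = torch.rand(4, 256, 128)  # batch x channel x nqueries
    return head(x)               # batch x 12 x nqueries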
|
# Copyright (c) Facebook, Inc. and its affiliates.
''' Modified based on Ref: https://github.com/erikwijmans/Pointnet2_PyTorch '''
import torch
import torch.nn as nn
from typing import List, Tuple
class SharedMLP(nn.Sequential):
def __init__(
self,
args: List[int],
*,
bn: bool = False,
activation=nn.ReLU(inplace=True),
preact: bool = False,
first: bool = False,
name: str = ""
):
super().__init__()
for i in range(len(args) - 1):
self.add_module(
name + 'layer{}'.format(i),
Conv2d(
args[i],
args[i + 1],
bn=(not first or not preact or (i != 0)) and bn,
activation=activation
if (not first or not preact or (i != 0)) else None,
preact=preact
)
)
class _BNBase(nn.Sequential):
def __init__(self, in_size, batch_norm=None, name=""):
super().__init__()
self.add_module(name + "bn", batch_norm(in_size))
nn.init.constant_(self[0].weight, 1.0)
nn.init.constant_(self[0].bias, 0)
class BatchNorm1d(_BNBase):
def __init__(self, in_size: int, *, name: str = ""):
super().__init__(in_size, batch_norm=nn.BatchNorm1d, name=name)
class BatchNorm2d(_BNBase):
def __init__(self, in_size: int, name: str = ""):
super().__init__(in_size, batch_norm=nn.BatchNorm2d, name=name)
class BatchNorm3d(_BNBase):
def __init__(self, in_size: int, name: str = ""):
super().__init__(in_size, batch_norm=nn.BatchNorm3d, name=name)
class _ConvBase(nn.Sequential):
def __init__(
self,
in_size,
out_size,
kernel_size,
stride,
padding,
activation,
bn,
init,
conv=None,
batch_norm=None,
bias=True,
preact=False,
name=""
):
super().__init__()
bias = bias and (not bn)
conv_unit = conv(
in_size,
out_size,
kernel_size=kernel_size,
stride=stride,
padding=padding,
bias=bias
)
init(conv_unit.weight)
if bias:
nn.init.constant_(conv_unit.bias, 0)
if bn:
if not preact:
bn_unit = batch_norm(out_size)
else:
bn_unit = batch_norm(in_size)
if preact:
if bn:
self.add_module(name + 'bn', bn_unit)
if activation is not None:
self.add_module(name + 'activation', activation)
self.add_module(name + 'conv', conv_unit)
if not preact:
if bn:
self.add_module(name + 'bn', bn_unit)
if activation is not None:
self.add_module(name + 'activation', activation)
class Conv1d(_ConvBase):
def __init__(
self,
in_size: int,
out_size: int,
*,
kernel_size: int = 1,
stride: int = 1,
padding: int = 0,
activation=nn.ReLU(inplace=True),
bn: bool = False,
init=nn.init.kaiming_normal_,
bias: bool = True,
preact: bool = False,
name: str = ""
):
super().__init__(
in_size,
out_size,
kernel_size,
stride,
padding,
activation,
bn,
init,
conv=nn.Conv1d,
batch_norm=BatchNorm1d,
bias=bias,
preact=preact,
name=name
)
class Conv2d(_ConvBase):
def __init__(
self,
in_size: int,
out_size: int,
*,
kernel_size: Tuple[int, int] = (1, 1),
stride: Tuple[int, int] = (1, 1),
padding: Tuple[int, int] = (0, 0),
activation=nn.ReLU(inplace=True),
bn: bool = False,
init=nn.init.kaiming_normal_,
bias: bool = True,
preact: bool = False,
name: str = ""
):
super().__init__(
in_size,
out_size,
kernel_size,
stride,
padding,
activation,
bn,
init,
conv=nn.Conv2d,
batch_norm=BatchNorm2d,
bias=bias,
preact=preact,
name=name
)
class Conv3d(_ConvBase):
def __init__(
self,
in_size: int,
out_size: int,
*,
kernel_size: Tuple[int, int, int] = (1, 1, 1),
stride: Tuple[int, int, int] = (1, 1, 1),
padding: Tuple[int, int, int] = (0, 0, 0),
activation=nn.ReLU(inplace=True),
bn: bool = False,
init=nn.init.kaiming_normal_,
bias: bool = True,
preact: bool = False,
name: str = ""
):
super().__init__(
in_size,
out_size,
kernel_size,
stride,
padding,
activation,
bn,
init,
conv=nn.Conv3d,
batch_norm=BatchNorm3d,
bias=bias,
preact=preact,
name=name
)
class FC(nn.Sequential):
def __init__(
self,
in_size: int,
out_size: int,
*,
activation=nn.ReLU(inplace=True),
bn: bool = False,
init=None,
preact: bool = False,
name: str = ""
):
super().__init__()
fc = nn.Linear(in_size, out_size, bias=not bn)
if init is not None:
init(fc.weight)
if not bn:
nn.init.constant_(fc.bias, 0)
if preact:
if bn:
self.add_module(name + 'bn', BatchNorm1d(in_size))
if activation is not None:
self.add_module(name + 'activation', activation)
self.add_module(name + 'fc', fc)
if not preact:
if bn:
self.add_module(name + 'bn', BatchNorm1d(out_size))
if activation is not None:
self.add_module(name + 'activation', activation)
def set_bn_momentum_default(bn_momentum):
def fn(m):
if isinstance(m, (nn.BatchNorm1d, nn.BatchNorm2d, nn.BatchNorm3d)):
m.momentum = bn_momentum
return fn
class BNMomentumScheduler(object):
def __init__(
self, model, bn_lambda, last_epoch=-1,
setter=set_bn_momentum_default
):
if not isinstance(model, nn.Module):
raise RuntimeError(
"Class '{}' is not a PyTorch nn Module".format(
type(model).__name__
)
)
self.model = model
self.setter = setter
self.lmbd = bn_lambda
self.step(last_epoch + 1)
self.last_epoch = last_epoch
def step(self, epoch=None):
if epoch is None:
epoch = self.last_epoch + 1
self.last_epoch = epoch
self.model.apply(self.setter(self.lmbd(epoch)))
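# Usage sketch (illustrative; the channel sizes and the decay schedule below are
# arbitrary assumptions): build a SharedMLP with batch norm and drive its BN
# momentum with BNMomentumScheduler, mirroring how these helpers are typically
# combined in Pointnet2-style training loops.
if __name__ == "__main__":
    mlp = SharedMLP([3, 64, 128], bn=True)
    bn_lambda = lambda epoch: max(0.5 * (0.5 ** (epoch // 10)), 0.01)
    bnm_scheduler = BNMomentumScheduler(mlp, bn_lambda)
    for epoch in range(3):
        bnm_scheduler.step(epoch)  # sets .momentum on every BatchNorm layer in mlp
    print(mlp)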
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from setuptools import setup
from torch.utils.cpp_extension import BuildExtension, CUDAExtension
import glob
import os.path as osp
this_dir = osp.dirname(osp.abspath(__file__))
_ext_src_root = "_ext_src"
_ext_sources = glob.glob("{}/src/*.cpp".format(_ext_src_root)) + glob.glob(
"{}/src/*.cu".format(_ext_src_root)
)
_ext_headers = glob.glob("{}/include/*".format(_ext_src_root))
setup(
name='pointnet2',
ext_modules=[
CUDAExtension(
name='pointnet2._ext',
sources=_ext_sources,
extra_compile_args={
"cxx": ["-O2", "-I{}".format("{}/include".format(_ext_src_root))],
"nvcc": ["-O2", "-I{}".format("{}/include".format(_ext_src_root))],
},
include_dirs=[osp.join(this_dir, _ext_src_root, "include")],
)
],
cmdclass={
'build_ext': BuildExtension
}
)
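# Build notes (assumptions, not part of the original file): with a CUDA toolchain
# matching the installed PyTorch, this extension is typically compiled with either
#   pip install .
# from this directory, or, for an in-place development build,
#   python setup.py build_ext --inplace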
|
# Copyright (c) Facebook, Inc. and its affiliates.
''' Modified based on: https://github.com/erikwijmans/Pointnet2_PyTorch '''
from __future__ import (
division,
absolute_import,
with_statement,
print_function,
unicode_literals,
)
import torch
from torch.autograd import Function
import torch.nn as nn
import pytorch_utils as pt_utils
import sys
try:
import builtins
except:
import __builtin__ as builtins
try:
import pointnet2._ext as _ext
except ImportError:
if not getattr(builtins, "__POINTNET2_SETUP__", False):
raise ImportError(
"Could not import _ext module.\n"
"Please see the setup instructions in the README: "
"https://github.com/erikwijmans/Pointnet2_PyTorch/blob/master/README.rst"
)
if False:
# Workaround for type hints without depending on the `typing` module
from typing import *
class RandomDropout(nn.Module):
def __init__(self, p=0.5, inplace=False):
super(RandomDropout, self).__init__()
self.p = p
self.inplace = inplace
def forward(self, X):
theta = torch.Tensor(1).uniform_(0, self.p)[0]
return pt_utils.feature_dropout_no_scaling(X, theta, self.train, self.inplace)
class FurthestPointSampling(Function):
@staticmethod
def forward(ctx, xyz, npoint):
# type: (Any, torch.Tensor, int) -> torch.Tensor
r"""
Uses iterative furthest point sampling to select a set of npoint features that have the largest
minimum distance
Parameters
----------
xyz : torch.Tensor
(B, N, 3) tensor where N > npoint
npoint : int32
number of features in the sampled set
Returns
-------
torch.Tensor
(B, npoint) tensor containing the set
"""
fps_inds = _ext.furthest_point_sampling(xyz, npoint)
ctx.mark_non_differentiable(fps_inds)
return fps_inds
@staticmethod
def backward(xyz, a=None):
return None, None
furthest_point_sample = FurthestPointSampling.apply
class GatherOperation(Function):
@staticmethod
def forward(ctx, features, idx):
# type: (Any, torch.Tensor, torch.Tensor) -> torch.Tensor
r"""
Parameters
----------
features : torch.Tensor
(B, C, N) tensor
idx : torch.Tensor
(B, npoint) tensor of the features to gather
Returns
-------
torch.Tensor
(B, C, npoint) tensor
"""
_, C, N = features.size()
ctx.for_backwards = (idx, C, N)
return _ext.gather_points(features, idx)
@staticmethod
def backward(ctx, grad_out):
idx, C, N = ctx.for_backwards
grad_features = _ext.gather_points_grad(grad_out.contiguous(), idx, N)
return grad_features, None
gather_operation = GatherOperation.apply
class ThreeNN(Function):
@staticmethod
def forward(ctx, unknown, known):
# type: (Any, torch.Tensor, torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]
r"""
Find the three nearest neighbors of unknown in known
Parameters
----------
        unknown : torch.Tensor
            (B, n, 3) tensor of the unknown (query) points
        known : torch.Tensor
            (B, m, 3) tensor of the known (reference) points
Returns
-------
dist : torch.Tensor
(B, n, 3) l2 distance to the three nearest neighbors
idx : torch.Tensor
(B, n, 3) index of 3 nearest neighbors
"""
dist2, idx = _ext.three_nn(unknown, known)
return torch.sqrt(dist2), idx
@staticmethod
def backward(ctx, a=None, b=None):
return None, None
three_nn = ThreeNN.apply
class ThreeInterpolate(Function):
@staticmethod
def forward(ctx, features, idx, weight):
        # type: (Any, torch.Tensor, torch.Tensor, torch.Tensor) -> torch.Tensor
r"""
        Performs weighted linear interpolation on 3 features
Parameters
----------
features : torch.Tensor
(B, c, m) Features descriptors to be interpolated from
idx : torch.Tensor
(B, n, 3) three nearest neighbors of the target features in features
weight : torch.Tensor
(B, n, 3) weights
Returns
-------
torch.Tensor
(B, c, n) tensor of the interpolated features
"""
B, c, m = features.size()
n = idx.size(1)
ctx.three_interpolate_for_backward = (idx, weight, m)
return _ext.three_interpolate(features, idx, weight)
@staticmethod
def backward(ctx, grad_out):
# type: (Any, torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]
r"""
Parameters
----------
grad_out : torch.Tensor
            (B, c, n) tensor with gradients of outputs
Returns
-------
grad_features : torch.Tensor
(B, c, m) tensor with gradients of features
None
None
"""
idx, weight, m = ctx.three_interpolate_for_backward
grad_features = _ext.three_interpolate_grad(
grad_out.contiguous(), idx, weight, m
)
return grad_features, None, None
three_interpolate = ThreeInterpolate.apply
class GroupingOperation(Function):
@staticmethod
def forward(ctx, features, idx):
# type: (Any, torch.Tensor, torch.Tensor) -> torch.Tensor
r"""
Parameters
----------
features : torch.Tensor
(B, C, N) tensor of features to group
idx : torch.Tensor
            (B, npoint, nsample) tensor containing the indices of features to group with
Returns
-------
torch.Tensor
(B, C, npoint, nsample) tensor
"""
B, nfeatures, nsample = idx.size()
_, C, N = features.size()
ctx.for_backwards = (idx, N)
return _ext.group_points(features, idx)
@staticmethod
def backward(ctx, grad_out):
        # type: (Any, torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]
r"""
Parameters
----------
grad_out : torch.Tensor
(B, C, npoint, nsample) tensor of the gradients of the output from forward
Returns
-------
torch.Tensor
(B, C, N) gradient of the features
None
"""
idx, N = ctx.for_backwards
grad_features = _ext.group_points_grad(grad_out.contiguous(), idx, N)
return grad_features, None
grouping_operation = GroupingOperation.apply
class BallQuery(Function):
@staticmethod
def forward(ctx, radius, nsample, xyz, new_xyz):
# type: (Any, float, int, torch.Tensor, torch.Tensor) -> torch.Tensor
r"""
Parameters
----------
radius : float
radius of the balls
nsample : int
maximum number of features in the balls
xyz : torch.Tensor
(B, N, 3) xyz coordinates of the features
new_xyz : torch.Tensor
(B, npoint, 3) centers of the ball query
Returns
-------
torch.Tensor
            (B, npoint, nsample) tensor with the indices of the features that form the query balls
"""
inds = _ext.ball_query(new_xyz, xyz, radius, nsample)
ctx.mark_non_differentiable(inds)
return inds
@staticmethod
def backward(ctx, a=None):
return None, None, None, None
ball_query = BallQuery.apply
class QueryAndGroup(nn.Module):
r"""
Groups with a ball query of radius
Parameters
---------
radius : float32
Radius of ball
nsample : int32
Maximum number of features to gather in the ball
"""
def __init__(self, radius, nsample, use_xyz=True, ret_grouped_xyz=False, normalize_xyz=False, sample_uniformly=False, ret_unique_cnt=False):
# type: (QueryAndGroup, float, int, bool) -> None
super(QueryAndGroup, self).__init__()
self.radius, self.nsample, self.use_xyz = radius, nsample, use_xyz
self.ret_grouped_xyz = ret_grouped_xyz
self.normalize_xyz = normalize_xyz
self.sample_uniformly = sample_uniformly
self.ret_unique_cnt = ret_unique_cnt
if self.ret_unique_cnt:
assert(self.sample_uniformly)
def forward(self, xyz, new_xyz, features=None):
        # type: (QueryAndGroup, torch.Tensor, torch.Tensor, torch.Tensor) -> Tuple[torch.Tensor]
r"""
Parameters
----------
xyz : torch.Tensor
xyz coordinates of the features (B, N, 3)
new_xyz : torch.Tensor
            centroids (B, npoint, 3)
features : torch.Tensor
Descriptors of the features (B, C, N)
Returns
-------
new_features : torch.Tensor
(B, 3 + C, npoint, nsample) tensor
"""
idx = ball_query(self.radius, self.nsample, xyz, new_xyz)
if self.sample_uniformly:
unique_cnt = torch.zeros((idx.shape[0], idx.shape[1]))
for i_batch in range(idx.shape[0]):
for i_region in range(idx.shape[1]):
unique_ind = torch.unique(idx[i_batch, i_region, :])
num_unique = unique_ind.shape[0]
unique_cnt[i_batch, i_region] = num_unique
sample_ind = torch.randint(0, num_unique, (self.nsample - num_unique,), dtype=torch.long)
all_ind = torch.cat((unique_ind, unique_ind[sample_ind]))
idx[i_batch, i_region, :] = all_ind
xyz_trans = xyz.transpose(1, 2).contiguous()
grouped_xyz = grouping_operation(xyz_trans, idx) # (B, 3, npoint, nsample)
grouped_xyz -= new_xyz.transpose(1, 2).unsqueeze(-1)
if self.normalize_xyz:
grouped_xyz /= self.radius
if features is not None:
grouped_features = grouping_operation(features, idx)
if self.use_xyz:
new_features = torch.cat(
[grouped_xyz, grouped_features], dim=1
) # (B, C + 3, npoint, nsample)
else:
new_features = grouped_features
else:
assert (
self.use_xyz
), "Cannot have not features and not use xyz as a feature!"
new_features = grouped_xyz
ret = [new_features]
if self.ret_grouped_xyz:
ret.append(grouped_xyz)
if self.ret_unique_cnt:
ret.append(unique_cnt)
if len(ret) == 1:
return ret[0]
else:
return tuple(ret)
class GroupAll(nn.Module):
r"""
Groups all features
Parameters
---------
"""
def __init__(self, use_xyz=True, ret_grouped_xyz=False):
        # type: (GroupAll, bool, bool) -> None
        super(GroupAll, self).__init__()
        self.use_xyz = use_xyz
        self.ret_grouped_xyz = ret_grouped_xyz
def forward(self, xyz, new_xyz, features=None):
# type: (GroupAll, torch.Tensor, torch.Tensor, torch.Tensor) -> Tuple[torch.Tensor]
r"""
Parameters
----------
xyz : torch.Tensor
xyz coordinates of the features (B, N, 3)
new_xyz : torch.Tensor
Ignored
features : torch.Tensor
Descriptors of the features (B, C, N)
Returns
-------
new_features : torch.Tensor
(B, C + 3, 1, N) tensor
"""
grouped_xyz = xyz.transpose(1, 2).unsqueeze(2)
if features is not None:
grouped_features = features.unsqueeze(2)
if self.use_xyz:
new_features = torch.cat(
[grouped_xyz, grouped_features], dim=1
) # (B, 3 + C, 1, N)
else:
new_features = grouped_features
else:
new_features = grouped_xyz
if self.ret_grouped_xyz:
return new_features, grouped_xyz
else:
return new_features
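# Usage sketch (assumes the compiled pointnet2._ext CUDA module and a GPU are
# available; the tensor sizes, radius and nsample below are arbitrary assumptions).
# It mirrors the sample-then-group pattern used by the set abstraction modules
# built on top of these ops.
if __name__ == "__main__":
    if torch.cuda.is_available():
        B, N, C, npoint = 2, 1024, 16, 128
        xyz = torch.randn(B, N, 3).cuda()
        features = torch.randn(B, C, N).cuda()
        fps_inds = furthest_point_sample(xyz, npoint)  # (B, npoint)
        new_xyz = gather_operation(
            xyz.transpose(1, 2).contiguous(), fps_inds
        ).transpose(1, 2).contiguous()  # (B, npoint, 3)
        grouper = QueryAndGroup(radius=0.4, nsample=32, use_xyz=True)
        new_features = grouper(xyz, new_xyz, features)  # (B, C + 3, npoint, 32)
        print(new_features.shape)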
|
# Copyright (c) Facebook, Inc. and its affiliates.
''' Testing customized ops. '''
import torch
from torch.autograd import gradcheck
import numpy as np
import os
import sys
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
sys.path.append(BASE_DIR)
import pointnet2_utils
def test_interpolation_grad():
batch_size = 1
feat_dim = 2
m = 4
feats = torch.randn(batch_size, feat_dim, m, requires_grad=True).float().cuda()
def interpolate_func(inputs):
idx = torch.from_numpy(np.array([[[0,1,2],[1,2,3]]])).int().cuda()
weight = torch.from_numpy(np.array([[[1,1,1],[2,2,2]]])).float().cuda()
interpolated_feats = pointnet2_utils.three_interpolate(inputs, idx, weight)
return interpolated_feats
assert (gradcheck(interpolate_func, feats, atol=1e-1, rtol=1e-1))
if __name__=='__main__':
test_interpolation_grad()
|
# Copyright (c) Facebook, Inc. and its affiliates.
''' Pointnet2 layers.
Modified based on: https://github.com/erikwijmans/Pointnet2_PyTorch
Extended with the following:
1. Uniform sampling in each local region (sample_uniformly)
2. Return sampled points indices to support votenet.
'''
import torch
import torch.nn as nn
import torch.nn.functional as F
import os
import sys
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
sys.path.append(BASE_DIR)
import pointnet2_utils
import pytorch_utils as pt_utils
from typing import List
class _PointnetSAModuleBase(nn.Module):
def __init__(self):
super().__init__()
self.npoint = None
self.groupers = None
self.mlps = None
def forward(self, xyz: torch.Tensor,
features: torch.Tensor = None) -> (torch.Tensor, torch.Tensor):
r"""
Parameters
----------
xyz : torch.Tensor
(B, N, 3) tensor of the xyz coordinates of the features
features : torch.Tensor
            (B, C, N) tensor of the descriptors of the features
Returns
-------
new_xyz : torch.Tensor
(B, npoint, 3) tensor of the new features' xyz
new_features : torch.Tensor
            (B, \sum_k(mlps[k][-1]), npoint) tensor of the new_features descriptors
"""
new_features_list = []
xyz_flipped = xyz.transpose(1, 2).contiguous()
new_xyz = pointnet2_utils.gather_operation(
xyz_flipped,
pointnet2_utils.furthest_point_sample(xyz, self.npoint)
).transpose(1, 2).contiguous() if self.npoint is not None else None
for i in range(len(self.groupers)):
new_features = self.groupers[i](
xyz, new_xyz, features
) # (B, C, npoint, nsample)
new_features = self.mlps[i](
new_features
) # (B, mlp[-1], npoint, nsample)
new_features = F.max_pool2d(
new_features, kernel_size=[1, new_features.size(3)]
) # (B, mlp[-1], npoint, 1)
new_features = new_features.squeeze(-1) # (B, mlp[-1], npoint)
new_features_list.append(new_features)
return new_xyz, torch.cat(new_features_list, dim=1)
class PointnetSAModuleMSG(_PointnetSAModuleBase):
r"""Pointnet set abstrction layer with multiscale grouping
Parameters
----------
npoint : int
Number of features
radii : list of float32
list of radii to group with
nsamples : list of int32
Number of samples in each ball query
mlps : list of list of int32
Spec of the pointnet before the global max_pool for each scale
bn : bool
Use batchnorm
"""
def __init__(
self,
*,
npoint: int,
radii: List[float],
nsamples: List[int],
mlps: List[List[int]],
bn: bool = True,
use_xyz: bool = True,
sample_uniformly: bool = False
):
super().__init__()
assert len(radii) == len(nsamples) == len(mlps)
self.npoint = npoint
self.groupers = nn.ModuleList()
self.mlps = nn.ModuleList()
for i in range(len(radii)):
radius = radii[i]
nsample = nsamples[i]
self.groupers.append(
pointnet2_utils.QueryAndGroup(radius, nsample, use_xyz=use_xyz, sample_uniformly=sample_uniformly)
if npoint is not None else pointnet2_utils.GroupAll(use_xyz)
)
mlp_spec = mlps[i]
if use_xyz:
mlp_spec[0] += 3
self.mlps.append(pt_utils.SharedMLP(mlp_spec, bn=bn))
class PointnetSAModule(PointnetSAModuleMSG):
r"""Pointnet set abstrction layer
Parameters
----------
npoint : int
Number of features
radius : float
Radius of ball
nsample : int
Number of samples in the ball query
mlp : list
Spec of the pointnet before the global max_pool
bn : bool
Use batchnorm
"""
def __init__(
self,
*,
mlp: List[int],
npoint: int = None,
radius: float = None,
nsample: int = None,
bn: bool = True,
use_xyz: bool = True
):
super().__init__(
mlps=[mlp],
npoint=npoint,
radii=[radius],
nsamples=[nsample],
bn=bn,
use_xyz=use_xyz
)
class PointnetSAModuleVotes(nn.Module):
''' Modified based on _PointnetSAModuleBase and PointnetSAModuleMSG
with extra support for returning point indices for getting their GT votes '''
def __init__(
self,
*,
mlp: List[int],
npoint: int = None,
radius: float = None,
nsample: int = None,
bn: bool = True,
use_xyz: bool = True,
pooling: str = 'max',
sigma: float = None, # for RBF pooling
            normalize_xyz: bool = False,  # normalize local XYZ with radius
sample_uniformly: bool = False,
ret_unique_cnt: bool = False
):
super().__init__()
self.npoint = npoint
self.radius = radius
self.nsample = nsample
self.pooling = pooling
self.mlp_module = None
self.use_xyz = use_xyz
self.sigma = sigma
if self.sigma is None:
self.sigma = self.radius/2
self.normalize_xyz = normalize_xyz
self.ret_unique_cnt = ret_unique_cnt
if npoint is not None:
self.grouper = pointnet2_utils.QueryAndGroup(radius, nsample,
use_xyz=use_xyz, ret_grouped_xyz=True, normalize_xyz=normalize_xyz,
sample_uniformly=sample_uniformly, ret_unique_cnt=ret_unique_cnt)
else:
self.grouper = pointnet2_utils.GroupAll(use_xyz, ret_grouped_xyz=True)
mlp_spec = mlp
if use_xyz and len(mlp_spec)>0:
mlp_spec[0] += 3
self.mlp_module = pt_utils.SharedMLP(mlp_spec, bn=bn)
def forward(self, xyz: torch.Tensor,
features: torch.Tensor = None,
inds: torch.Tensor = None) -> (torch.Tensor, torch.Tensor):
r"""
Parameters
----------
xyz : torch.Tensor
(B, N, 3) tensor of the xyz coordinates of the features
features : torch.Tensor
            (B, C, N) tensor of the descriptors of the features
        inds : torch.Tensor
            (B, npoint) tensor that stores indices of the xyz points (values in 0..N-1)
Returns
-------
new_xyz : torch.Tensor
(B, npoint, 3) tensor of the new features' xyz
new_features : torch.Tensor
(B, \sum_k(mlps[k][-1]), npoint) tensor of the new_features descriptors
inds: torch.Tensor
(B, npoint) tensor of the inds
"""
xyz_flipped = xyz.transpose(1, 2).contiguous()
if inds is None:
inds = pointnet2_utils.furthest_point_sample(xyz, self.npoint)
else:
assert(inds.shape[1] == self.npoint)
new_xyz = pointnet2_utils.gather_operation(
xyz_flipped, inds
).transpose(1, 2).contiguous() if self.npoint is not None else None
if not self.ret_unique_cnt:
grouped_features, grouped_xyz = self.grouper(
xyz, new_xyz, features
) # (B, C, npoint, nsample)
else:
grouped_features, grouped_xyz, unique_cnt = self.grouper(
xyz, new_xyz, features
) # (B, C, npoint, nsample), (B,3,npoint,nsample), (B,npoint)
new_features = self.mlp_module(
grouped_features
) # (B, mlp[-1], npoint, nsample)
if self.pooling == 'max':
new_features = F.max_pool2d(
new_features, kernel_size=[1, new_features.size(3)]
) # (B, mlp[-1], npoint, 1)
elif self.pooling == 'avg':
new_features = F.avg_pool2d(
new_features, kernel_size=[1, new_features.size(3)]
) # (B, mlp[-1], npoint, 1)
elif self.pooling == 'rbf':
# Use radial basis function kernel for weighted sum of features (normalized by nsample and sigma)
# Ref: https://en.wikipedia.org/wiki/Radial_basis_function_kernel
rbf = torch.exp(-1 * grouped_xyz.pow(2).sum(1,keepdim=False) / (self.sigma**2) / 2) # (B, npoint, nsample)
new_features = torch.sum(new_features * rbf.unsqueeze(1), -1, keepdim=True) / float(self.nsample) # (B, mlp[-1], npoint, 1)
new_features = new_features.squeeze(-1) # (B, mlp[-1], npoint)
if not self.ret_unique_cnt:
return new_xyz, new_features, inds
else:
return new_xyz, new_features, inds, unique_cnt
class PointnetSAModuleMSGVotes(nn.Module):
''' Modified based on _PointnetSAModuleBase and PointnetSAModuleMSG
with extra support for returning point indices for getting their GT votes '''
def __init__(
self,
*,
mlps: List[List[int]],
npoint: int,
radii: List[float],
nsamples: List[int],
bn: bool = True,
use_xyz: bool = True,
sample_uniformly: bool = False
):
super().__init__()
assert(len(mlps) == len(nsamples) == len(radii))
self.npoint = npoint
self.groupers = nn.ModuleList()
self.mlps = nn.ModuleList()
for i in range(len(radii)):
radius = radii[i]
nsample = nsamples[i]
self.groupers.append(
pointnet2_utils.QueryAndGroup(radius, nsample, use_xyz=use_xyz, sample_uniformly=sample_uniformly)
if npoint is not None else pointnet2_utils.GroupAll(use_xyz)
)
mlp_spec = mlps[i]
if use_xyz:
mlp_spec[0] += 3
self.mlps.append(pt_utils.SharedMLP(mlp_spec, bn=bn))
def forward(self, xyz: torch.Tensor,
features: torch.Tensor = None, inds: torch.Tensor = None) -> (torch.Tensor, torch.Tensor):
r"""
Parameters
----------
xyz : torch.Tensor
(B, N, 3) tensor of the xyz coordinates of the features
features : torch.Tensor
            (B, C, N) tensor of the descriptors of the features
        inds : torch.Tensor
            (B, npoint) tensor that stores indices of the xyz points (values in 0..N-1)
Returns
-------
new_xyz : torch.Tensor
(B, npoint, 3) tensor of the new features' xyz
new_features : torch.Tensor
(B, \sum_k(mlps[k][-1]), npoint) tensor of the new_features descriptors
inds: torch.Tensor
(B, npoint) tensor of the inds
"""
new_features_list = []
xyz_flipped = xyz.transpose(1, 2).contiguous()
if inds is None:
inds = pointnet2_utils.furthest_point_sample(xyz, self.npoint)
new_xyz = pointnet2_utils.gather_operation(
xyz_flipped, inds
).transpose(1, 2).contiguous() if self.npoint is not None else None
for i in range(len(self.groupers)):
new_features = self.groupers[i](
xyz, new_xyz, features
) # (B, C, npoint, nsample)
new_features = self.mlps[i](
new_features
) # (B, mlp[-1], npoint, nsample)
new_features = F.max_pool2d(
new_features, kernel_size=[1, new_features.size(3)]
) # (B, mlp[-1], npoint, 1)
new_features = new_features.squeeze(-1) # (B, mlp[-1], npoint)
new_features_list.append(new_features)
return new_xyz, torch.cat(new_features_list, dim=1), inds
class PointnetFPModule(nn.Module):
r"""Propigates the features of one set to another
Parameters
----------
mlp : list
Pointnet module parameters
bn : bool
Use batchnorm
"""
def __init__(self, *, mlp: List[int], bn: bool = True):
super().__init__()
self.mlp = pt_utils.SharedMLP(mlp, bn=bn)
def forward(
self, unknown: torch.Tensor, known: torch.Tensor,
unknow_feats: torch.Tensor, known_feats: torch.Tensor
) -> torch.Tensor:
r"""
Parameters
----------
unknown : torch.Tensor
(B, n, 3) tensor of the xyz positions of the unknown features
known : torch.Tensor
(B, m, 3) tensor of the xyz positions of the known features
unknow_feats : torch.Tensor
            (B, C1, n) tensor of the features to be propagated to
        known_feats : torch.Tensor
            (B, C2, m) tensor of features to be propagated
Returns
-------
        new_features : torch.Tensor
            (B, mlp[-1], n) tensor of the propagated features at the unknown points
"""
if known is not None:
dist, idx = pointnet2_utils.three_nn(unknown, known)
dist_recip = 1.0 / (dist + 1e-8)
norm = torch.sum(dist_recip, dim=2, keepdim=True)
weight = dist_recip / norm
interpolated_feats = pointnet2_utils.three_interpolate(
known_feats, idx, weight
)
else:
interpolated_feats = known_feats.expand(
*known_feats.size()[0:2], unknown.size(1)
)
if unknow_feats is not None:
new_features = torch.cat([interpolated_feats, unknow_feats],
dim=1) #(B, C2 + C1, n)
else:
new_features = interpolated_feats
new_features = new_features.unsqueeze(-1)
new_features = self.mlp(new_features)
return new_features.squeeze(-1)
class PointnetLFPModuleMSG(nn.Module):
''' Modified based on _PointnetSAModuleBase and PointnetSAModuleMSG
learnable feature propagation layer.'''
def __init__(
self,
*,
mlps: List[List[int]],
radii: List[float],
nsamples: List[int],
post_mlp: List[int],
bn: bool = True,
use_xyz: bool = True,
sample_uniformly: bool = False
):
super().__init__()
assert(len(mlps) == len(nsamples) == len(radii))
self.post_mlp = pt_utils.SharedMLP(post_mlp, bn=bn)
self.groupers = nn.ModuleList()
self.mlps = nn.ModuleList()
for i in range(len(radii)):
radius = radii[i]
nsample = nsamples[i]
self.groupers.append(
pointnet2_utils.QueryAndGroup(radius, nsample, use_xyz=use_xyz,
sample_uniformly=sample_uniformly)
)
mlp_spec = mlps[i]
if use_xyz:
mlp_spec[0] += 3
self.mlps.append(pt_utils.SharedMLP(mlp_spec, bn=bn))
def forward(self, xyz2: torch.Tensor, xyz1: torch.Tensor,
features2: torch.Tensor, features1: torch.Tensor) -> torch.Tensor:
r""" Propagate features from xyz1 to xyz2.
Parameters
----------
xyz2 : torch.Tensor
(B, N2, 3) tensor of the xyz coordinates of the features
xyz1 : torch.Tensor
(B, N1, 3) tensor of the xyz coordinates of the features
features2 : torch.Tensor
            (B, C2, N2) tensor of the descriptors of the features
        features1 : torch.Tensor
            (B, C1, N1) tensor of the descriptors of the features
Returns
-------
new_features1 : torch.Tensor
(B, \sum_k(mlps[k][-1]), N1) tensor of the new_features descriptors
"""
new_features_list = []
for i in range(len(self.groupers)):
new_features = self.groupers[i](
xyz1, xyz2, features1
) # (B, C1, N2, nsample)
new_features = self.mlps[i](
new_features
) # (B, mlp[-1], N2, nsample)
new_features = F.max_pool2d(
new_features, kernel_size=[1, new_features.size(3)]
) # (B, mlp[-1], N2, 1)
new_features = new_features.squeeze(-1) # (B, mlp[-1], N2)
if features2 is not None:
new_features = torch.cat([new_features, features2],
dim=1) #(B, mlp[-1] + C2, N2)
new_features = new_features.unsqueeze(-1)
new_features = self.post_mlp(new_features)
new_features_list.append(new_features)
return torch.cat(new_features_list, dim=1).squeeze(-1)
if __name__ == "__main__":
from torch.autograd import Variable
torch.manual_seed(1)
torch.cuda.manual_seed_all(1)
xyz = Variable(torch.randn(2, 9, 3).cuda(), requires_grad=True)
xyz_feats = Variable(torch.randn(2, 9, 6).cuda(), requires_grad=True)
test_module = PointnetSAModuleMSG(
npoint=2, radii=[5.0, 10.0], nsamples=[6, 3], mlps=[[9, 3], [9, 6]]
)
test_module.cuda()
print(test_module(xyz, xyz_feats))
for _ in range(1):
_, new_features = test_module(xyz, xyz_feats)
new_features.backward(
torch.cuda.FloatTensor(*new_features.size()).fill_(1)
)
print(new_features)
print(xyz.grad)
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import pickle
import numpy as np
import os
np.random.seed(1234)
# we want 500 images for training and 100 for testing for each class
n = 500
def get_total(data):
data_x, data_y = [], []
for k, v in data.items():
for i in range(len(v)):
data_x.append(v[i])
data_y.append(k)
d = {}
d['images'] = data_x
d['labels'] = data_y
return d
# loading the pickled data
with open(os.path.join('../data/miniimagenet/data.pkl'), 'rb') as f:
data_dict = pickle.load(f)
data = data_dict['images']
labels = data_dict['labels']
# split data into classes, 600 images per class
class_dict = {}
for i in range(len(set(labels))):
class_dict[i] = []
for i in range(len(data)):
class_dict[labels[i]].append(data[i])
# Split data for each class to 500 and 100
x_train, x_test = {}, {}
for i in range(len(set(labels))):
np.random.shuffle(class_dict[i])
x_test[i] = class_dict[i][n:]
x_train[i] = class_dict[i][:n]
# mix the data
d_train = get_total(x_train)
d_test = get_total(x_test)
with open(os.path.join('../data/miniimagenet/train.pkl'), 'wb') as f:
pickle.dump(d_train, f)
with open(os.path.join('../data/miniimagenet/test.pkl'), 'wb') as f:
    pickle.dump(d_test, f)
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import sys, time, os
import numpy as np
import torch
import copy
import utils
from copy import deepcopy
from tqdm import tqdm
sys.path.append('../')
from networks.discriminator import Discriminator
class ACL(object):
def __init__(self, model, args, network):
self.args=args
self.nepochs=args.nepochs
self.sbatch=args.batch_size
# optimizer & adaptive lr
self.e_lr=args.e_lr
self.d_lr=args.d_lr
if not args.experiment == 'multidatasets':
self.e_lr=[args.e_lr] * args.ntasks
self.d_lr=[args.d_lr] * args.ntasks
else:
self.e_lr = [self.args.lrs[i][1] for i in range(len(args.lrs))]
self.d_lr = [self.args.lrs[i][1]/10. for i in range(len(args.lrs))]
print ("d_lrs : ", self.d_lr)
self.lr_min=args.lr_min
self.lr_factor=args.lr_factor
self.lr_patience=args.lr_patience
self.samples=args.samples
self.device=args.device
self.checkpoint=args.checkpoint
self.adv_loss_reg=args.adv
self.diff_loss_reg=args.orth
self.s_steps=args.s_step
self.d_steps=args.d_step
self.diff=args.diff
self.network=network
self.inputsize=args.inputsize
self.taskcla=args.taskcla
self.num_tasks=args.ntasks
# Initialize generator and discriminator
self.model=model
self.discriminator=self.get_discriminator(0)
self.discriminator.get_size()
self.latent_dim=args.latent_dim
self.task_loss=torch.nn.CrossEntropyLoss().to(self.device)
self.adversarial_loss_d=torch.nn.CrossEntropyLoss().to(self.device)
self.adversarial_loss_s=torch.nn.CrossEntropyLoss().to(self.device)
self.diff_loss=DiffLoss().to(self.device)
self.optimizer_S=self.get_S_optimizer(0)
self.optimizer_D=self.get_D_optimizer(0)
self.use_memory = True if self.args.use_memory == 'yes' else False
self.task_encoded={}
self.mu=0.0
self.sigma=1.0
print()
def get_discriminator(self, task_id):
discriminator=Discriminator(self.args, task_id).to(self.args.device)
return discriminator
def get_S_optimizer(self, task_id, e_lr=None):
if e_lr is None: e_lr=self.e_lr[task_id]
optimizer_S=torch.optim.SGD(self.model.parameters(), momentum=self.args.mom,
weight_decay=self.args.e_wd, lr=e_lr)
return optimizer_S
def get_D_optimizer(self, task_id, d_lr=None):
if d_lr is None: d_lr=self.d_lr[task_id]
optimizer_D=torch.optim.SGD(self.discriminator.parameters(), weight_decay=self.args.d_wd, lr=d_lr)
return optimizer_D
def train(self, task_id, dataset):
if task_id > 0:
self.model = self.prepare_model(task_id)
self.discriminator=self.get_discriminator(task_id)
best_loss=np.inf
best_model=utils.get_model(self.model)
best_loss_d=np.inf
best_model_d=utils.get_model(self.discriminator)
dis_lr_update=True
d_lr=self.d_lr[task_id]
patience_d=self.lr_patience
self.optimizer_D=self.get_D_optimizer(task_id, d_lr)
e_lr=self.e_lr[task_id]
patience=self.lr_patience
self.optimizer_S=self.get_S_optimizer(task_id, e_lr)
for e in range(self.nepochs):
# Train
clock0=time.time()
self.train_epoch(dataset['train'], task_id)
clock1=time.time()
train_res=self.eval_(dataset['train'], task_id)
utils.report_tr(train_res, e, self.sbatch, clock0, clock1)
            # Lower the learning rates early on if the model is still at random-chance accuracy after the first 5 epochs
if (self.args.experiment == 'cifar100' or self.args.experiment == 'miniimagenet') and e == 4:
random_chance=20.
threshold=random_chance + 2
if train_res['acc_t'] < threshold:
# Restore best validation model
d_lr=self.d_lr[task_id] / 10.
self.optimizer_D=self.get_D_optimizer(task_id, d_lr)
print("Performance on task {} is {} so Dis's lr is decreased to {}".format(task_id, train_res[
'acc_t'], d_lr), end=" ")
e_lr=self.e_lr[task_id] / 10.
self.optimizer_S=self.get_S_optimizer(task_id, e_lr)
self.discriminator=self.get_discriminator(task_id)
if task_id > 0:
self.model=self.load_checkpoint(task_id - 1)
else:
self.model=self.network.Net(self.args).to(self.args.device)
# Valid
valid_res=self.eval_(dataset['valid'], task_id)
utils.report_val(valid_res)
# Adapt lr for S and D
if valid_res['loss_tot'] < best_loss:
best_loss=valid_res['loss_tot']
best_model=utils.get_model(self.model)
patience=self.lr_patience
print(' *', end='')
else:
patience-=1
if patience <= 0:
e_lr/=self.lr_factor
print(' lr={:.1e}'.format(e_lr), end='')
if e_lr < self.lr_min:
print()
break
patience=self.lr_patience
self.optimizer_S=self.get_S_optimizer(task_id, e_lr)
if train_res['loss_a'] < best_loss_d:
best_loss_d=train_res['loss_a']
best_model_d=utils.get_model(self.discriminator)
patience_d=self.lr_patience
else:
patience_d-=1
if patience_d <= 0 and dis_lr_update:
d_lr/=self.lr_factor
print(' Dis lr={:.1e}'.format(d_lr))
if d_lr < self.lr_min:
dis_lr_update=False
print("Dis lr reached minimum value")
print()
patience_d=self.lr_patience
self.optimizer_D=self.get_D_optimizer(task_id, d_lr)
print()
# Restore best validation model (early-stopping)
self.model.load_state_dict(copy.deepcopy(best_model))
self.discriminator.load_state_dict(copy.deepcopy(best_model_d))
self.save_all_models(task_id)
def train_epoch(self, train_loader, task_id):
self.model.train()
self.discriminator.train()
for data, target, tt, td in train_loader:
x=data.to(device=self.device)
y=target.to(device=self.device, dtype=torch.long)
tt=tt.to(device=self.device)
if self.use_memory:
                # Detach samples in the batch that do not belong to the current task before feeding them to P (the private module)
t_current = task_id * torch.ones_like(tt)
body_mask = torch.eq(t_current, tt).cpu().numpy()
# x_task_module=data.to(device=self.device)
x_task_module = data.clone()
for index in range(x.size(0)):
if body_mask[index] == 0:
x_task_module[index] = x_task_module[index].detach()
x_task_module = x_task_module.to(device=self.device)
# Discriminator's real and fake task labels
t_real_D=td.to(self.device)
t_fake_D=torch.zeros_like(t_real_D).to(self.device)
# ================================================================== #
# Train Shared Module #
# ================================================================== #
# training S for s_steps
for s_step in range(self.s_steps):
self.optimizer_S.zero_grad()
self.model.zero_grad()
if self.use_memory:
output=self.model(x, x_task_module, tt)
else:
output = self.model(x, x)
# task_loss=self.task_loss(output, y)
task_loss=self.task_loss(output['out'], y)
shared_out, private_out = output['shared'], output['private']
dis_out_gen_training=self.discriminator.forward(shared_out)
adv_loss=self.adversarial_loss_s(dis_out_gen_training, t_real_D)
if self.diff == 'yes':
diff_loss=self.diff_loss(shared_out, private_out)
else:
diff_loss=torch.tensor(0).to(device=self.device, dtype=torch.float32)
self.diff_loss_reg=0
total_loss=task_loss + self.adv_loss_reg * adv_loss + self.diff_loss_reg * diff_loss
total_loss.backward(retain_graph=True)
self.optimizer_S.step()
# ================================================================== #
# Train Discriminator #
# ================================================================== #
# training discriminator for d_steps
for d_step in range(self.d_steps):
self.optimizer_D.zero_grad()
self.discriminator.zero_grad()
# training discriminator on real data
if self.use_memory:
output=self.model(x, x_task_module, tt)
else:
output = self.model(x, x)
# training discriminator on real data
shared_out, private_out = output['shared'], output['private']
dis_real_out=self.discriminator.forward(shared_out.detach())
dis_real_loss=self.adversarial_loss_d(dis_real_out, t_real_D)
if self.args.experiment == 'miniimagenet':
dis_real_loss*=self.args.adv
dis_real_loss.backward(retain_graph=True)
# training discriminator on fake data
z_fake=torch.as_tensor(np.random.normal(self.mu, self.sigma, (x.size(0), self.latent_dim)),
dtype=torch.float32, device=self.device)
dis_fake_out=self.discriminator.forward(z_fake)
dis_fake_loss=self.adversarial_loss_d(dis_fake_out, t_fake_D)
if self.args.experiment == 'miniimagenet':
dis_fake_loss*=self.args.adv
dis_fake_loss.backward(retain_graph=True)
self.optimizer_D.step()
return
def eval_(self, data_loader, task_id):
loss_a, loss_t, loss_d, loss_total=0, 0, 0, 0
correct_d, correct_t = 0, 0
num=0
batch=0
self.model.eval()
self.discriminator.eval()
res={}
with torch.no_grad():
for batch, (data, target, tt, td) in enumerate(data_loader):
x=data.to(device=self.device)
y=target.to(device=self.device, dtype=torch.long)
tt=tt.to(device=self.device)
t_real_D=td.to(self.device)
# Forward
output = self.model(x, x)
shared_out, private_out = output['shared'], output['private']
_, pred=output['out'].max(1)
correct_t+=pred.eq(y.view_as(pred)).sum().item()
# Discriminator's performance:
output_d=self.discriminator(shared_out)
_, pred_d=output_d.max(1)
correct_d+=pred_d.eq(t_real_D.view_as(pred_d)).sum().item()
# Loss values
task_loss=self.task_loss(output['out'], y)
adv_loss=self.adversarial_loss_d(output_d, t_real_D)
if self.diff == 'yes':
diff_loss=self.diff_loss(shared_out, private_out)
else:
diff_loss=torch.tensor(0).to(device=self.device, dtype=torch.float32)
self.diff_loss_reg=0
total_loss = task_loss + self.adv_loss_reg * adv_loss + self.diff_loss_reg * diff_loss
loss_t+=task_loss
loss_a+=adv_loss
loss_d+=diff_loss
loss_total+=total_loss
num+=x.size(0)
res['loss_t'], res['acc_t']=loss_t.item() / (batch + 1), 100 * correct_t / num
res['loss_a'], res['acc_d']=loss_a.item() / (batch + 1), 100 * correct_d / num
res['loss_d']=loss_d.item() / (batch + 1)
res['loss_tot']=loss_total.item() / (batch + 1)
res['size']=self.loader_size(data_loader)
return res
#
def test(self, data_loader, task_id, model):
loss_a, loss_t, loss_d, loss_total=0, 0, 0, 0
correct_d, correct_t=0, 0
num=0
batch=0
model.eval()
self.discriminator.eval()
res={}
with torch.no_grad():
for batch, (data, target, tt, td) in enumerate(data_loader):
x=data.to(device=self.device)
y=target.to(device=self.device, dtype=torch.long)
t_real_D=td.to(self.device)
# Forward
output = model(x, x)
# shared_out, private_out = self.model.get_encoded_ftrs(x, x)
shared_out, private_out = output['shared'], output['private']
_, pred=output['out'].max(1)
correct_t+=pred.eq(y.view_as(pred)).sum().item()
# Discriminator's performance:
output_d=self.discriminator(shared_out)
_, pred_d=output_d.max(1)
correct_d+=pred_d.eq(t_real_D.view_as(pred_d)).sum().item()
if self.diff == 'yes':
diff_loss=self.diff_loss(shared_out, private_out)
else:
diff_loss=torch.tensor(0).to(device=self.device, dtype=torch.float32)
self.diff_loss_reg=0
# Loss values
adv_loss=self.adversarial_loss_d(output_d, t_real_D)
task_loss=self.task_loss(output['out'], y)
total_loss=task_loss + self.adv_loss_reg * adv_loss + self.diff_loss_reg * diff_loss
loss_t+=task_loss
loss_a+=adv_loss
loss_d+=diff_loss
loss_total+=total_loss
num+=x.size(0)
res['loss_t'], res['acc_t']=loss_t.item() / (batch + 1), 100 * correct_t / num
res['loss_a'], res['acc_d']=loss_a.item() / (batch + 1), 100 * correct_d / num
res['loss_d']=loss_d.item() / (batch + 1)
res['loss_tot']=loss_total.item() / (batch + 1)
res['size']=self.loader_size(data_loader)
return res
def save_all_models(self, task_id):
print("Saving all models for task {} ...".format(task_id+1))
dis=utils.get_model(self.discriminator)
torch.save({'model_state_dict': dis,
}, os.path.join(self.checkpoint, 'discriminator_{}.pth.tar'.format(task_id)))
model=utils.get_model(self.model)
torch.save({'model_state_dict': model,
}, os.path.join(self.checkpoint, 'model_{}.pth.tar'.format(task_id)))
def load_model(self, task_id):
# Load a previous model
net=self.network.Net(self.args)
checkpoint=torch.load(os.path.join(self.checkpoint, 'model_{}.pth.tar'.format(task_id)))
net.load_state_dict(checkpoint['model_state_dict'])
# # Change the previous shared module with the current one
current_shared_module=deepcopy(self.model.shared.state_dict())
net.shared.load_state_dict(current_shared_module)
net=net.to(self.args.device)
return net
def load_checkpoint(self, task_id):
print("Loading checkpoint for task {} ...".format(task_id))
# Load a previous model
net=self.network.Net(self.args)
checkpoint=torch.load(os.path.join(self.checkpoint, 'model_{}.pth.tar'.format(task_id)))
net.load_state_dict(checkpoint['model_state_dict'])
net=net.to(self.args.device)
return net
def prepare_model(self, task_id):
# Load a previous model and grab its shared module
old_net = self.load_checkpoint(task_id-1)
old_shared_module = old_net.shared.state_dict()
# Instantiate a new model and replace its shared module
model = self.network.Net(self.args)
model.shared.load_state_dict(old_shared_module)
model = model.to(self.device)
return model
def loader_size(self, data_loader):
return data_loader.dataset.__len__()
def get_tsne_embeddings_first_ten_tasks(self, dataset, model):
from tensorboardX import SummaryWriter
model.eval()
tag_ = '_diff_{}'.format(self.args.diff)
all_images, all_shared, all_private = [], [], []
# Test final model on first 10 tasks:
writer = SummaryWriter()
for t in range(10):
for itr, (data, _, tt, td) in enumerate(dataset[t]['tsne']):
x = data.to(device=self.device)
tt = tt.to(device=self.device)
output = model.forward(x, x, tt, t)
shared_out, private_out = model.get_encoded_ftrs(x, x, t)
all_shared.append(shared_out)
all_private.append(private_out)
all_images.append(x)
print (torch.stack(all_shared).size())
tag = ['Shared10_{}_{}'.format(tag_,i) for i in range(1,11)]
writer.add_embedding(mat=torch.stack(all_shared,dim=1).data, label_img=torch.stack(all_images,dim=1).data, metadata=list(range(1,11)),
tag=tag)#, metadata_header=list(range(1,11)))
tag = ['Private10_{}_{}'.format(tag_, i) for i in range(1, 11)]
writer.add_embedding(mat=torch.stack(all_private,dim=1).data, label_img=torch.stack(all_images,dim=1).data, metadata=list(range(1,11)),
tag=tag)#,metadata_header=list(range(1,11)))
writer.close()
def get_tsne_embeddings_last_three_tasks(self, dataset, model):
from tensorboardX import SummaryWriter
# Test final model on last 3 tasks:
model.eval()
tag = '_diff_{}'.format(self.args.diff)
for t in [17,18,19]:
all_images, all_labels, all_shared, all_private = [], [], [], []
writer = SummaryWriter()
for itr, (data, target, tt, td) in enumerate(dataset[t]['tsne']):
x = data.to(device=self.device)
y = target.to(device=self.device, dtype=torch.long)
tt = tt.to(device=self.device)
output = model.forward(x, x, tt, t)
shared_out, private_out = model.get_encoded_ftrs(x, x, t)
# print (shared_out.size())
all_shared.append(shared_out)
all_private.append(private_out)
all_images.append(x)
all_labels.append(y)
writer.add_embedding(mat=torch.stack(all_shared,dim=1).data, label_img=torch.stack(all_images,dim=1).data,
metadata=list(range(1,6)), tag='Shared_{}_{}'.format(t, tag))
# ,metadata_header=list(range(1,6)))
writer.add_embedding(mat=torch.stack(all_private,dim=1).data, label_img=torch.stack(all_images,dim=1).data,
metadata=list(range(1,6)), tag='Private_{}_{}'.format(t, tag))
# ,metadata_header=list(range(1,6)))
writer.close()
def inference(self, data_loader, task_id, model):
loss_a, loss_t, loss_d, loss_total = 0, 0, 0, 0
correct_d, correct_t = 0, 0
num = 0
batch = 0
model.eval()
self.discriminator.eval()
res = {}
with torch.no_grad():
for batch, (data, target, tt, td) in enumerate(data_loader):
x = data.to(device=self.device)
y = target.to(device=self.device, dtype=torch.long)
tt = tt.to(device=self.device)
# Forward
output = model.forward(x, x, tt, task_id)
shared_out, task_out = model.get_encoded_ftrs(x, x, task_id)
_, pred = output.max(1)
correct_t += pred.eq(y.view_as(pred)).sum().item()
if self.diff == 'yes':
diff_loss = self.diff_loss(shared_out, task_out)
else:
diff_loss = torch.tensor(0).to(device=self.device, dtype=torch.float32)
self.diff_loss_reg = 0
# Loss values
task_loss = self.task_loss(output, y)
total_loss = task_loss + self.diff_loss_reg * diff_loss
loss_t += task_loss
# loss_a += adv_loss
loss_d += diff_loss
loss_total += total_loss
num += x.size(0)
res['loss_t'], res['acc_t'] = loss_t.item() / (batch + 1), 100 * correct_t / num
res['loss_d'] = loss_d.item() / (batch + 1)
res['loss_tot'] = loss_total.item() / (batch + 1)
res['size'] = self.loader_size(data_loader)
return res
#
class DiffLoss(torch.nn.Module):
# From: Domain Separation Networks (https://arxiv.org/abs/1608.06019)
# Konstantinos Bousmalis, George Trigeorgis, Nathan Silberman, Dilip Krishnan, Dumitru Erhan
def __init__(self):
super(DiffLoss, self).__init__()
def forward(self, D1, D2):
D1=D1.view(D1.size(0), -1)
D1_norm=torch.norm(D1, p=2, dim=1, keepdim=True).detach()
D1_norm=D1.div(D1_norm.expand_as(D1) + 1e-6)
D2=D2.view(D2.size(0), -1)
D2_norm=torch.norm(D2, p=2, dim=1, keepdim=True).detach()
D2_norm=D2.div(D2_norm.expand_as(D2) + 1e-6)
# return torch.mean((D1_norm.mm(D2_norm.t()).pow(2)))
return torch.mean((D1_norm.mm(D2_norm.t()).pow(2)))
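# Minimal sketch of how DiffLoss is used (illustrative; the batch size and feature
# dimension below are arbitrary assumptions): it penalizes correlation between the
# shared and private embeddings, encouraging the two spaces to stay orthogonal.
if __name__ == "__main__":
    shared = torch.randn(4, 32)
    private = torch.randn(4, 32)
    orthogonality_penalty = DiffLoss()(shared, private)
    print(orthogonality_penalty.item())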
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import os
import numpy as np
from copy import deepcopy
import pickle
import time
import uuid
from subprocess import call
########################################################################################################################
def human_format(num):
magnitude=0
while abs(num)>=1000:
magnitude+=1
num/=1000.0
return '%.1f%s'%(num,['','K','M','G','T','P'][magnitude])
def report_tr(res, e, sbatch, clock0, clock1):
# Training performance
print(
'| Epoch {:3d}, time={:5.1f}ms/{:5.1f}ms | Train losses={:.3f} | T: loss={:.3f}, acc={:5.2f}% | D: loss={:.3f}, acc={:5.1f}%, '
'Diff loss:{:.3f} |'.format(
e + 1,
1000 * sbatch * (clock1 - clock0) / res['size'],
1000 * sbatch * (time.time() - clock1) / res['size'], res['loss_tot'],
res['loss_t'], res['acc_t'], res['loss_a'], res['acc_d'], res['loss_d']), end='')
def report_val(res):
# Validation performance
print(' Valid losses={:.3f} | T: loss={:.6f}, acc={:5.2f}%, | D: loss={:.3f}, acc={:5.2f}%, Diff loss={:.3f} |'.format(
res['loss_tot'], res['loss_t'], res['acc_t'], res['loss_a'], res['acc_d'], res['loss_d']), end='')
########################################################################################################################
def get_model(model):
return deepcopy(model.state_dict())
########################################################################################################################
def compute_conv_output_size(Lin,kernel_size,stride=1,padding=0,dilation=1):
return int(np.floor((Lin+2*padding-dilation*(kernel_size-1)-1)/float(stride)+1))
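# Example (illustrative): a 3x3 conv with stride 1 and padding 1 preserves spatial size,
# e.g. compute_conv_output_size(32, 3, stride=1, padding=1) == 32, while stride 2
# halves it: compute_conv_output_size(32, 3, stride=2, padding=1) == 16.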
########################################################################################################################
def save_print_log(taskcla, acc, lss, output_path):
print('*'*100)
print('Accuracies =')
for i in range(acc.shape[0]):
print('\t',end=',')
for j in range(acc.shape[1]):
print('{:5.4f}% '.format(acc[i,j]),end=',')
print()
print ('ACC: {:5.4f}%'.format((np.mean(acc[acc.shape[0]-1,:]))))
print()
print ('BWD Transfer = ')
print ()
print ("Diagonal R_ii")
for i in range(acc.shape[0]):
print('\t',end='')
print('{:5.2f}% '.format(np.diag(acc)[i]), end=',')
print()
print ("Last row")
for i in range(acc.shape[0]):
print('\t', end=',')
print('{:5.2f}% '.format(acc[-1][i]), end=',')
print()
# BWT calculated based on GEM paper (https://arxiv.org/abs/1706.08840)
gem_bwt = sum(acc[-1]-np.diag(acc))/ (len(acc[-1])-1)
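    # i.e. BWT = (1/(T-1)) * sum_{i < T} (R_{T,i} - R_{i,i}); the last task's diagonal
    # term cancels itself, so summing over the whole last row and dividing by T-1 is equivalent.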
# BWT calculated based on our UCB paper (https://openreview.net/pdf?id=HklUCCVKDB)
ucb_bwt = (acc[-1] - np.diag(acc)).mean()
print ('BWT: {:5.2f}%'.format(gem_bwt))
# print ('BWT (UCB paper): {:5.2f}%'.format(ucb_bwt))
print('*'*100)
print('Done!')
logs = {}
# save results
logs['name'] = output_path
logs['taskcla'] = taskcla
logs['acc'] = acc
logs['loss'] = lss
logs['gem_bwt'] = gem_bwt
logs['ucb_bwt'] = ucb_bwt
logs['rii'] = np.diag(acc)
logs['rij'] = acc[-1]
# pickle
with open(os.path.join(output_path, 'logs.p'), 'wb') as output:
pickle.dump(logs, output)
print ("Log file saved in ", os.path.join(output_path, 'logs.p'))
def print_log_acc_bwt(taskcla, acc, lss, output_path, run_id):
print('*'*100)
print('Accuracies =')
for i in range(acc.shape[0]):
print('\t',end=',')
for j in range(acc.shape[1]):
print('{:5.4f}% '.format(acc[i,j]),end=',')
print()
avg_acc = np.mean(acc[acc.shape[0]-1,:])
print ('ACC: {:5.4f}%'.format(avg_acc))
print()
print()
# BWT calculated based on GEM paper (https://arxiv.org/abs/1706.08840)
gem_bwt = sum(acc[-1]-np.diag(acc))/ (len(acc[-1])-1)
# BWT calculated based on UCB paper (https://arxiv.org/abs/1906.02425)
ucb_bwt = (acc[-1] - np.diag(acc)).mean()
print ('BWT: {:5.2f}%'.format(gem_bwt))
# print ('BWT (UCB paper): {:5.2f}%'.format(ucb_bwt))
print('*'*100)
print('Done!')
logs = {}
# save results
logs['name'] = output_path
logs['taskcla'] = taskcla
logs['acc'] = acc
logs['loss'] = lss
logs['gem_bwt'] = gem_bwt
logs['ucb_bwt'] = ucb_bwt
logs['rii'] = np.diag(acc)
logs['rij'] = acc[-1]
# pickle
path = os.path.join(output_path, 'logs_run_id_{}.p'.format(run_id))
with open(path, 'wb') as output:
pickle.dump(logs, output)
print ("Log file saved in ", path)
return avg_acc, gem_bwt
def print_running_acc_bwt(acc, task_num):
print()
acc = acc[:task_num+1,:task_num+1]
avg_acc = np.mean(acc[acc.shape[0] - 1, :])
gem_bwt = sum(acc[-1] - np.diag(acc)) / (len(acc[-1]) - 1)
print('ACC: {:5.4f}% || BWT: {:5.2f}% '.format(avg_acc, gem_bwt))
print()
def make_directories(args):
uid = uuid.uuid4().hex
if args.checkpoint is None:
os.mkdir('checkpoints')
args.checkpoint = os.path.join('./checkpoints/',uid)
os.mkdir(args.checkpoint)
else:
if not os.path.exists(args.checkpoint):
os.mkdir(args.checkpoint)
args.checkpoint = os.path.join(args.checkpoint, uid)
os.mkdir(args.checkpoint)
def some_sanity_checks(args):
# Making sure the chosen experiment matches with the number of tasks performed in the paper:
datasets_tasks = {}
datasets_tasks['mnist5']=[5]
datasets_tasks['pmnist']=[10,20,30,40]
datasets_tasks['cifar100']=[20]
datasets_tasks['miniimagenet']=[20]
datasets_tasks['multidatasets']=[5]
if not args.ntasks in datasets_tasks[args.experiment]:
raise Exception("Chosen number of tasks ({}) does not match with {} experiment".format(args.ntasks,args.experiment))
    # Making sure the memory-usage flags are consistent:
if args.use_memory == 'yes' and not args.samples > 0:
raise Exception("Flags required to use memory: --use_memory yes --samples n where n>0")
if args.use_memory == 'no' and args.samples > 0:
raise Exception("Flags required to use memory: --use_memory yes --samples n where n>0")
def save_code(args):
cwd = os.getcwd()
des = os.path.join(args.checkpoint, 'code') + '/'
if not os.path.exists(des):
os.mkdir(des)
def get_folder(folder):
return os.path.join(cwd,folder)
folders = [get_folder(item) for item in ['dataloaders', 'networks', 'configs', 'main.py', 'acl.py', 'utils.py']]
for folder in folders:
call('cp -rf {} {}'.format(folder, des),shell=True)
def print_time():
from datetime import datetime
# datetime object containing current date and time
now = datetime.now()
# dd/mm/YY H:M:S
dt_string = now.strftime("%d/%m/%Y %H:%M:%S")
print("Job finished at =", dt_string)
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import os,argparse,time
import numpy as np
from omegaconf import OmegaConf
from copy import deepcopy
import torch
import torch.backends.cudnn as cudnn
import torch.optim
import torch.utils.data
import torch.utils.data.distributed
import utils
tstart=time.time()
# Arguments
parser = argparse.ArgumentParser(description='Adversarial Continual Learning...')
# Load the config file
parser.add_argument('--config', type=str, default='./configs/config_mnist5.yml')
flags = parser.parse_args()
args = OmegaConf.load(flags.config)
print()
########################################################################################################################
# Args -- Experiment
if args.experiment=='pmnist':
from dataloaders import pmnist as datagenerator
elif args.experiment=='mnist5':
from dataloaders import mnist5 as datagenerator
elif args.experiment=='cifar100':
from dataloaders import cifar100 as datagenerator
elif args.experiment=='miniimagenet':
from dataloaders import miniimagenet as datagenerator
elif args.experiment=='multidatasets':
from dataloaders import mulitidatasets as datagenerator
else:
raise NotImplementedError
from acl import ACL as approach
# Args -- Network
if args.experiment == 'mnist5' or args.experiment == 'pmnist':
from networks import mlp_acl as network
elif args.experiment == 'cifar100' or args.experiment == 'miniimagenet' or args.experiment == 'multidatasets':
if args.arch == 'alexnet':
from networks import alexnet_acl as network
elif args.arch == 'resnet':
from networks import resnet_acl as network
else:
raise NotImplementedError
else:
raise NotImplementedError
########################################################################################################################
def run(args, run_id):
np.random.seed(args.seed)
torch.manual_seed(args.seed)
if torch.cuda.is_available():
torch.cuda.manual_seed(args.seed)
# Faster run but not deterministic:
# torch.backends.cudnn.benchmark = True
# To get deterministic results that match with paper at cost of lower speed:
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
# Data loader
print('Instantiate data generators and model...')
dataloader = datagenerator.DatasetGen(args)
args.taskcla, args.inputsize = dataloader.taskcla, dataloader.inputsize
if args.experiment == 'multidatasets': args.lrs = dataloader.lrs
# Model
net = network.Net(args)
net = net.to(args.device)
net.print_model_size()
# print (net)
# Approach
appr=approach(net,args,network=network)
# Loop tasks
acc=np.zeros((len(args.taskcla),len(args.taskcla)),dtype=np.float32)
lss=np.zeros((len(args.taskcla),len(args.taskcla)),dtype=np.float32)
for t,ncla in args.taskcla:
print('*'*250)
dataset = dataloader.get(t)
print(' '*105, 'Dataset {:2d} ({:s})'.format(t+1,dataset[t]['name']))
print('*'*250)
# Train
appr.train(t,dataset[t])
print('-'*250)
print()
for u in range(t+1):
# Load previous model and replace the shared module with the current one
test_model = appr.load_model(u)
test_res = appr.test(dataset[u]['test'], u, model=test_model)
print('>>> Test on task {:2d} - {:15s}: loss={:.3f}, acc={:5.1f}% <<<'.format(u, dataset[u]['name'],
test_res['loss_t'],
test_res['acc_t']))
acc[t, u] = test_res['acc_t']
lss[t, u] = test_res['loss_t']
# Save
print()
print('Saved accuracies at '+os.path.join(args.checkpoint,args.output))
np.savetxt(os.path.join(args.checkpoint,args.output),acc,'%.6f')
# Extract embeddings to plot in tensorboard for miniimagenet
if args.tsne == 'yes' and args.experiment == 'miniimagenet':
appr.get_tsne_embeddings_first_ten_tasks(dataset, model=appr.load_model(t))
appr.get_tsne_embeddings_last_three_tasks(dataset, model=appr.load_model(t))
avg_acc, gem_bwt = utils.print_log_acc_bwt(args.taskcla, acc, lss, output_path=args.checkpoint, run_id=run_id)
return avg_acc, gem_bwt
#######################################################################################################################
def main(args):
utils.make_directories(args)
utils.some_sanity_checks(args)
utils.save_code(args)
print('=' * 100)
print('Arguments =')
for arg in vars(args):
print('\t' + arg + ':', getattr(args, arg))
print('=' * 100)
accuracies, forgetting = [], []
for n in range(args.num_runs):
args.seed = n
args.output = '{}_{}_tasks_seed_{}.txt'.format(args.experiment, args.ntasks, args.seed)
print ("args.output: ", args.output)
print (" >>>> Run #", n)
acc, bwt = run(args, n)
accuracies.append(acc)
forgetting.append(bwt)
print('*' * 100)
print ("Average over {} runs: ".format(args.num_runs))
print ('AVG ACC: {:5.4f}% \pm {:5.4f}'.format(np.array(accuracies).mean(), np.array(accuracies).std()))
print ('AVG BWT: {:5.2f}% \pm {:5.4f}'.format(np.array(forgetting).mean(), np.array(forgetting).std()))
print ("All Done! ")
print('[Elapsed time = {:.1f} min]'.format((time.time()-tstart)/(60)))
utils.print_time()
def test_trained_model(args, final_model_id):
args.seed = 0
print('Instantiate data generators and model...')
dataloader = datagenerator.DatasetGen(args)
args.taskcla, args.inputsize = dataloader.taskcla, dataloader.inputsize
if args.experiment == 'multidatasets': args.lrs = dataloader.lrs
def get_model(final_model_id, test_data_id):
# Load the test model
test_net = network.Net(args)
checkpoint_test = torch.load(os.path.join(args.checkpoint, 'model_{}.pth.tar'.format(test_data_id)))
test_net.load_state_dict(checkpoint_test['model_state_dict'])
# Load your final trained model
net = network.Net(args)
checkpoint = torch.load(os.path.join(args.checkpoint, 'model_{}.pth.tar'.format(final_model_id)))
net.load_state_dict(checkpoint['model_state_dict'])
        # Replace the shared module with the final model's shared module
final_shared = deepcopy(net.shared.state_dict())
test_net.shared.load_state_dict(final_shared)
test_net = test_net.to(args.device)
return test_net
for t,ncla in args.taskcla:
print('*'*250)
dataset = dataloader.get(t)
print(' '*105, 'Dataset {:2d} ({:s})'.format(t+1,dataset[t]['name']))
print('*'*250)
# Model
test_model = get_model(final_model_id, test_data_id=t)
# Approach
appr = approach(test_model, args, network=network)
# Test
test_res = appr.inference(dataset[t]['test'], t, model=test_model)
print('>>> Test on task {:2d} - {:15s}: loss={:.3f}, acc={:5.4f}% <<<'.format(t, dataset[t]['name'],
test_res['loss_t'],
test_res['acc_t']))
#######################################################################################################################
if __name__ == '__main__':
main(args)
# test_trained_model(args, final_model_id=4) |
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import print_function
from PIL import Image
import os
import os.path
import sys
if sys.version_info[0] == 2:
import cPickle as pickle
else:
import pickle
import torch.utils.data as data
import numpy as np
import torch
from torchvision import datasets, transforms
from utils import *
class iCIFAR10(datasets.CIFAR10):
def __init__(self, root, classes, memory_classes, memory, task_num, train, transform=None, target_transform=None, download=True):
super(iCIFAR10, self).__init__(root, transform=transform,
target_transform=target_transform, download=True)
self.train = train # training set or test set
if not isinstance(classes, list):
classes = [classes]
self.class_mapping = {c: i for i, c in enumerate(classes)}
self.class_indices = {}
for cls in classes:
self.class_indices[self.class_mapping[cls]] = []
if self.train:
train_data = []
train_labels = []
train_tt = [] # task module labels
            train_td = []  # discriminator labels
for i in range(len(self.data)):
if self.targets[i] in classes:
train_data.append(self.data[i])
train_labels.append(self.class_mapping[self.targets[i]])
train_tt.append(task_num)
train_td.append(task_num+1)
self.class_indices[self.class_mapping[self.targets[i]]].append(i)
if memory_classes:
for task_id in range(task_num):
for i in range(len(memory[task_id]['x'])):
if memory[task_id]['y'][i] in range(len(memory_classes[task_id])):
train_data.append(memory[task_id]['x'][i])
train_labels.append(memory[task_id]['y'][i])
train_tt.append(memory[task_id]['tt'][i])
train_td.append(memory[task_id]['td'][i])
self.train_data = np.array(train_data)
self.train_labels = train_labels
self.train_tt = train_tt
self.train_td = train_td
if not self.train:
f = self.test_list[0][0]
file = os.path.join(self.root, self.base_folder, f)
fo = open(file, 'rb')
if sys.version_info[0] == 2:
entry = pickle.load(fo)
else:
entry = pickle.load(fo, encoding='latin1')
self.test_data = entry['data']
if 'labels' in entry:
self.test_labels = entry['labels']
else:
self.test_labels = entry['fine_labels']
fo.close()
self.test_data = self.test_data.reshape((10000, 3, 32, 32))
self.test_data = self.test_data.transpose((0, 2, 3, 1)) # convert to HWC
test_data = []
test_labels = []
test_tt = [] # task module labels
            test_td = []  # discriminator labels
for i in range(len(self.test_data)):
if self.test_labels[i] in classes:
test_data.append(self.test_data[i])
test_labels.append(self.class_mapping[self.test_labels[i]])
test_tt.append(task_num)
test_td.append(task_num + 1)
self.class_indices[self.class_mapping[self.test_labels[i]]].append(i)
self.test_data = np.array(test_data)
self.test_labels = test_labels
self.test_tt = test_tt
self.test_td = test_td
def __getitem__(self, index):
if self.train:
img, target, tt, td = self.train_data[index], self.train_labels[index], self.train_tt[index], self.train_td[index]
else:
img, target, tt, td = self.test_data[index], self.test_labels[index], self.test_tt[index], self.test_td[index]
# doing this so that it is consistent with all other datasets
# to return a PIL Image
try:
img = Image.fromarray(img)
except:
pass
try:
if self.transform is not None:
img = self.transform(img)
except:
pass
try:
if self.target_transform is not None:
target = self.target_transform(target)
except:
pass
return img, target, tt, td
def __len__(self):
if self.train:
return len(self.train_data)
else:
return len(self.test_data)
class iCIFAR100(iCIFAR10):
"""`CIFAR100 <https://www.cs.toronto.edu/~kriz/cifar.html>`_ Dataset.
This is a subclass of the `CIFAR10` Dataset.
"""
base_folder = 'cifar-100-python'
url = "https://www.cs.toronto.edu/~kriz/cifar-100-python.tar.gz"
filename = "cifar-100-python.tar.gz"
tgz_md5 = 'eb9058c3a382ffc7106e4002c42a8d85'
train_list = [
['train', '16019d7e3df5f24257cddd939b257f8d'],
]
test_list = [
['test', 'f0ef6b0ae62326f3e7ffdfab6717acfc'],
]
meta = {
'filename': 'meta',
'key': 'fine_label_names',
'md5': '7973b15100ade9c7d40fb424638fde48',
}
class DatasetGen(object):
"""docstring for DatasetGen"""
def __init__(self, args):
super(DatasetGen, self).__init__()
self.seed = args.seed
self.batch_size=args.batch_size
self.pc_valid=args.pc_valid
self.root = args.data_dir
self.latent_dim = args.latent_dim
self.num_tasks = args.ntasks
self.num_classes = 100
self.num_samples = args.samples
self.inputsize = [3,32,32]
mean=[x/255 for x in [125.3,123.0,113.9]]
std=[x/255 for x in [63.0,62.1,66.7]]
self.transformation = transforms.Compose([transforms.ToTensor(), transforms.Normalize(mean, std)])
self.taskcla = [[t, int(self.num_classes/self.num_tasks)] for t in range(self.num_tasks)]
self.indices = {}
self.dataloaders = {}
self.idx={}
self.num_workers = args.workers
self.pin_memory = True
np.random.seed(self.seed)
task_ids = np.split(np.random.permutation(self.num_classes),self.num_tasks)
self.task_ids = [list(arr) for arr in task_ids]
self.train_set = {}
self.test_set = {}
self.train_split = {}
self.task_memory = {}
for i in range(self.num_tasks):
self.task_memory[i] = {}
self.task_memory[i]['x'] = []
self.task_memory[i]['y'] = []
self.task_memory[i]['tt'] = []
self.task_memory[i]['td'] = []
self.use_memory = args.use_memory
def get(self, task_id):
self.dataloaders[task_id] = {}
sys.stdout.flush()
if task_id == 0:
memory_classes = None
memory=None
else:
memory_classes = self.task_ids
memory = self.task_memory
self.train_set[task_id] = iCIFAR100(root=self.root, classes=self.task_ids[task_id], memory_classes=memory_classes,
memory=memory, task_num=task_id, train=True, download=True, transform=self.transformation)
self.test_set[task_id] = iCIFAR100(root=self.root, classes=self.task_ids[task_id], memory_classes=None,
memory=None, task_num=task_id, train=False,
download=True, transform=self.transformation)
split = int(np.floor(self.pc_valid * len(self.train_set[task_id])))
train_split, valid_split = torch.utils.data.random_split(self.train_set[task_id], [len(self.train_set[task_id]) - split, split])
self.train_split[task_id] = train_split
train_loader = torch.utils.data.DataLoader(train_split, batch_size=self.batch_size, num_workers=self.num_workers,
pin_memory=self.pin_memory,shuffle=True)
valid_loader = torch.utils.data.DataLoader(valid_split, batch_size=int(self.batch_size * self.pc_valid),
num_workers=self.num_workers, pin_memory=self.pin_memory,shuffle=True)
test_loader = torch.utils.data.DataLoader(self.test_set[task_id], batch_size=self.batch_size, num_workers=self.num_workers,
pin_memory=self.pin_memory,shuffle=True)
self.dataloaders[task_id]['train'] = train_loader
self.dataloaders[task_id]['valid'] = valid_loader
self.dataloaders[task_id]['test'] = test_loader
self.dataloaders[task_id]['name'] = 'CIFAR100-{}-{}'.format(task_id,self.task_ids[task_id])
print ("Training set size: {} images of {}x{}".format(len(train_loader.dataset),self.inputsize[1],self.inputsize[1]))
print ("Validation set size: {} images of {}x{}".format(len(valid_loader.dataset),self.inputsize[1],self.inputsize[1]))
print ("Train+Val set size: {} images of {}x{}".format(len(valid_loader.dataset)+len(train_loader.dataset),self.inputsize[1],self.inputsize[1]))
print ("Test set size: {} images of {}x{}".format(len(test_loader.dataset),self.inputsize[1],self.inputsize[1]))
if self.use_memory == 'yes' and self.num_samples > 0 :
self.update_memory(task_id)
return self.dataloaders
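    # Illustrative shape of the returned structure (grounded in the assignments above):
    #   dataloaders[task_id]['train'] -> DataLoader over the task's training split
    #   dataloaders[task_id]['valid'] -> DataLoader over the validation split
    #   dataloaders[task_id]['test']  -> DataLoader over the task's test set
    #   dataloaders[task_id]['name']  -> e.g. 'CIFAR100-3-[...class ids...]'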
def update_memory(self, task_id):
num_samples_per_class = self.num_samples // len(self.task_ids[task_id])
mem_class_mapping = {i: i for i, c in enumerate(self.task_ids[task_id])}
# Looping over each class in the current task
for i in range(len(self.task_ids[task_id])):
# Getting all samples for this class
data_loader = torch.utils.data.DataLoader(self.train_split[task_id], batch_size=1,
num_workers=self.num_workers,
pin_memory=self.pin_memory)
# Randomly choosing num_samples_per_class for this class
randind = torch.randperm(len(data_loader.dataset))[:num_samples_per_class]
# Adding the selected samples to memory
for ind in randind:
self.task_memory[task_id]['x'].append(data_loader.dataset[ind][0])
self.task_memory[task_id]['y'].append(mem_class_mapping[i])
self.task_memory[task_id]['tt'].append(data_loader.dataset[ind][2])
self.task_memory[task_id]['td'].append(data_loader.dataset[ind][3])
print ('Memory updated by adding {} images'.format(len(self.task_memory[task_id]['x'])))
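# Layout of the episodic memory after update_memory(t) (illustrative, based on the
# appends above):
#   task_memory[t]['x']  -> images sampled from task t's training split
#   task_memory[t]['y']  -> labels remapped to [0, classes_per_task)
#   task_memory[t]['tt'] -> task-module labels (== t)
#   task_memory[t]['td'] -> discriminator labels (== t + 1)
# These entries are handed back to iCIFAR100 as `memory` for every later task, where
# the stored samples are appended to that task's training data.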
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
# https://github.com/pytorch/vision/blob/8635be94d1216f10fb8302da89233bd86445e449/torchvision/datasets/utils.py
import os
import os.path
import hashlib
import gzip
import errno
import tarfile
import zipfile
import numpy as np
import torch
import codecs
from torch.utils.model_zoo import tqdm
def gen_bar_updater():
pbar = tqdm(total=None)
def bar_update(count, block_size, total_size):
if pbar.total is None and total_size:
pbar.total = total_size
progress_bytes = count * block_size
pbar.update(progress_bytes - pbar.n)
return bar_update
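# bar_update matches the urllib.request.urlretrieve reporthook signature: it is
# called with (number of blocks transferred so far, block size in bytes, total size).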
def calculate_md5(fpath, chunk_size=1024 * 1024):
md5 = hashlib.md5()
with open(fpath, 'rb') as f:
for chunk in iter(lambda: f.read(chunk_size), b''):
md5.update(chunk)
return md5.hexdigest()
def check_md5(fpath, md5, **kwargs):
return md5 == calculate_md5(fpath, **kwargs)
def check_integrity(fpath, md5=None):
if not os.path.isfile(fpath):
return False
if md5 is None:
return True
return check_md5(fpath, md5)
def makedir_exist_ok(dirpath):
"""
Python2 support for os.makedirs(.., exist_ok=True)
"""
try:
os.makedirs(dirpath)
except OSError as e:
if e.errno == errno.EEXIST:
pass
else:
raise
def download_url(url, root, filename=None, md5=None):
"""Download a file from a url and place it in root.
Args:
url (str): URL to download file from
root (str): Directory to place downloaded file in
filename (str, optional): Name to save the file under. If None, use the basename of the URL
md5 (str, optional): MD5 checksum of the download. If None, do not check
"""
from six.moves import urllib
root = os.path.expanduser(root)
if not filename:
filename = os.path.basename(url)
fpath = os.path.join(root, filename)
makedir_exist_ok(root)
# downloads file
if check_integrity(fpath, md5):
print('Using downloaded and verified file: ' + fpath)
else:
try:
print('Downloading ' + url + ' to ' + fpath)
urllib.request.urlretrieve(
url, fpath,
reporthook=gen_bar_updater()
)
except (urllib.error.URLError, IOError) as e:
if url[:5] == 'https':
url = url.replace('https:', 'http:')
print('Failed download. Trying https -> http instead.'
' Downloading ' + url + ' to ' + fpath)
urllib.request.urlretrieve(
url, fpath,
reporthook=gen_bar_updater()
)
else:
raise e
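# Example usage (illustrative; the URL and checksum below are the CIFAR-100 values
# used elsewhere in this repository):
#
#   download_url("https://www.cs.toronto.edu/~kriz/cifar-100-python.tar.gz",
#                root="./data", filename="cifar-100-python.tar.gz",
#                md5="eb9058c3a382ffc7106e4002c42a8d85")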
def list_dir(root, prefix=False):
"""List all directories at a given root
Args:
root (str): Path to directory whose folders need to be listed
prefix (bool, optional): If true, prepends the path to each result, otherwise
only returns the name of the directories found
"""
root = os.path.expanduser(root)
directories = list(
filter(
lambda p: os.path.isdir(os.path.join(root, p)),
os.listdir(root)
)
)
if prefix is True:
directories = [os.path.join(root, d) for d in directories]
return directories
def list_files(root, suffix, prefix=False):
"""List all files ending with a suffix at a given root
Args:
root (str): Path to directory whose folders need to be listed
suffix (str or tuple): Suffix of the files to match, e.g. '.png' or ('.jpg', '.png').
It uses the Python "str.endswith" method and is passed directly
prefix (bool, optional): If true, prepends the path to each result, otherwise
only returns the name of the files found
"""
root = os.path.expanduser(root)
files = list(
filter(
lambda p: os.path.isfile(os.path.join(root, p)) and p.endswith(suffix),
os.listdir(root)
)
)
if prefix is True:
files = [os.path.join(root, d) for d in files]
return files
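# Example usage (illustrative paths):
#   subdirs = list_dir("./data", prefix=True)                 # full paths to sub-folders
#   pngs = list_files("./data", suffix=".png", prefix=True)   # only files ending in .png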
def download_file_from_google_drive(file_id, root, filename=None, md5=None):
"""Download a Google Drive file from and place it in root.
Args:
file_id (str): id of file to be downloaded
root (str): Directory to place downloaded file in
filename (str, optional): Name to save the file under. If None, use the id of the file.
md5 (str, optional): MD5 checksum of the download. If None, do not check
"""
# Based on https://stackoverflow.com/questions/38511444/python-download-files-from-google-drive-using-url
import requests
url = "https://docs.google.com/uc?export=download"
root = os.path.expanduser(root)
if not filename:
filename = file_id
fpath = os.path.join(root, filename)
makedir_exist_ok(root)
if os.path.isfile(fpath) and check_integrity(fpath, md5):
print('Using downloaded and verified file: ' + fpath)
else:
session = requests.Session()
response = session.get(url, params={'id': file_id}, stream=True)
token = _get_confirm_token(response)
if token:
params = {'id': file_id, 'confirm': token}
response = session.get(url, params=params, stream=True)
_save_response_content(response, fpath)
def _get_confirm_token(response):
for key, value in response.cookies.items():
if key.startswith('download_warning'):
return value
return None
def _save_response_content(response, destination, chunk_size=32768):
with open(destination, "wb") as f:
pbar = tqdm(total=None)
progress = 0
for chunk in response.iter_content(chunk_size):
if chunk: # filter out keep-alive new chunks
f.write(chunk)
progress += len(chunk)
pbar.update(progress - pbar.n)
pbar.close()
def _is_tar(filename):
return filename.endswith(".tar")
def _is_targz(filename):
return filename.endswith(".tar.gz")
def _is_gzip(filename):
return filename.endswith(".gz") and not filename.endswith(".tar.gz")
def _is_zip(filename):
return filename.endswith(".zip")
def extract_archive(from_path, to_path=None, remove_finished=False):
if to_path is None:
to_path = os.path.dirname(from_path)
if _is_tar(from_path):
with tarfile.open(from_path, 'r') as tar:
tar.extractall(path=to_path)
elif _is_targz(from_path):
with tarfile.open(from_path, 'r:gz') as tar:
tar.extractall(path=to_path)
elif _is_gzip(from_path):
to_path = os.path.join(to_path, os.path.splitext(os.path.basename(from_path))[0])
with open(to_path, "wb") as out_f, gzip.GzipFile(from_path) as zip_f:
out_f.write(zip_f.read())
elif _is_zip(from_path):
with zipfile.ZipFile(from_path, 'r') as z:
z.extractall(to_path)
else:
raise ValueError("Extraction of {} not supported".format(from_path))
if remove_finished:
os.remove(from_path)
def download_and_extract_archive(url, download_root, extract_root=None, filename=None,
md5=None, remove_finished=False):
download_root = os.path.expanduser(download_root)
if extract_root is None:
extract_root = download_root
if not filename:
filename = os.path.basename(url)
download_url(url, download_root, filename, md5)
archive = os.path.join(download_root, filename)
print("Extracting {} to {}".format(archive, extract_root))
extract_archive(archive, extract_root, remove_finished)
def iterable_to_str(iterable):
return "'" + "', '".join([str(item) for item in iterable]) + "'"
def verify_str_arg(value, arg=None, valid_values=None, custom_msg=None):
if not isinstance(value, torch._six.string_classes):
if arg is None:
msg = "Expected type str, but got type {type}."
else:
msg = "Expected type str for argument {arg}, but got type {type}."
msg = msg.format(type=type(value), arg=arg)
raise ValueError(msg)
if valid_values is None:
return value
if value not in valid_values:
if custom_msg is not None:
msg = custom_msg
else:
msg = ("Unknown value '{value}' for argument {arg}. "
"Valid values are {{{valid_values}}}.")
msg = msg.format(value=value, arg=arg,
valid_values=iterable_to_str(valid_values))
raise ValueError(msg)
return value
def get_int(b):
return int(codecs.encode(b, 'hex'), 16)
def open_maybe_compressed_file(path):
"""Return a file object that possibly decompresses 'path' on the fly.
Decompression occurs when argument `path` is a string and ends with '.gz' or '.xz'.
"""
if not isinstance(path, torch._six.string_classes):
return path
if path.endswith('.gz'):
import gzip
return gzip.open(path, 'rb')
if path.endswith('.xz'):
import lzma
return lzma.open(path, 'rb')
return open(path, 'rb')
def read_sn3_pascalvincent_tensor(path, strict=True):
"""Read a SN3 file in "Pascal Vincent" format (Lush file 'libidx/idx-io.lsh').
Argument may be a filename, compressed filename, or file object.
"""
# typemap
if not hasattr(read_sn3_pascalvincent_tensor, 'typemap'):
read_sn3_pascalvincent_tensor.typemap = {
8: (torch.uint8, np.uint8, np.uint8),
9: (torch.int8, np.int8, np.int8),
11: (torch.int16, np.dtype('>i2'), 'i2'),
12: (torch.int32, np.dtype('>i4'), 'i4'),
13: (torch.float32, np.dtype('>f4'), 'f4'),
14: (torch.float64, np.dtype('>f8'), 'f8')}
# read
with open_maybe_compressed_file(path) as f:
data = f.read()
# parse
magic = get_int(data[0:4])
nd = magic % 256
ty = magic // 256
assert nd >= 1 and nd <= 3
assert ty >= 8 and ty <= 14
m = read_sn3_pascalvincent_tensor.typemap[ty]
s = [get_int(data[4 * (i + 1): 4 * (i + 2)]) for i in range(nd)]
parsed = np.frombuffer(data, dtype=m[1], offset=(4 * (nd + 1)))
assert parsed.shape[0] == np.prod(s) or not strict
return torch.from_numpy(parsed.astype(m[2], copy=False)).view(*s)
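# File layout parsed above (as implied by the code): the first 4 bytes are a big-endian
# magic number whose low byte is the number of dimensions (1-3) and whose high byte
# selects the dtype via `typemap`; the next `nd` 4-byte big-endian integers give the
# dimension sizes; the remaining bytes are the raw array data.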
def read_label_file(path):
with open(path, 'rb') as f:
x = read_sn3_pascalvincent_tensor(f, strict=False)
assert(x.dtype == torch.uint8)
assert(x.ndimension() == 1)
return x.long()
def read_image_file(path):
with open(path, 'rb') as f:
x = read_sn3_pascalvincent_tensor(f, strict=False)
assert(x.dtype == torch.uint8)
assert(x.ndimension() == 3)
return x |
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import print_function
from PIL import Image
import os
import os.path
import sys
if sys.version_info[0] == 2:
import cPickle as pickle
else:
import pickle
import torch.utils.data as data
import numpy as np
import torch
from torchvision import transforms
from utils import *
class MiniImageNet(torch.utils.data.Dataset):
def __init__(self, root, train):
super(MiniImageNet, self).__init__()
if train:
self.name='train'
else:
self.name='test'
root = os.path.join(root, 'miniimagenet')
with open(os.path.join(root,'{}.pkl'.format(self.name)), 'rb') as f:
data_dict = pickle.load(f)
self.data = data_dict['images']
self.labels = data_dict['labels']
def __len__(self):
return len(self.data)
def __getitem__(self, i):
img, label = self.data[i], self.labels[i]
return img, label
class iMiniImageNet(MiniImageNet):
def __init__(self, root, classes, memory_classes, memory, task_num, train, transform=None):
super(iMiniImageNet, self).__init__(root=root, train=train)
self.transform = transform
if not isinstance(classes, list):
classes = [classes]
self.class_mapping = {c: i for i, c in enumerate(classes)}
self.class_indices = {}
for cls in classes:
self.class_indices[self.class_mapping[cls]] = []
data = []
labels = []
tt = [] # task module labels
        td = []  # discriminator labels
for i in range(len(self.data)):
if self.labels[i] in classes:
data.append(self.data[i])
labels.append(self.class_mapping[self.labels[i]])
tt.append(task_num)
td.append(task_num+1)
self.class_indices[self.class_mapping[self.labels[i]]].append(i)
if memory_classes:
for task_id in range(task_num):
for i in range(len(memory[task_id]['x'])):
if memory[task_id]['y'][i] in range(len(memory_classes[task_id])):
data.append(memory[task_id]['x'][i])
labels.append(memory[task_id]['y'][i])
tt.append(memory[task_id]['tt'][i])
td.append(memory[task_id]['td'][i])
self.data = np.array(data)
self.labels = labels
self.tt = tt
self.td = td
def __getitem__(self, index):
img, target, tt, td = self.data[index], self.labels[index], self.tt[index], self.td[index]
# doing this so that it is consistent with all other datasets
# to return a PIL Image
if not torch.is_tensor(img):
img = Image.fromarray(img)
img = self.transform(img)
return img, target, tt, td
def __len__(self):
return len(self.data)
class DatasetGen(object):
"""docstring for DatasetGen"""
def __init__(self, args):
super(DatasetGen, self).__init__()
self.seed = args.seed
self.batch_size=args.batch_size
self.pc_valid=args.pc_valid
self.root = args.data_dir
self.latent_dim = args.latent_dim
self.use_memory = args.use_memory
self.num_tasks = args.ntasks
self.num_classes = 100
self.num_samples = args.samples
self.inputsize = [3,84,84]
mean = [0.485, 0.456, 0.406]
std = [0.229, 0.224, 0.225]
self.transformation = transforms.Compose([
transforms.Resize((84,84)),
transforms.ToTensor(),
transforms.Normalize(mean=mean, std=std)])
self.taskcla = [[t, int(self.num_classes/self.num_tasks)] for t in range(self.num_tasks)]
self.indices = {}
self.dataloaders = {}
self.idx={}
self.num_workers = args.workers
self.pin_memory = True
np.random.seed(self.seed)
task_ids = np.split(np.random.permutation(self.num_classes),self.num_tasks)
self.task_ids = [list(arr) for arr in task_ids]
self.train_set = {}
self.train_split = {}
self.test_set = {}
self.task_memory = {}
for i in range(self.num_tasks):
self.task_memory[i] = {}
self.task_memory[i]['x'] = []
self.task_memory[i]['y'] = []
self.task_memory[i]['tt'] = []
self.task_memory[i]['td'] = []
def get(self, task_id):
self.dataloaders[task_id] = {}
sys.stdout.flush()
if task_id == 0:
memory_classes = None
memory=None
else:
memory_classes = self.task_ids
memory = self.task_memory
self.train_set[task_id] = iMiniImageNet(root=self.root, classes=self.task_ids[task_id],
memory_classes=memory_classes, memory=memory,
task_num=task_id, train=True, transform=self.transformation)
self.test_set[task_id] = iMiniImageNet(root=self.root, classes=self.task_ids[task_id], memory_classes=None,
memory=None, task_num=task_id, train=False, transform=self.transformation)
split = int(np.floor(self.pc_valid * len(self.train_set[task_id])))
train_split, valid_split = torch.utils.data.random_split(self.train_set[task_id], [len(self.train_set[task_id]) - split, split])
self.train_split[task_id] = train_split
train_loader = torch.utils.data.DataLoader(train_split, batch_size=self.batch_size, num_workers=self.num_workers,
pin_memory=self.pin_memory,shuffle=True)
valid_loader = torch.utils.data.DataLoader(valid_split, batch_size=int(self.batch_size * self.pc_valid),
num_workers=self.num_workers, pin_memory=self.pin_memory,shuffle=True)
test_loader = torch.utils.data.DataLoader(self.test_set[task_id], batch_size=self.batch_size, num_workers=self.num_workers,
pin_memory=self.pin_memory, shuffle=True)
self.dataloaders[task_id]['train'] = train_loader
self.dataloaders[task_id]['valid'] = valid_loader
self.dataloaders[task_id]['test'] = test_loader
self.dataloaders[task_id]['name'] = 'iMiniImageNet-{}-{}'.format(task_id,self.task_ids[task_id])
self.dataloaders[task_id]['tsne'] = torch.utils.data.DataLoader(self.test_set[task_id],
batch_size=len(test_loader.dataset),
num_workers=self.num_workers,
pin_memory=self.pin_memory, shuffle=True)
print ("Task ID: ", task_id)
print ("Training set size: {} images of {}x{}".format(len(train_loader.dataset),self.inputsize[1],self.inputsize[1]))
print ("Validation set size: {} images of {}x{}".format(len(valid_loader.dataset),self.inputsize[1],self.inputsize[1]))
print ("Train+Val set size: {} images of {}x{}".format(len(valid_loader.dataset)+len(train_loader.dataset),self.inputsize[1],self.inputsize[1]))
print ("Test set size: {} images of {}x{}".format(len(test_loader.dataset),self.inputsize[1],self.inputsize[1]))
if self.use_memory == 'yes' and self.num_samples > 0 :
self.update_memory(task_id)
return self.dataloaders
def update_memory(self, task_id):
num_samples_per_class = self.num_samples // len(self.task_ids[task_id])
mem_class_mapping = {i: i for i, c in enumerate(self.task_ids[task_id])}
for i in range(len(self.task_ids[task_id])):
data_loader = torch.utils.data.DataLoader(self.train_split[task_id], batch_size=1,
num_workers=self.num_workers,
pin_memory=self.pin_memory)
randind = torch.randperm(len(data_loader.dataset))[:num_samples_per_class] # randomly sample some data
for ind in randind:
self.task_memory[task_id]['x'].append(data_loader.dataset[ind][0])
self.task_memory[task_id]['y'].append(mem_class_mapping[i])
self.task_memory[task_id]['tt'].append(data_loader.dataset[ind][2])
self.task_memory[task_id]['td'].append(data_loader.dataset[ind][3])
print ('Memory updated by adding {} images'.format(len(self.task_memory[task_id]['x']))) |
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import print_function
import os.path
import sys
import warnings
import urllib.request
if sys.version_info[0] == 2:
import cPickle as pickle
else:
import pickle
import torch.utils.data as data
import numpy as np
import torch
from torchvision import datasets, transforms
from .utils import *
# from scipy.imageio import imread
import pandas as pd
import os
import torch
from PIL import Image
import scipy.io as sio
from collections import defaultdict
from itertools import chain
from collections import OrderedDict
class CIFAR10_(datasets.CIFAR10):
base_folder = 'cifar-10-batches-py'
url = "https://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz"
filename = "cifar-10-python.tar.gz"
tgz_md5 = 'c58f30108f718f92721af3b95e74349a'
train_list = [
['data_batch_1', 'c99cafc152244af753f735de768cd75f'],
['data_batch_2', 'd4bba439e000b95fd0a9bffe97cbabec'],
['data_batch_3', '54ebc095f3ab1f0389bbae665268c751'],
['data_batch_4', '634d18415352ddfa80567beed471001a'],
['data_batch_5', '482c414d41f54cd18b22e5b47cb7c3cb'],
]
test_list = [
['test_batch', '40351d587109b95175f43aff81a1287e'],
]
meta = {
'filename': 'batches.meta',
'key': 'label_names',
'md5': '5ff9c542aee3614f3951f8cda6e48888',
}
num_classes = 10
def __init__(self, root, task_num, num_samples_per_class, train, transform, target_transform, download=True):
# root, task_num, train, transform = None, download = False):
super(CIFAR10_, self).__init__(root, task_num, transform=transform,
target_transform=target_transform,
download=download)
# print(self.train)
# self.train = train # training set or test set
self.train = train # training set or test set
self.transform = transform
self.target_transform=target_transform
if download:
self.download()
if not self._check_integrity():
raise RuntimeError('Dataset not found or corrupted.' +
' You can use download=True to download it')
if self.train:
downloaded_list = self.train_list
else:
downloaded_list = self.test_list
if not num_samples_per_class:
self.data = []
self.targets = []
# now load the picked numpy arrays
for file_name, checksum in downloaded_list:
file_path = os.path.join(self.root, self.base_folder, file_name)
with open(file_path, 'rb') as f:
if sys.version_info[0] == 2:
entry = pickle.load(f)
else:
entry = pickle.load(f, encoding='latin1')
self.data.append(entry['data'])
if 'labels' in entry:
self.targets.extend(entry['labels'])
else:
self.targets.extend(entry['fine_labels'])
else:
x, y, tt, td = [], [], [], []
for l in range(self.num_classes):
indices_with_label_l = np.where(np.array(self.targets)==l)
x_with_label_l = [self.data[item] for item in indices_with_label_l[0]]
y_with_label_l = [l]*len(x_with_label_l)
                # When num_samples_per_class is set, keep only that many randomly chosen samples of this class
shuffled_indices = np.random.permutation(len(x_with_label_l))[:num_samples_per_class]
x_with_label_l = [x_with_label_l[item] for item in shuffled_indices]
y_with_label_l = [y_with_label_l[item] for item in shuffled_indices]
x.append(x_with_label_l)
y.append(y_with_label_l)
self.data = np.array(sum(x,[]))
self.targets = sum(y,[])
self.data = np.vstack(self.data).reshape(-1, 3, 32, 32)
self.data = self.data.transpose((0, 2, 3, 1)) # convert to HWC
self.tt = [task_num for _ in range(len(self.data))]
self.td = [task_num + 1 for _ in range(len(self.data))]
self._load_meta()
def __getitem__(self, index):
img, target, tt, td = self.data[index], self.targets[index], self.tt[index], self.td[index]
# doing this so that it is consistent with all other datasets
# to return a PIL Image
try:
img = Image.fromarray(img)
except:
pass
try:
if self.transform is not None: img = self.transform(img)
except:
pass
try:
if self.target_transform is not None: tt = self.target_transform(tt)
if self.target_transform is not None: td = self.target_transform(td)
except:
pass
return img, target, tt, td
def __len__(self):
# if self.train:
return len(self.data)
# else:
# return len(self.test_data)
def report_size(self):
print("CIFAR10 size at train={} time: {} ".format(self.train,self.__len__()))
def _load_meta(self):
path = os.path.join(self.root, self.base_folder, self.meta['filename'])
if not check_integrity(path, self.meta['md5']):
raise RuntimeError('Dataset metadata file not found or corrupted.' +
' You can use download=True to download it')
with open(path, 'rb') as infile:
if sys.version_info[0] == 2:
data = pickle.load(infile)
else:
data = pickle.load(infile, encoding='latin1')
self.classes = data[self.meta['key']]
self.class_to_idx = {_class: i for i, _class in enumerate(self.classes)}
class CIFAR100_(CIFAR10_):
"""`CIFAR100 <https://www.cs.toronto.edu/~kriz/cifar.html>`_ Dataset.
This is a subclass of the `CIFAR10` Dataset.
"""
base_folder = 'cifar-100-python'
url = "https://www.cs.toronto.edu/~kriz/cifar-100-python.tar.gz"
filename = "cifar-100-python.tar.gz"
tgz_md5 = 'eb9058c3a382ffc7106e4002c42a8d85'
train_list = [
['train', '16019d7e3df5f24257cddd939b257f8d'],
]
test_list = [
['test', 'f0ef6b0ae62326f3e7ffdfab6717acfc'],
]
meta = {
'filename': 'meta',
'key': 'fine_label_names',
'md5': '7973b15100ade9c7d40fb424638fde48',
}
num_classes = 100
class SVHN_(torch.utils.data.Dataset):
url = ""
filename = ""
file_md5 = ""
split_list = {
'train': ["http://ufldl.stanford.edu/housenumbers/train_32x32.mat",
"train_32x32.mat", "e26dedcc434d2e4c54c9b2d4a06d8373"],
'test': ["http://ufldl.stanford.edu/housenumbers/test_32x32.mat",
"test_32x32.mat", "eb5a983be6a315427106f1b164d9cef3"],
'extra': ["http://ufldl.stanford.edu/housenumbers/extra_32x32.mat",
"extra_32x32.mat", "a93ce644f1a588dc4d68dda5feec44a7"]}
def __init__(self, root, task_num, num_samples_per_class, train,transform=None, target_transform=None, download=True):
self.root = os.path.expanduser(root)
# root, task_num, train, transform = None, download = False):
# print(self.train)
# self.train = train # training set or test set
self.train = train # training set or test set
self.transform = transform
self.target_transform=target_transform
if self.train:
split="train"
else:
split="test"
self.num_classes = 10
self.split = verify_str_arg(split, "split", tuple(self.split_list.keys()))
self.url = self.split_list[split][0]
self.filename = self.split_list[split][1]
self.file_md5 = self.split_list[split][2]
if download:
self.download()
if not self._check_integrity():
raise RuntimeError('Dataset not found or corrupted.' +
' You can use download=True to download it')
# import here rather than at top of file because this is
# an optional dependency for torchvision
import scipy.io as sio
# reading(loading) mat file as array
loaded_mat = sio.loadmat(os.path.join(self.root, self.filename))
self.data = loaded_mat['X']
# loading from the .mat file gives an np array of type np.uint8
# converting to np.int64, so that we have a LongTensor after
# the conversion from the numpy array
# the squeeze is needed to obtain a 1D tensor
self.targets = loaded_mat['y'].astype(np.int64).squeeze()
self.data = np.transpose(self.data, (3, 2, 0, 1))
if num_samples_per_class:
x, y, tt, td = [], [], [], []
            for l in range(self.num_classes+1):  # SVHN labels run 1..10 (10 encodes digit 0; remapped below)
indices_with_label_l = np.where(np.array(self.targets)==l)
x_with_label_l = [self.data[item] for item in indices_with_label_l[0]]
y_with_label_l = [l]*len(x_with_label_l)
                # When num_samples_per_class is set, keep only that many randomly chosen samples of this class
shuffled_indices = np.random.permutation(len(x_with_label_l))[:num_samples_per_class]
x_with_label_l = [x_with_label_l[item] for item in shuffled_indices]
y_with_label_l = [y_with_label_l[item] for item in shuffled_indices]
x.append(x_with_label_l)
y.append(y_with_label_l)
self.data = np.array(sum(x,[]))
self.targets = np.array(sum(y,[])).astype(np.int64)
# the svhn dataset assigns the class label "10" to the digit 0
# this makes it inconsistent with several loss functions
# which expect the class labels to be in the range [0, C-1]
np.place(self.targets, self.targets == 10, 0)
# print ("svhn: ", self.data.shape)
self.tt = [task_num for _ in range(len(self.data))]
self.td = [task_num+1 for _ in range(len(self.data))]
def __getitem__(self, index):
"""
Args:
index (int): Index
Returns:
tuple: (image, target) where target is index of the target class.
"""
img, target, tt, td = self.data[index], int(self.targets[index]), self.tt[index], self.td[index]
# doing this so that it is consistent with all other datasets
# to return a PIL Image
try:
img = Image.fromarray(np.transpose(img, (1, 2, 0)))
except:
pass
try:
if self.transform is not None: img = self.transform(img)
except:
pass
try:
if self.target_transform is not None: tt = self.target_transform(tt)
if self.target_transform is not None: td = self.target_transform(td)
except:
pass
return img, target, tt, td
def __len__(self):
return len(self.data)
def _check_integrity(self):
root = self.root
md5 = self.split_list[self.split][2]
fpath = os.path.join(root, self.filename)
return check_integrity(fpath, md5)
def download(self):
md5 = self.split_list[self.split][2]
download_url(self.url, self.root, self.filename, md5)
def extra_repr(self):
return "Split: {split}".format(**self.__dict__)
class MNIST_RGB(datasets.MNIST):
def __init__(self, root, task_num, num_samples_per_class, train=True, transform=None, target_transform=None, download=False):
super(MNIST_RGB, self).__init__(root, task_num, transform=transform,
target_transform=target_transform,
download=download)
self.train = train # training set or test set
self.target_transform=target_transform
self.transform=transform
self.num_classes=10
if download:
self.download()
if not self._check_exists():
raise RuntimeError('Dataset not found.' +
' You can use download=True to download it')
if self.train:
data_file = self.training_file
else:
data_file = self.test_file
# self.data, self.targets = torch.load(os.path.join(self.processed_folder, data_file))
self.data, self.targets = torch.load(os.path.join(self.processed_folder, data_file))
self.data=np.array(self.data).astype(np.float32)
self.targets=list(np.array(self.targets))
if num_samples_per_class:
x, y, tt, td = [], [], [], []
for l in range(self.num_classes):
indices_with_label_l = np.where(np.array(self.targets)==l)
x_with_label_l = [self.data[item] for item in indices_with_label_l[0]]
# y_with_label_l = [l]*len(x_with_label_l)
                # When num_samples_per_class is set, keep only that many randomly chosen samples of this class
shuffled_indices = np.random.permutation(len(x_with_label_l))[:num_samples_per_class]
x_with_label_l = [x_with_label_l[item] for item in shuffled_indices]
y_with_label_l = [l]*len(shuffled_indices)
x.append(x_with_label_l)
y.append(y_with_label_l)
self.data = np.array(sum(x,[]))
self.targets = sum(y,[])
self.tt = [task_num for _ in range(len(self.data))]
self.td = [task_num+1 for _ in range(len(self.data))]
def __getitem__(self, index):
"""
Args:
index (int): Index
Returns:
tuple: (image, target) where target is index of the target class.
"""
img, target, tt, td = self.data[index], int(self.targets[index]), self.tt[index], self.td[index]
# doing this so that it is consistent with all other datasets
# to return a PIL Image
try:
img = Image.fromarray(img, mode='L').convert('RGB')
except:
pass
try:
if self.transform is not None: img = self.transform(img)
except:
pass
try:
if self.target_transform is not None: tt = self.target_transform(tt)
if self.target_transform is not None: td = self.target_transform(td)
except:
pass
return img, target, tt, td
def __len__(self):
return len(self.data)
@property
def raw_folder(self):
return os.path.join(self.root, self.__class__.__name__, 'raw')
@property
def processed_folder(self):
return os.path.join(self.root, self.__class__.__name__, 'processed')
@property
def class_to_idx(self):
return {_class: i for i, _class in enumerate(self.classes)}
def _check_exists(self):
return (os.path.exists(os.path.join(self.processed_folder,
self.training_file)) and
os.path.exists(os.path.join(self.processed_folder,
self.test_file)))
def download(self):
"""Download the MNIST data if it doesn't exist in processed_folder already."""
if self._check_exists():
return
makedir_exist_ok(self.raw_folder)
makedir_exist_ok(self.processed_folder)
# download files
for url in self.urls:
filename = url.rpartition('/')[2]
download_and_extract_archive(url, download_root=self.raw_folder, filename=filename)
# process and save as torch files
print('Processing...')
training_set = (
read_image_file(os.path.join(self.raw_folder, 'train-images-idx3-ubyte')),
read_label_file(os.path.join(self.raw_folder, 'train-labels-idx1-ubyte'))
)
test_set = (
read_image_file(os.path.join(self.raw_folder, 't10k-images-idx3-ubyte')),
read_label_file(os.path.join(self.raw_folder, 't10k-labels-idx1-ubyte'))
)
with open(os.path.join(self.processed_folder, self.training_file), 'wb') as f:
torch.save(training_set, f)
with open(os.path.join(self.processed_folder, self.test_file), 'wb') as f:
torch.save(test_set, f)
print('Done!')
def extra_repr(self):
return "Split: {}".format("Train" if self.train is True else "Test")
class FashionMNIST_(MNIST_RGB):
"""`Fashion MNIST <https://github.com/zalandoresearch/fashion-mnist>`_ Dataset.
"""
urls = [
'http://fashion-mnist.s3-website.eu-central-1.amazonaws.com/train-images-idx3-ubyte.gz',
'http://fashion-mnist.s3-website.eu-central-1.amazonaws.com/train-labels-idx1-ubyte.gz',
'http://fashion-mnist.s3-website.eu-central-1.amazonaws.com/t10k-images-idx3-ubyte.gz',
'http://fashion-mnist.s3-website.eu-central-1.amazonaws.com/t10k-labels-idx1-ubyte.gz',
]
class notMNIST_(torch.utils.data.Dataset):
def __init__(self, root, task_num, num_samples_per_class, train,transform=None, target_transform=None, download=False):
self.root = os.path.expanduser(root)
self.transform = transform
self.target_transform=target_transform
self.train = train
self.url = "https://github.com/facebookresearch/Adversarial-Continual-Learning/raw/master/data/notMNIST.zip"
self.filename = 'notMNIST.zip'
fpath = os.path.join(root, self.filename)
if not os.path.isfile(fpath):
if not download:
raise RuntimeError('Dataset not found. You can use download=True to download it')
else:
print('Downloading from '+self.url)
download_url(self.url, root, filename=self.filename)
import zipfile
zip_ref = zipfile.ZipFile(fpath, 'r')
zip_ref.extractall(root)
zip_ref.close()
if self.train:
fpath = os.path.join(root, 'notMNIST', 'Train')
else:
fpath = os.path.join(root, 'notMNIST', 'Test')
X, Y = [], []
folders = os.listdir(fpath)
for folder in folders:
folder_path = os.path.join(fpath, folder)
for ims in os.listdir(folder_path):
try:
img_path = os.path.join(folder_path, ims)
X.append(np.array(Image.open(img_path).convert('RGB')))
Y.append(ord(folder) - 65) # Folders are A-J so labels will be 0-9
except:
print("File {}/{} is broken".format(folder, ims))
self.data = np.array(X)
self.targets = Y
self.num_classes = len(set(self.targets))
if num_samples_per_class:
x, y, tt, td = [], [], [], []
for l in range(self.num_classes):
indices_with_label_l = np.where(np.array(self.targets)==l)
x_with_label_l = [self.data[item] for item in indices_with_label_l[0]]
                # When num_samples_per_class is set, keep only that many randomly chosen samples of this class
shuffled_indices = np.random.permutation(len(x_with_label_l))[:num_samples_per_class]
x_with_label_l = [x_with_label_l[item] for item in shuffled_indices]
y_with_label_l = [l]*len(shuffled_indices)
x.append(x_with_label_l)
y.append(y_with_label_l)
self.data = np.array(sum(x,[]))
            self.targets = sum(y, [])  # keep the subsampled labels (matches __getitem__, which reads self.targets)
self.tt = [task_num for _ in range(len(self.data))]
self.td = [task_num + 1 for _ in range(len(self.data))]
def __getitem__(self, index):
img, target, tt, td = self.data[index], self.targets[index], self.tt[index], self.td[index]
img = Image.fromarray(img)#.convert('RGB')
img = self.transform(img)
return img, target, tt, td
def __len__(self):
return len(self.data)
def download(self):
"""Download the notMNIST data if it doesn't exist in processed_folder already."""
import errno
root = os.path.expanduser(self.root)
fpath = os.path.join(root, self.filename)
try:
os.makedirs(root)
except OSError as e:
if e.errno == errno.EEXIST:
pass
else:
raise
urllib.request.urlretrieve(self.url, fpath)
import zipfile
zip_ref = zipfile.ZipFile(fpath, 'r')
zip_ref.extractall(root)
zip_ref.close()
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import torch
import utils
class Shared(torch.nn.Module):
def __init__(self,args):
super(Shared, self).__init__()
self.ncha,size,_=args.inputsize
self.taskcla=args.taskcla
self.latent_dim = args.latent_dim
if args.experiment == 'cifar100':
hiddens = [64, 128, 256, 1024, 1024, 512]
elif args.experiment == 'miniimagenet':
hiddens = [64, 128, 256, 512, 512, 512]
# ----------------------------------
elif args.experiment == 'multidatasets':
hiddens = [64, 128, 256, 1024, 1024, 512]
else:
raise NotImplementedError
self.conv1=torch.nn.Conv2d(self.ncha,hiddens[0],kernel_size=size//8)
s=utils.compute_conv_output_size(size,size//8)
s=s//2
self.conv2=torch.nn.Conv2d(hiddens[0],hiddens[1],kernel_size=size//10)
s=utils.compute_conv_output_size(s,size//10)
s=s//2
self.conv3=torch.nn.Conv2d(hiddens[1],hiddens[2],kernel_size=2)
s=utils.compute_conv_output_size(s,2)
s=s//2
self.maxpool=torch.nn.MaxPool2d(2)
self.relu=torch.nn.ReLU()
self.drop1=torch.nn.Dropout(0.2)
self.drop2=torch.nn.Dropout(0.5)
self.fc1=torch.nn.Linear(hiddens[2]*s*s,hiddens[3])
self.fc2=torch.nn.Linear(hiddens[3],hiddens[4])
self.fc3=torch.nn.Linear(hiddens[4],hiddens[5])
self.fc4=torch.nn.Linear(hiddens[5], self.latent_dim)
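    # Worked example for CIFAR-100 inputs (32x32), assuming compute_conv_output_size
    # implements the usual no-padding, stride-1 formula out = in - kernel + 1:
    #   conv1 (k=4): 32 -> 29, pool -> 14
    #   conv2 (k=3): 14 -> 12, pool -> 6
    #   conv3 (k=2):  6 ->  5, pool -> 2
    # so fc1 sees hiddens[2] * 2 * 2 = 256 * 4 = 1024 features.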
def forward(self, x_s):
x_s = x_s.view_as(x_s)
h = self.maxpool(self.drop1(self.relu(self.conv1(x_s))))
h = self.maxpool(self.drop1(self.relu(self.conv2(h))))
h = self.maxpool(self.drop2(self.relu(self.conv3(h))))
h = h.view(x_s.size(0), -1)
h = self.drop2(self.relu(self.fc1(h)))
h = self.drop2(self.relu(self.fc2(h)))
h = self.drop2(self.relu(self.fc3(h)))
h = self.drop2(self.relu(self.fc4(h)))
return h
class Private(torch.nn.Module):
def __init__(self, args):
super(Private, self).__init__()
self.ncha,self.size,_=args.inputsize
self.taskcla=args.taskcla
self.latent_dim = args.latent_dim
self.num_tasks = args.ntasks
self.device = args.device
if args.experiment == 'cifar100':
hiddens=[32,32]
flatten=1152
elif args.experiment == 'miniimagenet':
# hiddens=[8,8]
# flatten=1800
hiddens=[16,16]
flatten=3600
elif args.experiment == 'multidatasets':
hiddens=[32,32]
flatten=1152
else:
raise NotImplementedError
self.task_out = torch.nn.Sequential()
self.task_out.add_module('conv1', torch.nn.Conv2d(self.ncha, hiddens[0], kernel_size=self.size // 8))
self.task_out.add_module('relu1', torch.nn.ReLU(inplace=True))
self.task_out.add_module('drop1', torch.nn.Dropout(0.2))
self.task_out.add_module('maxpool1', torch.nn.MaxPool2d(2))
self.task_out.add_module('conv2', torch.nn.Conv2d(hiddens[0], hiddens[1], kernel_size=self.size // 10))
self.task_out.add_module('relu2', torch.nn.ReLU(inplace=True))
self.task_out.add_module('dropout2', torch.nn.Dropout(0.5))
self.task_out.add_module('maxpool2', torch.nn.MaxPool2d(2))
self.linear = torch.nn.Sequential()
self.linear.add_module('linear1', torch.nn.Linear(flatten, self.latent_dim))
self.linear.add_module('relu3', torch.nn.ReLU(inplace=True))
def forward(self, x):
x = x.view_as(x)
out = self.task_out(x)
out = out.view(out.size(0), -1)
out = self.linear(out)
return out
# def forward(self, x, task_id):
# x = x.view_as(x)
# out = self.task_out[2*task_id].forward(x)
# out = out.view(out.size(0),-1)
# out = self.task_out[2*task_id+1].forward(out)
# return out
class Net(torch.nn.Module):
def __init__(self, args):
super(Net, self).__init__()
self.ncha,size,_=args.inputsize
self.taskcla=args.taskcla
self.latent_dim = args.latent_dim
self.ntasks = args.ntasks
self.samples = args.samples
self.image_size = self.ncha*size*size
self.args=args
self.hidden1 = args.head_units
self.hidden2 = args.head_units//2
self.shared = Shared(args)
self.private = Private(args)
self.head = torch.nn.Sequential(
torch.nn.Linear(2*self.latent_dim, self.hidden1),
torch.nn.ReLU(inplace=True),
torch.nn.Dropout(),
torch.nn.Linear(self.hidden1, self.hidden2),
torch.nn.ReLU(inplace=True),
torch.nn.Linear(self.hidden2, self.taskcla[0][1])
)
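    # The head takes 2 * latent_dim inputs because forward() concatenates the private
    # and shared embeddings (each of size latent_dim) before classification.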
def forward(self, x_s, x_p, tt=None):
x_s = x_s.view_as(x_s)
x_p = x_p.view_as(x_p)
# x_s = self.shared(x_s)
# x_p = self.private(x_p)
#
# x = torch.cat([x_p, x_s], dim=1)
# if self.args.experiment == 'multidatasets':
# # if no memory is used this is faster:
# y=[]
# for i,_ in self.taskcla:
# y.append(self.head[i](x))
# return y[task_id]
# else:
# return torch.stack([self.head[tt[i]].forward(x[i]) for i in range(x.size(0))])
# if torch.is_tensor(tt):
# return torch.stack([self.head[tt[i]].forward(x[i]) for i in range(x.size(0))])
# else:
# return self.head(x)
output = {}
output['shared'] = self.shared(x_s)
output['private'] = self.private(x_p)
concat_features = torch.cat([output['private'], output['shared']], dim=1)
if torch.is_tensor(tt):
output['out'] = torch.stack([self.head[tt[i]].forward(concat_features[i]) for i in range(
concat_features.size(0))])
else:
output['out'] = self.head(concat_features)
return output
# def get_encoded_ftrs(self, x_s, x_p, task_id=None):
# return self.shared(x_s), self.private(x_p)
def print_model_size(self):
count_P = sum(p.numel() for p in self.private.parameters() if p.requires_grad)
count_S = sum(p.numel() for p in self.shared.parameters() if p.requires_grad)
count_H = sum(p.numel() for p in self.head.parameters() if p.requires_grad)
print("Size of the network for one task including (S+P+p)")
print('Num parameters in S = %s ' % (self.pretty_print(count_S)))
print('Num parameters in P = %s ' % (self.pretty_print(count_P)))
print('Num parameters in p = %s ' % (self.pretty_print(count_H)))
print('Num parameters in P+p = %s ' % self.pretty_print(count_P + count_H))
print('--------------------------> Architecture size in total for all tasks: %s parameters (%sB)' % (
self.pretty_print(count_S + self.ntasks*count_P + self.ntasks*count_H),
self.pretty_print(4 * (count_S + self.ntasks*count_P + self.ntasks*count_H))))
classes_per_task = self.taskcla[0][1]
print("--------------------------> Memory size: %s samples per task (%sB)" % (self.samples*classes_per_task,
self.pretty_print(
self.ntasks * 4 * self.samples * classes_per_task* self.image_size)))
print("------------------------------------------------------------------------------")
print(" TOTAL: %sB" % self.pretty_print(
4 * (count_S + self.ntasks *count_P + self.ntasks *count_H) + self.ntasks * 4 * self.samples * classes_per_task * self.image_size))
def pretty_print(self, num):
magnitude = 0
while abs(num) >= 1000:
magnitude += 1
num /= 1000.0
return '%.1f%s' % (num, ['', 'K', 'M', 'G', 'T', 'P'][magnitude])
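# Worked example: pretty_print(2500000) divides by 1000 twice (magnitude == 2),
# leaving num == 2.5, so it returns '2.5M'.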
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import torch
import torch.nn as nn
class Shared(torch.nn.Module):
def __init__(self,args):
super(Shared, self).__init__()
self.taskcla=args.taskcla
self.latent_dim = args.latent_dim
ncha,size,_ = args.inputsize
self.pretrained = False
if args.experiment == 'cifar100':
hiddens = [64, 128, 256]
elif args.experiment == 'miniimagenet':
hiddens = [1024, 512, 256]
else:
raise NotImplementedError
# Small resnet
resnet = resnet18_small(self.latent_dim, shared=True)
self.features = torch.nn.Sequential(*list(resnet.children())[:-2])
if args.experiment == 'miniimagenet':
# num_ftrs = 4608
num_ftrs = 2304 # without average pool (-2)
elif args.experiment == 'cifar100':
# num_ftrs = 25088 # without average pool
num_ftrs = 256
else:
raise NotImplementedError
self.relu=torch.nn.ReLU()
self.drop1=torch.nn.Dropout(0.2)
self.drop2=torch.nn.Dropout(0.5)
self.fc1=torch.nn.Linear(num_ftrs,hiddens[0])
self.fc2=torch.nn.Linear(hiddens[0],hiddens[1])
self.fc3=torch.nn.Linear(hiddens[1],hiddens[1])
self.fc4=torch.nn.Linear(hiddens[1], self.latent_dim)
def forward(self, x):
x = x.view_as(x)
x = self.features(x)
x = torch.flatten(x, 1)
x = self.drop2(self.relu(self.fc1(x)))
x = self.drop2(self.relu(self.fc2(x)))
x = self.drop2(self.relu(self.fc3(x)))
x = self.drop2(self.relu(self.fc4(x)))
return x
class Net(torch.nn.Module):
def __init__(self, args):
super(Net, self).__init__()
ncha,size,_=args.inputsize
self.image_size = ncha * size * size
self.taskcla = args.taskcla
self.latent_dim = args.latent_dim
self.ntasks = args.ntasks
self.samples = args.samples
self.image_size = ncha * size * size
self.use_memory = args.use_memory
self.hidden1 = args.head_units
self.hidden2 = args.head_units
self.shared = Shared(args)
self.private = resnet18_small(self.latent_dim, shared=False)
self.head = torch.nn.Sequential(
torch.nn.Linear(2*self.latent_dim, self.hidden1),
torch.nn.ReLU(inplace=True),
torch.nn.Dropout(),
torch.nn.Linear(self.hidden1, self.hidden2),
torch.nn.ReLU(inplace=True),
torch.nn.Linear(self.hidden2, self.taskcla[0][1])
)
def forward(self, x_s, x_p, tt=None):
x_s = x_s.view_as(x_s)
x_p = x_p.view_as(x_p)
# x_s = self.shared(x_s)
# x_p = self.private(x_p)
# x = torch.cat([x_p, x_s], dim=1)
# if self.args.experiment == 'multidatasets':
# # if no memory is used this is faster:
# y=[]
# for i,_ in self.taskcla:
# y.append(self.head[i](x))
# return y[task_id]
# else:
# return torch.stack([self.head[tt[i]].forward(x[i]) for i in range(x.size(0))])
output = {}
output['shared'] = self.shared(x_s)
output['private'] = self.private(x_p)
concat_features = torch.cat([output['private'], output['shared']], dim=1)
if torch.is_tensor(tt):
output['out'] = torch.stack([self.head[tt[i]].forward(concat_features[i]) for i in range(
concat_features.size(0))])
else:
output['out'] = self.head(concat_features)
return output
# output['shared'] = self.shared(x_s)
# output['private'] = self.private(x_p)
#
# concat_features = torch.cat([output['private'], output['shared']], dim=1)
#
# if torch.is_tensor(tt):
#
# output['out'] = torch.stack([self.head[tt[i]].forward(concat_features[i]) for i in range(concat_features.size(0))])
# else:
# if self.use_memory == 'no':
# output['out'] = self.head.forward(concat_features)
#
# elif self.use_memory == 'yes':
# y = []
# for i, _ in self.taskcla:
# y.append(self.head[i](concat_features))
# output['out'] = y[task_id]
#
# return output
# def get_encoded_ftrs(self, x_s, x_p, task_id=None):
# return self.shared(x_s), self.private(x_p)
def print_model_size(self):
count_P = sum(p.numel() for p in self.private.parameters() if p.requires_grad)
count_S = sum(p.numel() for p in self.shared.parameters() if p.requires_grad)
count_H = sum(p.numel() for p in self.head.parameters() if p.requires_grad)
print("Size of the network for one task including (S+P+p)")
print('Num parameters in S = %s ' % (self.pretty_print(count_S)))
print('Num parameters in P = %s ' % (self.pretty_print(count_P)))
print('Num parameters in p = %s ' % (self.pretty_print(count_H)))
print('Num parameters in P+p = %s ' % self.pretty_print(count_P + count_H))
print('--------------------------> Architecture size in total for all tasks: %s parameters (%sB)' % (
self.pretty_print(count_S + self.ntasks*count_P + self.ntasks*count_H),
self.pretty_print(4 * (count_S + self.ntasks*count_P + self.ntasks*count_H))))
classes_per_task = self.taskcla[0][1]
print("--------------------------> Memory size: %s samples per task (%sB)" % (self.samples*classes_per_task,
self.pretty_print(
self.ntasks * 4 * self.samples * classes_per_task* self.image_size)))
print("------------------------------------------------------------------------------")
print(" TOTAL: %sB" % self.pretty_print(
4 * (count_S + self.ntasks *count_P + self.ntasks *count_H) + self.ntasks * 4 * self.samples * classes_per_task * self.image_size))
def pretty_print(self, num):
magnitude = 0
while abs(num) >= 1000:
magnitude += 1
num /= 1000.0
return '%.1f%s' % (num, ['', 'K', 'M', 'G', 'T', 'P'][magnitude])
class _CustomDataParallel(torch.nn.DataParallel):
def __init__(self, model):
super(_CustomDataParallel, self).__init__(model)
def __getattr__(self, name):
try:
return super(_CustomDataParallel, self).__getattr__(name)
except AttributeError:
return getattr(self.module, name)
def conv3x3(in_planes, out_planes, stride=1, groups=1, dilation=1):
"""3x3 convolution with padding"""
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
padding=dilation, groups=groups, bias=False, dilation=dilation)
def conv1x1(in_planes, out_planes, stride=1):
"""1x1 convolution"""
return nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride, bias=False)
class BasicBlock(nn.Module):
expansion = 1
def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1,
base_width=64, dilation=1, norm_layer=None):
super(BasicBlock, self).__init__()
if norm_layer is None:
norm_layer = nn.BatchNorm2d
if groups != 1 or base_width != 64:
raise ValueError('BasicBlock only supports groups=1 and base_width=64')
if dilation > 1:
raise NotImplementedError("Dilation > 1 not supported in BasicBlock")
# Both self.conv1 and self.downsample layers downsample the input when stride != 1
self.conv1 = conv3x3(inplanes, planes, stride)
self.bn1 = norm_layer(planes)
self.relu = nn.ReLU(inplace=True)
self.conv2 = conv3x3(planes, planes)
self.bn2 = norm_layer(planes)
self.downsample = downsample
self.stride = stride
def forward(self, x):
identity = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
if self.downsample is not None:
identity = self.downsample(x)
out += identity
out = self.relu(out)
return out
class Bottleneck(nn.Module):
# Bottleneck in torchvision places the stride for downsampling at 3x3 convolution(self.conv2)
# while original implementation places the stride at the first 1x1 convolution(self.conv1)
# according to "Deep residual learning for image recognition"https://arxiv.org/abs/1512.03385.
# This variant is also known as ResNet V1.5 and improves accuracy according to
# https://ngc.nvidia.com/catalog/model-scripts/nvidia:resnet_50_v1_5_for_pytorch.
expansion = 4
def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1,
base_width=64, dilation=1, norm_layer=None):
super(Bottleneck, self).__init__()
if norm_layer is None:
norm_layer = nn.BatchNorm2d
width = int(planes * (base_width / 64.)) * groups
# Both self.conv2 and self.downsample layers downsample the input when stride != 1
self.conv1 = conv1x1(inplanes, width)
self.bn1 = norm_layer(width)
self.conv2 = conv3x3(width, width, stride, groups, dilation)
self.bn2 = norm_layer(width)
self.conv3 = conv1x1(width, planes * self.expansion)
self.bn3 = norm_layer(planes * self.expansion)
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
self.stride = stride
def forward(self, x):
identity = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
if self.downsample is not None:
identity = self.downsample(x)
out += identity
out = self.relu(out)
return out
class ResNet(nn.Module):
def __init__(self, shared, block, layers, num_classes, zero_init_residual=False,
groups=1, width_per_group=64, replace_stride_with_dilation=None,
norm_layer=None):
super(ResNet, self).__init__()
if norm_layer is None:
norm_layer = nn.BatchNorm2d
self._norm_layer = norm_layer
self.inplanes = 64
self.dilation = 1
# small resnet
if shared:
hiddens = [32, 64, 128, 256]
else:
hiddens = [16, 32, 32, 64]
# original resnet
# hiddens = [64, 128, 256, 512]
if replace_stride_with_dilation is None:
# each element in the tuple indicates if we should replace
# the 2x2 stride with a dilated convolution instead
replace_stride_with_dilation = [False, False, False]
if len(replace_stride_with_dilation) != 3:
raise ValueError("replace_stride_with_dilation should be None "
"or a 3-element tuple, got {}".format(replace_stride_with_dilation))
self.groups = groups
self.base_width = width_per_group
self.conv1 = nn.Conv2d(3, self.inplanes, kernel_size=7, stride=2, padding=3,
bias=False)
self.bn1 = norm_layer(self.inplanes)
self.relu = nn.ReLU(inplace=True)
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.layer1 = self._make_layer(block, hiddens[0], layers[0])
self.layer2 = self._make_layer(block, hiddens[1], layers[1], stride=2,
dilate=replace_stride_with_dilation[0])
self.layer3 = self._make_layer(block, hiddens[2], layers[2], stride=2,
dilate=replace_stride_with_dilation[1])
self.layer4 = self._make_layer(block, hiddens[3], layers[3], stride=2,
dilate=replace_stride_with_dilation[2])
self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
self.fc = nn.Linear(hiddens[3] * block.expansion, num_classes)
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
# Zero-initialize the last BN in each residual branch,
# so that the residual branch starts with zeros, and each residual block behaves like an identity.
# This improves the model by 0.2~0.3% according to https://arxiv.org/abs/1706.02677
if zero_init_residual:
for m in self.modules():
if isinstance(m, Bottleneck):
nn.init.constant_(m.bn3.weight, 0)
elif isinstance(m, BasicBlock):
nn.init.constant_(m.bn2.weight, 0)
def _make_layer(self, block, planes, blocks, stride=1, dilate=False):
norm_layer = self._norm_layer
downsample = None
previous_dilation = self.dilation
if dilate:
self.dilation *= stride
stride = 1
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
conv1x1(self.inplanes, planes * block.expansion, stride),
norm_layer(planes * block.expansion),
)
layers = []
layers.append(block(self.inplanes, planes, stride, downsample, self.groups,
self.base_width, previous_dilation, norm_layer))
self.inplanes = planes * block.expansion
for _ in range(1, blocks):
layers.append(block(self.inplanes, planes, groups=self.groups,
base_width=self.base_width, dilation=self.dilation,
norm_layer=norm_layer))
return nn.Sequential(*layers)
def _forward_impl(self, x):
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.maxpool(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
x = self.avgpool(x)
x = torch.flatten(x, 1)
x = self.fc(x)
x = self.relu(x)
return x
def forward(self, x):
return self._forward_impl(x)
def resnet18_small(latent_dim, shared):
    r"""ResNet-18 model from
    `"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_
    """
    return ResNet(shared, BasicBlock, [2, 2, 2, 2], num_classes=latent_dim)
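# Hypothetical smoke test (not in the original source): a made-up latent dimension and an
# 84x84 RGB batch, as used for miniImageNet, only checking that the forward pass runs and
# returns one latent vector per image.
if __name__ == '__main__':
    model = resnet18_small(64, shared=True)
    out = model(torch.randn(2, 3, 84, 84))
    assert out.shape == (2, 64)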
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import torch
class Private(torch.nn.Module):
def __init__(self, args):
super(Private, self).__init__()
self.ncha,self.size,_=args.inputsize
self.taskcla=args.taskcla
self.latent_dim = args.latent_dim
self.num_tasks = args.ntasks
self.nhid = args.units
self.device = args.device
self.task_out = torch.nn.ModuleList()
for _ in range(self.num_tasks):
self.linear = torch.nn.Sequential()
self.linear.add_module('linear', torch.nn.Linear(self.ncha*self.size*self.size, self.latent_dim))
self.linear.add_module('relu', torch.nn.ReLU(inplace=True))
self.task_out.append(self.linear)
def forward(self, x_p, task_id):
x_p = x_p.view(x_p.size(0), -1)
return self.task_out[task_id].forward(x_p)
class Shared(torch.nn.Module):
def __init__(self,args):
super(Shared, self).__init__()
ncha,self.size,_=args.inputsize
self.taskcla=args.taskcla
self.latent_dim = args.latent_dim
self.nhid = args.units
self.nlayers = args.nlayers
self.relu=torch.nn.ReLU()
self.drop=torch.nn.Dropout(0.2)
self.fc1=torch.nn.Linear(ncha*self.size*self.size, self.nhid)
if self.nlayers == 3:
self.fc2 = torch.nn.Linear(self.nhid, self.nhid)
self.fc3=torch.nn.Linear(self.nhid,self.latent_dim)
else:
self.fc2 = torch.nn.Linear(self.nhid,self.latent_dim)
def forward(self, x_s):
h = x_s.view(x_s.size(0), -1)
h = self.drop(self.relu(self.fc1(h)))
h = self.drop(self.relu(self.fc2(h)))
if self.nlayers == 3:
h = self.drop(self.relu(self.fc3(h)))
return h
class Net(torch.nn.Module):
def __init__(self, args):
super(Net, self).__init__()
ncha,size,_=args.inputsize
self.taskcla=args.taskcla
self.latent_dim = args.latent_dim
self.num_tasks = args.ntasks
self.device = args.device
if args.experiment == 'mnist5':
self.hidden1 = 28
self.hidden2 = 14
elif args.experiment == 'pmnist':
self.hidden1 = 28
self.hidden2 = 28
self.samples = args.samples
self.shared = Shared(args)
self.private = Private(args)
self.head = torch.nn.ModuleList()
for i in range(self.num_tasks):
self.head.append(
torch.nn.Sequential(
torch.nn.Linear(2 * self.latent_dim, self.hidden1),
torch.nn.ReLU(inplace=True),
torch.nn.Dropout(),
torch.nn.Linear(self.hidden1, self.hidden2),
torch.nn.ReLU(inplace=True),
torch.nn.Linear(self.hidden2, self.taskcla[i][1])
))
def forward(self,x_s, x_p, tt, task_id):
h_s = x_s.view(x_s.size(0), -1)
        h_p = x_p.view(x_p.size(0), -1)
x_s = self.shared(h_s)
x_p = self.private(h_p, task_id)
x = torch.cat([x_p, x_s], dim=1)
return torch.stack([self.head[tt[i]].forward(x[i]) for i in range(x.size(0))])
def get_encoded_ftrs(self, x_s, x_p, task_id):
return self.shared(x_s), self.private(x_p, task_id)
def print_model_size(self):
count_P = sum(p.numel() for p in self.private.parameters() if p.requires_grad)
count_S = sum(p.numel() for p in self.shared.parameters() if p.requires_grad)
count_H = sum(p.numel() for p in self.head.parameters() if p.requires_grad)
print('Num parameters in S = %s ' % (self.pretty_print(count_S)))
print('Num parameters in P = %s, per task = %s ' % (self.pretty_print(count_P),self.pretty_print(count_P/self.num_tasks)))
print('Num parameters in p = %s, per task = %s ' % (self.pretty_print(count_H),self.pretty_print(count_H/self.num_tasks)))
print('Num parameters in P+p = %s ' % self.pretty_print(count_P+count_H))
print('--------------------------> Total architecture size: %s parameters (%sB)' % (self.pretty_print(count_S + count_P + count_H),
self.pretty_print(4*(count_S + count_P + count_H))))
def pretty_print(self, num):
magnitude=0
while abs(num) >= 1000:
magnitude+=1
num/=1000.0
return '%.2f%s' % (num, ['', 'K', 'M', 'G', 'T', 'P'][magnitude])
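# Hypothetical smoke test (not in the original source): made-up hyper-parameters resembling
# the mnist5 configuration, checking that the task-conditioned forward pass returns one
# logit vector per sample from the head selected by tt.
if __name__ == '__main__':
    from argparse import Namespace
    _args = Namespace(inputsize=(1, 28, 28), taskcla=[(0, 2), (1, 2)], latent_dim=16,
                      ntasks=2, units=64, nlayers=2, device='cpu',
                      experiment='mnist5', samples=0)
    _net = Net(_args)
    _x = torch.randn(4, 1, 28, 28)
    _tt = torch.zeros(4, dtype=torch.long)  # all samples belong to task 0
    _out = _net(_x, _x, _tt, task_id=0)
    assert _out.shape == (4, 2)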
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import torch
import utils
class Discriminator(torch.nn.Module):
def __init__(self,args,task_id):
super(Discriminator, self).__init__()
self.num_tasks=args.ntasks
self.units=args.units
self.latent_dim=args.latent_dim
if args.diff == 'yes':
self.dis = torch.nn.Sequential(
GradientReversal(args.lam),
torch.nn.Linear(self.latent_dim, args.units),
torch.nn.LeakyReLU(),
torch.nn.Linear(args.units, args.units),
torch.nn.Linear(args.units, task_id + 2)
)
else:
self.dis = torch.nn.Sequential(
torch.nn.Linear(self.latent_dim, args.units),
torch.nn.LeakyReLU(),
torch.nn.Linear(args.units, args.units),
torch.nn.Linear(args.units, task_id + 2)
)
    def forward(self, z, labels, task_id):
        # labels and task_id are unused here; the signature matches how acl.py calls the discriminator
        return self.dis(z)
def pretty_print(self, num):
magnitude=0
while abs(num) >= 1000:
magnitude+=1
num/=1000.0
return '%.1f%s' % (num, ['', 'K', 'M', 'G', 'T', 'P'][magnitude])
def get_size(self):
count=sum(p.numel() for p in self.dis.parameters() if p.requires_grad)
print('Num parameters in D = %s ' % (self.pretty_print(count)))
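# Hypothetical usage sketch (not in the original source): made-up hyper-parameters; for
# task_id=1 the discriminator maps a latent code to task_id + 2 = 3 logits (one class for
# samples drawn from the prior plus the task labels seen so far). Never called anywhere.
def _demo_discriminator():
    from argparse import Namespace
    args = Namespace(ntasks=5, units=32, latent_dim=16, diff='yes', lam=1.0)
    d = Discriminator(args, task_id=1)
    z = torch.randn(4, 16)
    logits = d.forward(z, None, task_id=1)
    assert logits.shape == (4, 3)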
class GradientReversalFunction(torch.autograd.Function):
"""
From:
https://github.com/jvanvugt/pytorch-domain-adaptation/blob/cb65581f20b71ff9883dd2435b2275a1fd4b90df/utils.py#L26
Gradient Reversal Layer from:
Unsupervised Domain Adaptation by Backpropagation (Ganin & Lempitsky, 2015)
Forward pass is the identity function. In the backward pass,
the upstream gradients are multiplied by -lambda (i.e. gradient is reversed)
"""
@staticmethod
def forward(ctx, x, lambda_):
ctx.lambda_ = lambda_
return x.clone()
@staticmethod
def backward(ctx, grads):
lambda_ = ctx.lambda_
lambda_ = grads.new_tensor(lambda_)
dx = -lambda_ * grads
return dx, None
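# Hypothetical sanity check (not in the original source): the forward pass is the identity,
# and the backward pass multiplies the incoming gradient by -lambda. Never called anywhere.
def _demo_gradient_reversal():
    x = torch.ones(3, requires_grad=True)
    y = GradientReversalFunction.apply(x, 2.0)
    y.sum().backward()
    assert torch.allclose(x.grad, torch.full((3,), -2.0))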
class GradientReversal(torch.nn.Module):
def __init__(self, lambda_):
super(GradientReversal, self).__init__()
self.lambda_ = lambda_
def forward(self, x):
        return GradientReversalFunction.apply(x, self.lambda_)
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import pickle
import numpy as np
import os
np.random.seed(1234)
# we want 500 images for training and 100 for test for each class
n = 500
def get_total(data):
data_x, data_y = [], []
for k, v in data.items():
for i in range(len(v)):
data_x.append(v[i])
data_y.append(k)
d = {}
d['images'] = data_x
d['labels'] = data_y
return d
# loading the pickled data
with open(os.path.join('../data/miniimagenet/data.pkl'), 'rb') as f:
data_dict = pickle.load(f)
data = data_dict['images']
labels = data_dict['labels']
# split data into classes, 600 images per class
class_dict = {}
for i in range(len(set(labels))):
class_dict[i] = []
for i in range(len(data)):
class_dict[labels[i]].append(data[i])
# Split data for each class to 500 and 100
x_train, x_test = {}, {}
for i in range(len(set(labels))):
np.random.shuffle(class_dict[i])
x_test[i] = class_dict[i][n:]
x_train[i] = class_dict[i][:n]
# mix the data
d_train = get_total(x_train)
d_test = get_total(x_test)
with open(os.path.join('../data/miniimagenet/train.pkl'), 'wb') as f:
pickle.dump(d_train, f)
with open(os.path.join('../data/miniimagenet/test.pkl'), 'wb') as f:
    pickle.dump(d_test, f)
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import sys, time, os
import numpy as np
import torch
import copy
import utils
from copy import deepcopy
from tqdm import tqdm
sys.path.append('../')
from networks.discriminator import Discriminator
class ACL(object):
def __init__(self, model, args, network):
self.args=args
self.nepochs=args.nepochs
self.sbatch=args.batch_size
# optimizer & adaptive lr
self.e_lr=args.e_lr
self.d_lr=args.d_lr
if not args.experiment == 'multidatasets':
self.e_lr=[args.e_lr] * args.ntasks
self.d_lr=[args.d_lr] * args.ntasks
else:
self.e_lr = [self.args.lrs[i][1] for i in range(len(args.lrs))]
self.d_lr = [self.args.lrs[i][1]/10. for i in range(len(args.lrs))]
print ("d_lrs : ", self.d_lr)
self.lr_min=args.lr_min
self.lr_factor=args.lr_factor
self.lr_patience=args.lr_patience
self.samples=args.samples
self.device=args.device
self.checkpoint=args.checkpoint
self.adv_loss_reg=args.adv
self.diff_loss_reg=args.orth
self.s_steps=args.s_step
self.d_steps=args.d_step
self.diff=args.diff
self.network=network
self.inputsize=args.inputsize
self.taskcla=args.taskcla
self.num_tasks=args.ntasks
# Initialize generator and discriminator
self.model=model
self.discriminator=self.get_discriminator(0)
self.discriminator.get_size()
self.latent_dim=args.latent_dim
self.task_loss=torch.nn.CrossEntropyLoss().to(self.device)
self.adversarial_loss_d=torch.nn.CrossEntropyLoss().to(self.device)
self.adversarial_loss_s=torch.nn.CrossEntropyLoss().to(self.device)
self.diff_loss=DiffLoss().to(self.device)
self.optimizer_S=self.get_S_optimizer(0)
self.optimizer_D=self.get_D_optimizer(0)
self.task_encoded={}
self.mu=0.0
self.sigma=1.0
print()
def get_discriminator(self, task_id):
discriminator=Discriminator(self.args, task_id).to(self.args.device)
return discriminator
def get_S_optimizer(self, task_id, e_lr=None):
if e_lr is None: e_lr=self.e_lr[task_id]
optimizer_S=torch.optim.SGD(self.model.parameters(), momentum=self.args.mom,
weight_decay=self.args.e_wd, lr=e_lr)
return optimizer_S
def get_D_optimizer(self, task_id, d_lr=None):
if d_lr is None: d_lr=self.d_lr[task_id]
optimizer_D=torch.optim.SGD(self.discriminator.parameters(), weight_decay=self.args.d_wd, lr=d_lr)
return optimizer_D
def train(self, task_id, dataset):
self.discriminator=self.get_discriminator(task_id)
best_loss=np.inf
best_model=utils.get_model(self.model)
best_loss_d=np.inf
best_model_d=utils.get_model(self.discriminator)
dis_lr_update=True
d_lr=self.d_lr[task_id]
patience_d=self.lr_patience
self.optimizer_D=self.get_D_optimizer(task_id, d_lr)
e_lr=self.e_lr[task_id]
patience=self.lr_patience
self.optimizer_S=self.get_S_optimizer(task_id, e_lr)
for e in range(self.nepochs):
# Train
clock0=time.time()
self.train_epoch(dataset['train'], task_id)
clock1=time.time()
train_res=self.eval_(dataset['train'], task_id)
utils.report_tr(train_res, e, self.sbatch, clock0, clock1)
            # Lower the learning rates if the model is still at chance-level accuracy after the first 5 epochs
if (self.args.experiment == 'cifar100' or self.args.experiment == 'miniimagenet') and e == 4:
random_chance=20.
threshold=random_chance + 2
if train_res['acc_t'] < threshold:
# Restore best validation model
d_lr=self.d_lr[task_id] / 10.
self.optimizer_D=self.get_D_optimizer(task_id, d_lr)
print("Performance on task {} is {} so Dis's lr is decreased to {}".format(task_id, train_res[
'acc_t'], d_lr), end=" ")
e_lr=self.e_lr[task_id] / 10.
self.optimizer_S=self.get_S_optimizer(task_id, e_lr)
self.discriminator=self.get_discriminator(task_id)
if task_id > 0:
self.model=self.load_checkpoint(task_id - 1)
else:
self.model=self.network.Net(self.args).to(self.args.device)
# Valid
valid_res=self.eval_(dataset['valid'], task_id)
utils.report_val(valid_res)
# Adapt lr for S and D
if valid_res['loss_tot'] < best_loss:
best_loss=valid_res['loss_tot']
best_model=utils.get_model(self.model)
patience=self.lr_patience
print(' *', end='')
else:
patience-=1
if patience <= 0:
e_lr/=self.lr_factor
print(' lr={:.1e}'.format(e_lr), end='')
if e_lr < self.lr_min:
print()
break
patience=self.lr_patience
self.optimizer_S=self.get_S_optimizer(task_id, e_lr)
if train_res['loss_a'] < best_loss_d:
best_loss_d=train_res['loss_a']
best_model_d=utils.get_model(self.discriminator)
patience_d=self.lr_patience
else:
patience_d-=1
if patience_d <= 0 and dis_lr_update:
d_lr/=self.lr_factor
print(' Dis lr={:.1e}'.format(d_lr))
if d_lr < self.lr_min:
dis_lr_update=False
print("Dis lr reached minimum value")
print()
patience_d=self.lr_patience
self.optimizer_D=self.get_D_optimizer(task_id, d_lr)
print()
# Restore best validation model (early-stopping)
self.model.load_state_dict(copy.deepcopy(best_model))
self.discriminator.load_state_dict(copy.deepcopy(best_model_d))
self.save_all_models(task_id)
def train_epoch(self, train_loader, task_id):
self.model.train()
self.discriminator.train()
for data, target, tt, td in train_loader:
x=data.to(device=self.device)
y=target.to(device=self.device, dtype=torch.long)
tt=tt.to(device=self.device)
# Detaching samples in the batch which do not belong to the current task before feeding them to P
t_current=task_id * torch.ones_like(tt)
body_mask=torch.eq(t_current, tt).cpu().numpy()
# x_task_module=data.to(device=self.device)
x_task_module=data.clone()
for index in range(x.size(0)):
if body_mask[index] == 0:
x_task_module[index]=x_task_module[index].detach()
x_task_module=x_task_module.to(device=self.device)
# Discriminator's real and fake task labels
t_real_D=td.to(self.device)
t_fake_D=torch.zeros_like(t_real_D).to(self.device)
# ================================================================== #
# Train Shared Module #
# ================================================================== #
# training S for s_steps
for s_step in range(self.s_steps):
self.optimizer_S.zero_grad()
self.model.zero_grad()
output=self.model(x, x_task_module, tt, task_id)
task_loss=self.task_loss(output, y)
shared_encoded, task_encoded=self.model.get_encoded_ftrs(x, x_task_module, task_id)
dis_out_gen_training=self.discriminator.forward(shared_encoded, t_real_D, task_id)
adv_loss=self.adversarial_loss_s(dis_out_gen_training, t_real_D)
if self.diff == 'yes':
diff_loss=self.diff_loss(shared_encoded, task_encoded)
else:
diff_loss=torch.tensor(0).to(device=self.device, dtype=torch.float32)
self.diff_loss_reg=0
total_loss=task_loss + self.adv_loss_reg * adv_loss + self.diff_loss_reg * diff_loss
total_loss.backward(retain_graph=True)
self.optimizer_S.step()
# ================================================================== #
# Train Discriminator #
# ================================================================== #
# training discriminator for d_steps
for d_step in range(self.d_steps):
self.optimizer_D.zero_grad()
self.discriminator.zero_grad()
# training discriminator on real data
output=self.model(x, x_task_module, tt, task_id)
shared_encoded, task_out=self.model.get_encoded_ftrs(x, x_task_module, task_id)
dis_real_out=self.discriminator.forward(shared_encoded.detach(), t_real_D, task_id)
dis_real_loss=self.adversarial_loss_d(dis_real_out, t_real_D)
if self.args.experiment == 'miniimagenet':
dis_real_loss*=self.adv_loss_reg
dis_real_loss.backward(retain_graph=True)
# training discriminator on fake data
z_fake=torch.as_tensor(np.random.normal(self.mu, self.sigma, (x.size(0), self.latent_dim)),dtype=torch.float32, device=self.device)
dis_fake_out=self.discriminator.forward(z_fake, t_real_D, task_id)
dis_fake_loss=self.adversarial_loss_d(dis_fake_out, t_fake_D)
if self.args.experiment == 'miniimagenet':
dis_fake_loss*=self.adv_loss_reg
dis_fake_loss.backward(retain_graph=True)
self.optimizer_D.step()
return
def eval_(self, data_loader, task_id):
loss_a, loss_t, loss_d, loss_total=0, 0, 0, 0
correct_d, correct_t = 0, 0
num=0
batch=0
self.model.eval()
self.discriminator.eval()
res={}
with torch.no_grad():
for batch, (data, target, tt, td) in enumerate(data_loader):
x=data.to(device=self.device)
y=target.to(device=self.device, dtype=torch.long)
tt=tt.to(device=self.device)
t_real_D=td.to(self.device)
# Forward
output=self.model(x, x, tt, task_id)
shared_out, task_out=self.model.get_encoded_ftrs(x, x, task_id)
_, pred=output.max(1)
correct_t+=pred.eq(y.view_as(pred)).sum().item()
# Discriminator's performance:
output_d=self.discriminator.forward(shared_out, t_real_D, task_id)
_, pred_d=output_d.max(1)
correct_d+=pred_d.eq(t_real_D.view_as(pred_d)).sum().item()
# Loss values
task_loss=self.task_loss(output, y)
adv_loss=self.adversarial_loss_d(output_d, t_real_D)
if self.diff == 'yes':
diff_loss=self.diff_loss(shared_out, task_out)
else:
diff_loss=torch.tensor(0).to(device=self.device, dtype=torch.float32)
self.diff_loss_reg=0
total_loss = task_loss + self.adv_loss_reg * adv_loss + self.diff_loss_reg * diff_loss
loss_t+=task_loss
loss_a+=adv_loss
loss_d+=diff_loss
loss_total+=total_loss
num+=x.size(0)
res['loss_t'], res['acc_t']=loss_t.item() / (batch + 1), 100 * correct_t / num
res['loss_a'], res['acc_d']=loss_a.item() / (batch + 1), 100 * correct_d / num
res['loss_d']=loss_d.item() / (batch + 1)
res['loss_tot']=loss_total.item() / (batch + 1)
res['size']=self.loader_size(data_loader)
return res
#
def test(self, data_loader, task_id, model):
loss_a, loss_t, loss_d, loss_total=0, 0, 0, 0
correct_d, correct_t=0, 0
num=0
batch=0
model.eval()
self.discriminator.eval()
res={}
with torch.no_grad():
for batch, (data, target, tt, td) in enumerate(data_loader):
x=data.to(device=self.device)
y=target.to(device=self.device, dtype=torch.long)
tt=tt.to(device=self.device)
t_real_D=td.to(self.device)
# Forward
output=model.forward(x, x, tt, task_id)
shared_out, task_out=model.get_encoded_ftrs(x, x, task_id)
_, pred=output.max(1)
correct_t+=pred.eq(y.view_as(pred)).sum().item()
# Discriminator's performance:
output_d=self.discriminator.forward(shared_out, tt, task_id)
_, pred_d=output_d.max(1)
correct_d+=pred_d.eq(t_real_D.view_as(pred_d)).sum().item()
if self.diff == 'yes':
diff_loss=self.diff_loss(shared_out, task_out)
else:
diff_loss=torch.tensor(0).to(device=self.device, dtype=torch.float32)
self.diff_loss_reg=0
# Loss values
adv_loss=self.adversarial_loss_d(output_d, t_real_D)
task_loss=self.task_loss(output, y)
total_loss=task_loss + self.adv_loss_reg * adv_loss + self.diff_loss_reg * diff_loss
loss_t+=task_loss
loss_a+=adv_loss
loss_d+=diff_loss
loss_total+=total_loss
num+=x.size(0)
res['loss_t'], res['acc_t']=loss_t.item() / (batch + 1), 100 * correct_t / num
res['loss_a'], res['acc_d']=loss_a.item() / (batch + 1), 100 * correct_d / num
res['loss_d']=loss_d.item() / (batch + 1)
res['loss_tot']=loss_total.item() / (batch + 1)
res['size']=self.loader_size(data_loader)
return res
def save_all_models(self, task_id):
print("Saving all models for task {} ...".format(task_id+1))
dis=utils.get_model(self.discriminator)
torch.save({'model_state_dict': dis,
}, os.path.join(self.checkpoint, 'discriminator_{}.pth.tar'.format(task_id)))
model=utils.get_model(self.model)
torch.save({'model_state_dict': model,
}, os.path.join(self.checkpoint, 'model_{}.pth.tar'.format(task_id)))
def load_model(self, task_id):
# Load a previous model
net=self.network.Net(self.args)
checkpoint=torch.load(os.path.join(self.checkpoint, 'model_{}.pth.tar'.format(task_id)))
net.load_state_dict(checkpoint['model_state_dict'])
# # Change the previous shared module with the current one
current_shared_module=deepcopy(self.model.shared.state_dict())
net.shared.load_state_dict(current_shared_module)
net=net.to(self.args.device)
return net
def load_checkpoint(self, task_id):
print("Loading checkpoint for task {} ...".format(task_id))
# Load a previous model
net=self.network.Net(self.args)
checkpoint=torch.load(os.path.join(self.checkpoint, 'model_{}.pth.tar'.format(task_id)))
net.load_state_dict(checkpoint['model_state_dict'])
net=net.to(self.args.device)
return net
def loader_size(self, data_loader):
return data_loader.dataset.__len__()
def get_tsne_embeddings_first_ten_tasks(self, dataset, model):
from tensorboardX import SummaryWriter
model.eval()
tag_ = '_diff_{}'.format(self.args.diff)
all_images, all_shared, all_private = [], [], []
# Test final model on first 10 tasks:
writer = SummaryWriter()
for t in range(10):
for itr, (data, _, tt, td) in enumerate(dataset[t]['tsne']):
x = data.to(device=self.device)
tt = tt.to(device=self.device)
output = model.forward(x, x, tt, t)
shared_out, private_out = model.get_encoded_ftrs(x, x, t)
all_shared.append(shared_out)
all_private.append(private_out)
all_images.append(x)
print (torch.stack(all_shared).size())
tag = ['Shared10_{}_{}'.format(tag_,i) for i in range(1,11)]
writer.add_embedding(mat=torch.stack(all_shared,dim=1).data, label_img=torch.stack(all_images,dim=1).data, metadata=list(range(1,11)),
tag=tag)#, metadata_header=list(range(1,11)))
tag = ['Private10_{}_{}'.format(tag_, i) for i in range(1, 11)]
writer.add_embedding(mat=torch.stack(all_private,dim=1).data, label_img=torch.stack(all_images,dim=1).data, metadata=list(range(1,11)),
tag=tag)#,metadata_header=list(range(1,11)))
writer.close()
def get_tsne_embeddings_last_three_tasks(self, dataset, model):
from tensorboardX import SummaryWriter
# Test final model on last 3 tasks:
model.eval()
tag = '_diff_{}'.format(self.args.diff)
for t in [17,18,19]:
all_images, all_labels, all_shared, all_private = [], [], [], []
writer = SummaryWriter()
for itr, (data, target, tt, td) in enumerate(dataset[t]['tsne']):
x = data.to(device=self.device)
y = target.to(device=self.device, dtype=torch.long)
tt = tt.to(device=self.device)
output = model.forward(x, x, tt, t)
shared_out, private_out = model.get_encoded_ftrs(x, x, t)
# print (shared_out.size())
all_shared.append(shared_out)
all_private.append(private_out)
all_images.append(x)
all_labels.append(y)
writer.add_embedding(mat=torch.stack(all_shared,dim=1).data, label_img=torch.stack(all_images,dim=1).data,
metadata=list(range(1,6)), tag='Shared_{}_{}'.format(t, tag))
# ,metadata_header=list(range(1,6)))
writer.add_embedding(mat=torch.stack(all_private,dim=1).data, label_img=torch.stack(all_images,dim=1).data,
metadata=list(range(1,6)), tag='Private_{}_{}'.format(t, tag))
# ,metadata_header=list(range(1,6)))
writer.close()
#
class DiffLoss(torch.nn.Module):
# From: Domain Separation Networks (https://arxiv.org/abs/1608.06019)
# Konstantinos Bousmalis, George Trigeorgis, Nathan Silberman, Dilip Krishnan, Dumitru Erhan
def __init__(self):
super(DiffLoss, self).__init__()
def forward(self, D1, D2):
D1=D1.view(D1.size(0), -1)
D1_norm=torch.norm(D1, p=2, dim=1, keepdim=True).detach()
D1_norm=D1.div(D1_norm.expand_as(D1) + 1e-6)
D2=D2.view(D2.size(0), -1)
D2_norm=torch.norm(D2, p=2, dim=1, keepdim=True).detach()
D2_norm=D2.div(D2_norm.expand_as(D2) + 1e-6)
# return torch.mean((D1_norm.mm(D2_norm.t()).pow(2)))
return torch.mean((D1_norm.mm(D2_norm.t()).pow(2)))
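# Hypothetical sanity check (not in the original source): the penalty is ~0 when the shared
# and private feature batches are row-wise orthogonal and ~1 when they are identical.
if __name__ == '__main__':
    _diff = DiffLoss()
    _a = torch.tensor([[1., 0.], [1., 0.]])
    _b = torch.tensor([[0., 1.], [0., 1.]])
    print('orthogonal features:', _diff(_a, _b).item())  # close to 0
    print('identical features :', _diff(_a, _a).item())  # close to 1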
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import os
import numpy as np
from copy import deepcopy
import pickle
import time
import uuid
from subprocess import call
########################################################################################################################
def human_format(num):
magnitude=0
while abs(num)>=1000:
magnitude+=1
num/=1000.0
return '%.1f%s'%(num,['','K','M','G','T','P'][magnitude])
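# Illustrative example (not in the original source): human_format(1234000) -> '1.2M'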
def report_tr(res, e, sbatch, clock0, clock1):
# Training performance
print(
'| Epoch {:3d}, time={:5.1f}ms/{:5.1f}ms | Train losses={:.3f} | T: loss={:.3f}, acc={:5.2f}% | D: loss={:.3f}, acc={:5.1f}%, '
'Diff loss:{:.3f} |'.format(
e + 1,
1000 * sbatch * (clock1 - clock0) / res['size'],
1000 * sbatch * (time.time() - clock1) / res['size'], res['loss_tot'],
res['loss_t'], res['acc_t'], res['loss_a'], res['acc_d'], res['loss_d']), end='')
def report_val(res):
# Validation performance
print(' Valid losses={:.3f} | T: loss={:.6f}, acc={:5.2f}%, | D: loss={:.3f}, acc={:5.2f}%, Diff loss={:.3f} |'.format(
res['loss_tot'], res['loss_t'], res['acc_t'], res['loss_a'], res['acc_d'], res['loss_d']), end='')
########################################################################################################################
def get_model(model):
return deepcopy(model.state_dict())
########################################################################################################################
def compute_conv_output_size(Lin,kernel_size,stride=1,padding=0,dilation=1):
return int(np.floor((Lin+2*padding-dilation*(kernel_size-1)-1)/float(stride)+1))
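# Illustrative example (not in the original source): a 32x32 feature map through a 3x3
# convolution with stride 1 and padding 1 keeps its spatial size:
# compute_conv_output_size(32, kernel_size=3, stride=1, padding=1) -> 32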
########################################################################################################################
def save_print_log(taskcla, acc, lss, output_path):
print('*'*100)
print('Accuracies =')
for i in range(acc.shape[0]):
print('\t',end=',')
for j in range(acc.shape[1]):
print('{:5.4f}% '.format(acc[i,j]),end=',')
print()
print ('ACC: {:5.4f}%'.format((np.mean(acc[acc.shape[0]-1,:]))))
print()
print ('BWD Transfer = ')
print ()
print ("Diagonal R_ii")
for i in range(acc.shape[0]):
print('\t',end='')
print('{:5.2f}% '.format(np.diag(acc)[i]), end=',')
print()
print ("Last row")
for i in range(acc.shape[0]):
print('\t', end=',')
print('{:5.2f}% '.format(acc[-1][i]), end=',')
print()
# BWT calculated based on GEM paper (https://arxiv.org/abs/1706.08840)
gem_bwt = sum(acc[-1]-np.diag(acc))/ (len(acc[-1])-1)
# BWT calculated based on our UCB paper (https://openreview.net/pdf?id=HklUCCVKDB)
ucb_bwt = (acc[-1] - np.diag(acc)).mean()
print ('BWT: {:5.2f}%'.format(gem_bwt))
# print ('BWT (UCB paper): {:5.2f}%'.format(ucb_bwt))
print('*'*100)
print('Done!')
logs = {}
# save results
logs['name'] = output_path
logs['taskcla'] = taskcla
logs['acc'] = acc
logs['loss'] = lss
logs['gem_bwt'] = gem_bwt
logs['ucb_bwt'] = ucb_bwt
logs['rii'] = np.diag(acc)
logs['rij'] = acc[-1]
# pickle
with open(os.path.join(output_path, 'logs.p'), 'wb') as output:
pickle.dump(logs, output)
print ("Log file saved in ", os.path.join(output_path, 'logs.p'))
def print_log_acc_bwt(taskcla, acc, lss, output_path, run_id):
print('*'*100)
print('Accuracies =')
for i in range(acc.shape[0]):
print('\t',end=',')
for j in range(acc.shape[1]):
print('{:5.4f}% '.format(acc[i,j]),end=',')
print()
avg_acc = np.mean(acc[acc.shape[0]-1,:])
print ('ACC: {:5.4f}%'.format(avg_acc))
print()
print()
# BWT calculated based on GEM paper (https://arxiv.org/abs/1706.08840)
gem_bwt = sum(acc[-1]-np.diag(acc))/ (len(acc[-1])-1)
# BWT calculated based on UCB paper (https://arxiv.org/abs/1906.02425)
ucb_bwt = (acc[-1] - np.diag(acc)).mean()
print ('BWT: {:5.2f}%'.format(gem_bwt))
# print ('BWT (UCB paper): {:5.2f}%'.format(ucb_bwt))
print('*'*100)
print('Done!')
logs = {}
# save results
logs['name'] = output_path
logs['taskcla'] = taskcla
logs['acc'] = acc
logs['loss'] = lss
logs['gem_bwt'] = gem_bwt
logs['ucb_bwt'] = ucb_bwt
logs['rii'] = np.diag(acc)
logs['rij'] = acc[-1]
# pickle
path = os.path.join(output_path, 'logs_run_id_{}.p'.format(run_id))
with open(path, 'wb') as output:
pickle.dump(logs, output)
print ("Log file saved in ", path)
return avg_acc, gem_bwt
def print_running_acc_bwt(acc, task_num):
print()
acc = acc[:task_num+1,:task_num+1]
avg_acc = np.mean(acc[acc.shape[0] - 1, :])
gem_bwt = sum(acc[-1] - np.diag(acc)) / (len(acc[-1]) - 1)
print('ACC: {:5.4f}% || BWT: {:5.2f}% '.format(avg_acc, gem_bwt))
print()
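# Illustrative example (not in the original source): for a 2-task accuracy matrix where
# acc[i, j] is the accuracy on task j after training on task i,
#   acc = [[90,  0],
#          [85, 92]]
# the GEM-style BWT is ((85 - 90) + (92 - 92)) / (2 - 1) = -5, i.e. learning the second
# task cost 5 points of accuracy on the first one.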
def make_directories(args):
uid = uuid.uuid4().hex
if args.checkpoint is None:
        if not os.path.isdir('checkpoints'):
            os.mkdir('checkpoints')
args.checkpoint = os.path.join('./checkpoints/',uid)
os.mkdir(args.checkpoint)
else:
if not os.path.exists(args.checkpoint):
os.mkdir(args.checkpoint)
args.checkpoint = os.path.join(args.checkpoint, uid)
os.mkdir(args.checkpoint)
def some_sanity_checks(args):
# Making sure the chosen experiment matches with the number of tasks performed in the paper:
datasets_tasks = {}
datasets_tasks['mnist5']=[5]
datasets_tasks['pmnist']=[10,20,30,40]
datasets_tasks['cifar100']=[20]
datasets_tasks['miniimagenet']=[20]
datasets_tasks['multidatasets']=[5]
if not args.ntasks in datasets_tasks[args.experiment]:
raise Exception("Chosen number of tasks ({}) does not match with {} experiment".format(args.ntasks,args.experiment))
    # Making sure the memory flags are consistent:
    if args.use_memory == 'yes' and not args.samples > 0:
        raise Exception("Flags required to use memory: --use_memory yes --samples n where n>0")
    if args.use_memory == 'no' and args.samples > 0:
        raise Exception("Flags required to train without memory: --use_memory no --samples 0")
def save_code(args):
cwd = os.getcwd()
des = os.path.join(args.checkpoint, 'code') + '/'
if not os.path.exists(des):
os.mkdir(des)
def get_folder(folder):
return os.path.join(cwd,folder)
folders = [get_folder(item) for item in ['dataloaders', 'networks', 'configs', 'main.py', 'acl.py', 'utils.py']]
for folder in folders:
call('cp -rf {} {}'.format(folder, des),shell=True)
def print_time():
from datetime import datetime
# datetime object containing current date and time
now = datetime.now()
# dd/mm/YY H:M:S
dt_string = now.strftime("%d/%m/%Y %H:%M:%S")
print("Job finished at =", dt_string)
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import os,argparse,time
import numpy as np
from omegaconf import OmegaConf
import torch
import torch.backends.cudnn as cudnn
import torch.optim
import torch.utils.data
import torch.utils.data.distributed
import utils
tstart=time.time()
# Arguments
parser = argparse.ArgumentParser(description='Adversarial Continual Learning...')
# Load the config file
parser.add_argument('--config', type=str, default='./configs/config_mnist5.yml')
flags = parser.parse_args()
args = OmegaConf.load(flags.config)
print()
########################################################################################################################
# Args -- Experiment
if args.experiment=='pmnist':
from dataloaders import pmnist as datagenerator
elif args.experiment=='mnist5':
from dataloaders import mnist5 as datagenerator
elif args.experiment=='cifar100':
from dataloaders import cifar100 as datagenerator
elif args.experiment=='miniimagenet':
from dataloaders import miniimagenet as datagenerator
elif args.experiment=='multidatasets':
from dataloaders import mulitidatasets as datagenerator
else:
raise NotImplementedError
from acl import ACL as approach
# Args -- Network
if args.experiment == 'mnist5' or args.experiment == 'pmnist':
from networks import mlp_acl as network
elif args.experiment == 'cifar100' or args.experiment == 'miniimagenet' or args.experiment == 'multidatasets':
from networks import alexnet_acl as network
else:
raise NotImplementedError
########################################################################################################################
def run(args, run_id):
np.random.seed(args.seed)
torch.manual_seed(args.seed)
if torch.cuda.is_available():
torch.cuda.manual_seed(args.seed)
# Faster run but not deterministic:
# torch.backends.cudnn.benchmark = True
# To get deterministic results that match with paper at cost of lower speed:
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
# Data loader
print('Instantiate data generators and model...')
dataloader = datagenerator.DatasetGen(args)
args.taskcla, args.inputsize = dataloader.taskcla, dataloader.inputsize
if args.experiment == 'multidatasets': args.lrs = dataloader.lrs
# Model
net = network.Net(args)
net = net.to(args.device)
net.print_model_size()
# print (net)
# Approach
appr=approach(net,args,network=network)
# Loop tasks
acc=np.zeros((len(args.taskcla),len(args.taskcla)),dtype=np.float32)
lss=np.zeros((len(args.taskcla),len(args.taskcla)),dtype=np.float32)
for t,ncla in args.taskcla:
print('*'*250)
dataset = dataloader.get(t)
print(' '*105, 'Dataset {:2d} ({:s})'.format(t+1,dataset[t]['name']))
print('*'*250)
# Train
appr.train(t,dataset[t])
print('-'*250)
print()
for u in range(t+1):
# Load previous model and replace the shared module with the current one
test_model = appr.load_model(u)
test_res = appr.test(dataset[u]['test'], u, model=test_model)
print('>>> Test on task {:2d} - {:15s}: loss={:.3f}, acc={:5.1f}% <<<'.format(u, dataset[u]['name'],
test_res['loss_t'],
test_res['acc_t']))
acc[t, u] = test_res['acc_t']
lss[t, u] = test_res['loss_t']
# Save
print()
print('Saved accuracies at '+os.path.join(args.checkpoint,args.output))
np.savetxt(os.path.join(args.checkpoint,args.output),acc,'%.6f')
# Extract embeddings to plot in tensorboard for miniimagenet
if args.tsne == 'yes' and args.experiment == 'miniimagenet':
appr.get_tsne_embeddings_first_ten_tasks(dataset, model=appr.load_model(t))
appr.get_tsne_embeddings_last_three_tasks(dataset, model=appr.load_model(t))
avg_acc, gem_bwt = utils.print_log_acc_bwt(args.taskcla, acc, lss, output_path=args.checkpoint, run_id=run_id)
return avg_acc, gem_bwt
#######################################################################################################################
def main(args):
utils.make_directories(args)
utils.some_sanity_checks(args)
utils.save_code(args)
print('=' * 100)
print('Arguments =')
for arg in vars(args):
print('\t' + arg + ':', getattr(args, arg))
print('=' * 100)
accuracies, forgetting = [], []
for n in range(args.num_runs):
args.seed = n
args.output = '{}_{}_tasks_seed_{}.txt'.format(args.experiment, args.ntasks, args.seed)
print ("args.output: ", args.output)
print (" >>>> Run #", n)
acc, bwt = run(args, n)
accuracies.append(acc)
forgetting.append(bwt)
print('*' * 100)
print ("Average over {} runs: ".format(args.num_runs))
print ('AVG ACC: {:5.4f}% \pm {:5.4f}'.format(np.array(accuracies).mean(), np.array(accuracies).std()))
print ('AVG BWT: {:5.2f}% \pm {:5.4f}'.format(np.array(forgetting).mean(), np.array(forgetting).std()))
print ("All Done! ")
print('[Elapsed time = {:.1f} min]'.format((time.time()-tstart)/(60)))
utils.print_time()
#######################################################################################################################
if __name__ == '__main__':
main(args)
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import print_function
from PIL import Image
import os
import os.path
import sys
if sys.version_info[0] == 2:
import cPickle as pickle
else:
import pickle
import torch.utils.data as data
import numpy as np
import torch
from torchvision import datasets, transforms
from utils import *
class iCIFAR10(datasets.CIFAR10):
def __init__(self, root, classes, memory_classes, memory, task_num, train, transform=None, target_transform=None, download=True):
super(iCIFAR10, self).__init__(root, transform=transform,
target_transform=target_transform, download=True)
self.train = train # training set or test set
if not isinstance(classes, list):
classes = [classes]
self.class_mapping = {c: i for i, c in enumerate(classes)}
self.class_indices = {}
for cls in classes:
self.class_indices[self.class_mapping[cls]] = []
if self.train:
train_data = []
train_labels = []
train_tt = [] # task module labels
            train_td = []  # discriminator labels
for i in range(len(self.data)):
if self.targets[i] in classes:
train_data.append(self.data[i])
train_labels.append(self.class_mapping[self.targets[i]])
train_tt.append(task_num)
train_td.append(task_num+1)
self.class_indices[self.class_mapping[self.targets[i]]].append(i)
if memory_classes:
for task_id in range(task_num):
for i in range(len(memory[task_id]['x'])):
if memory[task_id]['y'][i] in range(len(memory_classes[task_id])):
train_data.append(memory[task_id]['x'][i])
train_labels.append(memory[task_id]['y'][i])
train_tt.append(memory[task_id]['tt'][i])
train_td.append(memory[task_id]['td'][i])
self.train_data = np.array(train_data)
self.train_labels = train_labels
self.train_tt = train_tt
self.train_td = train_td
if not self.train:
f = self.test_list[0][0]
file = os.path.join(self.root, self.base_folder, f)
fo = open(file, 'rb')
if sys.version_info[0] == 2:
entry = pickle.load(fo)
else:
entry = pickle.load(fo, encoding='latin1')
self.test_data = entry['data']
if 'labels' in entry:
self.test_labels = entry['labels']
else:
self.test_labels = entry['fine_labels']
fo.close()
self.test_data = self.test_data.reshape((10000, 3, 32, 32))
self.test_data = self.test_data.transpose((0, 2, 3, 1)) # convert to HWC
test_data = []
test_labels = []
test_tt = [] # task module labels
            test_td = []  # discriminator labels
for i in range(len(self.test_data)):
if self.test_labels[i] in classes:
test_data.append(self.test_data[i])
test_labels.append(self.class_mapping[self.test_labels[i]])
test_tt.append(task_num)
test_td.append(task_num + 1)
self.class_indices[self.class_mapping[self.test_labels[i]]].append(i)
self.test_data = np.array(test_data)
self.test_labels = test_labels
self.test_tt = test_tt
self.test_td = test_td
def __getitem__(self, index):
if self.train:
img, target, tt, td = self.train_data[index], self.train_labels[index], self.train_tt[index], self.train_td[index]
else:
img, target, tt, td = self.test_data[index], self.test_labels[index], self.test_tt[index], self.test_td[index]
# doing this so that it is consistent with all other datasets
# to return a PIL Image
try:
img = Image.fromarray(img)
except:
pass
try:
if self.transform is not None:
img = self.transform(img)
except:
pass
try:
if self.target_transform is not None:
target = self.target_transform(target)
except:
pass
return img, target, tt, td
def __len__(self):
if self.train:
return len(self.train_data)
else:
return len(self.test_data)
class iCIFAR100(iCIFAR10):
"""`CIFAR100 <https://www.cs.toronto.edu/~kriz/cifar.html>`_ Dataset.
This is a subclass of the `CIFAR10` Dataset.
"""
base_folder = 'cifar-100-python'
url = "https://www.cs.toronto.edu/~kriz/cifar-100-python.tar.gz"
filename = "cifar-100-python.tar.gz"
tgz_md5 = 'eb9058c3a382ffc7106e4002c42a8d85'
train_list = [
['train', '16019d7e3df5f24257cddd939b257f8d'],
]
test_list = [
['test', 'f0ef6b0ae62326f3e7ffdfab6717acfc'],
]
meta = {
'filename': 'meta',
'key': 'fine_label_names',
'md5': '7973b15100ade9c7d40fb424638fde48',
}
class DatasetGen(object):
"""docstring for DatasetGen"""
def __init__(self, args):
super(DatasetGen, self).__init__()
self.seed = args.seed
self.batch_size=args.batch_size
self.pc_valid=args.pc_valid
self.root = args.data_dir
self.latent_dim = args.latent_dim
self.num_tasks = args.ntasks
self.num_classes = 100
self.num_samples = args.samples
self.inputsize = [3,32,32]
mean=[x/255 for x in [125.3,123.0,113.9]]
std=[x/255 for x in [63.0,62.1,66.7]]
self.transformation = transforms.Compose([transforms.ToTensor(), transforms.Normalize(mean, std)])
self.taskcla = [[t, int(self.num_classes/self.num_tasks)] for t in range(self.num_tasks)]
self.indices = {}
self.dataloaders = {}
self.idx={}
self.num_workers = args.workers
self.pin_memory = True
np.random.seed(self.seed)
task_ids = np.split(np.random.permutation(self.num_classes),self.num_tasks)
self.task_ids = [list(arr) for arr in task_ids]
self.train_set = {}
self.test_set = {}
self.train_split = {}
self.task_memory = {}
for i in range(self.num_tasks):
self.task_memory[i] = {}
self.task_memory[i]['x'] = []
self.task_memory[i]['y'] = []
self.task_memory[i]['tt'] = []
self.task_memory[i]['td'] = []
self.use_memory = args.use_memory
def get(self, task_id):
self.dataloaders[task_id] = {}
sys.stdout.flush()
if task_id == 0:
memory_classes = None
memory=None
else:
memory_classes = self.task_ids
memory = self.task_memory
self.train_set[task_id] = iCIFAR100(root=self.root, classes=self.task_ids[task_id], memory_classes=memory_classes,
memory=memory, task_num=task_id, train=True, download=True, transform=self.transformation)
self.test_set[task_id] = iCIFAR100(root=self.root, classes=self.task_ids[task_id], memory_classes=None,
memory=None, task_num=task_id, train=False,
download=True, transform=self.transformation)
split = int(np.floor(self.pc_valid * len(self.train_set[task_id])))
train_split, valid_split = torch.utils.data.random_split(self.train_set[task_id], [len(self.train_set[task_id]) - split, split])
self.train_split[task_id] = train_split
train_loader = torch.utils.data.DataLoader(train_split, batch_size=self.batch_size, num_workers=self.num_workers,
pin_memory=self.pin_memory,shuffle=True)
valid_loader = torch.utils.data.DataLoader(valid_split, batch_size=int(self.batch_size * self.pc_valid),
num_workers=self.num_workers, pin_memory=self.pin_memory,shuffle=True)
test_loader = torch.utils.data.DataLoader(self.test_set[task_id], batch_size=self.batch_size, num_workers=self.num_workers,
pin_memory=self.pin_memory,shuffle=True)
self.dataloaders[task_id]['train'] = train_loader
self.dataloaders[task_id]['valid'] = valid_loader
self.dataloaders[task_id]['test'] = test_loader
self.dataloaders[task_id]['name'] = 'CIFAR100-{}-{}'.format(task_id,self.task_ids[task_id])
print ("Training set size: {} images of {}x{}".format(len(train_loader.dataset),self.inputsize[1],self.inputsize[1]))
print ("Validation set size: {} images of {}x{}".format(len(valid_loader.dataset),self.inputsize[1],self.inputsize[1]))
print ("Train+Val set size: {} images of {}x{}".format(len(valid_loader.dataset)+len(train_loader.dataset),self.inputsize[1],self.inputsize[1]))
print ("Test set size: {} images of {}x{}".format(len(test_loader.dataset),self.inputsize[1],self.inputsize[1]))
if self.use_memory == 'yes' and self.num_samples > 0 :
self.update_memory(task_id)
return self.dataloaders
def update_memory(self, task_id):
num_samples_per_class = self.num_samples // len(self.task_ids[task_id])
mem_class_mapping = {i: i for i, c in enumerate(self.task_ids[task_id])}
# Looping over each class in the current task
for i in range(len(self.task_ids[task_id])):
# Getting all samples for this class
data_loader = torch.utils.data.DataLoader(self.train_split[task_id], batch_size=1,
num_workers=self.num_workers,
pin_memory=self.pin_memory)
# Randomly choosing num_samples_per_class for this class
randind = torch.randperm(len(data_loader.dataset))[:num_samples_per_class]
# Adding the selected samples to memory
for ind in randind:
self.task_memory[task_id]['x'].append(data_loader.dataset[ind][0])
self.task_memory[task_id]['y'].append(mem_class_mapping[i])
self.task_memory[task_id]['tt'].append(data_loader.dataset[ind][2])
self.task_memory[task_id]['td'].append(data_loader.dataset[ind][3])
print ('Memory updated by adding {} images'.format(len(self.task_memory[task_id]['x'])))
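        # Illustrative note (not in the original source): with a hypothetical --samples 25 and
        # the 5 classes per task used when CIFAR-100 is split into 20 tasks, num_samples_per_class
        # above is 25 // 5 = 5, so 25 images in total are stored for this task and replayed
        # alongside the data of every later task.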
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import print_function
import sys
if sys.version_info[0] == 2:
import cPickle as pickle
else:
import pickle
import torch.utils.data as data
import torch.utils.data
from .datasets_utils import *
from utils import *
from torchvision import transforms
mean_datasets = {
'CIFAR10': [x/255 for x in [125.3,123.0,113.9]],
'notMNIST': (0.4254,),
'MNIST': (0.1,) ,
'SVHN':[0.4377,0.4438,0.4728] ,
'FashionMNIST': (0.2190,),
}
std_datasets = {
'CIFAR10': [x/255 for x in [63.0,62.1,66.7]],
'notMNIST': (0.4501,),
'MNIST': (0.2752,),
'SVHN': [0.198,0.201,0.197],
'FashionMNIST': (0.3318,)
}
classes_datasets = {
'CIFAR10': 10,
'notMNIST': 10,
'MNIST': 10,
'SVHN': 10,
'FashionMNIST': 10,
}
lr_datasets = {
'CIFAR10': 0.001,
'notMNIST': 0.01,
'MNIST': 0.01,
'SVHN': 0.001,
'FashionMNIST': 0.01,
}
gray_datasets = {
'CIFAR10': False,
'notMNIST': True,
'MNIST': True,
'SVHN': False,
'FashionMNIST': True,
}
class DatasetGen(object):
"""docstring for DatasetGen"""
def __init__(self, args):
super(DatasetGen, self).__init__()
self.seed = args.seed
self.batch_size=args.batch_size
self.pc_valid=args.pc_valid
self.root = args.data_dir
self.latent_dim = args.latent_dim
self.num_tasks = args.ntasks
self.num_samples = args.samples
self.inputsize = [3,32,32]
self.indices = {}
self.dataloaders = {}
self.idx={}
self.num_workers = args.workers
self.pin_memory = True
np.random.seed(self.seed)
self.datasets_idx = list(np.random.permutation(self.num_tasks))
print('Task order =', [list(classes_datasets.keys())[item] for item in self.datasets_idx])
self.datasets_names = [list(classes_datasets.keys())[item] for item in self.datasets_idx]
self.taskcla = []
self.lrs = []
for i in range(self.num_tasks):
t = self.datasets_idx[i]
self.taskcla.append([i, list(classes_datasets.values())[t]])
self.lrs.append([i, list(lr_datasets.values())[t]])
print('Learning Rates =', self.lrs)
print('taskcla =', self.taskcla)
self.train_set = {}
self.train_split = {}
self.test_set = {}
self.args=args
self.dataloaders, self.memory_set = {}, {}
self.memoryloaders = {}
self.dataloaders, self.memory_set, self.indices = {}, {}, {}
self.memoryloaders = {}
self.saliency_loaders, self.saliency_set = {}, {}
for i in range(self.num_tasks):
self.dataloaders[i] = {}
self.memory_set[i] = {}
self.memoryloaders[i] = {}
self.indices[i] = {}
# self.saliency_set = {}
self.saliency_loaders[i] = {}
self.download = True
self.train_set = {}
self.test_set = {}
self.train_split = {}
self.task_memory = {}
for i in range(self.num_tasks):
self.task_memory[i] = {}
self.task_memory[i]['x'] = []
self.task_memory[i]['y'] = []
self.task_memory[i]['tt'] = []
self.task_memory[i]['td'] = []
self.use_memory = args.use_memory
def get_dataset(self, dataset_idx, task_num, num_samples_per_class=False, normalize=True):
dataset_name = list(mean_datasets.keys())[dataset_idx]
nspc = num_samples_per_class
if normalize:
transformation = transforms.Compose([transforms.ToTensor(),
transforms.Normalize(mean_datasets[dataset_name],std_datasets[dataset_name])])
mnist_transformation = transforms.Compose([
transforms.Pad(padding=2, fill=0),
transforms.ToTensor(),
transforms.Normalize(mean_datasets[dataset_name], std_datasets[dataset_name])])
else:
transformation = transforms.Compose([transforms.ToTensor()])
mnist_transformation = transforms.Compose([
transforms.Pad(padding=2, fill=0),
transforms.ToTensor(),
])
# target_transormation = transforms.Compose([transforms.ToTensor()])
target_transormation = None
if dataset_idx == 0:
trainset = CIFAR10_(root=self.root, task_num=task_num, num_samples_per_class=nspc, train=True, download=self.download, target_transform = target_transormation, transform=transformation)
testset = CIFAR10_(root=self.root, task_num=task_num, num_samples_per_class=nspc, train=False, download=self.download, target_transform = target_transormation, transform=transformation)
if dataset_idx == 1:
trainset = notMNIST_(root=self.root, task_num=task_num, num_samples_per_class=nspc, train=True, download=self.download, target_transform = target_transormation, transform=mnist_transformation)
testset = notMNIST_(root=self.root, task_num=task_num, num_samples_per_class=nspc, train=False, download=self.download, target_transform = target_transormation, transform=mnist_transformation)
if dataset_idx == 2:
trainset = MNIST_RGB(root=self.root, train=True, num_samples_per_class=nspc, task_num=task_num, download=self.download, target_transform = target_transormation, transform=mnist_transformation)
testset = MNIST_RGB(root=self.root, train=False, num_samples_per_class=nspc, task_num=task_num, download=self.download, target_transform = target_transormation, transform=mnist_transformation)
if dataset_idx == 3:
trainset = SVHN_(root=self.root, train=True, num_samples_per_class=nspc, task_num=task_num, download=self.download, target_transform = target_transormation, transform=transformation)
testset = SVHN_(root=self.root, train=False, num_samples_per_class=nspc, task_num=task_num, download=self.download, target_transform = target_transormation, transform=transformation)
if dataset_idx == 4:
trainset = FashionMNIST_(root=self.root, num_samples_per_class=nspc, task_num=task_num, train=True, download=self.download, target_transform = target_transormation, transform=mnist_transformation)
testset = FashionMNIST_(root=self.root, num_samples_per_class=nspc, task_num=task_num, train=False, download=self.download, target_transform = target_transormation, transform=mnist_transformation)
return trainset, testset
def get(self, task_id):
self.dataloaders[task_id] = {}
sys.stdout.flush()
current_dataset_idx = self.datasets_idx[task_id]
dataset_name = list(mean_datasets.keys())[current_dataset_idx]
self.train_set[task_id], self.test_set[task_id] = self.get_dataset(current_dataset_idx,task_id)
self.num_classes = classes_datasets[dataset_name]
split = int(np.floor(self.pc_valid * len(self.train_set[task_id])))
train_split, valid_split = torch.utils.data.random_split(self.train_set[task_id], [len(self.train_set[task_id]) - split, split])
self.train_split[task_id] = train_split
train_loader = torch.utils.data.DataLoader(train_split, batch_size=self.batch_size, num_workers=self.num_workers,
pin_memory=self.pin_memory,shuffle=True)
valid_loader = torch.utils.data.DataLoader(valid_split, batch_size=int(self.batch_size * self.pc_valid),
num_workers=self.num_workers, pin_memory=self.pin_memory,shuffle=True)
test_loader = torch.utils.data.DataLoader(self.test_set[task_id], batch_size=self.batch_size, num_workers=self.num_workers,
pin_memory=self.pin_memory,shuffle=True)
self.dataloaders[task_id]['train'] = train_loader
self.dataloaders[task_id]['valid'] = valid_loader
self.dataloaders[task_id]['test'] = test_loader
self.dataloaders[task_id]['name'] = '{} - {} classes - {} images'.format(dataset_name,
classes_datasets[dataset_name],
len(self.train_set[task_id]))
self.dataloaders[task_id]['classes'] = self.num_classes
print ("Training set size: {} images of {}x{}".format(len(train_loader.dataset),self.inputsize[1],self.inputsize[1]))
print ("Validation set size: {} images of {}x{}".format(len(valid_loader.dataset),self.inputsize[1],self.inputsize[1]))
print ("Train+Val set size: {} images of {}x{}".format(len(valid_loader.dataset)+len(train_loader.dataset),self.inputsize[1],self.inputsize[1]))
print ("Test set size: {} images of {}x{}".format(len(test_loader.dataset),self.inputsize[1],self.inputsize[1]))
if self.use_memory == 'yes' and self.num_samples > 0 :
self.update_memory(task_id)
return self.dataloaders
def update_memory(self, task_id):
num_samples_per_class = self.num_samples // len(self.task_ids[task_id])
mem_class_mapping = {i: i for i, c in enumerate(self.task_ids[task_id])}
# Looping over each class in the current task
for i in range(len(self.task_ids[task_id])):
# Getting all samples for this class
data_loader = torch.utils.data.DataLoader(self.train_split[task_id], batch_size=1,
num_workers=self.num_workers,
pin_memory=self.pin_memory)
# Randomly choosing num_samples_per_class for this class
randind = torch.randperm(len(data_loader.dataset))[:num_samples_per_class]
# Adding the selected samples to memory
for ind in randind:
self.task_memory[task_id]['x'].append(data_loader.dataset[ind][0])
self.task_memory[task_id]['y'].append(mem_class_mapping[i])
self.task_memory[task_id]['tt'].append(data_loader.dataset[ind][2])
self.task_memory[task_id]['td'].append(data_loader.dataset[ind][3])
print('Memory updated by adding {} images'.format(len(self.task_memory[task_id]['x'])))
def report_size(self,dataset_name,task_id):
print("Dataset {} size: {} ".format(dataset_name, len(self.train_set[task_id])))
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
# https://github.com/pytorch/vision/blob/8635be94d1216f10fb8302da89233bd86445e449/torchvision/datasets/utils.py
import os
import os.path
import hashlib
import gzip
import errno
import tarfile
import zipfile
import numpy as np
import torch
import codecs
from torch.utils.model_zoo import tqdm
def gen_bar_updater():
pbar = tqdm(total=None)
def bar_update(count, block_size, total_size):
if pbar.total is None and total_size:
pbar.total = total_size
progress_bytes = count * block_size
pbar.update(progress_bytes - pbar.n)
return bar_update
def calculate_md5(fpath, chunk_size=1024 * 1024):
md5 = hashlib.md5()
with open(fpath, 'rb') as f:
for chunk in iter(lambda: f.read(chunk_size), b''):
md5.update(chunk)
return md5.hexdigest()
def check_md5(fpath, md5, **kwargs):
return md5 == calculate_md5(fpath, **kwargs)
def check_integrity(fpath, md5=None):
if not os.path.isfile(fpath):
return False
if md5 is None:
return True
return check_md5(fpath, md5)
def makedir_exist_ok(dirpath):
"""
Python2 support for os.makedirs(.., exist_ok=True)
"""
try:
os.makedirs(dirpath)
except OSError as e:
if e.errno == errno.EEXIST:
pass
else:
raise
def download_url(url, root, filename=None, md5=None):
"""Download a file from a url and place it in root.
Args:
url (str): URL to download file from
root (str): Directory to place downloaded file in
filename (str, optional): Name to save the file under. If None, use the basename of the URL
md5 (str, optional): MD5 checksum of the download. If None, do not check
"""
from six.moves import urllib
root = os.path.expanduser(root)
if not filename:
filename = os.path.basename(url)
fpath = os.path.join(root, filename)
makedir_exist_ok(root)
# downloads file
if check_integrity(fpath, md5):
print('Using downloaded and verified file: ' + fpath)
else:
try:
print('Downloading ' + url + ' to ' + fpath)
urllib.request.urlretrieve(
url, fpath,
reporthook=gen_bar_updater()
)
except (urllib.error.URLError, IOError) as e:
if url[:5] == 'https':
url = url.replace('https:', 'http:')
print('Failed download. Trying https -> http instead.'
' Downloading ' + url + ' to ' + fpath)
urllib.request.urlretrieve(
url, fpath,
reporthook=gen_bar_updater()
)
else:
raise e
def list_dir(root, prefix=False):
"""List all directories at a given root
Args:
root (str): Path to directory whose folders need to be listed
prefix (bool, optional): If true, prepends the path to each result, otherwise
only returns the name of the directories found
"""
root = os.path.expanduser(root)
directories = list(
filter(
lambda p: os.path.isdir(os.path.join(root, p)),
os.listdir(root)
)
)
if prefix is True:
directories = [os.path.join(root, d) for d in directories]
return directories
def list_files(root, suffix, prefix=False):
"""List all files ending with a suffix at a given root
Args:
root (str): Path to directory whose folders need to be listed
suffix (str or tuple): Suffix of the files to match, e.g. '.png' or ('.jpg', '.png').
It uses the Python "str.endswith" method and is passed directly
prefix (bool, optional): If true, prepends the path to each result, otherwise
only returns the name of the files found
"""
root = os.path.expanduser(root)
files = list(
filter(
lambda p: os.path.isfile(os.path.join(root, p)) and p.endswith(suffix),
os.listdir(root)
)
)
if prefix is True:
files = [os.path.join(root, d) for d in files]
return files
def download_file_from_google_drive(file_id, root, filename=None, md5=None):
"""Download a Google Drive file from and place it in root.
Args:
file_id (str): id of file to be downloaded
root (str): Directory to place downloaded file in
filename (str, optional): Name to save the file under. If None, use the id of the file.
md5 (str, optional): MD5 checksum of the download. If None, do not check
"""
# Based on https://stackoverflow.com/questions/38511444/python-download-files-from-google-drive-using-url
import requests
url = "https://docs.google.com/uc?export=download"
root = os.path.expanduser(root)
if not filename:
filename = file_id
fpath = os.path.join(root, filename)
makedir_exist_ok(root)
if os.path.isfile(fpath) and check_integrity(fpath, md5):
print('Using downloaded and verified file: ' + fpath)
else:
session = requests.Session()
response = session.get(url, params={'id': file_id}, stream=True)
token = _get_confirm_token(response)
if token:
params = {'id': file_id, 'confirm': token}
response = session.get(url, params=params, stream=True)
_save_response_content(response, fpath)
def _get_confirm_token(response):
for key, value in response.cookies.items():
if key.startswith('download_warning'):
return value
return None
def _save_response_content(response, destination, chunk_size=32768):
with open(destination, "wb") as f:
pbar = tqdm(total=None)
progress = 0
for chunk in response.iter_content(chunk_size):
if chunk: # filter out keep-alive new chunks
f.write(chunk)
progress += len(chunk)
pbar.update(progress - pbar.n)
pbar.close()
def _is_tar(filename):
return filename.endswith(".tar")
def _is_targz(filename):
return filename.endswith(".tar.gz")
def _is_gzip(filename):
return filename.endswith(".gz") and not filename.endswith(".tar.gz")
def _is_zip(filename):
return filename.endswith(".zip")
def extract_archive(from_path, to_path=None, remove_finished=False):
if to_path is None:
to_path = os.path.dirname(from_path)
if _is_tar(from_path):
with tarfile.open(from_path, 'r') as tar:
tar.extractall(path=to_path)
elif _is_targz(from_path):
with tarfile.open(from_path, 'r:gz') as tar:
tar.extractall(path=to_path)
elif _is_gzip(from_path):
to_path = os.path.join(to_path, os.path.splitext(os.path.basename(from_path))[0])
with open(to_path, "wb") as out_f, gzip.GzipFile(from_path) as zip_f:
out_f.write(zip_f.read())
elif _is_zip(from_path):
with zipfile.ZipFile(from_path, 'r') as z:
z.extractall(to_path)
else:
raise ValueError("Extraction of {} not supported".format(from_path))
if remove_finished:
os.remove(from_path)
def download_and_extract_archive(url, download_root, extract_root=None, filename=None,
md5=None, remove_finished=False):
download_root = os.path.expanduser(download_root)
if extract_root is None:
extract_root = download_root
if not filename:
filename = os.path.basename(url)
download_url(url, download_root, filename, md5)
archive = os.path.join(download_root, filename)
print("Extracting {} to {}".format(archive, extract_root))
extract_archive(archive, extract_root, remove_finished)
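# --- Hedged usage sketch (not part of the original torchvision utils) ---
# Illustrates the intended flow of the helpers above: download an archive into
# a root folder, optionally verify its MD5, and unpack it in place. The URL,
# filename and checksum below are placeholders, not a real dataset location.
def _example_fetch_dataset(root='~/data'):
    url = 'https://example.com/toy-dataset.tar.gz'  # placeholder URL
    download_and_extract_archive(url, download_root=os.path.expanduser(root),
                                 filename='toy-dataset.tar.gz', md5=None)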
def iterable_to_str(iterable):
return "'" + "', '".join([str(item) for item in iterable]) + "'"
def verify_str_arg(value, arg=None, valid_values=None, custom_msg=None):
if not isinstance(value, torch._six.string_classes):
if arg is None:
msg = "Expected type str, but got type {type}."
else:
msg = "Expected type str for argument {arg}, but got type {type}."
msg = msg.format(type=type(value), arg=arg)
raise ValueError(msg)
if valid_values is None:
return value
if value not in valid_values:
if custom_msg is not None:
msg = custom_msg
else:
msg = ("Unknown value '{value}' for argument {arg}. "
"Valid values are {{{valid_values}}}.")
msg = msg.format(value=value, arg=arg,
valid_values=iterable_to_str(valid_values))
raise ValueError(msg)
return value
def get_int(b):
return int(codecs.encode(b, 'hex'), 16)
def open_maybe_compressed_file(path):
"""Return a file object that possibly decompresses 'path' on the fly.
Decompression occurs when argument `path` is a string and ends with '.gz' or '.xz'.
"""
if not isinstance(path, torch._six.string_classes):
return path
if path.endswith('.gz'):
import gzip
return gzip.open(path, 'rb')
if path.endswith('.xz'):
import lzma
return lzma.open(path, 'rb')
return open(path, 'rb')
def read_sn3_pascalvincent_tensor(path, strict=True):
"""Read a SN3 file in "Pascal Vincent" format (Lush file 'libidx/idx-io.lsh').
Argument may be a filename, compressed filename, or file object.
"""
# typemap
if not hasattr(read_sn3_pascalvincent_tensor, 'typemap'):
read_sn3_pascalvincent_tensor.typemap = {
8: (torch.uint8, np.uint8, np.uint8),
9: (torch.int8, np.int8, np.int8),
11: (torch.int16, np.dtype('>i2'), 'i2'),
12: (torch.int32, np.dtype('>i4'), 'i4'),
13: (torch.float32, np.dtype('>f4'), 'f4'),
14: (torch.float64, np.dtype('>f8'), 'f8')}
# read
with open_maybe_compressed_file(path) as f:
data = f.read()
# parse
magic = get_int(data[0:4])
nd = magic % 256
ty = magic // 256
assert nd >= 1 and nd <= 3
assert ty >= 8 and ty <= 14
m = read_sn3_pascalvincent_tensor.typemap[ty]
s = [get_int(data[4 * (i + 1): 4 * (i + 2)]) for i in range(nd)]
parsed = np.frombuffer(data, dtype=m[1], offset=(4 * (nd + 1)))
assert parsed.shape[0] == np.prod(s) or not strict
return torch.from_numpy(parsed.astype(m[2], copy=False)).view(*s)
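# --- Hedged illustration (not part of the original torchvision utils) ---
# A minimal sketch of the idx/SN3 layout parsed above: a 4-byte big-endian
# magic equal to type*256 + ndim, one 4-byte big-endian size per dimension,
# followed by the raw data. Here a tiny 1-D uint8 buffer (magic 0x0801) is
# built in memory and round-tripped through read_sn3_pascalvincent_tensor.
def _example_parse_tiny_idx():
    import io
    import struct
    payload = struct.pack('>i', 0x0801) + struct.pack('>i', 3) + bytes([7, 2, 9])
    tensor = read_sn3_pascalvincent_tensor(io.BytesIO(payload), strict=True)
    assert tensor.dtype == torch.uint8 and tensor.tolist() == [7, 2, 9]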
def read_label_file(path):
with open(path, 'rb') as f:
x = read_sn3_pascalvincent_tensor(f, strict=False)
assert(x.dtype == torch.uint8)
assert(x.ndimension() == 1)
return x.long()
def read_image_file(path):
with open(path, 'rb') as f:
x = read_sn3_pascalvincent_tensor(f, strict=False)
assert(x.dtype == torch.uint8)
assert(x.ndimension() == 3)
return x |
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import print_function
from PIL import Image
import os
import os.path
import sys
if sys.version_info[0] == 2:
import cPickle as pickle
else:
import pickle
import torch.utils.data as data
import numpy as np
import torch
from torchvision import transforms
from utils import *
class MiniImageNet(torch.utils.data.Dataset):
def __init__(self, root, train):
super(MiniImageNet, self).__init__()
if train:
self.name='train'
else:
self.name='test'
root = os.path.join(root, 'miniimagenet')
with open(os.path.join(root,'{}.pkl'.format(self.name)), 'rb') as f:
data_dict = pickle.load(f)
self.data = data_dict['images']
self.labels = data_dict['labels']
def __len__(self):
return len(self.data)
def __getitem__(self, i):
img, label = self.data[i], self.labels[i]
return img, label
class iMiniImageNet(MiniImageNet):
def __init__(self, root, classes, memory_classes, memory, task_num, train, transform=None):
super(iMiniImageNet, self).__init__(root=root, train=train)
self.transform = transform
if not isinstance(classes, list):
classes = [classes]
self.class_mapping = {c: i for i, c in enumerate(classes)}
self.class_indices = {}
for cls in classes:
self.class_indices[self.class_mapping[cls]] = []
data = []
labels = []
tt = [] # task module labels
        td = [] # discriminator labels
for i in range(len(self.data)):
if self.labels[i] in classes:
data.append(self.data[i])
labels.append(self.class_mapping[self.labels[i]])
tt.append(task_num)
td.append(task_num+1)
self.class_indices[self.class_mapping[self.labels[i]]].append(i)
if memory_classes:
for task_id in range(task_num):
for i in range(len(memory[task_id]['x'])):
if memory[task_id]['y'][i] in range(len(memory_classes[task_id])):
data.append(memory[task_id]['x'][i])
labels.append(memory[task_id]['y'][i])
tt.append(memory[task_id]['tt'][i])
td.append(memory[task_id]['td'][i])
self.data = np.array(data)
self.labels = labels
self.tt = tt
self.td = td
def __getitem__(self, index):
img, target, tt, td = self.data[index], self.labels[index], self.tt[index], self.td[index]
# doing this so that it is consistent with all other datasets
# to return a PIL Image
if not torch.is_tensor(img):
img = Image.fromarray(img)
img = self.transform(img)
return img, target, tt, td
def __len__(self):
return len(self.data)
class DatasetGen(object):
"""docstring for DatasetGen"""
def __init__(self, args):
super(DatasetGen, self).__init__()
self.seed = args.seed
self.batch_size=args.batch_size
self.pc_valid=args.pc_valid
self.root = args.data_dir
self.latent_dim = args.latent_dim
self.use_memory = args.use_memory
self.num_tasks = args.ntasks
self.num_classes = 100
self.num_samples = args.samples
self.inputsize = [3,84,84]
mean = [0.485, 0.456, 0.406]
std = [0.229, 0.224, 0.225]
self.transformation = transforms.Compose([
transforms.Resize((84,84)),
transforms.ToTensor(),
transforms.Normalize(mean=mean, std=std)])
self.taskcla = [[t, int(self.num_classes/self.num_tasks)] for t in range(self.num_tasks)]
self.indices = {}
self.dataloaders = {}
self.idx={}
self.num_workers = args.workers
self.pin_memory = True
np.random.seed(self.seed)
task_ids = np.split(np.random.permutation(self.num_classes),self.num_tasks)
self.task_ids = [list(arr) for arr in task_ids]
self.train_set = {}
self.train_split = {}
self.test_set = {}
self.task_memory = {}
for i in range(self.num_tasks):
self.task_memory[i] = {}
self.task_memory[i]['x'] = []
self.task_memory[i]['y'] = []
self.task_memory[i]['tt'] = []
self.task_memory[i]['td'] = []
def get(self, task_id):
self.dataloaders[task_id] = {}
sys.stdout.flush()
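        # For the first task there is no replay memory; from the second task on,
        # the samples stored in task_memory are appended to the current training set.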
if task_id == 0:
memory_classes = None
memory=None
else:
memory_classes = self.task_ids
memory = self.task_memory
self.train_set[task_id] = iMiniImageNet(root=self.root, classes=self.task_ids[task_id],
memory_classes=memory_classes, memory=memory,
task_num=task_id, train=True, transform=self.transformation)
self.test_set[task_id] = iMiniImageNet(root=self.root, classes=self.task_ids[task_id], memory_classes=None,
memory=None, task_num=task_id, train=False, transform=self.transformation)
split = int(np.floor(self.pc_valid * len(self.train_set[task_id])))
train_split, valid_split = torch.utils.data.random_split(self.train_set[task_id], [len(self.train_set[task_id]) - split, split])
self.train_split[task_id] = train_split
train_loader = torch.utils.data.DataLoader(train_split, batch_size=self.batch_size, num_workers=self.num_workers,
pin_memory=self.pin_memory,shuffle=True)
valid_loader = torch.utils.data.DataLoader(valid_split, batch_size=int(self.batch_size * self.pc_valid),
num_workers=self.num_workers, pin_memory=self.pin_memory,shuffle=True)
test_loader = torch.utils.data.DataLoader(self.test_set[task_id], batch_size=self.batch_size, num_workers=self.num_workers,
pin_memory=self.pin_memory, shuffle=True)
self.dataloaders[task_id]['train'] = train_loader
self.dataloaders[task_id]['valid'] = valid_loader
self.dataloaders[task_id]['test'] = test_loader
self.dataloaders[task_id]['name'] = 'iMiniImageNet-{}-{}'.format(task_id,self.task_ids[task_id])
self.dataloaders[task_id]['tsne'] = torch.utils.data.DataLoader(self.test_set[task_id],
batch_size=len(test_loader.dataset),
num_workers=self.num_workers,
pin_memory=self.pin_memory, shuffle=True)
print ("Task ID: ", task_id)
print ("Training set size: {} images of {}x{}".format(len(train_loader.dataset),self.inputsize[1],self.inputsize[1]))
print ("Validation set size: {} images of {}x{}".format(len(valid_loader.dataset),self.inputsize[1],self.inputsize[1]))
print ("Train+Val set size: {} images of {}x{}".format(len(valid_loader.dataset)+len(train_loader.dataset),self.inputsize[1],self.inputsize[1]))
print ("Test set size: {} images of {}x{}".format(len(test_loader.dataset),self.inputsize[1],self.inputsize[1]))
if self.use_memory == 'yes' and self.num_samples > 0 :
self.update_memory(task_id)
return self.dataloaders
def update_memory(self, task_id):
num_samples_per_class = self.num_samples // len(self.task_ids[task_id])
mem_class_mapping = {i: i for i, c in enumerate(self.task_ids[task_id])}
for i in range(len(self.task_ids[task_id])):
data_loader = torch.utils.data.DataLoader(self.train_split[task_id], batch_size=1,
num_workers=self.num_workers,
pin_memory=self.pin_memory)
randind = torch.randperm(len(data_loader.dataset))[:num_samples_per_class] # randomly sample some data
for ind in randind:
self.task_memory[task_id]['x'].append(data_loader.dataset[ind][0])
self.task_memory[task_id]['y'].append(mem_class_mapping[i])
self.task_memory[task_id]['tt'].append(data_loader.dataset[ind][2])
self.task_memory[task_id]['td'].append(data_loader.dataset[ind][3])
print ('Memory updated by adding {} images'.format(len(self.task_memory[task_id]['x']))) |
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import print_function
from PIL import Image
import torch
import numpy as np
import os.path
import sys
import torch.utils.data as data
from torchvision import datasets, transforms
class iMNIST(datasets.MNIST):
def __init__(self, root, classes, memory_classes, memory, task_num, train, transform=None, target_transform=None, download=True):
super(iMNIST, self).__init__(root, task_num, transform=transform,
target_transform=target_transform, download=download)
self.train = train # training set or test set
self.root = root
self.target_transform=target_transform
self.transform=transform
if download:
self.download()
if not self._check_exists():
raise RuntimeError('Dataset not found.' + ' You can use download=True to download it')
if self.train:
data_file = self.training_file
else:
data_file = self.test_file
self.data, self.targets = torch.load(os.path.join(self.processed_folder, data_file))
self.data=np.array(self.data).astype(np.float32)
self.targets=list(np.array(self.targets))
self.train = train # training set or test set
if not isinstance(classes, list):
classes = [classes]
self.class_mapping = {c: i for i, c in enumerate(classes)}
self.class_indices = {}
for cls in classes:
self.class_indices[self.class_mapping[cls]] = []
data = []
targets = []
tt = [] # task module labels
td = [] # discriminator labels
for i in range(len(self.data)):
if self.targets[i] in classes:
data.append(self.data[i])
targets.append(self.class_mapping[self.targets[i]])
tt.append(task_num)
td.append(task_num+1)
self.class_indices[self.class_mapping[self.targets[i]]].append(i)
if self.train:
if memory_classes:
for task_id in range(task_num):
for i in range(len(memory[task_id]['x'])):
if memory[task_id]['y'][i] in range(len(memory_classes[task_id])):
data.append(memory[task_id]['x'][i])
targets.append(memory[task_id]['y'][i])
tt.append(memory[task_id]['tt'][i])
td.append(memory[task_id]['td'][i])
self.data = data.copy()
self.targets = targets.copy()
self.tt = tt.copy()
self.td = td.copy()
def __getitem__(self, index):
"""
Args:
index (int): Index
Returns:
tuple: (image, target) where target is index of the target class.
"""
img, target, tt, td = self.data[index], int(self.targets[index]), self.tt[index], self.td[index]
# doing this so that it is consistent with all other datasets
# to return a PIL Image
try:
img = Image.fromarray(img.numpy(), mode='L')
except:
pass
try:
if self.transform is not None: img = self.transform(img)
except:
pass
try:
if self.target_transform is not None: tt = self.target_transform(tt)
if self.target_transform is not None: td = self.target_transform(td)
except:
pass
return img, target, tt, td
def __len__(self):
return len(self.data)
class DatasetGen(object):
"""docstring for DatasetGen"""
def __init__(self, args):
super(DatasetGen, self).__init__()
self.seed = args.seed
self.batch_size=args.batch_size
self.pc_valid=args.pc_valid
self.root = args.data_dir
self.latent_dim = args.latent_dim
self.num_tasks = args.ntasks
self.num_classes = 10
self.num_samples = args.samples
self.inputsize = [1,28,28]
mean = (0.1307,)
std = (0.3081,)
self.transformation = transforms.Compose([transforms.ToTensor(), transforms.Normalize(mean, std)])
self.taskcla = [[t, int(self.num_classes/self.num_tasks)] for t in range(self.num_tasks)]
self.indices = {}
self.dataloaders = {}
self.idx={}
self.num_workers = args.workers
self.pin_memory = True
np.random.seed(self.seed)
self.task_ids = [[0,1], [2,3], [4,5], [6,7], [8,9]]
self.train_set = {}
self.test_set = {}
self.task_memory = {}
for i in range(self.num_tasks):
self.task_memory[i] = {}
self.task_memory[i]['x'] = []
self.task_memory[i]['y'] = []
self.task_memory[i]['tt'] = []
self.task_memory[i]['td'] = []
def get(self, task_id):
self.dataloaders[task_id] = {}
sys.stdout.flush()
if task_id == 0:
memory_classes = None
memory=None
else:
memory_classes = self.task_ids
memory = self.task_memory
self.train_set[task_id] = iMNIST(root=self.root, classes=self.task_ids[task_id], memory_classes=memory_classes,
memory=memory, task_num=task_id, train=True,
download=True, transform=self.transformation)
self.test_set[task_id] = iMNIST(root=self.root, classes=self.task_ids[task_id], memory_classes=None,
memory=None, task_num=task_id, train=False,
download=True, transform=self.transformation)
split = int(np.floor(self.pc_valid * len(self.train_set[task_id])))
train_split, valid_split = torch.utils.data.random_split(self.train_set[task_id], [len(self.train_set[task_id]) - split, split])
train_loader = torch.utils.data.DataLoader(train_split, batch_size=self.batch_size, num_workers=self.num_workers,
pin_memory=self.pin_memory, drop_last=True,shuffle=True)
valid_loader = torch.utils.data.DataLoader(valid_split, batch_size=int(self.batch_size * self.pc_valid),shuffle=True,
num_workers=self.num_workers, pin_memory=self.pin_memory, drop_last=True)
test_loader = torch.utils.data.DataLoader(self.test_set[task_id], batch_size=self.batch_size, num_workers=self.num_workers,
pin_memory=self.pin_memory, drop_last=True,shuffle=True)
self.dataloaders[task_id]['train'] = train_loader
self.dataloaders[task_id]['valid'] = valid_loader
self.dataloaders[task_id]['test'] = test_loader
self.dataloaders[task_id]['name'] = '5Split-MNIST-{}-{}'.format(task_id,self.task_ids[task_id])
print ("Training set size: {} images of {}x{}".format(len(train_loader.dataset),self.inputsize[1],self.inputsize[1]))
print ("Validation set size: {} images of {}x{}".format(len(valid_loader.dataset),self.inputsize[1],self.inputsize[1]))
print ("Test set size: {} images of {}x{}".format(len(test_loader.dataset),self.inputsize[1],self.inputsize[1]))
return self.dataloaders
def update_memory(self, task_id):
num_samples_per_class = self.num_samples // len(self.task_ids[task_id])
mem_class_mapping = {i: i for i, c in enumerate(self.task_ids[task_id])}
# Looping over each class in the current task
for i in range(len(self.task_ids[task_id])):
dataset = iMNIST(root=self.root, classes=self.task_ids[task_id][i], memory_classes=None, memory=None,
task_num=task_id, train=True, download=True, transform=self.transformation)
data_loader = torch.utils.data.DataLoader(dataset, shuffle=True, batch_size=1,
num_workers=self.num_workers,
pin_memory=self.pin_memory)
# Randomly choosing num_samples_per_class for this class
randind = torch.randperm(len(data_loader.dataset))[:num_samples_per_class]
# Adding the selected samples to memory
for ind in randind:
self.task_memory[task_id]['x'].append(data_loader.dataset[ind][0])
self.task_memory[task_id]['y'].append(mem_class_mapping[i])
self.task_memory[task_id]['tt'].append(data_loader.dataset[ind][2])
self.task_memory[task_id]['td'].append(data_loader.dataset[ind][3]) |
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import torch
import sys, os
import numpy as np
from PIL import Image
import torch.utils.data as data
from torchvision import datasets, transforms
from sklearn.utils import shuffle
from utils import *
class PermutedMNIST(datasets.MNIST):
def __init__(self, root, task_num, train=True, permute_idx=None, transform=None):
super(PermutedMNIST, self).__init__(root, train, download=True)
if self.train:
data_file = self.training_file
else:
data_file = self.test_file
self.data, self.targets = torch.load(os.path.join(self.processed_folder, data_file))
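        # Flatten each image and apply this task's fixed pixel permutation.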
self.data = torch.stack([img.float().view(-1)[permute_idx] for img in self.data])
self.tl = (task_num) * torch.ones(len(self.data),dtype=torch.long)
self.td = (task_num+1) * torch.ones(len(self.data),dtype=torch.long)
def __getitem__(self, index):
img, target, tl, td = self.data[index], self.targets[index], self.tl[index], self.td[index]
if self.transform is not None:
img = self.transform(img)
if self.target_transform is not None:
print ("We are transforming")
target = self.target_transform(target)
return img, target, tl, td
def __len__(self):
return self.data.size(0)
class DatasetGen(object):
def __init__(self, args):
super(DatasetGen, self).__init__()
self.seed = args.seed
self.batch_size=args.batch_size
self.pc_valid=args.pc_valid
self.num_samples = args.samples
self.num_tasks = args.ntasks
self.root = args.data_dir
self.use_memory = args.use_memory
self.inputsize = [1, 28, 28]
mean = (0.1307,)
std = (0.3081,)
self.transformation = transforms.Compose([transforms.ToTensor(),transforms.Normalize(mean, std)])
self.taskcla = [[t, 10] for t in range(self.num_tasks)]
self.train_set, self.test_set = {}, {}
self.indices = {}
self.dataloaders = {}
self.idx={}
self.get_idx()
self.pin_memory = True
self.num_workers = args.workers
self.task_memory = []
def get(self, task_id):
self.dataloaders[task_id] = {}
sys.stdout.flush()
if task_id == 0:
self.train_set[task_id] = PermutedMNIST(root=self.root, task_num=task_id, train=True,
permute_idx=self.idx[task_id], transform=self.transformation)
if self.use_memory == 'yes' and self.num_samples > 0:
indices=torch.randperm(len(self.train_set[task_id]))[:self.num_samples]
rand_subset=torch.utils.data.Subset(self.train_set[task_id], indices)
self.task_memory.append(rand_subset)
else:
if self.use_memory == 'yes' and self.num_samples > 0:
current_dataset = PermutedMNIST(root=self.root, task_num=task_id, train=True,
permute_idx=self.idx[task_id], transform=self.transformation)
d = []
d.append(current_dataset)
for m in self.task_memory:
d.append(m)
self.train_set[task_id] = torch.utils.data.ConcatDataset(d)
indices=torch.randperm(len(current_dataset))[:self.num_samples]
rand_subset=torch.utils.data.Subset(current_dataset, indices)
self.task_memory.append(rand_subset)
else:
self.train_set[task_id] = PermutedMNIST(root=self.root, task_num=task_id, train=True,
permute_idx=self.idx[task_id], transform=self.transformation)
self.test_set[task_id] = PermutedMNIST(root=self.root, task_num=task_id, train=False,
permute_idx=self.idx[task_id], transform=self.transformation)
split = int(np.floor(self.pc_valid * len(self.train_set[task_id])))
train_split, valid_split = torch.utils.data.random_split(self.train_set[task_id],
[len(self.train_set[task_id]) - split, split])
train_loader = torch.utils.data.DataLoader(train_split, batch_size=self.batch_size,
num_workers=self.num_workers, pin_memory=self.pin_memory,shuffle=True)
valid_loader = torch.utils.data.DataLoader(valid_split, batch_size=self.batch_size,
num_workers=self.num_workers, pin_memory=self.pin_memory,shuffle=True)
test_loader = torch.utils.data.DataLoader(self.test_set[task_id], batch_size=self.batch_size,
num_workers=self.num_workers, pin_memory=self.pin_memory,shuffle=True)
self.dataloaders[task_id]['train'] = train_loader
self.dataloaders[task_id]['valid'] = valid_loader
self.dataloaders[task_id]['test'] = test_loader
self.dataloaders[task_id]['name'] = 'pmnist-{}'.format(task_id+1)
print ("Training set size: {} images of {}x{}".format(len(train_loader.dataset),self.inputsize[1],self.inputsize[1]))
print ("Validation set size: {} images of {}x{}".format(len(valid_loader.dataset),self.inputsize[1],self.inputsize[1]))
print ("Train+Val set size: {} images of {}x{}".format(len(valid_loader.dataset)+len(train_loader.dataset),self.inputsize[1],self.inputsize[1]))
print ("Test set size: {} images of {}x{}".format(len(test_loader.dataset),self.inputsize[1],self.inputsize[1]))
return self.dataloaders
def get_idx(self):
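        # One fixed pixel permutation per task, derived deterministically from the seed.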
for i in range(len(self.taskcla)):
idx = list(range(self.inputsize[1] * self.inputsize[2]))
self.idx[i] = shuffle(idx, random_state=self.seed * 100 + i)
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import print_function
import os.path
import sys
import warnings
import urllib.request
if sys.version_info[0] == 2:
import cPickle as pickle
else:
import pickle
import torch.utils.data as data
import numpy as np
import torch
from torchvision import datasets, transforms
from .utils import *
# from scipy.imageio import imread
import pandas as pd
import os
import torch
from PIL import Image
import scipy.io as sio
from collections import defaultdict
from itertools import chain
from collections import OrderedDict
class CIFAR10_(datasets.CIFAR10):
base_folder = 'cifar-10-batches-py'
url = "https://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz"
filename = "cifar-10-python.tar.gz"
tgz_md5 = 'c58f30108f718f92721af3b95e74349a'
train_list = [
['data_batch_1', 'c99cafc152244af753f735de768cd75f'],
['data_batch_2', 'd4bba439e000b95fd0a9bffe97cbabec'],
['data_batch_3', '54ebc095f3ab1f0389bbae665268c751'],
['data_batch_4', '634d18415352ddfa80567beed471001a'],
['data_batch_5', '482c414d41f54cd18b22e5b47cb7c3cb'],
]
test_list = [
['test_batch', '40351d587109b95175f43aff81a1287e'],
]
meta = {
'filename': 'batches.meta',
'key': 'label_names',
'md5': '5ff9c542aee3614f3951f8cda6e48888',
}
num_classes = 10
def __init__(self, root, task_num, num_samples_per_class, train, transform, target_transform, download=True):
# root, task_num, train, transform = None, download = False):
super(CIFAR10_, self).__init__(root, task_num, transform=transform,
target_transform=target_transform,
download=download)
# print(self.train)
# self.train = train # training set or test set
self.train = train # training set or test set
self.transform = transform
self.target_transform=target_transform
if download:
self.download()
if not self._check_integrity():
raise RuntimeError('Dataset not found or corrupted.' +
' You can use download=True to download it')
if self.train:
downloaded_list = self.train_list
else:
downloaded_list = self.test_list
if not num_samples_per_class:
self.data = []
self.targets = []
# now load the picked numpy arrays
for file_name, checksum in downloaded_list:
file_path = os.path.join(self.root, self.base_folder, file_name)
with open(file_path, 'rb') as f:
if sys.version_info[0] == 2:
entry = pickle.load(f)
else:
entry = pickle.load(f, encoding='latin1')
self.data.append(entry['data'])
if 'labels' in entry:
self.targets.extend(entry['labels'])
else:
self.targets.extend(entry['fine_labels'])
else:
x, y, tt, td = [], [], [], []
for l in range(self.num_classes):
indices_with_label_l = np.where(np.array(self.targets)==l)
x_with_label_l = [self.data[item] for item in indices_with_label_l[0]]
y_with_label_l = [l]*len(x_with_label_l)
                # When a class-balanced subset is requested, randomly keep num_samples_per_class
                # samples of this class (the subset can later be concatenated with a full dataset)
shuffled_indices = np.random.permutation(len(x_with_label_l))[:num_samples_per_class]
x_with_label_l = [x_with_label_l[item] for item in shuffled_indices]
y_with_label_l = [y_with_label_l[item] for item in shuffled_indices]
x.append(x_with_label_l)
y.append(y_with_label_l)
self.data = np.array(sum(x,[]))
self.targets = sum(y,[])
self.data = np.vstack(self.data).reshape(-1, 3, 32, 32)
self.data = self.data.transpose((0, 2, 3, 1)) # convert to HWC
self.tt = [task_num for _ in range(len(self.data))]
self.td = [task_num + 1 for _ in range(len(self.data))]
self._load_meta()
def __getitem__(self, index):
img, target, tt, td = self.data[index], self.targets[index], self.tt[index], self.td[index]
# doing this so that it is consistent with all other datasets
# to return a PIL Image
try:
img = Image.fromarray(img)
except:
pass
try:
if self.transform is not None: img = self.transform(img)
except:
pass
try:
if self.target_transform is not None: tt = self.target_transform(tt)
if self.target_transform is not None: td = self.target_transform(td)
except:
pass
return img, target, tt, td
def __len__(self):
# if self.train:
return len(self.data)
# else:
# return len(self.test_data)
def report_size(self):
print("CIFAR10 size at train={} time: {} ".format(self.train,self.__len__()))
def _load_meta(self):
path = os.path.join(self.root, self.base_folder, self.meta['filename'])
if not check_integrity(path, self.meta['md5']):
raise RuntimeError('Dataset metadata file not found or corrupted.' +
' You can use download=True to download it')
with open(path, 'rb') as infile:
if sys.version_info[0] == 2:
data = pickle.load(infile)
else:
data = pickle.load(infile, encoding='latin1')
self.classes = data[self.meta['key']]
self.class_to_idx = {_class: i for i, _class in enumerate(self.classes)}
class CIFAR100_(CIFAR10_):
"""`CIFAR100 <https://www.cs.toronto.edu/~kriz/cifar.html>`_ Dataset.
This is a subclass of the `CIFAR10` Dataset.
"""
base_folder = 'cifar-100-python'
url = "https://www.cs.toronto.edu/~kriz/cifar-100-python.tar.gz"
filename = "cifar-100-python.tar.gz"
tgz_md5 = 'eb9058c3a382ffc7106e4002c42a8d85'
train_list = [
['train', '16019d7e3df5f24257cddd939b257f8d'],
]
test_list = [
['test', 'f0ef6b0ae62326f3e7ffdfab6717acfc'],
]
meta = {
'filename': 'meta',
'key': 'fine_label_names',
'md5': '7973b15100ade9c7d40fb424638fde48',
}
num_classes = 100
class SVHN_(torch.utils.data.Dataset):
url = ""
filename = ""
file_md5 = ""
split_list = {
'train': ["http://ufldl.stanford.edu/housenumbers/train_32x32.mat",
"train_32x32.mat", "e26dedcc434d2e4c54c9b2d4a06d8373"],
'test': ["http://ufldl.stanford.edu/housenumbers/test_32x32.mat",
"test_32x32.mat", "eb5a983be6a315427106f1b164d9cef3"],
'extra': ["http://ufldl.stanford.edu/housenumbers/extra_32x32.mat",
"extra_32x32.mat", "a93ce644f1a588dc4d68dda5feec44a7"]}
def __init__(self, root, task_num, num_samples_per_class, train,transform=None, target_transform=None, download=True):
self.root = os.path.expanduser(root)
# root, task_num, train, transform = None, download = False):
# print(self.train)
# self.train = train # training set or test set
self.train = train # training set or test set
self.transform = transform
self.target_transform=target_transform
if self.train:
split="train"
else:
split="test"
self.num_classes = 10
self.split = verify_str_arg(split, "split", tuple(self.split_list.keys()))
self.url = self.split_list[split][0]
self.filename = self.split_list[split][1]
self.file_md5 = self.split_list[split][2]
if download:
self.download()
if not self._check_integrity():
raise RuntimeError('Dataset not found or corrupted.' +
' You can use download=True to download it')
# import here rather than at top of file because this is
# an optional dependency for torchvision
import scipy.io as sio
# reading(loading) mat file as array
loaded_mat = sio.loadmat(os.path.join(self.root, self.filename))
self.data = loaded_mat['X']
# loading from the .mat file gives an np array of type np.uint8
# converting to np.int64, so that we have a LongTensor after
# the conversion from the numpy array
# the squeeze is needed to obtain a 1D tensor
self.targets = loaded_mat['y'].astype(np.int64).squeeze()
self.data = np.transpose(self.data, (3, 2, 0, 1))
if num_samples_per_class:
x, y, tt, td = [], [], [], []
for l in range(self.num_classes+1):
indices_with_label_l = np.where(np.array(self.targets)==l)
x_with_label_l = [self.data[item] for item in indices_with_label_l[0]]
y_with_label_l = [l]*len(x_with_label_l)
                # When a class-balanced subset is requested, randomly keep num_samples_per_class
                # samples of this class (the subset can later be concatenated with a full dataset)
shuffled_indices = np.random.permutation(len(x_with_label_l))[:num_samples_per_class]
x_with_label_l = [x_with_label_l[item] for item in shuffled_indices]
y_with_label_l = [y_with_label_l[item] for item in shuffled_indices]
x.append(x_with_label_l)
y.append(y_with_label_l)
self.data = np.array(sum(x,[]))
self.targets = np.array(sum(y,[])).astype(np.int64)
# the svhn dataset assigns the class label "10" to the digit 0
# this makes it inconsistent with several loss functions
# which expect the class labels to be in the range [0, C-1]
np.place(self.targets, self.targets == 10, 0)
# print ("svhn: ", self.data.shape)
self.tt = [task_num for _ in range(len(self.data))]
self.td = [task_num+1 for _ in range(len(self.data))]
def __getitem__(self, index):
"""
Args:
index (int): Index
Returns:
tuple: (image, target) where target is index of the target class.
"""
img, target, tt, td = self.data[index], int(self.targets[index]), self.tt[index], self.td[index]
# doing this so that it is consistent with all other datasets
# to return a PIL Image
try:
img = Image.fromarray(np.transpose(img, (1, 2, 0)))
except:
pass
try:
if self.transform is not None: img = self.transform(img)
except:
pass
try:
if self.target_transform is not None: tt = self.target_transform(tt)
if self.target_transform is not None: td = self.target_transform(td)
except:
pass
return img, target, tt, td
def __len__(self):
return len(self.data)
def _check_integrity(self):
root = self.root
md5 = self.split_list[self.split][2]
fpath = os.path.join(root, self.filename)
return check_integrity(fpath, md5)
def download(self):
md5 = self.split_list[self.split][2]
download_url(self.url, self.root, self.filename, md5)
def extra_repr(self):
return "Split: {split}".format(**self.__dict__)
class MNIST_RGB(datasets.MNIST):
def __init__(self, root, task_num, num_samples_per_class, train=True, transform=None, target_transform=None, download=False):
super(MNIST_RGB, self).__init__(root, task_num, transform=transform,
target_transform=target_transform,
download=download)
self.train = train # training set or test set
self.target_transform=target_transform
self.transform=transform
self.num_classes=10
if download:
self.download()
if not self._check_exists():
raise RuntimeError('Dataset not found.' +
' You can use download=True to download it')
if self.train:
data_file = self.training_file
else:
data_file = self.test_file
# self.data, self.targets = torch.load(os.path.join(self.processed_folder, data_file))
self.data, self.targets = torch.load(os.path.join(self.processed_folder, data_file))
self.data=np.array(self.data).astype(np.float32)
self.targets=list(np.array(self.targets))
if num_samples_per_class:
x, y, tt, td = [], [], [], []
for l in range(self.num_classes):
indices_with_label_l = np.where(np.array(self.targets)==l)
x_with_label_l = [self.data[item] for item in indices_with_label_l[0]]
# y_with_label_l = [l]*len(x_with_label_l)
                # When a class-balanced subset is requested, randomly keep num_samples_per_class
                # samples of this class (the subset can later be concatenated with a full dataset)
shuffled_indices = np.random.permutation(len(x_with_label_l))[:num_samples_per_class]
x_with_label_l = [x_with_label_l[item] for item in shuffled_indices]
y_with_label_l = [l]*len(shuffled_indices)
x.append(x_with_label_l)
y.append(y_with_label_l)
self.data = np.array(sum(x,[]))
self.targets = sum(y,[])
self.tt = [task_num for _ in range(len(self.data))]
self.td = [task_num+1 for _ in range(len(self.data))]
def __getitem__(self, index):
"""
Args:
index (int): Index
Returns:
tuple: (image, target) where target is index of the target class.
"""
img, target, tt, td = self.data[index], int(self.targets[index]), self.tt[index], self.td[index]
# doing this so that it is consistent with all other datasets
# to return a PIL Image
try:
img = Image.fromarray(img, mode='L').convert('RGB')
except:
pass
try:
if self.transform is not None: img = self.transform(img)
except:
pass
try:
if self.target_transform is not None: tt = self.target_transform(tt)
if self.target_transform is not None: td = self.target_transform(td)
except:
pass
return img, target, tt, td
def __len__(self):
return len(self.data)
@property
def raw_folder(self):
return os.path.join(self.root, self.__class__.__name__, 'raw')
@property
def processed_folder(self):
return os.path.join(self.root, self.__class__.__name__, 'processed')
@property
def class_to_idx(self):
return {_class: i for i, _class in enumerate(self.classes)}
def _check_exists(self):
return (os.path.exists(os.path.join(self.processed_folder,
self.training_file)) and
os.path.exists(os.path.join(self.processed_folder,
self.test_file)))
def download(self):
"""Download the MNIST data if it doesn't exist in processed_folder already."""
if self._check_exists():
return
makedir_exist_ok(self.raw_folder)
makedir_exist_ok(self.processed_folder)
# download files
for url in self.urls:
filename = url.rpartition('/')[2]
download_and_extract_archive(url, download_root=self.raw_folder, filename=filename)
# process and save as torch files
print('Processing...')
training_set = (
read_image_file(os.path.join(self.raw_folder, 'train-images-idx3-ubyte')),
read_label_file(os.path.join(self.raw_folder, 'train-labels-idx1-ubyte'))
)
test_set = (
read_image_file(os.path.join(self.raw_folder, 't10k-images-idx3-ubyte')),
read_label_file(os.path.join(self.raw_folder, 't10k-labels-idx1-ubyte'))
)
with open(os.path.join(self.processed_folder, self.training_file), 'wb') as f:
torch.save(training_set, f)
with open(os.path.join(self.processed_folder, self.test_file), 'wb') as f:
torch.save(test_set, f)
print('Done!')
def extra_repr(self):
return "Split: {}".format("Train" if self.train is True else "Test")
class FashionMNIST_(MNIST_RGB):
"""`Fashion MNIST <https://github.com/zalandoresearch/fashion-mnist>`_ Dataset.
"""
urls = [
'http://fashion-mnist.s3-website.eu-central-1.amazonaws.com/train-images-idx3-ubyte.gz',
'http://fashion-mnist.s3-website.eu-central-1.amazonaws.com/train-labels-idx1-ubyte.gz',
'http://fashion-mnist.s3-website.eu-central-1.amazonaws.com/t10k-images-idx3-ubyte.gz',
'http://fashion-mnist.s3-website.eu-central-1.amazonaws.com/t10k-labels-idx1-ubyte.gz',
]
class notMNIST_(torch.utils.data.Dataset):
def __init__(self, root, task_num, num_samples_per_class, train,transform=None, target_transform=None, download=False):
self.root = os.path.expanduser(root)
self.transform = transform
self.target_transform=target_transform
self.train = train
self.url = "https://github.com/facebookresearch/Adversarial-Continual-Learning/raw/master/data/notMNIST.zip"
self.filename = 'notMNIST.zip'
fpath = os.path.join(root, self.filename)
if not os.path.isfile(fpath):
if not download:
raise RuntimeError('Dataset not found. You can use download=True to download it')
else:
print('Downloading from '+self.url)
download_url(self.url, root, filename=self.filename)
import zipfile
zip_ref = zipfile.ZipFile(fpath, 'r')
zip_ref.extractall(root)
zip_ref.close()
if self.train:
fpath = os.path.join(root, 'notMNIST', 'Train')
else:
fpath = os.path.join(root, 'notMNIST', 'Test')
X, Y = [], []
folders = os.listdir(fpath)
for folder in folders:
folder_path = os.path.join(fpath, folder)
for ims in os.listdir(folder_path):
try:
img_path = os.path.join(folder_path, ims)
X.append(np.array(Image.open(img_path).convert('RGB')))
Y.append(ord(folder) - 65) # Folders are A-J so labels will be 0-9
except:
print("File {}/{} is broken".format(folder, ims))
self.data = np.array(X)
self.targets = Y
self.num_classes = len(set(self.targets))
if num_samples_per_class:
x, y, tt, td = [], [], [], []
for l in range(self.num_classes):
indices_with_label_l = np.where(np.array(self.targets)==l)
x_with_label_l = [self.data[item] for item in indices_with_label_l[0]]
                # When a class-balanced subset is requested, randomly keep num_samples_per_class
                # samples of this class (the subset can later be concatenated with a full dataset)
shuffled_indices = np.random.permutation(len(x_with_label_l))[:num_samples_per_class]
x_with_label_l = [x_with_label_l[item] for item in shuffled_indices]
y_with_label_l = [l]*len(shuffled_indices)
x.append(x_with_label_l)
y.append(y_with_label_l)
self.data = np.array(sum(x,[]))
            self.targets = sum(y,[])
self.tt = [task_num for _ in range(len(self.data))]
self.td = [task_num + 1 for _ in range(len(self.data))]
def __getitem__(self, index):
img, target, tt, td = self.data[index], self.targets[index], self.tt[index], self.td[index]
img = Image.fromarray(img)#.convert('RGB')
img = self.transform(img)
return img, target, tt, td
def __len__(self):
return len(self.data)
def download(self):
"""Download the notMNIST data if it doesn't exist in processed_folder already."""
import errno
root = os.path.expanduser(self.root)
fpath = os.path.join(root, self.filename)
try:
os.makedirs(root)
except OSError as e:
if e.errno == errno.EEXIST:
pass
else:
raise
urllib.request.urlretrieve(self.url, fpath)
import zipfile
zip_ref = zipfile.ZipFile(fpath, 'r')
zip_ref.extractall(root)
zip_ref.close()
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import torch
import utils
class Shared(torch.nn.Module):
def __init__(self,args):
super(Shared, self).__init__()
self.ncha,size,_=args.inputsize
self.taskcla=args.taskcla
self.latent_dim = args.latent_dim
if args.experiment == 'cifar100':
hiddens = [64, 128, 256, 1024, 1024, 512]
elif args.experiment == 'miniimagenet':
hiddens = [64, 128, 256, 512, 512, 512]
# ----------------------------------
elif args.experiment == 'multidatasets':
hiddens = [64, 128, 256, 1024, 1024, 512]
else:
raise NotImplementedError
self.conv1=torch.nn.Conv2d(self.ncha,hiddens[0],kernel_size=size//8)
s=utils.compute_conv_output_size(size,size//8)
s=s//2
self.conv2=torch.nn.Conv2d(hiddens[0],hiddens[1],kernel_size=size//10)
s=utils.compute_conv_output_size(s,size//10)
s=s//2
self.conv3=torch.nn.Conv2d(hiddens[1],hiddens[2],kernel_size=2)
s=utils.compute_conv_output_size(s,2)
s=s//2
self.maxpool=torch.nn.MaxPool2d(2)
self.relu=torch.nn.ReLU()
self.drop1=torch.nn.Dropout(0.2)
self.drop2=torch.nn.Dropout(0.5)
self.fc1=torch.nn.Linear(hiddens[2]*s*s,hiddens[3])
self.fc2=torch.nn.Linear(hiddens[3],hiddens[4])
self.fc3=torch.nn.Linear(hiddens[4],hiddens[5])
self.fc4=torch.nn.Linear(hiddens[5], self.latent_dim)
def forward(self, x_s):
x_s = x_s.view_as(x_s)
h = self.maxpool(self.drop1(self.relu(self.conv1(x_s))))
h = self.maxpool(self.drop1(self.relu(self.conv2(h))))
h = self.maxpool(self.drop2(self.relu(self.conv3(h))))
h = h.view(x_s.size(0), -1)
h = self.drop2(self.relu(self.fc1(h)))
h = self.drop2(self.relu(self.fc2(h)))
h = self.drop2(self.relu(self.fc3(h)))
h = self.drop2(self.relu(self.fc4(h)))
return h
class Private(torch.nn.Module):
def __init__(self, args):
super(Private, self).__init__()
self.ncha,self.size,_=args.inputsize
self.taskcla=args.taskcla
self.latent_dim = args.latent_dim
self.num_tasks = args.ntasks
self.device = args.device
if args.experiment == 'cifar100':
hiddens=[32,32]
flatten=1152
elif args.experiment == 'miniimagenet':
# hiddens=[8,8]
# flatten=1800
hiddens=[16,16]
flatten=3600
elif args.experiment == 'multidatasets':
hiddens=[32,32]
flatten=1152
else:
raise NotImplementedError
self.task_out = torch.nn.ModuleList()
for _ in range(self.num_tasks):
self.conv = torch.nn.Sequential()
self.conv.add_module('conv1',torch.nn.Conv2d(self.ncha, hiddens[0], kernel_size=self.size // 8))
self.conv.add_module('relu1', torch.nn.ReLU(inplace=True))
self.conv.add_module('drop1', torch.nn.Dropout(0.2))
self.conv.add_module('maxpool1', torch.nn.MaxPool2d(2))
self.conv.add_module('conv2', torch.nn.Conv2d(hiddens[0], hiddens[1], kernel_size=self.size // 10))
self.conv.add_module('relu2', torch.nn.ReLU(inplace=True))
self.conv.add_module('dropout2', torch.nn.Dropout(0.5))
self.conv.add_module('maxpool2', torch.nn.MaxPool2d(2))
self.task_out.append(self.conv)
self.linear = torch.nn.Sequential()
self.linear.add_module('linear1', torch.nn.Linear(flatten,self.latent_dim))
self.linear.add_module('relu3', torch.nn.ReLU(inplace=True))
self.task_out.append(self.linear)
def forward(self, x, task_id):
x = x.view_as(x)
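        # task_out alternates [conv_0, linear_0, conv_1, linear_1, ...], so the
        # conv block of task t sits at index 2*t and its linear head at 2*t + 1.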
out = self.task_out[2*task_id].forward(x)
out = out.view(out.size(0),-1)
out = self.task_out[2*task_id+1].forward(out)
return out
class Net(torch.nn.Module):
def __init__(self, args):
super(Net, self).__init__()
self.ncha,size,_=args.inputsize
self.taskcla=args.taskcla
self.latent_dim = args.latent_dim
self.num_tasks = args.ntasks
self.samples = args.samples
self.image_size = self.ncha*size*size
self.args=args
self.hidden1 = args.head_units
self.hidden2 = args.head_units//2
self.shared = Shared(args)
self.private = Private(args)
self.head = torch.nn.ModuleList()
for i in range(self.num_tasks):
self.head.append(
torch.nn.Sequential(
torch.nn.Linear(2*self.latent_dim, self.hidden1),
torch.nn.ReLU(inplace=True),
torch.nn.Dropout(),
torch.nn.Linear(self.hidden1, self.hidden2),
torch.nn.ReLU(inplace=True),
torch.nn.Linear(self.hidden2, self.taskcla[i][1])
))
def forward(self, x_s, x_p, tt, task_id):
x_s = x_s.view_as(x_s)
x_p = x_p.view_as(x_p)
x_s = self.shared(x_s)
x_p = self.private(x_p, task_id)
x = torch.cat([x_p, x_s], dim=1)
if self.args.experiment == 'multidatasets':
# if no memory is used this is faster:
y=[]
for i,_ in self.taskcla:
y.append(self.head[i](x))
return y[task_id]
else:
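            # Per-sample head selection: with episodic memory a batch may mix
            # samples from different tasks, so sample i is routed to the head
            # of its task label tt[i].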
return torch.stack([self.head[tt[i]].forward(x[i]) for i in range(x.size(0))])
def get_encoded_ftrs(self, x_s, x_p, task_id):
return self.shared(x_s), self.private(x_p, task_id)
def print_model_size(self):
count_P = sum(p.numel() for p in self.private.parameters() if p.requires_grad)
count_S = sum(p.numel() for p in self.shared.parameters() if p.requires_grad)
count_H = sum(p.numel() for p in self.head.parameters() if p.requires_grad)
print('Num parameters in S = %s ' % (self.pretty_print(count_S)))
print('Num parameters in P = %s, per task = %s ' % (self.pretty_print(count_P),self.pretty_print(count_P/self.num_tasks)))
print('Num parameters in p = %s, per task = %s ' % (self.pretty_print(count_H),self.pretty_print(count_H/self.num_tasks)))
print('Num parameters in P+p = %s ' % self.pretty_print(count_P+count_H))
print('--------------------------> Architecture size: %s parameters (%sB)' % (self.pretty_print(count_S + count_P + count_H),
self.pretty_print(4*(count_S + count_P + count_H))))
print("--------------------------> Memory size: %s samples per task (%sB)" % (self.samples,
self.pretty_print(self.num_tasks*4*self.samples*self.image_size)))
print("------------------------------------------------------------------------------")
print(" TOTAL: %sB" % self.pretty_print(4*(count_S + count_P + count_H)+self.num_tasks*4*self.samples*self.image_size))
def pretty_print(self, num):
magnitude=0
while abs(num) >= 1000:
magnitude+=1
num/=1000.0
return '%.1f%s' % (num, ['', 'K', 'M', 'G', 'T', 'P'][magnitude])
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import torch
class Private(torch.nn.Module):
def __init__(self, args):
super(Private, self).__init__()
self.ncha,self.size,_=args.inputsize
self.taskcla=args.taskcla
self.latent_dim = args.latent_dim
self.num_tasks = args.ntasks
self.nhid = args.units
self.device = args.device
self.task_out = torch.nn.ModuleList()
for _ in range(self.num_tasks):
self.linear = torch.nn.Sequential()
self.linear.add_module('linear', torch.nn.Linear(self.ncha*self.size*self.size, self.latent_dim))
self.linear.add_module('relu', torch.nn.ReLU(inplace=True))
self.task_out.append(self.linear)
def forward(self, x_p, task_id):
x_p = x_p.view(x_p.size(0), -1)
return self.task_out[task_id].forward(x_p)
class Shared(torch.nn.Module):
def __init__(self,args):
super(Shared, self).__init__()
ncha,self.size,_=args.inputsize
self.taskcla=args.taskcla
self.latent_dim = args.latent_dim
self.nhid = args.units
self.nlayers = args.nlayers
self.relu=torch.nn.ReLU()
self.drop=torch.nn.Dropout(0.2)
self.fc1=torch.nn.Linear(ncha*self.size*self.size, self.nhid)
if self.nlayers == 3:
self.fc2 = torch.nn.Linear(self.nhid, self.nhid)
self.fc3=torch.nn.Linear(self.nhid,self.latent_dim)
else:
self.fc2 = torch.nn.Linear(self.nhid,self.latent_dim)
def forward(self, x_s):
h = x_s.view(x_s.size(0), -1)
h = self.drop(self.relu(self.fc1(h)))
h = self.drop(self.relu(self.fc2(h)))
if self.nlayers == 3:
h = self.drop(self.relu(self.fc3(h)))
return h
class Net(torch.nn.Module):
def __init__(self, args):
super(Net, self).__init__()
ncha,size,_=args.inputsize
self.taskcla=args.taskcla
self.latent_dim = args.latent_dim
self.num_tasks = args.ntasks
self.device = args.device
if args.experiment == 'mnist5':
self.hidden1 = 28
self.hidden2 = 14
elif args.experiment == 'pmnist':
self.hidden1 = 28
self.hidden2 = 28
self.samples = args.samples
self.shared = Shared(args)
self.private = Private(args)
self.head = torch.nn.ModuleList()
for i in range(self.num_tasks):
self.head.append(
torch.nn.Sequential(
torch.nn.Linear(2 * self.latent_dim, self.hidden1),
torch.nn.ReLU(inplace=True),
torch.nn.Dropout(),
torch.nn.Linear(self.hidden1, self.hidden2),
torch.nn.ReLU(inplace=True),
torch.nn.Linear(self.hidden2, self.taskcla[i][1])
))
def forward(self,x_s, x_p, tt, task_id):
h_s = x_s.view(x_s.size(0), -1)
        h_p = x_p.view(x_p.size(0), -1)
x_s = self.shared(h_s)
x_p = self.private(h_p, task_id)
x = torch.cat([x_p, x_s], dim=1)
return torch.stack([self.head[tt[i]].forward(x[i]) for i in range(x.size(0))])
def get_encoded_ftrs(self, x_s, x_p, task_id):
return self.shared(x_s), self.private(x_p, task_id)
def print_model_size(self):
count_P = sum(p.numel() for p in self.private.parameters() if p.requires_grad)
count_S = sum(p.numel() for p in self.shared.parameters() if p.requires_grad)
count_H = sum(p.numel() for p in self.head.parameters() if p.requires_grad)
print('Num parameters in S = %s ' % (self.pretty_print(count_S)))
print('Num parameters in P = %s, per task = %s ' % (self.pretty_print(count_P),self.pretty_print(count_P/self.num_tasks)))
print('Num parameters in p = %s, per task = %s ' % (self.pretty_print(count_H),self.pretty_print(count_H/self.num_tasks)))
print('Num parameters in P+p = %s ' % self.pretty_print(count_P+count_H))
print('--------------------------> Total architecture size: %s parameters (%sB)' % (self.pretty_print(count_S + count_P + count_H),
self.pretty_print(4*(count_S + count_P + count_H))))
def pretty_print(self, num):
magnitude=0
while abs(num) >= 1000:
magnitude+=1
num/=1000.0
return '%.2f%s' % (num, ['', 'K', 'M', 'G', 'T', 'P'][magnitude])
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import torch
import utils
class Discriminator(torch.nn.Module):
def __init__(self,args,task_id):
super(Discriminator, self).__init__()
self.num_tasks=args.ntasks
self.units=args.units
self.latent_dim=args.latent_dim
if args.diff == 'yes':
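            # In 'diff' mode a gradient-reversal layer is prepended, so gradients
            # flowing back from the discriminator into the shared encoder are
            # flipped (adversarial training towards task-invariant features).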
self.dis = torch.nn.Sequential(
GradientReversal(args.lam),
torch.nn.Linear(self.latent_dim, args.units),
torch.nn.LeakyReLU(),
torch.nn.Linear(args.units, args.units),
torch.nn.Linear(args.units, task_id + 2)
)
else:
self.dis = torch.nn.Sequential(
torch.nn.Linear(self.latent_dim, args.units),
torch.nn.LeakyReLU(),
torch.nn.Linear(args.units, args.units),
torch.nn.Linear(args.units, task_id + 2)
)
def forward(self, z, labels, task_id):
return self.dis(z)
def pretty_print(self, num):
magnitude=0
while abs(num) >= 1000:
magnitude+=1
num/=1000.0
return '%.1f%s' % (num, ['', 'K', 'M', 'G', 'T', 'P'][magnitude])
def get_size(self):
count=sum(p.numel() for p in self.dis.parameters() if p.requires_grad)
print('Num parameters in D = %s ' % (self.pretty_print(count)))
class GradientReversalFunction(torch.autograd.Function):
"""
From:
https://github.com/jvanvugt/pytorch-domain-adaptation/blob/cb65581f20b71ff9883dd2435b2275a1fd4b90df/utils.py#L26
Gradient Reversal Layer from:
Unsupervised Domain Adaptation by Backpropagation (Ganin & Lempitsky, 2015)
Forward pass is the identity function. In the backward pass,
the upstream gradients are multiplied by -lambda (i.e. gradient is reversed)
"""
@staticmethod
def forward(ctx, x, lambda_):
ctx.lambda_ = lambda_
return x.clone()
@staticmethod
def backward(ctx, grads):
lambda_ = ctx.lambda_
lambda_ = grads.new_tensor(lambda_)
dx = -lambda_ * grads
return dx, None
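# --- Hedged illustration (not part of the original repo) ---
# A minimal check of the behaviour described above: the forward pass is the
# identity, while the backward pass scales incoming gradients by -lambda.
def _example_gradient_reversal(lambda_=0.5):
    x = torch.ones(3, requires_grad=True)
    GradientReversalFunction.apply(x, lambda_).sum().backward()
    # The identity would give a gradient of 1 per element; here it is -lambda.
    assert torch.allclose(x.grad, torch.full_like(x, -lambda_))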
class GradientReversal(torch.nn.Module):
def __init__(self, lambda_):
super(GradientReversal, self).__init__()
self.lambda_ = lambda_
def forward(self, x):
return GradientReversalFunction.apply(x, self.lambda_) |