# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import math
import torch
import torch.nn.functional as F
from torch.autograd import grad
def gPenalty(inputs, loss, lam, q):
# Gradient penalty
bs, c, h, w = inputs.size()
d_in = c * h * w
g = grad(loss, inputs, create_graph=True)[0] * bs
g = g.view(bs, -1)
qnorms = g.norm(q, 1).mean()
lam = lam * math.pow(d_in, 1. - 1. / q)
return lam * qnorms / 2.
def advAugment(net, inputs, targets, loss, lam, q):
# Single-step adversarial augmentation (e.g. FGSM)
bs, c, h, w = inputs.size()
d_in = c * h * w
g = grad(loss, inputs, retain_graph=True)[0] * bs
g = g.view(bs, -1).detach()
if q == 1:
dx = lam * g.sign()
else:
p = 1. / (1. - 1. / q)
lam = lam * math.pow(d_in, 1. - 1. / q)
dx = g.sign() * g.abs().pow(q - 1) # keep sign of g (lost by |g|^(q-1) when q - 1 is even)
pnorms = dx.norm(p, 1, keepdim=True)
dx = lam * dx / pnorms
dx = dx.view_as(inputs)
advInputs = (inputs + dx).detach()
advOutputs = net(advInputs)
advLoss = F.cross_entropy(advOutputs, targets)
return (advLoss - loss) / 2.
def pgd(net, inputs, targets, loss, lam, steps, step_size,
random_start=True, train=True):
# Projected gradient descent (i.e. iterative FGSM) with random starts
bs, c, h, w = inputs.size()
if random_start:
if torch.cuda.is_available():
noise = torch.cuda.FloatTensor(bs, c, h, w).uniform_(-lam, lam)
else:
noise = torch.FloatTensor(bs, c, h, w).uniform_(-lam, lam)
else:
if torch.cuda.is_available():
noise = torch.cuda.FloatTensor(bs, c, h, w).fill_(0)
else:
noise = torch.FloatTensor(bs, c, h, w).fill_(0)
advInputs = (inputs + noise).detach()
advInputs.requires_grad = True
advOutputs = net(advInputs)
advLoss = F.cross_entropy(advOutputs, targets)
for i in range(steps):
retain_graph = ((i + 1 == steps) and train)
g = grad(advLoss, advInputs, retain_graph=retain_graph)[0] * bs
g = g.view(bs, -1).detach()
dx = step_size * g.sign()
dx = dx.view_as(advInputs)
advInputs = advInputs + dx
advInputs = inputs + torch.clamp(advInputs - inputs, -lam, lam)
advInputs = advInputs.detach()
advInputs.requires_grad = True
advOutputs = net(advInputs)
advLoss = F.cross_entropy(advOutputs, targets)
return advLoss - loss, advOutputs
def crossLip(inputs, outputs, lam):
gk = []
n, K, cLpen = outputs.size(0), outputs.size(1), 0.
for k in range(K):
gk.append(grad(outputs[:, k].sum(), inputs, create_graph=True)[0])
for l in range(K):
for m in range(l + 1, K):
cLpen += (gk[l] - gk[m]) ** 2
cLpen = 2. / n / K ** 2 * cLpen.sum()
return lam * cLpen
def addPenalty(net, inputs, outputs, targets, loss, args):
if args.penalty == 'grad':
penalty = gPenalty(inputs, loss, args.lam, args.q)
elif args.penalty == 'adv':
penalty = advAugment(net, inputs, targets, loss, args.lam, args.q)
elif args.penalty == 'pgd':
penalty, _ = pgd( # uses linf attacks
net, inputs, targets, loss, args.lam,
args.steps, step_size=args.lam / (.75 * args.steps))
elif args.penalty == 'crossLip':
penalty = crossLip(inputs, outputs, args.lam)
else:
raise NotImplementedError("Unknown penalty %r" % args.penalty)
return penalty
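# --- Usage sketch (not part of the original file) ---
# A minimal, hypothetical example of how addPenalty is meant to be called
# inside a training step. The argparse.Namespace below only mimics the fields
# consumed above (penalty, lam, q, steps); the tiny linear net and random data
# are for illustration only.
if __name__ == '__main__':
    import argparse
    torch.manual_seed(0)
    net = torch.nn.Sequential(torch.nn.Flatten(), torch.nn.Linear(3 * 8 * 8, 10))
    inputs = torch.rand(4, 3, 8, 8, requires_grad=True)
    targets = torch.randint(0, 10, (4,))
    outputs = net(inputs)
    loss = F.cross_entropy(outputs, targets)
    args = argparse.Namespace(penalty='grad', lam=0.1, q=2, steps=None)
    penalty = addPenalty(net, inputs, outputs, targets, loss, args)
    (loss + penalty).backward()  # regularized training objective
    print('penalty:', penalty.item())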
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
""" Some utilities """
import os
import math
import warnings
import configargparse
import torch
from nets import ConvNet
def argument_parser():
parser = configargparse.ArgParser(
description='First-order vulnerability and input dimension')
parser.add(
'--config', required=True, is_config_file=True,
help='configuration file path')
parser.add_argument(
'--name', type=str,
help='Experiment name. Results will be saved/loaded from directory '
'./results/name (which will be created if needed).')
parser.add_argument(
'--datapath', type=str, default=None,
help="Data location. Default: '~/datasets/' + `dataset`")
parser.add_argument(
'--dataset', type=str, default='cifar',
help='mnist, cifar, imgnet12 (default: cifar)')
parser.add_argument(
'--img_size', type=int, default=None,
help='only for imgnet. Resize img to 32, 64, 128 or 256.')
parser.add_argument(
'--n_layers', type=int, default=5,
help='number of hidden layers')
parser.add_argument(
'--bs', type=int, default=128,
help='batch size')
parser.add_argument(
'--epochs', type=int, default=200,
help='number of training epochs')
parser.add_argument(
'--no_BN', action='store_true',
help='Do not use batch norms (except before the very 1st layer)')
parser.add_argument(
'--no_act', action='store_true',
help='No activation functions (e.g. no ReLUs)')
parser.add_argument(
'--raw_inputs', action='store_true',
help='Do not normalize inputs (hence no bn as first network layer)')
parser.add_argument(
'--log_step', type=int, default=None,
help='print training info every log_step batches (default: None)')
# training
parser.add_argument(
'--lr', type=float, default=.01,
help='Initial learning rate')
parser.add_argument(
'--no_training', action='store_true',
help='Do not train the network')
parser.add_argument(
'--crop', action='store_true',
help='Use cropping instead of resizing image.')
# Penalties/Regularizers
penalties = ['grad', 'adv', 'pgd', 'crossLip']
parser.add_argument(
'--lam', type=float, default=0.,
help='global regularization weight')
parser.add_argument(
'--penalty', type=str, choices=penalties, default=None,
help='penalty type:' + ' | '.join(penalties))
parser.add_argument(
'--q', type=int, default=None,
help="defense-norm q; dual of attack-norm p. "
"For FGSM, use penalty='adv' and 'q=1'")
parser.add_argument(
'--steps', type=int, default=None,
help='number of optimization steps per attack when using PGD')
# Vulnerability.py specific
parser.add_argument(
'--n_attacks', type=int, default=-1,
help='number of attack iterations; -1 for whole dataset')
parser.add_argument(
'--log_vul', action='store_true',
help='Print detailed logs of vulnerability computation')
# ConvNet specific
pooltypes = ['avgpool', 'maxpool', 'weightpool', 'subsamp']
last_layers = ['maxpool', 'avgpool', 'fc', 'weightpool']
parser.add_argument(
'--poolings', nargs='*', type=int, default=[],
help='Where to do poolings. Should be a list of '
'integers smaller than n_layers. Defaults to [] (no pooling). (ConvNet)')
parser.add_argument(
'--pooltype', type=str,
choices=pooltypes, default='subsamp',
help='pooling type: ' + ' | '.join(pooltypes) + ' (default: subsamp)')
parser.add_argument(
'--dilations', nargs='*', type=int, default=None,
help='Dilations to use for each layer. List of n_layers int. '
'Defaults to 1 for all layers. (ConvNet)')
parser.add_argument(
'--last_layers', type=str, choices=last_layers,
default='avgpool', help='last-layer type: ' + ' | '.join(last_layers))
args = parser.parse_args()
if args.datapath is None:
args.datapath = os.path.join('~/datasets/', args.dataset)
args.datapath = os.path.expanduser(args.datapath)
# DATASET SPECIFIC SETTINGS
if args.dataset == 'mnist':
if args.img_size is None:
args.img_size = 32
elif args.img_size not in {32, 64, 128, 256, 512}:
raise Exception(
"img_size must be 32, 64, 128, 256. "
"But provided %r" % args.img_size)
args.categories = 10
args.in_planes = 1
elif args.dataset == 'cifar':
if args.img_size is None:
args.img_size = 32
elif args.img_size not in {32, 64, 128, 256, 512}:
raise Exception(
"img_size must be 32, 64, 128, 256, or 512. "
"But provided %r" % args.img_size)
args.categories = 10
args.in_planes = 3
elif args.dataset == 'imgnet12':
if args.img_size is None:
args.img_size = 256
elif args.img_size not in {32, 64, 128, 256}:
raise Exception(
"img_size must be 32, 64, 128, or 256. "
"But provided %r" % args.img_size)
if args.bs > 32:
raise Exception(
"With imgnet12, Batchsize bs should be <= 32. "
"Otherwise, you'll probably run out of GPU memory")
args.categories = 12
args.in_planes = 3
else:
raise NotImplementedError("Dataset unknown")
# NETWORK DOUBLE-CHECKS/WARNINGS
if args.no_BN and args.raw_inputs:
warnings.warn(
"no_BN also removes the first BN layer before the net "
"which serves as normalization of data when using raw_inputs. "
"Thus data input data stays unnormalized between 0 and 1")
if args.dilations is None:
dilation = 1 if args.crop else int(args.img_size / 32)
args.dilations = [dilation] * args.n_layers
elif len(args.dilations) == 1:
args.dilations = args.dilations * args.n_layers
elif len(args.dilations) != args.n_layers:
raise Exception(
'Argument dilations must be single integer, or a list of '
'integers of length n_layers')
# PENALTY/REGULARIZATION WARNINGS
if (args.lam, args.penalty, args.q) != (0., None, None):
if args.lam == 0.:
warnings.warn(
"Arguments penalty and/or q are given, but lam = 0. "
"Set lam > 0., otherwise not penalty is used")
elif args.penalty is None:
raise Exception("Argument lam > 0., but no penalty is defined.")
elif (args.penalty in {'adv', 'grad'}) and (args.q is None):
raise Exception(
"If argument penalty is 'adv' or 'grad', q must be in "
"[1, infty]")
if (args.penalty == 'pgd') and (args.steps is None):
raise Exception(
"Arguments steps must be specified with "
"penalty-option pgd")
return parser, args
def create_net(args):
net = ConvNet(
args.categories, args.n_layers, args.img_size, args.poolings,
args.pooltype, args.no_BN, args.no_act, args.dilations,
normalize_inputs=(not args.raw_inputs),
last_layers=args.last_layers, in_planes=args.in_planes)
return net
def initialize_params(m, no_act=False, distribution='normal'):
# gain = sqrt 2 for ReLU
gain = 1. if no_act else math.sqrt(2)
try: # if last layer, then gain = 1.
if m.unit_gain: # test if module has attribute 'unit_gain'
gain = 1.
except AttributeError:
pass
if type(m) in {torch.nn.Conv2d, torch.nn.Linear}:
if m.bias is not None:
torch.nn.init.constant_(m.bias, 0.)
out_ = m.weight.data.size(0)
in_ = m.weight.data.view(out_, -1).size(1)
sigma = gain / math.sqrt(in_)
if distribution == 'uniform':
xmax = math.sqrt(3) * sigma
torch.nn.init.uniform_(m.weight, a=-xmax, b=xmax)
elif distribution == 'normal':
torch.nn.init.normal_(m.weight, std=sigma)
else:
raise NotImplementedError(
"Argument distribution must be 'uniform' or 'normal'. "
"Got: '%r'" % distribution)
elif type(m) == torch.nn.BatchNorm2d:
if m.affine:
torch.nn.init.constant_(m.bias, 0.)
torch.nn.init.constant_(m.weight, 1.)
if m.track_running_stats:
torch.nn.init.constant_(m.running_mean, 0.)
torch.nn.init.constant_(m.running_var, 1.)
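# --- Usage sketch (not part of the original file) ---
# initialize_params is meant to be passed to nn.Module.apply (as done in
# main.py): it re-initializes every Conv2d/Linear with std gain/sqrt(fan_in)
# (gain = sqrt(2) unless no_act) and resets BatchNorm2d parameters and
# running statistics. The small model below is illustrative only.
if __name__ == '__main__':
    net = torch.nn.Sequential(
        torch.nn.Conv2d(3, 16, 3, padding=1),
        torch.nn.BatchNorm2d(16),
        torch.nn.ReLU(),
        torch.nn.Flatten(),
        torch.nn.Linear(16 * 32 * 32, 10))
    net.apply(lambda m: initialize_params(m, no_act=False, distribution='normal'))
    # std of the conv weights should be close to sqrt(2 / (3 * 3 * 3)) ~ 0.27
    print(net[0].weight.std().item())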
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import os
import time
import torch
import torch.nn.functional as F
from torch.autograd import grad
from data import CIFAR10, IMGNET12, MNIST
from vulnerability import compute_vulnerability
from utils import argument_parser, create_net, initialize_params
from penalties import addPenalty, pgd
# NB: Logger cannot be pushed to utils.py, because of eval(name)
class Logger(object):
def __init__(self):
self.logs = dict()
def log(self, step, delta_time, *args):
for name in args:
if type(name) != str:
raise Exception(
"Logger takes strings as inputs. "
"But got %s" % type(name))
if name not in self.logs:
self.logs[name] = []
self.logs[name].append([eval(name), step, delta_time])
def get_logs(self):
return self.logs
def set_logs(self, logs):
self.logs = logs # logs : dict
return
def grad_norms(loss, inputs, train=False):
bs = inputs.size(0)
g = grad(loss, inputs, retain_graph=train)[0] * bs
g = g.view(bs, -1)
norm1, norm2 = g.norm(1, 1).mean(), g.norm(2, 1).mean()
return norm1.item(), norm2.item()
def do_epoch(epoch, net, optimizer, loader, mode, args):
if mode not in {'train', 'eval', 'test', 'init'}:
# 'init' -> for initialization of batchnorms
# 'train' -> training (but no logging of vul & dam)
# 'eval' -> compute acc & gnorms but not vul & dam on validation
# 'test' -> compute all logged values on test set
raise Exception("Argument mode must be 'train', 'eval', 'test' or 'init'")
net.eval() if mode in {'eval', 'test'} else net.train()
device = next(net.parameters()).device
cum_loss = cum_pen = cum_norm1 = cum_norm2 = total = correct = 0.
advVul = advCorrect = cum_dam = 0.
predictedAdv = None
for i, (inputs, targets) in enumerate(loader):
optimizer.zero_grad()
inputs, targets = inputs.to(device), targets.to(device)
inputs.requires_grad = True
outputs = net(inputs)
loss = F.cross_entropy(outputs, targets)
norm1, norm2 = grad_norms(loss, inputs, mode == 'train')
if mode == 'train':
if args.lam > 0.:
penalty = addPenalty(net, inputs, outputs, targets, loss, args)
loss += penalty
cum_pen += penalty.item()
cum_loss += loss.item()
loss.backward()
optimizer.step()
elif mode == 'test': # compute adv vul & damage using custom PGD
eps = .004
advDam, advOutputs = pgd(
net, inputs, targets, loss, lam=eps, steps=10,
step_size=eps / (.75 * 10), random_start=False, train=False)
# Compute logging info
cum_norm1 += norm1
cum_norm2 += norm2
cum_loss += loss.item()
total += targets.size(0)
_, predicted = torch.max(outputs.data, 1)
correct += predicted.eq(targets.data).float().cpu().sum().item()
if mode == 'test':
cum_dam += advDam.item() / eps
_, predictedAdv = torch.max(advOutputs.data, 1)
advVul += predicted.size(0) - (
predictedAdv.eq(predicted.data).float().cpu().sum().item())
advCorrect += predictedAdv.eq(
targets.data).float().cpu().sum().item()
results = {
'acc': 100 * correct / total, # accuracy
'loss': cum_loss / (i + 1), # loss
'pen': cum_pen / (i + 1), # penalty
'norm1': cum_norm1 / (i + 1), # avg l1-gradient norm
'norm2': cum_norm2 / (i + 1), # avg l2-gradient norm
'av': 100 * advVul / total, # adversarial vulnerability
'da': cum_dam / (i + 1), # adversarial damage
'aa': 100 * advCorrect / total # adversarial accuracy
}
if args.log_step is not None and i % args.log_step == 0:
print("Epoch: %03d Batch: %04d Mode: %-5s Acc: %4.1f Loss: %4.2f "
"Pen: %5.3f gNorm1: %6.2f gNorm2: %6.3f Vul: %4.1f "
"Dam: %6.2f AdAcc %4.1f" % (
epoch, i, mode, *[
results[i] for i in ['acc', 'loss', 'pen', 'norm1',
'norm2', 'av', 'da', 'aa']]))
return results
if __name__ == '__main__':
parser, args = argument_parser()
logger = Logger()
args.path = os.path.join('results', args.name)
net = create_net(args)
# print(net)
if not os.path.exists(args.path):
os.makedirs(args.path, exist_ok=True) # requires Python >= 3.2
if os.path.isfile(os.path.join(args.path, 'last.pt')):
print('> Loading last saved state/network...')
state = torch.load(os.path.join(args.path, 'last.pt'))
net.load_state_dict(state['state_dict'])
lr = state['lr']
optimizer = torch.optim.SGD(net.parameters(), lr=lr, momentum=0.9)
optimizer.load_state_dict(state['optimizer'])
best_va_acc = state['best_va_acc']
start_ep = state['epoch'] + 1
logger.set_logs(state['logs'])
else: # initialize new net
print('> Initializing new network...')
net.apply(lambda m: initialize_params(m, args.no_act, 'normal'))
lr = args.lr
optimizer = torch.optim.SGD(net.parameters(), lr=lr, momentum=0.9)
best_va_acc = -1.
start_ep = -1
print('> Done.')
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
net = net.to(device)
torch.backends.cudnn.benchmark = True
print('> Loading dataset...')
if args.dataset == 'mnist':
tr_loader, va_loader, te_loader = MNIST(
root=args.datapath, bs=args.bs, valid_size=.1,
size=args.img_size, normalize=(not args.raw_inputs))
elif args.dataset == 'cifar':
tr_loader, va_loader, te_loader = CIFAR10(
root=args.datapath, bs=args.bs, valid_size=.1,
size=args.img_size, normalize=(not args.raw_inputs))
elif args.dataset == 'imgnet12':
tr_loader, va_loader, te_loader = IMGNET12(
root=args.datapath, bs=args.bs, valid_size=.1,
size=args.img_size, normalize=(not args.raw_inputs))
else:
raise NotImplementedError
print('> Done.')
print('> Starting training.')
time_start = time.time()
epochs = 0 if args.no_training else args.epochs
for epoch in range(start_ep, epochs):
time_start = time.time()
if epoch % 30 == 0 and epoch > 0:
# reload best parameters on validation set
net.load_state_dict(
torch.load(os.path.join(
args.path, 'best.pt'))['state_dict'])
# update learning rate
lr *= .5
for param_group in optimizer.param_groups:
param_group['lr'] = lr
mode = 'init' if epoch < 0 else 'train'
tr_res = do_epoch(epoch, net, optimizer, tr_loader, mode, args)
va_res = do_epoch(epoch, net, optimizer, va_loader, 'eval', args)
te_res = do_epoch(epoch, net, optimizer, te_loader, 'test', args)
time_per_epoch = time.time() - time_start
print("epoch %3d lr %.1e te_norm1 %7.3f te_norm2 %6.4f tr_loss %6.3f "
"tr_acc %5.2f te_acc %5.2f te_aa %5.2f te_av %5.2f te_da %6.3f "
"va_acc %5.2f be_va_acc %5.2f time %d" % (
epoch, lr, te_res['norm1'], te_res['norm2'], tr_res['loss'],
tr_res['acc'], te_res['acc'], te_res['aa'], te_res['av'],
te_res['da'], va_res['acc'], best_va_acc,
time_per_epoch))
# Log and save results
logger.log(epoch, time_per_epoch, 'lr', 'tr_res', 'va_res', 'te_res')
state = {
'lr': lr,
'epoch': epoch,
'state_dict': net.state_dict(),
'optimizer': optimizer.state_dict(),
'args': args,
'logs': logger.get_logs(),
'best_va_acc': best_va_acc
}
torch.save(state, os.path.join(args.path, 'last.pt'))
if va_res['acc'] > best_va_acc:
best_va_acc = va_res['acc']
torch.save(state, os.path.join(args.path, 'best.pt'))
print('> Finished Training')
# Compute adversarial vulnerability with foolbox
print('\n> Starting attacks.')
attacks = {'l1'}
# attacks = {'l1', 'l2', 'itl1', 'itl2', 'deepFool', 'pgd', 'boundary'}
for attack in attacks:
vulnerability = compute_vulnerability(
args, attack, net, args.n_attacks)
torch.save(vulnerability,
os.path.join(args.path, 'vulnerability_%s.pt' % attack))
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import math
import time
import numpy as np
import scipy.stats as st
from functools import partial
import torch
from torch.autograd import grad
import foolbox
from foolbox.distances import Linfinity, MSE
from data import CIFAR10, IMGNET12, MNIST
def do_pass(net, loader, args, means, stds):
correct = total = 0.
device = next(net.parameters()).device
means = torch.FloatTensor(means).to(device)
stds = torch.FloatTensor(stds).to(device)
for i, (inputs, targets) in enumerate(loader):
inputs, targets = inputs.to(device), targets.to(device)
inputs = (inputs - means) / stds
outputs = net(inputs)
_, predicted = torch.max(outputs.data, 1)
total += targets.size(0)
correct += predicted.eq(targets.data).float().sum().item()
if args.log_step is not None and i % args.log_step == 0:
print("Batch: %03d Acc: %4.1f" % (i, 100 * correct / total))
return 100 * correct / total
def classify(net, x, args, means, stds):
device = next(net.parameters()).device
x = x.to(device).view(1, 3, args.img_size, args.img_size)
means = torch.FloatTensor(means).to(device)
stds = torch.FloatTensor(stds).to(device)
x = ((x - means) / stds).detach()
x.requires_grad = True
y = net(x)
g = grad(y.sum(), x)[0].view(x.size(0), -1).norm().item()
_, top_indices = y.data.cpu().view(-1).topk(2)
return top_indices[0].item(), g
def myPrint(string, args):
if args.log_vul:
print(string)
def conf95(a):
return st.t.interval(
0.95, len(a) - 1, loc=np.nanmean(a),
scale=st.sem(a, nan_policy='omit'))
def compute_vulnerability(args, attack_name, net, n_attacks=-1):
"""
Computes the adversarial vulnerability of `net` using the foolbox package.
Parameters
----------
args : :class:`argparse.Namespace`
The arguments passed to main.py
attack_name : string
The attack type. Must be one of
{'l1', 'l2', 'itl1', 'itl2', 'pgd', 'deepFool', 'boundary'}
net : :class:`torch.nn.Module`
The network whose vulnerability is computed.
n_attacks : int
The number of attacks to use for the computation of vulnerability.
If -1 or greater than dataset-size, uses the entire dataset.
Default: -1.
"""
print('\nStarting attacks of type ' + attack_name)
# Reload data without normalizing it
print('> Loading dataset %s...' % args.dataset)
if args.dataset == 'mnist':
_, loader = MNIST(
root=args.datapath, bs=args.bs, valid_size=0.,
size=args.img_size, normalize=False)
elif args.dataset == 'cifar':
_, loader = CIFAR10(
root=args.datapath, bs=args.bs, valid_size=0.,
size=args.img_size, normalize=False)
elif args.dataset == 'imgnet12':
_, loader = IMGNET12(
root=args.datapath, bs=args.bs, valid_size=0.,
size=args.img_size, normalize=False)
else:
raise NotImplementedError
print('> Done.')
# Image-normalizations (must be same as in data.py)
if args.raw_inputs:
means = [0., 0., 0.]
stds = [1., 1., 1.]
elif args.dataset == "mnist":
means = [0.1307]
stds = [0.3081]
elif args.dataset == "cifar":
means = [0.4914, 0.4822, 0.4465]
stds = [0.2023, 0.1994, 0.2010]
elif args.dataset == "imgnet12":
means = [.453, .443, .403]
stds = {
256: [.232, .226, .225],
128: [.225, .218, .218],
64: [.218, .211, .211],
32: [.206, .200, .200]
}[args.img_size]
else:
raise NotImplementedError
means = np.array(means).reshape(-1, 1, 1)
stds = np.array(stds).reshape(-1, 1, 1)
net.eval()
print('> Computing test accuracy...')
te_acc = do_pass(net, loader, args, means, stds)
print('> Done. Computed test accuracy: %5.2f' % te_acc)
# construct attack
bounds = (0, 1)
model = foolbox.models.PyTorchModel(net, bounds=bounds,
preprocessing=(means, stds),
num_classes=args.categories)
# Choosing attack type
if attack_name == 'l1':
# vulnerability increases like sqrt(d) \propto img_size
# therefore, we divide the linfty-threshold by img_size
attack = partial(foolbox.attacks.FGSM(model, distance=Linfinity),
epsilons=1000, max_epsilon=1. / args.img_size)
elif attack_name == 'l2':
# to be visually constant, the l2-threshold increases like sqrt d;
# but vulnerability also increases like sqrt d;
# therefore, use a constant max_epsilon across dimensions d
attack = partial(foolbox.attacks.GradientAttack(model, distance=MSE),
epsilons=1000, max_epsilon=1.)
elif attack_name == 'itl1':
it, eps = 10, 1. / args.img_size
attack = partial(
foolbox.attacks.LinfinityBasicIterativeAttack(
model, distance=Linfinity),
iterations=it, epsilon=eps,
stepsize=eps / (.75 * it), binary_search=True)
elif attack_name == 'itl2':
it, eps = 10, 1.
attack = partial(
foolbox.attacks.L2BasicIterativeAttack(
model, distance=MSE),
iterations=it, epsilon=eps,
stepsize=eps / (.75 * it), binary_search=True)
elif attack_name == 'pgd':
it, eps = 10, 1. / args.img_size
attack = partial(foolbox.attacks.RandomPGD(model, distance=Linfinity),
iterations=it, epsilon=eps,
stepsize=eps / (.75 * it), binary_search=True)
elif attack_name == 'deepFool':
attack = foolbox.attacks.DeepFoolAttack(model, distance=MSE)
elif attack_name == 'boundary':
attack = partial(foolbox.attacks.BoundaryAttack(model, distance=MSE),
iterations=2000, log_every_n_steps=np.Infinity,
verbose=False)
else:
raise NotImplementedError(
"attack_name must be 'l1', 'l2', 'itl1', 'itl2', "
"'deepFool' or 'boundary'")
n_iterations = 0
results = {}
results['l2_snr'] = []
results['clean_grad'] = []
results['dirty_grad'] = []
results['l2_norm'] = []
results['linf_norm'] = []
n_fooled = 0
print('> Creating empty image-tensors')
n_saved = 64 if (n_attacks == -1) else min(n_attacks, 64)
clean_images = torch.zeros(n_saved, 3, args.img_size, args.img_size)
dirty_images = torch.zeros(n_saved, 3, args.img_size, args.img_size)
print('> Done.')
myPrint(("{:>15} " * 5).format(
"clean_grad", "dirty_grad", "linf_norm", "l2_norm", "l2_snr"), args)
t0 = time.time()
for i, (images, labels) in enumerate(loader):
if n_iterations == n_attacks:
break
for j, clean_image in enumerate(images):
clean_label, clean_grad = classify(net, clean_image,
args, means, stds)
dirty_image_np = attack(clean_image.numpy(), clean_label)
if dirty_image_np is not None: # i.e. if adversarial was found
dirty_image = torch.Tensor(dirty_image_np)
_, dirty_grad = classify(net, dirty_image,
args, means, stds)
if j < n_saved: # only save the first n_saved images of each batch
dirty_images[j] = dirty_image.clone()
clean_images[j] = clean_image.clone()
l2_norm = (clean_image - dirty_image).norm().item()
linf_norm = (clean_image - dirty_image).abs().max().item()
l2_snr = 20. * math.log10(
clean_image.norm().item() / (l2_norm + 1e-6))
else:
l2_snr = dirty_grad = l2_norm = linf_norm = np.NaN
results['l2_snr'].append(l2_snr)
results['clean_grad'].append(clean_grad)
results['dirty_grad'].append(dirty_grad)
results['l2_norm'].append(l2_norm)
results['linf_norm'].append(linf_norm)
fmt_str = "{:>15.6f} " * 5
if ((attack.func._default_distance == MSE and
l2_norm < .005 * np.sqrt(args.img_size)) or
(attack.func._default_distance == Linfinity and
linf_norm < .005)):
fmt_str += " * fooled!"
n_fooled += 1
myPrint(fmt_str.format(clean_grad, dirty_grad, linf_norm,
l2_norm, l2_snr),
args)
n_iterations += 1
if n_iterations == n_attacks:
break
# Printing summary
summary = {}
print("\n Summary for network in '{}' of test accuracy {}".format(
args.path, te_acc))
for key, value in results.items():
low95, high95 = conf95(value)
print("{:>10} mean:{:>10.5f} std:{:>10.5f} conf95:({:>10.5f}, "
"{:>10.5f}) minmax:({:>10.5f}, {:>10.5f})".format(
key, np.nanmean(value), np.nanstd(value), low95, high95,
np.nanmin(value), np.nanmax(value)))
summary[key] = [np.nanmean(value), np.nanstd(value), low95, high95]
percent = 100 * n_fooled / float(n_iterations)
print("{:>10} {:10d}s".format("Time", int(time.time() - t0)))
print("{:>10} {:10.1f}%".format("percent", percent))
# Preparing the output
output = dict()
output['summary'] = summary
output['results'] = results
output['clean_images'] = clean_images
output['dirty_images'] = dirty_images
output['percent'] = percent
output['te_acc'] = te_acc
return output
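# --- Usage sketch (not part of the original file) ---
# compute_vulnerability needs a trained net, the loaders from data.py and
# foolbox, so it is exercised from main.py. conf95 can be checked in
# isolation; NaNs (failed attacks) are ignored, as in the summary above.
if __name__ == '__main__':
    dummy = np.array([1.0, 1.2, 0.9, np.nan])
    low95, high95 = conf95(dummy)
    print('mean: %.3f  conf95: (%.3f, %.3f)' % (np.nanmean(dummy), low95, high95))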
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import os
import numpy as np
from PIL import Image
import torch
from torch.utils.data.sampler import SubsetRandomSampler
import torchvision.transforms as transforms
import torchvision.datasets as datasets
def IMGNET12(root='~/datasets/imgnet12/', bs=32, bs_test=None, num_workers=32,
valid_size=.1, size=256, crop=False, normalize=False):
# Datafolder '~/datasets/imgnet12/' should contain folders train/ and val/,
# each of which should contain 12 subfolders (1 per class) with .jpg files
root = os.path.expanduser(root)
# original means = [.485, .456, .406]
# original stds = [0.229, 0.224, 0.225]
means = [.453, .443, .403]
stds = {
256: [.232, .226, .225],
128: [.225, .218, .218],
64: [.218, .211, .211],
32: [.206, .200, .200]
}
if normalize:
normalize = transforms.Normalize(mean=means,
std=stds[size])
else:
normalize = transforms.Normalize((0., 0., 0),
(1., 1., 1.))
if bs_test is None:
bs_test = bs
if crop:
tr_downsamplingOp = transforms.RandomCrop(size)
te_downsamplingOp = transforms.CenterCrop(size)
else:
tr_downsamplingOp = transforms.Resize(size)
te_downsamplingOp = transforms.Resize(size)
preprocess = [transforms.Resize(256), transforms.CenterCrop(256)]
tr_transforms = transforms.Compose([
*preprocess,
tr_downsamplingOp,
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
normalize, ])
te_transforms = transforms.Compose([
*preprocess,
te_downsamplingOp,
transforms.ToTensor(),
normalize, ])
tr_dataset = datasets.ImageFolder(root + '/train', transform=tr_transforms)
te_dataset = datasets.ImageFolder(root + '/val', transform=te_transforms)
# Split training in train and valid set
num_train = len(tr_dataset)
indices = list(range(num_train))
split = int(np.floor(valid_size * num_train))
np.random.seed(42)
np.random.shuffle(indices)
tr_idx, va_idx = indices[split:], indices[:split]
tr_sampler = SubsetRandomSampler(tr_idx)
va_sampler = SubsetRandomSampler(va_idx)
tr_loader = torch.utils.data.DataLoader(
tr_dataset, batch_size=bs,
num_workers=num_workers, pin_memory=True, sampler=tr_sampler)
va_loader = torch.utils.data.DataLoader(
tr_dataset, batch_size=bs_test,
num_workers=num_workers, pin_memory=True, sampler=va_sampler)
te_loader = torch.utils.data.DataLoader(
te_dataset, batch_size=bs_test, shuffle=False,
num_workers=num_workers, pin_memory=True)
if valid_size > 0.:
return tr_loader, va_loader, te_loader
else:
return tr_loader, te_loader
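# --- Usage sketch (not part of the original file) ---
# IMGNET12 expects an ImageFolder-style layout under `root` (names below are
# illustrative): root/train/<class_x>/*.jpg and root/val/<class_x>/*.jpg, with
# 12 class subfolders each. With valid_size > 0 it returns
# (train, valid, test) loaders, otherwise (train, test), e.g.:
#     tr_loader, va_loader, te_loader = IMGNET12(
#         root='~/datasets/imgnet12/', bs=32, valid_size=.1, size=256)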
def CIFAR10(root='~/datasets/cifar10/', bs=128, bs_test=None,
augment_training=True, valid_size=0., size=32, num_workers=1,
normalize=False):
root = os.path.expanduser(root)
if bs_test is None:
bs_test = bs
if normalize:
normalize = transforms.Normalize((0.4914, 0.4822, 0.4465),
(0.2023, 0.1994, 0.2010))
else:
normalize = transforms.Normalize((0., 0., 0),
(1., 1., 1.))
transform_train = transforms.Compose([
transforms.RandomCrop(32, padding=4),
transforms.RandomHorizontalFlip(),
transforms.Resize(size, Image.NEAREST),
transforms.ToTensor(),
normalize
])
transform_test = transforms.Compose([
transforms.Resize(size, Image.NEAREST),
transforms.ToTensor(),
normalize
])
transform_valid = transform_test
if augment_training is False:
transform_train = transform_test
dataset_tr = datasets.CIFAR10(root=root,
train=True,
transform=transform_train)
dataset_va = datasets.CIFAR10(root=root,
train=True,
transform=transform_valid)
dataset_te = datasets.CIFAR10(root=root,
train=False,
transform=transform_test)
# Split training in train and valid set
num_train = len(dataset_tr)
indices = list(range(num_train))
split = int(np.floor(valid_size * num_train))
np.random.seed(42)
np.random.shuffle(indices)
train_idx, valid_idx = indices[split:], indices[:split]
train_sampler = SubsetRandomSampler(train_idx)
valid_sampler = SubsetRandomSampler(valid_idx)
loader_tr = torch.utils.data.DataLoader(dataset_tr,
batch_size=bs,
sampler=train_sampler,
num_workers=num_workers)
loader_va = torch.utils.data.DataLoader(dataset_va,
batch_size=bs,
sampler=valid_sampler,
num_workers=num_workers)
# add pin_memory
loader_te = torch.utils.data.DataLoader(dataset_te,
batch_size=bs_test,
shuffle=False,
num_workers=num_workers)
if valid_size > 0:
return loader_tr, loader_va, loader_te
else:
return loader_tr, loader_te
def MNIST(root='~/datasets/mnist/', bs=128, bs_test=None,
augment_training=True, valid_size=0., size=32, num_workers=1,
normalize=False):
root = os.path.expanduser(root)
if bs_test is None:
bs_test = bs
if normalize:
normalize = transforms.Normalize((0.1307,), (0.3081,))
else:
normalize = transforms.Normalize((0.,), (1.,))
transform = transforms.Compose([
transforms.Resize(32, Image.BILINEAR),
transforms.Resize(size, Image.NEAREST),
transforms.Grayscale(num_output_channels=1),
transforms.ToTensor(),
normalize
])
dataset_tr = datasets.MNIST(root=root,
train=True,
transform=transform)
dataset_va = datasets.MNIST(root=root,
train=True,
transform=transform)
dataset_te = datasets.MNIST(root=root,
train=False,
transform=transform)
# Split training in train and valid set
num_train = len(dataset_tr)
indices = list(range(num_train))
split = int(np.floor(valid_size * num_train))
np.random.seed(42)
np.random.shuffle(indices)
train_idx, valid_idx = indices[split:], indices[:split]
train_sampler = SubsetRandomSampler(train_idx)
valid_sampler = SubsetRandomSampler(valid_idx)
loader_tr = torch.utils.data.DataLoader(dataset_tr,
batch_size=bs,
sampler=train_sampler,
num_workers=num_workers)
loader_va = torch.utils.data.DataLoader(dataset_va,
batch_size=bs,
sampler=valid_sampler,
num_workers=num_workers)
# add pin_memory
loader_te = torch.utils.data.DataLoader(dataset_te,
batch_size=bs_test,
shuffle=False,
num_workers=num_workers)
if valid_size > 0:
return loader_tr, loader_va, loader_te
else:
return loader_tr, loader_te
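# --- Usage sketch (not part of the original file) ---
# Minimal example of building the MNIST loaders defined above. It assumes the
# raw MNIST files are already present under `root` (the dataset constructors
# above do not pass download=True).
if __name__ == '__main__':
    tr_loader, va_loader, te_loader = MNIST(
        root='~/datasets/mnist/', bs=128, valid_size=.1, size=32,
        normalize=True)
    images, labels = next(iter(tr_loader))
    print(images.shape, labels.shape)  # torch.Size([128, 1, 32, 32]) torch.Size([128])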
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
from functools import reduce
import torch.nn as nn
import torch.nn.functional as F
class Identity(nn.Module):
def __init__(self):
super(Identity, self).__init__()
def forward(self, x):
return x
class FlexibleAvgPool2d(nn.Module):
def __init__(self):
super().__init__()
def forward(self, inputs):
return F.avg_pool2d(inputs, kernel_size=inputs.size(2))
class WeightPool(nn.Module):
def __init__(self, in_planes, kernel_size):
super(WeightPool, self).__init__()
self.conv = nn.Conv2d(in_planes, in_planes, kernel_size=kernel_size,
stride=kernel_size, groups=in_planes, bias=False)
self.conv.unit_gain = True
def forward(self, x):
return self.conv(x)
class WeightPoolOut(nn.Module):
def __init__(self, in_planes, plane_size, categories, unit_gain=False):
super(WeightPoolOut, self).__init__()
self.in_planes = in_planes
self.conv = nn.Conv2d(in_planes, in_planes, kernel_size=plane_size,
groups=in_planes, bias=False)
self.linear = nn.Linear(in_planes, categories)
self.linear.unit_gain = unit_gain
def forward(self, x):
out = self.conv(x)
out = out.view(-1, self.in_planes)
return self.linear(out)
class MaxPoolOut(nn.Module):
def __init__(self, in_planes, plane_size, categories, unit_gain=False):
super(MaxPoolOut, self).__init__()
self.in_planes = in_planes
self.maxpool = nn.MaxPool2d(kernel_size=plane_size)
self.linear = nn.Linear(in_planes, categories)
self.linear.unit_gain = unit_gain
def forward(self, x):
out = self.maxpool(x)
out = out.view(-1, self.in_planes)
return self.linear(out)
class AvgPoolOut(nn.Module):
def __init__(self, in_planes, plane_size, categories, unit_gain=False):
super(AvgPoolOut, self).__init__()
self.in_planes = in_planes
self.avgpool = nn.AvgPool2d(kernel_size=plane_size)
self.linear = nn.Linear(in_planes, categories)
self.linear.unit_gain = unit_gain
def forward(self, x):
out = self.avgpool(x)
out = out.view(-1, self.in_planes)
return self.linear(out)
class FCout(nn.Module):
def __init__(self, in_planes, plane_size, categories, unit_gain=False):
super(FCout, self).__init__()
if type(plane_size) == tuple and len(plane_size) == 2:
plane_size = reduce(lambda x, y: x * y, plane_size)
else:
plane_size = plane_size ** 2
print('Plane size = ', plane_size)
self.in_planes = in_planes
self.plane_size = plane_size
self.linear = nn.Linear(in_planes * plane_size, categories)
self.linear.unit_gain = unit_gain
def forward(self, x):
out = x.view(-1, self.in_planes * self.plane_size)
return self.linear(out)
class ConvLayer(nn.Module):
def __init__(self, in_planes, planes, pooltype=None, no_BN=False,
no_act=False, dilation=1):
super(ConvLayer, self).__init__()
self.pad = nn.ReflectionPad2d(dilation)
if pooltype is None: # Usual conv
self.conv = nn.Conv2d(in_planes, planes, 3, padding=0,
stride=1, dilation=dilation)
elif pooltype == 'avgpool': # Average Pool
self.conv = nn.Sequential(
nn.Conv2d(in_planes, planes, 3, dilation=dilation),
nn.AvgPool2d(2))
elif pooltype == 'subsamp': # Strided Conv
self.conv = nn.Conv2d(
in_planes, planes, 3, stride=2, dilation=dilation)
elif pooltype == 'maxpool': # Max Pool
self.conv = nn.Sequential(
nn.Conv2d(in_planes, planes, 3, dilation=dilation),
nn.MaxPool2d(2))
elif pooltype == 'weightpool':
self.conv = nn.Sequential(
nn.Conv2d(in_planes, planes, 3, dilation=dilation),
WeightPool(planes, 2))
else:
raise NotImplementedError
if no_act:
self.act = lambda x: x
else:
self.act = nn.ReLU()
if no_BN:
self.bn = lambda x: x # Identity()
else:
self.bn = nn.BatchNorm2d(planes)
def forward(self, x):
out = self.act(self.bn(self.conv(self.pad(x))))
return out
class ConvNet(nn.Module):
def __init__(
self, categories=10, n_layers=3, in_size=32, poolings=None,
pooltype='avgpool', no_BN=False, no_act=False, dilations=1,
normalize_inputs=False, last_layers='maxpool', in_planes=3):
# last_layers in {'maxpool', 'avgpool', 'fc', 'weightpool'}
super(ConvNet, self).__init__()
poolings = [] if poolings is None else poolings
if type(dilations) != list:
dilations = [dilations] * n_layers
self.in_planes = in_planes
if normalize_inputs or no_BN:
self.bn = (lambda x: x)
else:
self.bn = nn.BatchNorm2d(self.in_planes)
self.layers = self._make_layers(
ConvLayer, 64, n_layers, poolings, pooltype,
no_BN, no_act, dilations)
# compute input-size to last layers from input-size of the net
# self.in_planes is changed by _make_layers to the nbr of out-planes
out_size = int(in_size / (2 ** (len(poolings))))
self.last_layers = self._make_last_layers(
out_size, categories, last_layers)
def _make_layers(self, block, planes, num_blocks, poolings,
pooltype, no_BN, no_act, dilations):
# pooltypes = [0] + [0] * (num_blocks - 1)
pooltypes = [None] * num_blocks
for pool in poolings:
pooltypes[pool] = pooltype
layers = []
for pool, dilation in zip(pooltypes, dilations):
layers.append(block(self.in_planes, planes, pool, no_BN, no_act,
dilation))
self.in_planes = planes
return nn.Sequential(*layers)
def _make_last_layers(self, in_size, categories, last_layers):
if last_layers == 'maxpool':
last_layers = MaxPoolOut(
self.in_planes, in_size, categories, unit_gain=True)
elif last_layers == 'avgpool':
last_layers = AvgPoolOut(
self.in_planes, in_size, categories, unit_gain=True)
elif last_layers == 'weightpool':
last_layers = WeightPoolOut(
self.in_planes, in_size, categories, unit_gain=True)
elif last_layers == 'fc':
last_layers = FCout(
self.in_planes, in_size, categories, unit_gain=True)
else:
raise NotImplementedError(
'Argument last_layers must be maxpool, avgpool, fc or weightpool. '
'But got: %s' % last_layers)
return last_layers
def forward(self, x):
out = self.layers(self.bn(x))
out = self.last_layers(out)
return out
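# --- Usage sketch (not part of the original file) ---
# Illustrative instantiation of ConvNet: 5 conv layers with strided-conv
# ('subsamp') downsampling in layers 1 and 3 (0-indexed) and an avgpool +
# linear head. The spatial size is halved once per entry in `poolings`.
if __name__ == '__main__':
    import torch
    net = ConvNet(categories=10, n_layers=5, in_size=32, poolings=[1, 3],
                  pooltype='subsamp', no_BN=False, no_act=False, dilations=1,
                  normalize_inputs=False, last_layers='avgpool', in_planes=3)
    x = torch.rand(2, 3, 32, 32)
    print(net(x).shape)  # expected: torch.Size([2, 10])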
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import detectron2.utils.comm as comm
from detectron2.checkpoint import DetectionCheckpointer
from detectron2.config import get_cfg
from detectron2.engine import default_argument_parser, default_setup, launch
from adapteacher import add_ateacher_config
from adapteacher.engine.trainer import ATeacherTrainer, BaselineTrainer
# hacky way to register
from adapteacher.modeling.meta_arch.rcnn import TwoStagePseudoLabGeneralizedRCNN, DAobjTwoStagePseudoLabGeneralizedRCNN
from adapteacher.modeling.meta_arch.vgg import build_vgg_backbone # noqa
from adapteacher.modeling.proposal_generator.rpn import PseudoLabRPN
from adapteacher.modeling.roi_heads.roi_heads import StandardROIHeadsPseudoLab
import adapteacher.data.datasets.builtin
from adapteacher.modeling.meta_arch.ts_ensemble import EnsembleTSModel
def setup(args):
"""
Create configs and perform basic setups.
"""
cfg = get_cfg()
add_ateacher_config(cfg)
cfg.merge_from_file(args.config_file)
cfg.merge_from_list(args.opts)
cfg.freeze()
default_setup(cfg, args)
return cfg
def main(args):
cfg = setup(args)
if cfg.SEMISUPNET.Trainer == "ateacher":
Trainer = ATeacherTrainer
elif cfg.SEMISUPNET.Trainer == "baseline":
Trainer = BaselineTrainer
else:
raise ValueError("Trainer Name is not found.")
if args.eval_only:
if cfg.SEMISUPNET.Trainer == "ateacher":
model = Trainer.build_model(cfg)
model_teacher = Trainer.build_model(cfg)
ensem_ts_model = EnsembleTSModel(model_teacher, model)
DetectionCheckpointer(
ensem_ts_model, save_dir=cfg.OUTPUT_DIR
).resume_or_load(cfg.MODEL.WEIGHTS, resume=args.resume)
res = Trainer.test(cfg, ensem_ts_model.modelTeacher)
else:
model = Trainer.build_model(cfg)
DetectionCheckpointer(model, save_dir=cfg.OUTPUT_DIR).resume_or_load(
cfg.MODEL.WEIGHTS, resume=args.resume
)
res = Trainer.test(cfg, model)
return res
trainer = Trainer(cfg)
trainer.resume_or_load(resume=args.resume)
return trainer.train()
if __name__ == "__main__":
args = default_argument_parser().parse_args()
print("Command Line Args:", args)
launch(
main,
args.num_gpus,
num_machines=args.num_machines,
machine_rank=args.machine_rank,
dist_url=args.dist_url,
args=(args,),
)
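# --- Usage sketch (not part of the original file) ---
# Typical invocations via detectron2's default_argument_parser (config path,
# GPU count and weights below are illustrative):
#   Training:
#     python train_net.py --num-gpus 8 --config-file configs/<experiment>.yaml \
#         OUTPUT_DIR output/exp1
#   Evaluation only (loads the teacher/student ensemble when Trainer=ateacher):
#     python train_net.py --eval-only --config-file configs/<experiment>.yaml \
#         MODEL.WEIGHTS output/exp1/model_final.pth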
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
from detectron2.config import CfgNode as CN
def add_ateacher_config(cfg):
"""
Add config for semisupnet.
"""
_C = cfg
_C.TEST.VAL_LOSS = True
_C.MODEL.RPN.UNSUP_LOSS_WEIGHT = 1.0
_C.MODEL.RPN.LOSS = "CrossEntropy"
_C.MODEL.ROI_HEADS.LOSS = "CrossEntropy"
_C.SOLVER.IMG_PER_BATCH_LABEL = 1
_C.SOLVER.IMG_PER_BATCH_UNLABEL = 1
_C.SOLVER.FACTOR_LIST = (1,)
_C.DATASETS.TRAIN_LABEL = ("coco_2017_train",)
_C.DATASETS.TRAIN_UNLABEL = ("coco_2017_train",)
_C.DATASETS.CROSS_DATASET = True
_C.TEST.EVALUATOR = "COCOeval"
_C.SEMISUPNET = CN()
# Output dimension of the MLP projector after `res5` block
_C.SEMISUPNET.MLP_DIM = 128
# Semi-supervised training
_C.SEMISUPNET.Trainer = "ateacher"
_C.SEMISUPNET.BBOX_THRESHOLD = 0.7
_C.SEMISUPNET.PSEUDO_BBOX_SAMPLE = "thresholding"
_C.SEMISUPNET.TEACHER_UPDATE_ITER = 1
_C.SEMISUPNET.BURN_UP_STEP = 12000
_C.SEMISUPNET.EMA_KEEP_RATE = 0.0
_C.SEMISUPNET.UNSUP_LOSS_WEIGHT = 4.0
_C.SEMISUPNET.SUP_LOSS_WEIGHT = 0.5
_C.SEMISUPNET.LOSS_WEIGHT_TYPE = "standard"
_C.SEMISUPNET.DIS_TYPE = "res4"
_C.SEMISUPNET.DIS_LOSS_WEIGHT = 0.1
# dataloader
# supervision level
_C.DATALOADER.SUP_PERCENT = 100.0 # 5 = 5% dataset as labeled set
_C.DATALOADER.RANDOM_DATA_SEED = 0 # random seed to read data
_C.DATALOADER.RANDOM_DATA_SEED_PATH = "dataseed/COCO_supervision.txt"
_C.EMAMODEL = CN()
_C.EMAMODEL.SUP_CONSIST = True
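# --- Usage sketch (not part of the original file) ---
# add_ateacher_config is applied to the default config before the experiment
# yaml and command-line opts are merged (see setup() in train_net.py), so the
# keys above can be overridden like any other detectron2 option. The override
# values below are illustrative only.
if __name__ == "__main__":
    from detectron2.config import get_cfg
    cfg = get_cfg()
    add_ateacher_config(cfg)
    cfg.merge_from_list(["SEMISUPNET.BURN_UP_STEP", "20000",
                         "DATALOADER.SUP_PERCENT", "10.0"])
    print(cfg.SEMISUPNET.BURN_UP_STEP, cfg.DATALOADER.SUP_PERCENT)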
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
from .config import add_ateacher_config
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
from detectron2.checkpoint.c2_model_loading import align_and_update_state_dicts
from detectron2.checkpoint import DetectionCheckpointer
# for load_student_model
from typing import Any
from fvcore.common.checkpoint import _strip_prefix_if_present, _IncompatibleKeys
class DetectionTSCheckpointer(DetectionCheckpointer):
def _load_model(self, checkpoint):
if checkpoint.get("__author__", None) == "Caffe2":
# pretrained model weight: only update student model
if checkpoint.get("matching_heuristics", False):
self._convert_ndarray_to_tensor(checkpoint["model"])
# convert weights by name-matching heuristics
model_state_dict = self.model.modelStudent.state_dict()
align_and_update_state_dicts(
model_state_dict,
checkpoint["model"],
c2_conversion=checkpoint.get("__author__", None) == "Caffe2",
)
checkpoint["model"] = model_state_dict
# for non-caffe2 models, use standard ways to load it
incompatible = self._load_student_model(checkpoint)
model_buffers = dict(self.model.modelStudent.named_buffers(recurse=False))
for k in ["pixel_mean", "pixel_std"]:
# Ignore missing key message about pixel_mean/std.
# Though they may be missing in old checkpoints, they will be correctly
# initialized from config anyway.
if k in model_buffers:
try:
incompatible.missing_keys.remove(k)
except ValueError:
pass
return incompatible
else: # whole model
if checkpoint.get("matching_heuristics", False):
self._convert_ndarray_to_tensor(checkpoint["model"])
# convert weights by name-matching heuristics
model_state_dict = self.model.state_dict()
align_and_update_state_dicts(
model_state_dict,
checkpoint["model"],
c2_conversion=checkpoint.get("__author__", None) == "Caffe2",
)
checkpoint["model"] = model_state_dict
# for non-caffe2 models, use standard ways to load it
incompatible = super()._load_model(checkpoint)
model_buffers = dict(self.model.named_buffers(recurse=False))
for k in ["pixel_mean", "pixel_std"]:
# Ignore missing key message about pixel_mean/std.
# Though they may be missing in old checkpoints, they will be correctly
# initialized from config anyway.
if k in model_buffers:
try:
incompatible.missing_keys.remove(k)
except ValueError:
pass
return incompatible
def _load_student_model(self, checkpoint: Any) -> _IncompatibleKeys: # pyre-ignore
checkpoint_state_dict = checkpoint.pop("model")
self._convert_ndarray_to_tensor(checkpoint_state_dict)
# if the state_dict comes from a model that was wrapped in a
# DataParallel or DistributedDataParallel during serialization,
# remove the "module" prefix before performing the matching.
_strip_prefix_if_present(checkpoint_state_dict, "module.")
# work around https://github.com/pytorch/pytorch/issues/24139
model_state_dict = self.model.modelStudent.state_dict()
incorrect_shapes = []
for k in list(checkpoint_state_dict.keys()):
if k in model_state_dict:
shape_model = tuple(model_state_dict[k].shape)
shape_checkpoint = tuple(checkpoint_state_dict[k].shape)
if shape_model != shape_checkpoint:
incorrect_shapes.append((k, shape_checkpoint, shape_model))
checkpoint_state_dict.pop(k)
# pyre-ignore
incompatible = self.model.modelStudent.load_state_dict(
checkpoint_state_dict, strict=False
)
return _IncompatibleKeys(
missing_keys=incompatible.missing_keys,
unexpected_keys=incompatible.unexpected_keys,
incorrect_shapes=incorrect_shapes,
)
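# --- Usage sketch (not part of the original file) ---
# DetectionTSCheckpointer is meant to wrap the teacher/student ensemble
# (EnsembleTSModel) used by the adaptive-teacher trainer: Caffe2-style
# pretrained weights are loaded into model.modelStudent only, while regular
# checkpoints restore the whole ensemble. Illustrative call:
#     ensem_ts_model = EnsembleTSModel(model_teacher, model_student)
#     checkpointer = DetectionTSCheckpointer(
#         ensem_ts_model, save_dir=cfg.OUTPUT_DIR)
#     checkpointer.resume_or_load(cfg.MODEL.WEIGHTS, resume=False)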
# class DetectionCheckpointer(Checkpointer):
# """
# Same as :class:`Checkpointer`, but is able to handle models in detectron & detectron2
# model zoo, and apply conversions for legacy models.
# """
# def __init__(self, model, save_dir="", *, save_to_disk=None, **checkpointables):
# is_main_process = comm.is_main_process()
# super().__init__(
# model,
# save_dir,
# save_to_disk=is_main_process if save_to_disk is None else save_to_disk,
# **checkpointables,
# )
# def _load_file(self, filename):
# if filename.endswith(".pkl"):
# with PathManager.open(filename, "rb") as f:
# data = pickle.load(f, encoding="latin1")
# if "model" in data and "__author__" in data:
# # file is in Detectron2 model zoo format
# self.logger.info("Reading a file from '{}'".format(data["__author__"]))
# return data
# else:
# # assume file is from Caffe2 / Detectron1 model zoo
# if "blobs" in data:
# # Detection models have "blobs", but ImageNet models don't
# data = data["blobs"]
# data = {k: v for k, v in data.items() if not k.endswith("_momentum")}
# return {"model": data, "__author__": "Caffe2", "matching_heuristics": True}
# loaded = super()._load_file(filename) # load native pth checkpoint
# if "model" not in loaded:
# loaded = {"model": loaded}
# return loaded
# def _load_model(self, checkpoint):
# if checkpoint.get("matching_heuristics", False):
# self._convert_ndarray_to_tensor(checkpoint["model"])
# # convert weights by name-matching heuristics
# model_state_dict = self.model.state_dict()
# align_and_update_state_dicts(
# model_state_dict,
# checkpoint["model"],
# c2_conversion=checkpoint.get("__author__", None) == "Caffe2",
# )
# checkpoint["model"] = model_state_dict
# # for non-caffe2 models, use standard ways to load it
# super()._load_model(checkpoint)
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import torch
from detectron2.config import CfgNode
from detectron2.solver.lr_scheduler import WarmupCosineLR, WarmupMultiStepLR
from .lr_scheduler import WarmupTwoStageMultiStepLR
def build_lr_scheduler(
cfg: CfgNode, optimizer: torch.optim.Optimizer
) -> torch.optim.lr_scheduler._LRScheduler:
"""
Build a LR scheduler from config.
"""
name = cfg.SOLVER.LR_SCHEDULER_NAME
if name == "WarmupMultiStepLR":
return WarmupMultiStepLR(
optimizer,
cfg.SOLVER.STEPS,
cfg.SOLVER.GAMMA,
warmup_factor=cfg.SOLVER.WARMUP_FACTOR,
warmup_iters=cfg.SOLVER.WARMUP_ITERS,
warmup_method=cfg.SOLVER.WARMUP_METHOD,
)
elif name == "WarmupCosineLR":
return WarmupCosineLR(
optimizer,
cfg.SOLVER.MAX_ITER,
warmup_factor=cfg.SOLVER.WARMUP_FACTOR,
warmup_iters=cfg.SOLVER.WARMUP_ITERS,
warmup_method=cfg.SOLVER.WARMUP_METHOD,
)
elif name == "WarmupTwoStageMultiStepLR":
return WarmupTwoStageMultiStepLR(
optimizer,
cfg.SOLVER.STEPS,
factor_list=cfg.SOLVER.FACTOR_LIST,
gamma=cfg.SOLVER.GAMMA,
warmup_factor=cfg.SOLVER.WARMUP_FACTOR,
warmup_iters=cfg.SOLVER.WARMUP_ITERS,
warmup_method=cfg.SOLVER.WARMUP_METHOD,
)
else:
raise ValueError("Unknown LR scheduler: {}".format(name))
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
from bisect import bisect_right
from typing import List
import torch
from detectron2.solver.lr_scheduler import _get_warmup_factor_at_iter
class WarmupTwoStageMultiStepLR(torch.optim.lr_scheduler._LRScheduler):
def __init__(
self,
optimizer: torch.optim.Optimizer,
milestones: List[int],
factor_list: List[float],
gamma: float = 0.1,
warmup_factor: float = 0.001,
warmup_iters: int = 1000,
warmup_method: str = "linear",
last_epoch: int = -1,
):
if list(milestones) != sorted(milestones):
raise ValueError(
"Milestones should be a list of increasing integers. "
"Got {}".format(milestones)
)
if len(milestones) + 1 != len(factor_list):
raise ValueError("Length of milestones should match length of factor_list.")
self.milestones = milestones
self.gamma = gamma
self.warmup_factor = warmup_factor
self.warmup_iters = warmup_iters
self.warmup_method = warmup_method
self.factor_list = factor_list
super().__init__(optimizer, last_epoch)
def get_lr(self) -> List[float]:
warmup_factor = _get_warmup_factor_at_iter(
self.warmup_method, self.last_epoch, self.warmup_iters, self.warmup_factor
)
return [
base_lr
* warmup_factor
* self.factor_list[bisect_right(self.milestones, self.last_epoch)]
for base_lr in self.base_lrs
]
def _compute_values(self) -> List[float]:
# The new interface
return self.get_lr()
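# --- Usage sketch (not part of the original file) ---
# Illustrative, standalone construction of the scheduler: one factor per
# stage (len(milestones) + 1 entries), multiplied on top of the warmup factor.
if __name__ == "__main__":
    opt = torch.optim.SGD([torch.zeros(1, requires_grad=True)], lr=0.01)
    sched = WarmupTwoStageMultiStepLR(
        opt, milestones=[10, 20], factor_list=[1., 1., 0.1],
        warmup_iters=5, warmup_factor=0.1)
    for _ in range(25):
        opt.step()
        sched.step()
    print(opt.param_groups[0]["lr"])  # 0.01 * 0.1 after the second milestone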
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import numpy as np
import torch
import torch.nn as nn
from torch.nn import functional as F
from detectron2.modeling.meta_arch.build import META_ARCH_REGISTRY
from detectron2.modeling.meta_arch.rcnn import GeneralizedRCNN
from detectron2.config import configurable
# from detectron2.modeling.meta_arch.build import META_ARCH_REGISTRY
# from detectron2.modeling.meta_arch.rcnn import GeneralizedRCNN
import logging
from typing import Dict, Tuple, List, Optional
from collections import OrderedDict
from detectron2.modeling.proposal_generator import build_proposal_generator
from detectron2.modeling.backbone import build_backbone, Backbone
from detectron2.modeling.roi_heads import build_roi_heads
from detectron2.utils.events import get_event_storage
from detectron2.structures import ImageList
############### Image discriminator ##############
class FCDiscriminator_img(nn.Module):
def __init__(self, num_classes, ndf1=256, ndf2=128):
super(FCDiscriminator_img, self).__init__()
self.conv1 = nn.Conv2d(num_classes, ndf1, kernel_size=3, padding=1)
self.conv2 = nn.Conv2d(ndf1, ndf2, kernel_size=3, padding=1)
self.conv3 = nn.Conv2d(ndf2, ndf2, kernel_size=3, padding=1)
self.classifier = nn.Conv2d(ndf2, 1, kernel_size=3, padding=1)
self.leaky_relu = nn.LeakyReLU(negative_slope=0.2, inplace=True)
def forward(self, x):
x = self.conv1(x)
x = self.leaky_relu(x)
x = self.conv2(x)
x = self.leaky_relu(x)
x = self.conv3(x)
x = self.leaky_relu(x)
x = self.classifier(x)
return x
#################################
################ Gradient reverse function
class GradReverse(torch.autograd.Function):
@staticmethod
def forward(ctx, x):
return x.view_as(x)
@staticmethod
def backward(ctx, grad_output):
return grad_output.neg()
def grad_reverse(x):
return GradReverse.apply(x)
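# --- Illustration (not part of the original file) ---
# grad_reverse is the identity in the forward pass and negates the gradient in
# the backward pass; this is what trains the backbone adversarially against
# the image-level discriminator above. Minimal check:
#     x = torch.ones(3, requires_grad=True)
#     grad_reverse(x).sum().backward()
#     assert torch.all(x.grad == -1)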
#######################
@META_ARCH_REGISTRY.register()
class DAobjTwoStagePseudoLabGeneralizedRCNN(GeneralizedRCNN):
@configurable
def __init__(
self,
*,
backbone: Backbone,
proposal_generator: nn.Module,
roi_heads: nn.Module,
pixel_mean: Tuple[float],
pixel_std: Tuple[float],
input_format: Optional[str] = None,
vis_period: int = 0,
dis_type: str,
# dis_loss_weight: float = 0,
):
"""
Args:
backbone: a backbone module, must follow detectron2's backbone interface
proposal_generator: a module that generates proposals using backbone features
roi_heads: a ROI head that performs per-region computation
pixel_mean, pixel_std: list or tuple with #channels element, representing
the per-channel mean and std to be used to normalize the input image
input_format: describe the meaning of channels of input. Needed by visualization
vis_period: the period to run visualization. Set to 0 to disable.
"""
super(GeneralizedRCNN, self).__init__()
self.backbone = backbone
self.proposal_generator = proposal_generator
self.roi_heads = roi_heads
self.input_format = input_format
self.vis_period = vis_period
if vis_period > 0:
assert input_format is not None, "input_format is required for visualization!"
self.register_buffer("pixel_mean", torch.tensor(pixel_mean).view(-1, 1, 1), False)
self.register_buffer("pixel_std", torch.tensor(pixel_std).view(-1, 1, 1), False)
assert (
self.pixel_mean.shape == self.pixel_std.shape
), f"{self.pixel_mean} and {self.pixel_std} have different shapes!"
# @yujheli: you may need to build your discriminator here
self.dis_type = dis_type
self.D_img = None
# self.D_img = FCDiscriminator_img(self.backbone._out_feature_channels['res4']) # Need to know the channel
# self.D_img = None
self.D_img = FCDiscriminator_img(self.backbone._out_feature_channels[self.dis_type]) # Need to know the channel
# self.bceLoss_func = nn.BCEWithLogitsLoss()
def build_discriminator(self):
self.D_img = FCDiscriminator_img(self.backbone._out_feature_channels[self.dis_type]).to(self.device) # Need to know the channel
@classmethod
def from_config(cls, cfg):
backbone = build_backbone(cfg)
return {
"backbone": backbone,
"proposal_generator": build_proposal_generator(cfg, backbone.output_shape()),
"roi_heads": build_roi_heads(cfg, backbone.output_shape()),
"input_format": cfg.INPUT.FORMAT,
"vis_period": cfg.VIS_PERIOD,
"pixel_mean": cfg.MODEL.PIXEL_MEAN,
"pixel_std": cfg.MODEL.PIXEL_STD,
"dis_type": cfg.SEMISUPNET.DIS_TYPE,
# "dis_loss_ratio": cfg.xxx,
}
def preprocess_image_train(self, batched_inputs: List[Dict[str, torch.Tensor]]):
"""
Normalize, pad and batch the input images.
"""
images = [x["image"].to(self.device) for x in batched_inputs]
images = [(x - self.pixel_mean) / self.pixel_std for x in images]
images = ImageList.from_tensors(images, self.backbone.size_divisibility)
images_t = [x["image_unlabeled"].to(self.device) for x in batched_inputs]
images_t = [(x - self.pixel_mean) / self.pixel_std for x in images_t]
images_t = ImageList.from_tensors(images_t, self.backbone.size_divisibility)
return images, images_t
def forward(
self, batched_inputs, branch="supervised", given_proposals=None, val_mode=False
):
"""
Args:
batched_inputs: a list, batched outputs of :class:`DatasetMapper` .
Each item in the list contains the inputs for one image.
For now, each item in the list is a dict that contains:
* image: Tensor, image in (C, H, W) format.
* instances (optional): groundtruth :class:`Instances`
* proposals (optional): :class:`Instances`, precomputed proposals.
Other information that's included in the original dicts, such as:
* "height", "width" (int): the output resolution of the model, used in inference.
See :meth:`postprocess` for details.
Returns:
list[dict]:
Each dict is the output for one input image.
The dict contains one key "instances" whose value is a :class:`Instances`.
The :class:`Instances` object has the following keys:
"pred_boxes", "pred_classes", "scores", "pred_masks", "pred_keypoints"
"""
if self.D_img is None:
self.build_discriminator()
if (not self.training) and (not val_mode): # only conduct when testing mode
return self.inference(batched_inputs)
source_label = 0
target_label = 1
if branch == "domain":
# self.D_img.train()
# source_label = 0
# target_label = 1
# images = self.preprocess_image(batched_inputs)
images_s, images_t = self.preprocess_image_train(batched_inputs)
features = self.backbone(images_s.tensor)
# import pdb
# pdb.set_trace()
features_s = grad_reverse(features[self.dis_type])
D_img_out_s = self.D_img(features_s)
loss_D_img_s = F.binary_cross_entropy_with_logits(D_img_out_s, torch.FloatTensor(D_img_out_s.data.size()).fill_(source_label).to(self.device))
features_t = self.backbone(images_t.tensor)
features_t = grad_reverse(features_t[self.dis_type])
# features_t = grad_reverse(features_t['p2'])
D_img_out_t = self.D_img(features_t)
loss_D_img_t = F.binary_cross_entropy_with_logits(D_img_out_t, torch.FloatTensor(D_img_out_t.data.size()).fill_(target_label).to(self.device))
# import pdb
# pdb.set_trace()
losses = {}
losses["loss_D_img_s"] = loss_D_img_s
losses["loss_D_img_t"] = loss_D_img_t
return losses, [], [], None
# self.D_img.eval()
images = self.preprocess_image(batched_inputs)
if "instances" in batched_inputs[0]:
gt_instances = [x["instances"].to(self.device) for x in batched_inputs]
else:
gt_instances = None
features = self.backbone(images.tensor)
# TODO: remove the usage of if else here. This needs to be re-organized
if branch == "supervised":
features_s = grad_reverse(features[self.dis_type])
D_img_out_s = self.D_img(features_s)
loss_D_img_s = F.binary_cross_entropy_with_logits(D_img_out_s, torch.FloatTensor(D_img_out_s.data.size()).fill_(source_label).to(self.device))
# Region proposal network
proposals_rpn, proposal_losses = self.proposal_generator(
images, features, gt_instances
)
# roi_head lower branch
_, detector_losses = self.roi_heads(
images,
features,
proposals_rpn,
compute_loss=True,
targets=gt_instances,
branch=branch,
)
# visualization
if self.vis_period > 0:
storage = get_event_storage()
if storage.iter % self.vis_period == 0:
self.visualize_training(batched_inputs, proposals_rpn, branch)
losses = {}
losses.update(detector_losses)
losses.update(proposal_losses)
losses["loss_D_img_s"] = loss_D_img_s*0.001
return losses, [], [], None
elif branch == "supervised_target":
# features_t = grad_reverse(features_t[self.dis_type])
# D_img_out_t = self.D_img(features_t)
# loss_D_img_t = F.binary_cross_entropy_with_logits(D_img_out_t, torch.FloatTensor(D_img_out_t.data.size()).fill_(target_label).to(self.device))
# Region proposal network
proposals_rpn, proposal_losses = self.proposal_generator(
images, features, gt_instances
)
# roi_head lower branch
_, detector_losses = self.roi_heads(
images,
features,
proposals_rpn,
compute_loss=True,
targets=gt_instances,
branch=branch,
)
# visualization
if self.vis_period > 0:
storage = get_event_storage()
if storage.iter % self.vis_period == 0:
self.visualize_training(batched_inputs, proposals_rpn, branch)
losses = {}
losses.update(detector_losses)
losses.update(proposal_losses)
# losses["loss_D_img_t"] = loss_D_img_t*0.001
# losses["loss_D_img_s"] = loss_D_img_s*0.001
return losses, [], [], None
elif branch == "unsup_data_weak":
"""
Unsupervised weak branch: takes images without ground-truth labels and outputs proposals from the RPN and the ROI head.
"""
# Region proposal network
proposals_rpn, _ = self.proposal_generator(
images, features, None, compute_loss=False
)
# roi_head lower branch (keep this for further production)
# notice that we do not use any target in ROI head to do inference!
proposals_roih, ROI_predictions = self.roi_heads(
images,
features,
proposals_rpn,
targets=None,
compute_loss=False,
branch=branch,
)
# if self.vis_period > 0:
# storage = get_event_storage()
# if storage.iter % self.vis_period == 0:
# self.visualize_training(batched_inputs, proposals_rpn, branch)
return {}, proposals_rpn, proposals_roih, ROI_predictions
elif branch == "unsup_data_strong":
raise NotImplementedError()
elif branch == "val_loss":
raise NotImplementedError()
def visualize_training(self, batched_inputs, proposals, branch=""):
"""
This function differs from the original one:
- it adds "branch" to the `vis_name`.
A function used to visualize images and proposals. It shows ground truth
bounding boxes on the original image and up to 20 predicted object
proposals on the original image. Users can implement different
visualization functions for different models.
Args:
batched_inputs (list): a list that contains input to the model.
proposals (list): a list that contains predicted proposals. Both
batched_inputs and proposals should have the same length.
"""
from detectron2.utils.visualizer import Visualizer
storage = get_event_storage()
max_vis_prop = 20
for input, prop in zip(batched_inputs, proposals):
img = input["image"]
img = convert_image_to_rgb(img.permute(1, 2, 0), self.input_format)
v_gt = Visualizer(img, None)
v_gt = v_gt.overlay_instances(boxes=input["instances"].gt_boxes)
anno_img = v_gt.get_image()
box_size = min(len(prop.proposal_boxes), max_vis_prop)
v_pred = Visualizer(img, None)
v_pred = v_pred.overlay_instances(
boxes=prop.proposal_boxes[0:box_size].tensor.cpu().numpy()
)
prop_img = v_pred.get_image()
vis_img = np.concatenate((anno_img, prop_img), axis=1)
vis_img = vis_img.transpose(2, 0, 1)
vis_name = (
"Left: GT bounding boxes "
+ branch
+ "; Right: Predicted proposals "
+ branch
)
storage.put_image(vis_name, vis_img)
break # only visualize one image in a batch
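# --- Illustrative usage sketch (added for clarity; not part of the original file).
# A minimal example of how the `branch` argument of forward() above is typically
# driven by a mean-teacher style training step. Variable names and the way the
# losses are combined are assumptions for illustration only.
def _example_training_step(model, label_data, joint_data, unlabel_data):
    # Supervised branch: detection losses on labeled data (plus the lightly
    # weighted source-domain discriminator loss, loss_D_img_s above).
    sup_losses, _, _, _ = model(label_data, branch="supervised")
    # Domain branch: each dict in joint_data must carry both "image" (source)
    # and "image_unlabeled" (target), as required by preprocess_image_train().
    dom_losses, _, _, _ = model(joint_data, branch="domain")
    # Weak branch: no losses; returns RPN/ROI-head proposals usable as pseudo-labels.
    _, proposals_rpn, proposals_roih, _ = model(unlabel_data, branch="unsup_data_weak")
    total_loss = sum(sup_losses.values()) + sum(dom_losses.values())
    return total_loss, proposals_rpn, proposals_roih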
@META_ARCH_REGISTRY.register()
class TwoStagePseudoLabGeneralizedRCNN(GeneralizedRCNN):
def forward(
self, batched_inputs, branch="supervised", given_proposals=None, val_mode=False
):
if (not self.training) and (not val_mode):
return self.inference(batched_inputs)
images = self.preprocess_image(batched_inputs)
if "instances" in batched_inputs[0]:
gt_instances = [x["instances"].to(self.device) for x in batched_inputs]
else:
gt_instances = None
features = self.backbone(images.tensor)
if branch == "supervised":
# Region proposal network
proposals_rpn, proposal_losses = self.proposal_generator(
images, features, gt_instances
)
# # roi_head lower branch
_, detector_losses = self.roi_heads(
images, features, proposals_rpn, gt_instances, branch=branch
)
losses = {}
losses.update(detector_losses)
losses.update(proposal_losses)
return losses, [], [], None
elif branch == "unsup_data_weak":
# Region proposal network
proposals_rpn, _ = self.proposal_generator(
images, features, None, compute_loss=False
)
# ROI-head lower branch (kept for later use); note that no targets are used in the ROI head for inference
proposals_roih, ROI_predictions = self.roi_heads(
images,
features,
proposals_rpn,
targets=None,
compute_loss=False,
branch=branch,
)
return {}, proposals_rpn, proposals_roih, ROI_predictions
elif branch == "val_loss":
# Region proposal network
proposals_rpn, proposal_losses = self.proposal_generator(
images, features, gt_instances, compute_val_loss=True
)
# roi_head lower branch
_, detector_losses = self.roi_heads(
images,
features,
proposals_rpn,
gt_instances,
branch=branch,
compute_val_loss=True,
)
losses = {}
losses.update(detector_losses)
losses.update(proposal_losses)
return losses, [], [], None
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import torch.nn as nn
import copy
import torch
from typing import Union, List, Dict, Any, cast
from detectron2.modeling.backbone import (
ResNet,
Backbone,
build_resnet_backbone,
BACKBONE_REGISTRY
)
from detectron2.modeling.backbone.fpn import FPN, LastLevelMaxPool, LastLevelP6P7
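# make_layers() builds a VGG-style convolutional trunk from a cfg list in
# torchvision's notation: integers are 3x3 conv output channels (each followed
# by ReLU, and BatchNorm when batch_norm=True), and 'M' inserts a 2x2 max-pool.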
def make_layers(cfg: List[Union[str, int]], batch_norm: bool = False) -> nn.Sequential:
layers: List[nn.Module] = []
in_channels = 3
for v in cfg:
if v == 'M':
layers += [nn.MaxPool2d(kernel_size=2, stride=2)]
else:
v = cast(int, v)
conv2d = nn.Conv2d(in_channels, v, kernel_size=3, padding=1)
if batch_norm:
layers += [conv2d, nn.BatchNorm2d(v), nn.ReLU(inplace=True)]
else:
layers += [conv2d, nn.ReLU(inplace=True)]
in_channels = v
return nn.Sequential(*layers)
cfgs: Dict[str, List[Union[str, int]]] = {
'vgg11': [64, 'M', 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'],
'vgg13': [64, 64, 'M', 128, 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'],
'vgg16': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 'M', 512, 512, 512, 'M', 512, 512, 512, 'M'],
'vgg19': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 256, 'M', 512, 512, 512, 512, 'M', 512, 512, 512, 512, 'M'],
}
class vgg_backbone(Backbone):
"""
Backbone (bottom-up) for FBNet.
Hierarchy:
trunk0:
xif0_0
xif0_1
...
trunk1:
xif1_0
xif1_1
...
...
Output features:
The outputs from each "stage", i.e. trunkX.
"""
def __init__(self, cfg):
super().__init__()
self.vgg = make_layers(cfgs['vgg16'], batch_norm=True)
self._initialize_weights()
# self.stage_names_index = {'vgg1':3, 'vgg2':8 , 'vgg3':15, 'vgg4':22, 'vgg5':29}
_out_feature_channels = [64, 128, 256, 512, 512]
_out_feature_strides = [2, 4, 8, 16, 32]
vgg_modules = list(self.vgg._modules.values())
self.stages = [
    nn.Sequential(*vgg_modules[0:7]),
    nn.Sequential(*vgg_modules[7:14]),
    nn.Sequential(*vgg_modules[14:24]),
    nn.Sequential(*vgg_modules[24:34]),
    nn.Sequential(*vgg_modules[34:]),
]
self._out_feature_channels = {}
self._out_feature_strides = {}
self._stage_names = []
for i, stage in enumerate(self.stages):
name = "vgg{}".format(i)
self.add_module(name, stage)
self._stage_names.append(name)
self._out_feature_channels[name] = _out_feature_channels[i]
self._out_feature_strides[name] = _out_feature_strides[i]
self._out_features = self._stage_names
del self.vgg
def forward(self, x):
features = {}
for name, stage in zip(self._stage_names, self.stages):
x = stage(x)
# if name in self._out_features:
# outputs[name] = x
features[name] = x
return features
def _initialize_weights(self) -> None:
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
if m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.BatchNorm2d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.Linear):
nn.init.normal_(m.weight, 0, 0.01)
nn.init.constant_(m.bias, 0)
@BACKBONE_REGISTRY.register()  # already registered in the baseline model
def build_vgg_backbone(cfg, _):
return vgg_backbone(cfg)
@BACKBONE_REGISTRY.register()  # already registered in the baseline model
def build_vgg_fpn_backbone(cfg, _):
bottom_up = vgg_backbone(cfg)
in_features = cfg.MODEL.FPN.IN_FEATURES
out_channels = cfg.MODEL.FPN.OUT_CHANNELS
backbone = FPN(
bottom_up=bottom_up,
in_features=in_features,
out_channels=out_channels,
norm=cfg.MODEL.FPN.NORM,
top_block=LastLevelMaxPool(),
# fuse_type=cfg.MODEL.FPN.FUSE_TYPE,
)
return backbone
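# Illustrative config sketch (values are assumptions, not taken from this repo):
# to use this backbone one would typically set something like
#   MODEL:
#     BACKBONE:
#       NAME: "build_vgg_fpn_backbone"
#     FPN:
#       IN_FEATURES: ["vgg1", "vgg2", "vgg3", "vgg4"]
#       OUT_CHANNELS: 256
# where the available stage names ("vgg0" ... "vgg4") come from vgg_backbone above.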
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
from torch.nn.parallel import DataParallel, DistributedDataParallel
import torch.nn as nn
class EnsembleTSModel(nn.Module):
def __init__(self, modelTeacher, modelStudent):
super(EnsembleTSModel, self).__init__()
if isinstance(modelTeacher, (DistributedDataParallel, DataParallel)):
modelTeacher = modelTeacher.module
if isinstance(modelStudent, (DistributedDataParallel, DataParallel)):
modelStudent = modelStudent.module
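# Unwrapping .module above keeps the raw teacher/student weights accessible so
# that checkpoints are saved without a DataParallel/DDP "module." prefix.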
self.modelTeacher = modelTeacher
self.modelStudent = modelStudent |
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
from typing import Dict, Optional
import torch
from detectron2.structures import ImageList, Instances
from detectron2.modeling.proposal_generator import RPN
from detectron2.modeling.proposal_generator.build import PROPOSAL_GENERATOR_REGISTRY
@PROPOSAL_GENERATOR_REGISTRY.register()
class PseudoLabRPN(RPN):
"""
Region Proposal Network, introduced by :paper:`Faster R-CNN`.
"""
def forward(
self,
images: ImageList,
features: Dict[str, torch.Tensor],
gt_instances: Optional[Instances] = None,
compute_loss: bool = True,
compute_val_loss: bool = False,
):
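# Compared to the stock RPN.forward, two flags are added: compute_loss=False
# skips loss computation when the teacher only generates proposals for
# pseudo-labeling, and compute_val_loss=True forces losses to be computed
# even outside self.training (used by the "val_loss" branch).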
features = [features[f] for f in self.in_features]
anchors = self.anchor_generator(features)
pred_objectness_logits, pred_anchor_deltas = self.rpn_head(features)
pred_objectness_logits = [
# (N, A, Hi, Wi) -> (N, Hi, Wi, A) -> (N, Hi*Wi*A)
score.permute(0, 2, 3, 1).flatten(1)
for score in pred_objectness_logits
]
pred_anchor_deltas = [
# (N, A*B, Hi, Wi) -> (N, A, B, Hi, Wi) -> (N, Hi, Wi, A, B) -> (N, Hi*Wi*A, B)
x.view(
x.shape[0], -1, self.anchor_generator.box_dim, x.shape[-2], x.shape[-1]
)
.permute(0, 3, 4, 1, 2)
.flatten(1, -2)
for x in pred_anchor_deltas
]
if (self.training and compute_loss) or compute_val_loss:
gt_labels, gt_boxes = self.label_and_sample_anchors(anchors, gt_instances)
losses = self.losses(
anchors, pred_objectness_logits, gt_labels, pred_anchor_deltas, gt_boxes
)
losses = {k: v * self.loss_weight.get(k, 1.0) for k, v in losses.items()}
else: # inference
losses = {}
proposals = self.predict_proposals(
anchors, pred_objectness_logits, pred_anchor_deltas, images.image_sizes
)
return proposals, losses |
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import torch
from torch import nn
from torch.nn import functional as F
from detectron2.modeling.roi_heads.fast_rcnn import (
FastRCNNOutputLayers,
FastRCNNOutputs,
)
# focal loss
class FastRCNNFocaltLossOutputLayers(FastRCNNOutputLayers):
def __init__(self, cfg, input_shape):
super(FastRCNNFocaltLossOutputLayers, self).__init__(cfg, input_shape)
self.num_classes = cfg.MODEL.ROI_HEADS.NUM_CLASSES
def losses(self, predictions, proposals):
"""
Args:
predictions: return values of :meth:`forward()`.
proposals (list[Instances]): proposals that match the features
that were used to compute predictions.
"""
scores, proposal_deltas = predictions
losses = FastRCNNFocalLoss(
self.box2box_transform,
scores,
proposal_deltas,
proposals,
self.smooth_l1_beta,
self.box_reg_loss_type,
num_classes=self.num_classes,
).losses()
return losses
class FastRCNNFocalLoss(FastRCNNOutputs):
"""
A class that stores information about outputs of a Fast R-CNN head.
It provides methods that are used to decode the outputs of a Fast R-CNN head.
"""
def __init__(
self,
box2box_transform,
pred_class_logits,
pred_proposal_deltas,
proposals,
smooth_l1_beta=0.0,
box_reg_loss_type="smooth_l1",
num_classes=80,
):
super(FastRCNNFocalLoss, self).__init__(
box2box_transform,
pred_class_logits,
pred_proposal_deltas,
proposals,
smooth_l1_beta,
box_reg_loss_type,
)
self.num_classes = num_classes
def losses(self):
return {
"loss_cls": self.comput_focal_loss(),
"loss_box_reg": self.box_reg_loss(),
}
def compute_focal_loss(self):
if self._no_instances:
return 0.0 * self.pred_class_logits.sum()
else:
FC_loss = FocalLoss(
gamma=1.5,
num_classes=self.num_classes,
)
total_loss = FC_loss(input=self.pred_class_logits, target=self.gt_classes)
total_loss = total_loss / self.gt_classes.shape[0]
return total_loss
class FocalLoss(nn.Module):
def __init__(
self,
weight=None,
gamma=1.0,
num_classes=80,
):
super(FocalLoss, self).__init__()
assert gamma >= 0
self.gamma = gamma
self.weight = weight
self.num_classes = num_classes
def forward(self, input, target):
# focal loss
CE = F.cross_entropy(input, target, reduction="none")
p = torch.exp(-CE)
loss = (1 - p) ** self.gamma * CE
return loss.sum()
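# Note: p = exp(-CE) is the model's probability for the true class, so the
# (1 - p) ** gamma factor down-weights easy, well-classified proposals; with
# gamma=0 this reduces to plain cross-entropy. FastRCNNFocalLoss above divides
# the summed loss by the number of proposals to obtain a per-proposal mean.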
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import torch
from typing import Dict, List, Optional, Tuple, Union
from detectron2.structures import Boxes, ImageList, Instances, pairwise_iou
from detectron2.modeling.proposal_generator.proposal_utils import (
add_ground_truth_to_proposals,
)
from detectron2.utils.events import get_event_storage
from detectron2.modeling.roi_heads.box_head import build_box_head
from detectron2.layers import ShapeSpec
from detectron2.modeling.roi_heads import (
ROI_HEADS_REGISTRY,
StandardROIHeads,
)
from detectron2.modeling.roi_heads.fast_rcnn import FastRCNNOutputLayers
from adapteacher.modeling.roi_heads.fast_rcnn import FastRCNNFocaltLossOutputLayers
import numpy as np
from detectron2.modeling.poolers import ROIPooler
@ROI_HEADS_REGISTRY.register()
class StandardROIHeadsPseudoLab(StandardROIHeads):
@classmethod
def _init_box_head(cls, cfg, input_shape):
# fmt: off
in_features = cfg.MODEL.ROI_HEADS.IN_FEATURES
pooler_resolution = cfg.MODEL.ROI_BOX_HEAD.POOLER_RESOLUTION
pooler_scales = tuple(1.0 / input_shape[k].stride for k in in_features)
sampling_ratio = cfg.MODEL.ROI_BOX_HEAD.POOLER_SAMPLING_RATIO
pooler_type = cfg.MODEL.ROI_BOX_HEAD.POOLER_TYPE
# fmt: on
in_channels = [input_shape[f].channels for f in in_features]
# Check all channel counts are equal
assert len(set(in_channels)) == 1, in_channels
in_channels = in_channels[0]
box_pooler = ROIPooler(
output_size=pooler_resolution,
scales=pooler_scales,
sampling_ratio=sampling_ratio,
pooler_type=pooler_type,
)
box_head = build_box_head(
cfg,
ShapeSpec(
channels=in_channels, height=pooler_resolution, width=pooler_resolution
),
)
if cfg.MODEL.ROI_HEADS.LOSS == "CrossEntropy":
box_predictor = FastRCNNOutputLayers(cfg, box_head.output_shape)
elif cfg.MODEL.ROI_HEADS.LOSS == "FocalLoss":
box_predictor = FastRCNNFocaltLossOutputLayers(cfg, box_head.output_shape)
else:
raise ValueError("Unknown ROI head loss.")
return {
"box_in_features": in_features,
"box_pooler": box_pooler,
"box_head": box_head,
"box_predictor": box_predictor,
}
def forward(
self,
images: ImageList,
features: Dict[str, torch.Tensor],
proposals: List[Instances],
targets: Optional[List[Instances]] = None,
compute_loss=True,
branch="",
compute_val_loss=False,
) -> Tuple[List[Instances], Dict[str, torch.Tensor]]:
del images
if self.training and compute_loss: # apply if training loss
assert targets
# 1000 --> 512
proposals = self.label_and_sample_proposals(
proposals, targets, branch=branch
)
elif compute_val_loss: # apply if val loss
assert targets
# 1000 --> 512
temp_proposal_append_gt = self.proposal_append_gt
self.proposal_append_gt = False
proposals = self.label_and_sample_proposals(
proposals, targets, branch=branch
) # do not apply target on proposals
self.proposal_append_gt = temp_proposal_append_gt
del targets
if (self.training and compute_loss) or compute_val_loss:
losses, _ = self._forward_box(
features, proposals, compute_loss, compute_val_loss, branch
)
return proposals, losses
else:
pred_instances, predictions = self._forward_box(
features, proposals, compute_loss, compute_val_loss, branch
)
return pred_instances, predictions
def _forward_box(
self,
features: Dict[str, torch.Tensor],
proposals: List[Instances],
compute_loss: bool = True,
compute_val_loss: bool = False,
branch: str = "",
) -> Union[Dict[str, torch.Tensor], List[Instances]]:
features = [features[f] for f in self.box_in_features]
box_features = self.box_pooler(features, [x.proposal_boxes for x in proposals])
box_features = self.box_head(box_features)
predictions = self.box_predictor(box_features)
del box_features
if (
self.training and compute_loss
) or compute_val_loss: # apply if training loss or val loss
losses = self.box_predictor.losses(predictions, proposals)
if self.train_on_pred_boxes:
with torch.no_grad():
pred_boxes = self.box_predictor.predict_boxes_for_gt_classes(
predictions, proposals
)
for proposals_per_image, pred_boxes_per_image in zip(
proposals, pred_boxes
):
proposals_per_image.proposal_boxes = Boxes(pred_boxes_per_image)
return losses, predictions
else:
pred_instances, _ = self.box_predictor.inference(predictions, proposals)
return pred_instances, predictions
@torch.no_grad()
def label_and_sample_proposals(
self, proposals: List[Instances], targets: List[Instances], branch: str = ""
) -> List[Instances]:
gt_boxes = [x.gt_boxes for x in targets]
if self.proposal_append_gt:
proposals = add_ground_truth_to_proposals(gt_boxes, proposals)
proposals_with_gt = []
num_fg_samples = []
num_bg_samples = []
for proposals_per_image, targets_per_image in zip(proposals, targets):
has_gt = len(targets_per_image) > 0
match_quality_matrix = pairwise_iou(
targets_per_image.gt_boxes, proposals_per_image.proposal_boxes
)
matched_idxs, matched_labels = self.proposal_matcher(match_quality_matrix)
sampled_idxs, gt_classes = self._sample_proposals(
matched_idxs, matched_labels, targets_per_image.gt_classes
)
proposals_per_image = proposals_per_image[sampled_idxs]
proposals_per_image.gt_classes = gt_classes
if has_gt:
sampled_targets = matched_idxs[sampled_idxs]
for (trg_name, trg_value) in targets_per_image.get_fields().items():
if trg_name.startswith("gt_") and not proposals_per_image.has(
trg_name
):
proposals_per_image.set(trg_name, trg_value[sampled_targets])
else:
gt_boxes = Boxes(
targets_per_image.gt_boxes.tensor.new_zeros((len(sampled_idxs), 4))
)
proposals_per_image.gt_boxes = gt_boxes
num_bg_samples.append((gt_classes == self.num_classes).sum().item())
num_fg_samples.append(gt_classes.numel() - num_bg_samples[-1])
proposals_with_gt.append(proposals_per_image)
storage = get_event_storage()
storage.put_scalar(
"roi_head/num_target_fg_samples_" + branch, np.mean(num_fg_samples)
)
storage.put_scalar(
"roi_head/num_target_bg_samples_" + branch, np.mean(num_bg_samples)
)
return proposals_with_gt
|
# Copyright (c) Facebook, Inc. and its affiliates.
from .coco_evaluation import COCOEvaluator
from .pascal_voc_evaluation import PascalVOCDetectionEvaluator
# __all__ = [k for k in globals().keys() if not k.startswith("_")]
__all__ = [
"COCOEvaluator",
"PascalVOCDetectionEvaluator"
]
|
# Copyright (c) Facebook, Inc. and its affiliates.
import contextlib
import copy
import io
import itertools
import json
import logging
import numpy as np
import os
import pickle
from collections import OrderedDict
import pycocotools.mask as mask_util
import torch
from pycocotools.coco import COCO
from pycocotools.cocoeval import COCOeval
from tabulate import tabulate
import detectron2.utils.comm as comm
from detectron2.config import CfgNode
from detectron2.data import MetadataCatalog
from detectron2.data.datasets.coco import convert_to_coco_dict
from detectron2.evaluation.fast_eval_api import COCOeval_opt
from detectron2.structures import Boxes, BoxMode, pairwise_iou
from detectron2.utils.file_io import PathManager
from detectron2.utils.logger import create_small_table
from detectron2.evaluation import DatasetEvaluator
from iopath.common.file_io import file_lock
logger = logging.getLogger(__name__)
def convert_to_coco_json(dataset_name, output_file, allow_cached=True):
"""
Converts dataset into COCO format and saves it to a json file.
dataset_name must be registered in DatasetCatalog and in detectron2's standard format.
Args:
dataset_name:
reference from the config file to the catalogs
must be registered in DatasetCatalog and in detectron2's standard format
output_file: path of json file that will be saved to
allow_cached: if json file is already present then skip conversion
"""
# TODO: The dataset or the conversion script *may* change,
# a checksum would be useful for validating the cached data
PathManager.mkdirs(os.path.dirname(output_file))
with file_lock(output_file):
if PathManager.exists(output_file) and allow_cached:
logger.warning(
f"Using previously cached COCO format annotations at '{output_file}'. "
"You need to clear the cache file if your dataset has been modified."
)
else:
logger.info(f"Converting annotations of dataset '{dataset_name}' to COCO format ...)")
coco_dict = convert_to_coco_dict(dataset_name)
logger.info(f"Caching COCO format annotations at '{output_file}' ...")
with PathManager.open(output_file, "w") as f:
    json.dump(coco_dict, f)
class COCOEvaluator(DatasetEvaluator):
"""
Evaluate AR for object proposals, AP for instance detection/segmentation, AP
for keypoint detection outputs using COCO's metrics.
See http://cocodataset.org/#detection-eval and
http://cocodataset.org/#keypoints-eval to understand its metrics.
The metrics range from 0 to 100 (instead of 0 to 1), where a -1 or NaN means
the metric cannot be computed (e.g. due to no predictions made).
In addition to COCO, this evaluator is able to support any bounding box detection,
instance segmentation, or keypoint detection dataset.
"""
def __init__(
self,
dataset_name,
tasks=None,
distributed=True,
output_dir=None,
*,
use_fast_impl=True,
kpt_oks_sigmas=(),
):
"""
Args:
dataset_name (str): name of the dataset to be evaluated.
It must have either the following corresponding metadata:
"json_file": the path to the COCO format annotation
Or it must be in detectron2's standard dataset format
so it can be converted to COCO format automatically.
tasks (tuple[str]): tasks that can be evaluated under the given
configuration. A task is one of "bbox", "segm", "keypoints".
By default, will infer this automatically from predictions.
distributed (True): if True, will collect results from all ranks and run evaluation
in the main process.
Otherwise, will only evaluate the results in the current process.
output_dir (str): optional, an output directory to dump all
results predicted on the dataset. The dump contains two files:
1. "instances_predictions.pth" a file that can be loaded with `torch.load` and
contains all the results in the format they are produced by the model.
2. "coco_instances_results.json" a json file in COCO's result format.
use_fast_impl (bool): use a fast but **unofficial** implementation to compute AP.
Although the results should be very close to the official implementation in COCO
API, it is still recommended to compute results with the official API for use in
papers. The faster implementation also uses more RAM.
kpt_oks_sigmas (list[float]): The sigmas used to calculate keypoint OKS.
See http://cocodataset.org/#keypoints-eval
When empty, it will use the defaults in COCO.
Otherwise it should be the same length as ROI_KEYPOINT_HEAD.NUM_KEYPOINTS.
"""
self._logger = logging.getLogger(__name__)
self._distributed = distributed
self._output_dir = output_dir
self._use_fast_impl = use_fast_impl
if tasks is not None and isinstance(tasks, CfgNode):
kpt_oks_sigmas = (
tasks.TEST.KEYPOINT_OKS_SIGMAS if not kpt_oks_sigmas else kpt_oks_sigmas
)
self._logger.warning(
    "COCO Evaluator instantiated using config, this is deprecated behavior."
    " Please pass in explicit arguments instead."
)
self._tasks = None  # Inferring it from predictions should be better
else:
self._tasks = tasks
self._cpu_device = torch.device("cpu")
self._metadata = MetadataCatalog.get(dataset_name)
if not hasattr(self._metadata, "json_file"):
self._logger.info(
f"'{dataset_name}' is not registered by `register_coco_instances`."
" Therefore trying to convert it to COCO format ..."
)
cache_path = os.path.join(output_dir, f"{dataset_name}_coco_format.json")
self._metadata.json_file = cache_path
convert_to_coco_json(dataset_name, cache_path)
json_file = PathManager.get_local_path(self._metadata.json_file)
with contextlib.redirect_stdout(io.StringIO()):
self._coco_api = COCO(json_file)
# Test set json files do not contain annotations (evaluation must be
# performed using the COCO evaluation server).
self._do_evaluation = "annotations" in self._coco_api.dataset
if self._do_evaluation:
self._kpt_oks_sigmas = kpt_oks_sigmas
def reset(self):
self._predictions = []
def process(self, inputs, outputs):
"""
Args:
inputs: the inputs to a COCO model (e.g., GeneralizedRCNN).
It is a list of dict. Each dict corresponds to an image and
contains keys like "height", "width", "file_name", "image_id".
outputs: the outputs of a COCO model. It is a list of dicts with key
"instances" that contains :class:`Instances`.
"""
for input, output in zip(inputs, outputs):
prediction = {"image_id": input["image_id"]}
if "instances" in output:
instances = output["instances"].to(self._cpu_device)
prediction["instances"] = instances_to_coco_json(instances, input["image_id"])
if "proposals" in output:
prediction["proposals"] = output["proposals"].to(self._cpu_device)
if len(prediction) > 1:
self._predictions.append(prediction)
def evaluate(self, img_ids=None):
"""
Args:
img_ids: a list of image IDs to evaluate on. Default to None for the whole dataset
"""
if self._distributed:
comm.synchronize()
predictions = comm.gather(self._predictions, dst=0)
predictions = list(itertools.chain(*predictions))
if not comm.is_main_process():
return {}
else:
predictions = self._predictions
if len(predictions) == 0:
self._logger.warning("[COCOEvaluator] Did not receive valid predictions.")
return {}
if self._output_dir:
PathManager.mkdirs(self._output_dir)
file_path = os.path.join(self._output_dir, "instances_predictions.pth")
with PathManager.open(file_path, "wb") as f:
torch.save(predictions, f)
self._results = OrderedDict()
if "proposals" in predictions[0]:
self._eval_box_proposals(predictions)
if "instances" in predictions[0]:
self._eval_predictions(predictions, img_ids=img_ids)
# Copy so the caller can do whatever with results
return copy.deepcopy(self._results)
def _tasks_from_predictions(self, predictions):
"""
Get COCO API "tasks" (i.e. iou_type) from COCO-format predictions.
"""
tasks = {"bbox"}
for pred in predictions:
if "segmentation" in pred:
tasks.add("segm")
if "keypoints" in pred:
tasks.add("keypoints")
return sorted(tasks)
def _eval_predictions(self, predictions, img_ids=None):
"""
Evaluate predictions. Fill self._results with the metrics of the tasks.
"""
self._logger.info("Preparing results for COCO format ...")
coco_results = list(itertools.chain(*[x["instances"] for x in predictions]))
tasks = self._tasks or self._tasks_from_predictions(coco_results)
# unmap the category ids for COCO
if hasattr(self._metadata, "thing_dataset_id_to_contiguous_id"):
dataset_id_to_contiguous_id = self._metadata.thing_dataset_id_to_contiguous_id
all_contiguous_ids = list(dataset_id_to_contiguous_id.values())
num_classes = len(all_contiguous_ids)
assert min(all_contiguous_ids) == 0 and max(all_contiguous_ids) == num_classes - 1
reverse_id_mapping = {v: k for k, v in dataset_id_to_contiguous_id.items()}
for result in coco_results:
category_id = result["category_id"]
assert category_id < num_classes, (
f"A prediction has class={category_id}, "
f"but the dataset only has {num_classes} classes and "
f"predicted class id should be in [0, {num_classes - 1}]."
)
result["category_id"] = reverse_id_mapping[category_id]
if self._output_dir:
file_path = os.path.join(self._output_dir, "coco_instances_results.json")
self._logger.info("Saving results to {}".format(file_path))
with PathManager.open(file_path, "w") as f:
f.write(json.dumps(coco_results))
f.flush()
if not self._do_evaluation:
self._logger.info("Annotations are not available for evaluation.")
return
self._logger.info(
"Evaluating predictions with {} COCO API...".format(
"unofficial" if self._use_fast_impl else "official"
)
)
for task in sorted(tasks):
assert task in {"bbox", "segm", "keypoints"}, f"Got unknown task: {task}!"
coco_eval = (
_evaluate_predictions_on_coco(
self._coco_api,
coco_results,
task,
kpt_oks_sigmas=self._kpt_oks_sigmas,
use_fast_impl=self._use_fast_impl,
img_ids=img_ids,
)
if len(coco_results) > 0
else None # cocoapi does not handle empty results very well
)
res = self._derive_coco_results(
coco_eval, task, class_names=self._metadata.get("thing_classes")
)
self._results[task] = res
def _eval_box_proposals(self, predictions):
"""
Evaluate the box proposals in predictions.
Fill self._results with the metrics for "box_proposals" task.
"""
if self._output_dir:
# Saving generated box proposals to file.
# Predicted box_proposals are in XYXY_ABS mode.
bbox_mode = BoxMode.XYXY_ABS.value
ids, boxes, objectness_logits = [], [], []
for prediction in predictions:
ids.append(prediction["image_id"])
boxes.append(prediction["proposals"].proposal_boxes.tensor.numpy())
objectness_logits.append(prediction["proposals"].objectness_logits.numpy())
proposal_data = {
"boxes": boxes,
"objectness_logits": objectness_logits,
"ids": ids,
"bbox_mode": bbox_mode,
}
with PathManager.open(os.path.join(self._output_dir, "box_proposals.pkl"), "wb") as f:
pickle.dump(proposal_data, f)
if not self._do_evaluation:
self._logger.info("Annotations are not available for evaluation.")
return
self._logger.info("Evaluating bbox proposals ...")
res = {}
areas = {"all": "", "small": "s", "medium": "m", "large": "l"}
for limit in [100, 1000]:
for area, suffix in areas.items():
stats = _evaluate_box_proposals(predictions, self._coco_api, area=area, limit=limit)
key = "AR{}@{:d}".format(suffix, limit)
res[key] = float(stats["ar"].item() * 100)
self._logger.info("Proposal metrics: \n" + create_small_table(res))
self._results["box_proposals"] = res
def _derive_coco_results(self, coco_eval, iou_type, class_names=None):
"""
Derive the desired score numbers from summarized COCOeval.
Args:
coco_eval (None or COCOEval): None represents no predictions from model.
iou_type (str):
class_names (None or list[str]): if provided, will use it to predict
per-category AP.
Returns:
a dict of {metric name: score}
"""
metrics = {
"bbox": ["AP", "AP50", "AP75", "APs", "APm", "APl"],
"segm": ["AP", "AP50", "AP75", "APs", "APm", "APl"],
"keypoints": ["AP", "AP50", "AP75", "APm", "APl"],
}[iou_type]
if coco_eval is None:
self._logger.warning("No predictions from the model!")
return {metric: float("nan") for metric in metrics}
# the standard metrics
results = {
metric: float(coco_eval.stats[idx] * 100 if coco_eval.stats[idx] >= 0 else "nan")
for idx, metric in enumerate(metrics)
}
self._logger.info(
"Evaluation results for {}: \n".format(iou_type) + create_small_table(results)
)
if not np.isfinite(sum(results.values())):
self._logger.info("Some metrics cannot be computed and is shown as NaN.")
if class_names is None or len(class_names) <= 1:
return results
# Compute per-category AP
# from https://github.com/facebookresearch/Detectron/blob/a6a835f5b8208c45d0dce217ce9bbda915f44df7/detectron/datasets/json_dataset_evaluator.py#L222-L252 # noqa
precisions = coco_eval.eval["precision"]
# precision has dims (iou, recall, cls, area range, max dets)
assert len(class_names) == precisions.shape[2]
results_per_category = []
for idx, name in enumerate(class_names):
# area range index 0: all area ranges
# max dets index -1: typically 100 per image
precision = precisions[:, :, idx, 0, -1]
precision = precision[precision > -1]
ap = np.mean(precision) if precision.size else float("nan")
results_per_category.append(("{}".format(name), float(ap * 100)))
# tabulate it
N_COLS = min(6, len(results_per_category) * 2)
results_flatten = list(itertools.chain(*results_per_category))
results_2d = itertools.zip_longest(*[results_flatten[i::N_COLS] for i in range(N_COLS)])
table = tabulate(
results_2d,
tablefmt="pipe",
floatfmt=".3f",
headers=["category", "AP"] * (N_COLS // 2),
numalign="left",
)
self._logger.info("Per-category {} AP: \n".format(iou_type) + table)
# results.update({"AP-" + name: ap for name, ap in results_per_category})
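# The block below (apparently added on top of the stock Detectron2 evaluator)
# repeats the per-category computation restricted to the IoU=0.5 slice of the
# precision array, reporting per-category AP50 alongside the standard table.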
results_per_category_AP50 = []
for idx, name in enumerate(class_names):
# area range index 0: all area ranges
# max dets index -1: typically 100 per image
t = np.where(.5 == coco_eval.params.iouThrs)[0]
precisions_50 = precisions[t]
precisions_50 = precisions_50[:, :, idx, 0, -1]
precisions_50 = precisions_50[precisions_50 > -1]
ap = np.mean(precisions_50) if precisions_50.size else float("nan")
results_per_category_AP50.append(("{}".format(name), float(ap * 100)))
# tabulate it
N_COLS = min(6, len(results_per_category_AP50) * 2)
results_flatten = list(itertools.chain(*results_per_category_AP50))
results_2d = itertools.zip_longest(*[results_flatten[i::N_COLS] for i in range(N_COLS)])
table = tabulate(
results_2d,
tablefmt="pipe",
floatfmt=".3f",
headers=["category", "AP50"] * (N_COLS // 2),
numalign="left",
)
self._logger.info("Per-category {} AP50: \n".format(iou_type) + table)
results.update({"AP50-" + name: ap for name, ap in results_per_category_AP50})
return results
def instances_to_coco_json(instances, img_id):
"""
Dump an "Instances" object to a COCO-format json that's used for evaluation.
Args:
instances (Instances):
img_id (int): the image id
Returns:
list[dict]: list of json annotations in COCO format.
"""
num_instance = len(instances)
if num_instance == 0:
return []
boxes = instances.pred_boxes.tensor.numpy()
boxes = BoxMode.convert(boxes, BoxMode.XYXY_ABS, BoxMode.XYWH_ABS)
boxes = boxes.tolist()
scores = instances.scores.tolist()
classes = instances.pred_classes.tolist()
has_mask = instances.has("pred_masks")
if has_mask:
# use RLE to encode the masks, because they are too large and take too much
# memory, since this evaluator stores outputs of the entire dataset
rles = [
mask_util.encode(np.array(mask[:, :, None], order="F", dtype="uint8"))[0]
for mask in instances.pred_masks
]
for rle in rles:
# "counts" is an array encoded by mask_util as a byte-stream. Python3's
# json writer which always produces strings cannot serialize a bytestream
# unless you decode it. Thankfully, utf-8 works out (which is also what
# the pycocotools/_mask.pyx does).
rle["counts"] = rle["counts"].decode("utf-8")
has_keypoints = instances.has("pred_keypoints")
if has_keypoints:
keypoints = instances.pred_keypoints
results = []
for k in range(num_instance):
result = {
"image_id": img_id,
"category_id": classes[k],
"bbox": boxes[k],
"score": scores[k],
}
if has_mask:
result["segmentation"] = rles[k]
if has_keypoints:
# In COCO annotations,
# keypoints coordinates are pixel indices.
# However our predictions are floating point coordinates.
# Therefore we subtract 0.5 to be consistent with the annotation format.
# This is the inverse of data loading logic in `datasets/coco.py`.
keypoints[k][:, :2] -= 0.5
result["keypoints"] = keypoints[k].flatten().tolist()
results.append(result)
return results
# inspired from Detectron:
# https://github.com/facebookresearch/Detectron/blob/a6a835f5b8208c45d0dce217ce9bbda915f44df7/detectron/datasets/json_dataset_evaluator.py#L255 # noqa
def _evaluate_box_proposals(dataset_predictions, coco_api, thresholds=None, area="all", limit=None):
"""
Evaluate detection proposal recall metrics. This function is a much
faster alternative to the official COCO API recall evaluation code. However,
it produces slightly different results.
"""
# Record max overlap value for each gt box
# Return vector of overlap values
areas = {
"all": 0,
"small": 1,
"medium": 2,
"large": 3,
"96-128": 4,
"128-256": 5,
"256-512": 6,
"512-inf": 7,
}
area_ranges = [
[0 ** 2, 1e5 ** 2], # all
[0 ** 2, 32 ** 2], # small
[32 ** 2, 96 ** 2], # medium
[96 ** 2, 1e5 ** 2], # large
[96 ** 2, 128 ** 2], # 96-128
[128 ** 2, 256 ** 2], # 128-256
[256 ** 2, 512 ** 2], # 256-512
[512 ** 2, 1e5 ** 2],
] # 512-inf
assert area in areas, "Unknown area range: {}".format(area)
area_range = area_ranges[areas[area]]
gt_overlaps = []
num_pos = 0
for prediction_dict in dataset_predictions:
predictions = prediction_dict["proposals"]
# sort predictions in descending order
# TODO maybe remove this and make it explicit in the documentation
inds = predictions.objectness_logits.sort(descending=True)[1]
predictions = predictions[inds]
ann_ids = coco_api.getAnnIds(imgIds=prediction_dict["image_id"])
anno = coco_api.loadAnns(ann_ids)
gt_boxes = [
BoxMode.convert(obj["bbox"], BoxMode.XYWH_ABS, BoxMode.XYXY_ABS)
for obj in anno
if obj["iscrowd"] == 0
]
gt_boxes = torch.as_tensor(gt_boxes).reshape(-1, 4) # guard against no boxes
gt_boxes = Boxes(gt_boxes)
gt_areas = torch.as_tensor([obj["area"] for obj in anno if obj["iscrowd"] == 0])
if len(gt_boxes) == 0 or len(predictions) == 0:
continue
valid_gt_inds = (gt_areas >= area_range[0]) & (gt_areas <= area_range[1])
gt_boxes = gt_boxes[valid_gt_inds]
num_pos += len(gt_boxes)
if len(gt_boxes) == 0:
continue
if limit is not None and len(predictions) > limit:
predictions = predictions[:limit]
overlaps = pairwise_iou(predictions.proposal_boxes, gt_boxes)
_gt_overlaps = torch.zeros(len(gt_boxes))
for j in range(min(len(predictions), len(gt_boxes))):
# find which proposal box maximally covers each gt box
# and get the iou amount of coverage for each gt box
max_overlaps, argmax_overlaps = overlaps.max(dim=0)
# find which gt box is 'best' covered (i.e. 'best' = most iou)
gt_ovr, gt_ind = max_overlaps.max(dim=0)
assert gt_ovr >= 0
# find the proposal box that covers the best covered gt box
box_ind = argmax_overlaps[gt_ind]
# record the iou coverage of this gt box
_gt_overlaps[j] = overlaps[box_ind, gt_ind]
assert _gt_overlaps[j] == gt_ovr
# mark the proposal box and the gt box as used
overlaps[box_ind, :] = -1
overlaps[:, gt_ind] = -1
# append recorded iou coverage level
gt_overlaps.append(_gt_overlaps)
gt_overlaps = (
torch.cat(gt_overlaps, dim=0) if len(gt_overlaps) else torch.zeros(0, dtype=torch.float32)
)
gt_overlaps, _ = torch.sort(gt_overlaps)
if thresholds is None:
step = 0.05
thresholds = torch.arange(0.5, 0.95 + 1e-5, step, dtype=torch.float32)
recalls = torch.zeros_like(thresholds)
# compute recall for each iou threshold
for i, t in enumerate(thresholds):
recalls[i] = (gt_overlaps >= t).float().sum() / float(num_pos)
# ar = 2 * np.trapz(recalls, thresholds)
ar = recalls.mean()
return {
"ar": ar,
"recalls": recalls,
"thresholds": thresholds,
"gt_overlaps": gt_overlaps,
"num_pos": num_pos,
}
def _evaluate_predictions_on_coco(
coco_gt, coco_results, iou_type, kpt_oks_sigmas=None, use_fast_impl=True, img_ids=None
):
"""
Evaluate the coco results using COCOEval API.
"""
assert len(coco_results) > 0
if iou_type == "segm":
coco_results = copy.deepcopy(coco_results)
# When evaluating mask AP, if the results contain bbox, cocoapi will
# use the box area as the area of the instance, instead of the mask area.
# This leads to a different definition of small/medium/large.
# We remove the bbox field to let mask AP use mask area.
for c in coco_results:
c.pop("bbox", None)
coco_dt = coco_gt.loadRes(coco_results)
coco_eval = (COCOeval_opt if use_fast_impl else COCOeval)(coco_gt, coco_dt, iou_type)
if img_ids is not None:
coco_eval.params.imgIds = img_ids
if iou_type == "keypoints":
# Use the COCO default keypoint OKS sigmas unless overrides are specified
if kpt_oks_sigmas:
assert hasattr(coco_eval.params, "kpt_oks_sigmas"), "pycocotools is too old!"
coco_eval.params.kpt_oks_sigmas = np.array(kpt_oks_sigmas)
# COCOAPI requires every detection and every gt to have keypoints, so
# we just take the first entry from both
num_keypoints_dt = len(coco_results[0]["keypoints"]) // 3
num_keypoints_gt = len(next(iter(coco_gt.anns.values()))["keypoints"]) // 3
num_keypoints_oks = len(coco_eval.params.kpt_oks_sigmas)
assert num_keypoints_oks == num_keypoints_dt == num_keypoints_gt, (
f"[COCOEvaluator] Prediction contain {num_keypoints_dt} keypoints. "
f"Ground truth contains {num_keypoints_gt} keypoints. "
f"The length of cfg.TEST.KEYPOINT_OKS_SIGMAS is {num_keypoints_oks}. "
"They have to agree with each other. For meaning of OKS, please refer to "
"http://cocodataset.org/#keypoints-eval."
)
coco_eval.evaluate()
coco_eval.accumulate()
coco_eval.summarize()
return coco_eval
|
# -*- coding: utf-8 -*-
# Copyright (c) Facebook, Inc. and its affiliates.
import logging
import numpy as np
import os
import tempfile
import xml.etree.ElementTree as ET
from collections import OrderedDict, defaultdict
from functools import lru_cache
import torch
from detectron2.data import MetadataCatalog
from detectron2.utils import comm
from detectron2.utils.file_io import PathManager
from detectron2.evaluation import DatasetEvaluator
class PascalVOCDetectionEvaluator(DatasetEvaluator):
"""
Evaluate Pascal VOC style AP for Pascal VOC dataset.
It contains a synchronization, therefore has to be called from all ranks.
Note that the concept of AP can be implemented in different ways and may not
produce identical results. This class mimics the implementation of the official
Pascal VOC Matlab API, and should produce similar but not identical results to the
official API.
"""
def __init__(self, dataset_name, target_classnames=None):
"""
Args:
dataset_name (str): name of the dataset, e.g., "voc_2007_test"
"""
self._dataset_name = dataset_name
meta = MetadataCatalog.get(dataset_name)
# Too many tiny files, download all to local for speed.
annotation_dir_local = PathManager.get_local_path(
os.path.join(meta.dirname, "Annotations/")
)
self._anno_file_template = os.path.join(annotation_dir_local, "{}.xml")
self._image_set_path = os.path.join(meta.dirname, "ImageSets", "Main", meta.split + ".txt")
self._class_names = meta.thing_classes
assert meta.year in [2007, 2012], meta.year
self._is_2007 = meta.year == 2007
self._cpu_device = torch.device("cpu")
self._logger = logging.getLogger(__name__)
if target_classnames is None:
self.target_classnames = self._class_names
else:
self.target_classnames = target_classnames
def reset(self):
self._predictions = defaultdict(list) # class name -> list of prediction strings
def process(self, inputs, outputs):
for input, output in zip(inputs, outputs):
image_id = input["image_id"]
instances = output["instances"].to(self._cpu_device)
boxes = instances.pred_boxes.tensor.numpy()
scores = instances.scores.tolist()
classes = instances.pred_classes.tolist()
for box, score, cls in zip(boxes, scores, classes):
xmin, ymin, xmax, ymax = box
# The inverse of data loading logic in `datasets/pascal_voc.py`
xmin += 1
ymin += 1
self._predictions[cls].append(
f"{image_id} {score:.3f} {xmin:.1f} {ymin:.1f} {xmax:.1f} {ymax:.1f}"
)
def evaluate(self):
"""
Returns:
dict: has a key "bbox", whose value is a dict of "AP", "AP50", and "AP75".
"""
all_predictions = comm.gather(self._predictions, dst=0)
if not comm.is_main_process():
return
predictions = defaultdict(list)
for predictions_per_rank in all_predictions:
for clsid, lines in predictions_per_rank.items():
predictions[clsid].extend(lines)
del all_predictions
self._logger.info(
"Evaluating {} using {} metric. "
"Note that results do not use the official Matlab API.".format(
self._dataset_name, 2007 if self._is_2007 else 2012
)
)
with tempfile.TemporaryDirectory(prefix="pascal_voc_eval_") as dirname:
res_file_template = os.path.join(dirname, "{}.txt")
aps = defaultdict(list) # iou -> ap per class
for cls_id, cls_name in enumerate(self._class_names):
if cls_name not in self.target_classnames:
continue
lines = predictions.get(cls_id, [""])
with open(res_file_template.format(cls_name), "w") as f:
f.write("\n".join(lines))
for thresh in range(50, 100, 5):
rec, prec, ap = voc_eval(
res_file_template,
self._anno_file_template,
self._image_set_path,
cls_name,
ovthresh=thresh / 100.0,
use_07_metric=self._is_2007,
)
aps[thresh].append(ap * 100)
ret = OrderedDict()
mAP = {iou: np.mean(x) for iou, x in aps.items()}
ret["bbox"] = {"AP": np.mean(list(mAP.values())), "AP50": mAP[50], "AP75": mAP[75]}
# Add per-class AP50 entries
for idx, name in enumerate(self.target_classnames):
ret["bbox"].update({"AP50-" + name: aps[50][idx]})
return ret
##############################################################################
#
# Below code is modified from
# https://github.com/rbgirshick/py-faster-rcnn/blob/master/lib/datasets/voc_eval.py
# --------------------------------------------------------
# Fast/er R-CNN
# Licensed under The MIT License [see LICENSE for details]
# Written by Bharath Hariharan
# --------------------------------------------------------
"""Python implementation of the PASCAL VOC devkit's AP evaluation code."""
@lru_cache(maxsize=None)
def parse_rec(filename):
"""Parse a PASCAL VOC xml file."""
with PathManager.open(filename) as f:
tree = ET.parse(f)
objects = []
for obj in tree.findall("object"):
obj_struct = {}
obj_struct["name"] = obj.find("name").text
obj_struct["pose"] = obj.find("pose").text
obj_struct["truncated"] = int(obj.find("truncated").text)
obj_struct["difficult"] = int(obj.find("difficult").text)
bbox = obj.find("bndbox")
obj_struct["bbox"] = [
int(bbox.find("xmin").text),
int(bbox.find("ymin").text),
int(bbox.find("xmax").text),
int(bbox.find("ymax").text),
]
objects.append(obj_struct)
return objects
def voc_ap(rec, prec, use_07_metric=False):
"""Compute VOC AP given precision and recall. If use_07_metric is true, uses
the VOC 07 11-point method (default:False).
"""
if use_07_metric:
# 11 point metric
ap = 0.0
for t in np.arange(0.0, 1.1, 0.1):
if np.sum(rec >= t) == 0:
p = 0
else:
p = np.max(prec[rec >= t])
ap = ap + p / 11.0
else:
# correct AP calculation
# first append sentinel values at the end
mrec = np.concatenate(([0.0], rec, [1.0]))
mpre = np.concatenate(([0.0], prec, [0.0]))
# compute the precision envelope
for i in range(mpre.size - 1, 0, -1):
mpre[i - 1] = np.maximum(mpre[i - 1], mpre[i])
# to calculate area under PR curve, look for points
# where X axis (recall) changes value
i = np.where(mrec[1:] != mrec[:-1])[0]
# and sum (\Delta recall) * prec
ap = np.sum((mrec[i + 1] - mrec[i]) * mpre[i + 1])
return ap
def voc_eval(detpath, annopath, imagesetfile, classname, ovthresh=0.5, use_07_metric=False):
"""rec, prec, ap = voc_eval(detpath,
annopath,
imagesetfile,
classname,
[ovthresh],
[use_07_metric])
Top level function that does the PASCAL VOC evaluation.
detpath: Path to detections
detpath.format(classname) should produce the detection results file.
annopath: Path to annotations
annopath.format(imagename) should be the xml annotations file.
imagesetfile: Text file containing the list of images, one image per line.
classname: Category name (duh)
[ovthresh]: Overlap threshold (default = 0.5)
[use_07_metric]: Whether to use VOC07's 11 point AP computation
(default False)
"""
# assumes detections are in detpath.format(classname)
# assumes annotations are in annopath.format(imagename)
# assumes imagesetfile is a text file with each line an image name
# first load gt
# read list of images
with PathManager.open(imagesetfile, "r") as f:
lines = f.readlines()
imagenames = [x.strip() for x in lines]
# load annots
recs = {}
for imagename in imagenames:
recs[imagename] = parse_rec(annopath.format(imagename))
# extract gt objects for this class
class_recs = {}
npos = 0
for imagename in imagenames:
R = [obj for obj in recs[imagename] if obj["name"] == classname]
bbox = np.array([x["bbox"] for x in R])
difficult = np.array([x["difficult"] for x in R]).astype(bool)
# difficult = np.array([False for x in R]).astype(np.bool) # treat all "difficult" as GT
det = [False] * len(R)
npos = npos + sum(~difficult)
class_recs[imagename] = {"bbox": bbox, "difficult": difficult, "det": det}
# read dets
detfile = detpath.format(classname)
with open(detfile, "r") as f:
lines = f.readlines()
splitlines = [x.strip().split(" ") for x in lines]
image_ids = [x[0] for x in splitlines]
confidence = np.array([float(x[1]) for x in splitlines])
BB = np.array([[float(z) for z in x[2:]] for x in splitlines]).reshape(-1, 4)
# sort by confidence
sorted_ind = np.argsort(-confidence)
BB = BB[sorted_ind, :]
image_ids = [image_ids[x] for x in sorted_ind]
# go down dets and mark TPs and FPs
nd = len(image_ids)
tp = np.zeros(nd)
fp = np.zeros(nd)
for d in range(nd):
R = class_recs[image_ids[d]]
bb = BB[d, :].astype(float)
ovmax = -np.inf
BBGT = R["bbox"].astype(float)
if BBGT.size > 0:
# compute overlaps
# intersection
ixmin = np.maximum(BBGT[:, 0], bb[0])
iymin = np.maximum(BBGT[:, 1], bb[1])
ixmax = np.minimum(BBGT[:, 2], bb[2])
iymax = np.minimum(BBGT[:, 3], bb[3])
iw = np.maximum(ixmax - ixmin + 1.0, 0.0)
ih = np.maximum(iymax - iymin + 1.0, 0.0)
inters = iw * ih
# union
uni = (
(bb[2] - bb[0] + 1.0) * (bb[3] - bb[1] + 1.0)
+ (BBGT[:, 2] - BBGT[:, 0] + 1.0) * (BBGT[:, 3] - BBGT[:, 1] + 1.0)
- inters
)
overlaps = inters / uni
ovmax = np.max(overlaps)
jmax = np.argmax(overlaps)
if ovmax > ovthresh:
if not R["difficult"][jmax]:
if not R["det"][jmax]:
tp[d] = 1.0
R["det"][jmax] = 1
else:
fp[d] = 1.0
else:
fp[d] = 1.0
# compute precision recall
fp = np.cumsum(fp)
tp = np.cumsum(tp)
rec = tp / float(npos)
# avoid divide by zero in case the first detection matches a difficult
# ground truth
prec = tp / np.maximum(tp + fp, np.finfo(np.float64).eps)
ap = voc_ap(rec, prec, use_07_metric)
return rec, prec, ap
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import logging
import numpy as np
import operator
import json
import torch.utils.data
from detectron2.utils.comm import get_world_size
from detectron2.data.common import (
DatasetFromList,
MapDataset,
)
from detectron2.data.dataset_mapper import DatasetMapper
from detectron2.data.samplers import (
InferenceSampler,
RepeatFactorTrainingSampler,
TrainingSampler,
)
from detectron2.data.build import (
trivial_batch_collator,
worker_init_reset_seed,
get_detection_dataset_dicts,
build_batch_data_loader,
)
from adapteacher.data.common import (
AspectRatioGroupedSemiSupDatasetTwoCrop,
)
"""
This file contains the default logic to build a dataloader for training or testing.
"""
def divide_label_unlabel(
dataset_dicts, SupPercent, random_data_seed, random_data_seed_path
):
num_all = len(dataset_dicts)
num_label = int(SupPercent / 100.0 * num_all)
# read from pre-generated data seed
with open(random_data_seed_path) as COCO_sup_file:
coco_random_idx = json.load(COCO_sup_file)
labeled_idx = np.array(coco_random_idx[str(SupPercent)][str(random_data_seed)])
assert labeled_idx.shape[0] == num_label, "Number of labeled indices read from the seed file does not match the expected count."
label_dicts = []
unlabel_dicts = []
labeled_idx = set(labeled_idx)
for i in range(len(dataset_dicts)):
if i in labeled_idx:
label_dicts.append(dataset_dicts[i])
else:
unlabel_dicts.append(dataset_dicts[i])
return label_dicts, unlabel_dicts
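# Based on the indexing above, the seed file is expected to be a JSON dict of
# the form {"<sup_percent>": {"<seed>": [labeled indices, ...]}}, e.g.
# {"1.0": {"0": [3, 17, 42, ...]}}; the exact keys depend on how it was generated.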
# used by the supervised-only baseline trainer
def build_detection_semisup_train_loader(cfg, mapper=None):
dataset_dicts = get_detection_dataset_dicts(
cfg.DATASETS.TRAIN,
filter_empty=cfg.DATALOADER.FILTER_EMPTY_ANNOTATIONS,
min_keypoints=cfg.MODEL.ROI_KEYPOINT_HEAD.MIN_KEYPOINTS_PER_IMAGE
if cfg.MODEL.KEYPOINT_ON
else 0,
proposal_files=cfg.DATASETS.PROPOSAL_FILES_TRAIN
if cfg.MODEL.LOAD_PROPOSALS
else None,
)
# Divide into labeled and unlabeled sets according to supervision percentage
label_dicts, unlabel_dicts = divide_label_unlabel(
dataset_dicts,
cfg.DATALOADER.SUP_PERCENT,
cfg.DATALOADER.RANDOM_DATA_SEED,
cfg.DATALOADER.RANDOM_DATA_SEED_PATH,
)
dataset = DatasetFromList(label_dicts, copy=False)
if mapper is None:
mapper = DatasetMapper(cfg, True)
dataset = MapDataset(dataset, mapper)
sampler_name = cfg.DATALOADER.SAMPLER_TRAIN
logger = logging.getLogger(__name__)
logger.info("Using training sampler {}".format(sampler_name))
if sampler_name == "TrainingSampler":
sampler = TrainingSampler(len(dataset))
elif sampler_name == "RepeatFactorTrainingSampler":
repeat_factors = (
RepeatFactorTrainingSampler.repeat_factors_from_category_frequency(
label_dicts, cfg.DATALOADER.REPEAT_THRESHOLD
)
)
sampler = RepeatFactorTrainingSampler(repeat_factors)
else:
raise ValueError("Unknown training sampler: {}".format(sampler_name))
# list num of labeled and unlabeled
logger.info("Number of training samples " + str(len(dataset)))
logger.info("Supervision percentage " + str(cfg.DATALOADER.SUP_PERCENT))
return build_batch_data_loader(
dataset,
sampler,
cfg.SOLVER.IMS_PER_BATCH,
aspect_ratio_grouping=cfg.DATALOADER.ASPECT_RATIO_GROUPING,
num_workers=cfg.DATALOADER.NUM_WORKERS,
)
# used for evaluation
def build_detection_test_loader(cfg, dataset_name, mapper=None):
dataset_dicts = get_detection_dataset_dicts(
[dataset_name],
filter_empty=False,
proposal_files=[
cfg.DATASETS.PROPOSAL_FILES_TEST[
list(cfg.DATASETS.TEST).index(dataset_name)
]
]
if cfg.MODEL.LOAD_PROPOSALS
else None,
)
dataset = DatasetFromList(dataset_dicts)
if mapper is None:
mapper = DatasetMapper(cfg, False)
dataset = MapDataset(dataset, mapper)
sampler = InferenceSampler(len(dataset))
batch_sampler = torch.utils.data.sampler.BatchSampler(sampler, 1, drop_last=False)
data_loader = torch.utils.data.DataLoader(
dataset,
num_workers=cfg.DATALOADER.NUM_WORKERS,
batch_sampler=batch_sampler,
collate_fn=trivial_batch_collator,
)
return data_loader
# used by the unbiased teacher trainer
def build_detection_semisup_train_loader_two_crops(cfg, mapper=None):
if cfg.DATASETS.CROSS_DATASET: # cross-dataset (e.g., coco-additional)
label_dicts = get_detection_dataset_dicts(
cfg.DATASETS.TRAIN_LABEL,
filter_empty=cfg.DATALOADER.FILTER_EMPTY_ANNOTATIONS,
min_keypoints=cfg.MODEL.ROI_KEYPOINT_HEAD.MIN_KEYPOINTS_PER_IMAGE
if cfg.MODEL.KEYPOINT_ON
else 0,
proposal_files=cfg.DATASETS.PROPOSAL_FILES_TRAIN
if cfg.MODEL.LOAD_PROPOSALS
else None,
)
unlabel_dicts = get_detection_dataset_dicts(
cfg.DATASETS.TRAIN_UNLABEL,
filter_empty=False,
min_keypoints=cfg.MODEL.ROI_KEYPOINT_HEAD.MIN_KEYPOINTS_PER_IMAGE
if cfg.MODEL.KEYPOINT_ON
else 0,
proposal_files=cfg.DATASETS.PROPOSAL_FILES_TRAIN
if cfg.MODEL.LOAD_PROPOSALS
else None,
)
else: # different degree of supervision (e.g., COCO-supervision)
dataset_dicts = get_detection_dataset_dicts(
cfg.DATASETS.TRAIN,
filter_empty=cfg.DATALOADER.FILTER_EMPTY_ANNOTATIONS,
min_keypoints=cfg.MODEL.ROI_KEYPOINT_HEAD.MIN_KEYPOINTS_PER_IMAGE
if cfg.MODEL.KEYPOINT_ON
else 0,
proposal_files=cfg.DATASETS.PROPOSAL_FILES_TRAIN
if cfg.MODEL.LOAD_PROPOSALS
else None,
)
# Divide into labeled and unlabeled sets according to supervision percentage
label_dicts, unlabel_dicts = divide_label_unlabel(
dataset_dicts,
cfg.DATALOADER.SUP_PERCENT,
cfg.DATALOADER.RANDOM_DATA_SEED,
cfg.DATALOADER.RANDOM_DATA_SEED_PATH,
)
label_dataset = DatasetFromList(label_dicts, copy=False)
# exclude the labeled set from unlabeled dataset
unlabel_dataset = DatasetFromList(unlabel_dicts, copy=False)
# include the labeled set in unlabel dataset
# unlabel_dataset = DatasetFromList(dataset_dicts, copy=False)
if mapper is None:
mapper = DatasetMapper(cfg, True)
label_dataset = MapDataset(label_dataset, mapper)
unlabel_dataset = MapDataset(unlabel_dataset, mapper)
sampler_name = cfg.DATALOADER.SAMPLER_TRAIN
logger = logging.getLogger(__name__)
logger.info("Using training sampler {}".format(sampler_name))
if sampler_name == "TrainingSampler":
label_sampler = TrainingSampler(len(label_dataset))
unlabel_sampler = TrainingSampler(len(unlabel_dataset))
elif sampler_name == "RepeatFactorTrainingSampler":
raise NotImplementedError("{} not yet supported.".format(sampler_name))
else:
raise ValueError("Unknown training sampler: {}".format(sampler_name))
return build_semisup_batch_data_loader_two_crop(
(label_dataset, unlabel_dataset),
(label_sampler, unlabel_sampler),
cfg.SOLVER.IMG_PER_BATCH_LABEL,
cfg.SOLVER.IMG_PER_BATCH_UNLABEL,
aspect_ratio_grouping=cfg.DATALOADER.ASPECT_RATIO_GROUPING,
num_workers=cfg.DATALOADER.NUM_WORKERS,
)
# batch data loader
def build_semisup_batch_data_loader_two_crop(
dataset,
sampler,
total_batch_size_label,
total_batch_size_unlabel,
*,
aspect_ratio_grouping=False,
num_workers=0
):
world_size = get_world_size()
assert (
total_batch_size_label > 0 and total_batch_size_label % world_size == 0
), "Total label batch size ({}) must be divisible by the number of gpus ({}).".format(
total_batch_size_label, world_size
)
assert (
total_batch_size_unlabel > 0 and total_batch_size_unlabel % world_size == 0
), "Total unlabel batch size ({}) must be divisible by the number of gpus ({}).".format(
        total_batch_size_unlabel, world_size
)
batch_size_label = total_batch_size_label // world_size
batch_size_unlabel = total_batch_size_unlabel // world_size
label_dataset, unlabel_dataset = dataset
label_sampler, unlabel_sampler = sampler
if aspect_ratio_grouping:
label_data_loader = torch.utils.data.DataLoader(
label_dataset,
sampler=label_sampler,
num_workers=num_workers,
batch_sampler=None,
collate_fn=operator.itemgetter(
0
), # don't batch, but yield individual elements
worker_init_fn=worker_init_reset_seed,
) # yield individual mapped dict
unlabel_data_loader = torch.utils.data.DataLoader(
unlabel_dataset,
sampler=unlabel_sampler,
num_workers=num_workers,
batch_sampler=None,
collate_fn=operator.itemgetter(
0
), # don't batch, but yield individual elements
worker_init_fn=worker_init_reset_seed,
) # yield individual mapped dict
return AspectRatioGroupedSemiSupDatasetTwoCrop(
(label_data_loader, unlabel_data_loader),
(batch_size_label, batch_size_unlabel),
)
else:
raise NotImplementedError("ASPECT_RATIO_GROUPING = False is not supported yet") |
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
from .build import (
build_detection_test_loader,
build_detection_semisup_train_loader,
)
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import logging
import torchvision.transforms as transforms
from adapteacher.data.transforms.augmentation_impl import (
GaussianBlur,
)
def build_strong_augmentation(cfg, is_train):
"""
Create a list of :class:`Augmentation` from config.
Now it includes resizing and flipping.
Returns:
list[Augmentation]
"""
logger = logging.getLogger(__name__)
augmentation = []
if is_train:
        # This is similar to SimCLR https://arxiv.org/abs/2002.05709
augmentation.append(
transforms.RandomApply([transforms.ColorJitter(0.4, 0.4, 0.4, 0.1)], p=0.8)
)
augmentation.append(transforms.RandomGrayscale(p=0.2))
augmentation.append(transforms.RandomApply([GaussianBlur([0.1, 2.0])], p=0.5))
randcrop_transform = transforms.Compose(
[
transforms.ToTensor(),
transforms.RandomErasing(
p=0.7, scale=(0.05, 0.2), ratio=(0.3, 3.3), value="random"
),
transforms.RandomErasing(
p=0.5, scale=(0.02, 0.2), ratio=(0.1, 6), value="random"
),
transforms.RandomErasing(
p=0.3, scale=(0.02, 0.2), ratio=(0.05, 8), value="random"
),
transforms.ToPILImage(),
]
)
augmentation.append(randcrop_transform)
logger.info("Augmentations used in training: " + str(augmentation))
return transforms.Compose(augmentation) |
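# --- Illustrative usage of build_strong_augmentation above (editor's sketch) ---
# The returned torchvision Compose operates on PIL images; `cfg` is only used
# for logging here and can be any detectron2 config node.
def _example_apply_strong_augmentation(cfg, pil_image):
    strong_aug = build_strong_augmentation(cfg, is_train=True)
    return strong_aug(pil_image)  # PIL image with color jitter / blur / erasing applied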
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import copy
import logging
import numpy as np
from PIL import Image
import torch
import detectron2.data.detection_utils as utils
import detectron2.data.transforms as T
from detectron2.data.dataset_mapper import DatasetMapper
from adapteacher.data.detection_utils import build_strong_augmentation
class DatasetMapperTwoCropSeparate(DatasetMapper):
"""
This customized mapper produces two augmented images from a single image
instance. This mapper makes sure that the two augmented images have the same
cropping and thus the same size.
A callable which takes a dataset dict in Detectron2 Dataset format,
and map it into a format used by the model.
This is the default callable to be used to map your dataset dict into training data.
You may need to follow it to implement your own one for customized logic,
such as a different way to read or transform images.
See :doc:`/tutorials/data_loading` for details.
The callable currently does the following:
1. Read the image from "file_name"
2. Applies cropping/geometric transforms to the image and annotations
3. Prepare data and annotations to Tensor and :class:`Instances`
"""
def __init__(self, cfg, is_train=True):
self.augmentation = utils.build_augmentation(cfg, is_train)
# include crop into self.augmentation
if cfg.INPUT.CROP.ENABLED and is_train:
self.augmentation.insert(
0, T.RandomCrop(cfg.INPUT.CROP.TYPE, cfg.INPUT.CROP.SIZE)
)
logging.getLogger(__name__).info(
"Cropping used in training: " + str(self.augmentation[0])
)
self.compute_tight_boxes = True
else:
self.compute_tight_boxes = False
self.strong_augmentation = build_strong_augmentation(cfg, is_train)
# fmt: off
self.img_format = cfg.INPUT.FORMAT
self.mask_on = cfg.MODEL.MASK_ON
self.mask_format = cfg.INPUT.MASK_FORMAT
self.keypoint_on = cfg.MODEL.KEYPOINT_ON
self.load_proposals = cfg.MODEL.LOAD_PROPOSALS
# fmt: on
if self.keypoint_on and is_train:
self.keypoint_hflip_indices = utils.create_keypoint_hflip_indices(
cfg.DATASETS.TRAIN
)
else:
self.keypoint_hflip_indices = None
if self.load_proposals:
self.proposal_min_box_size = cfg.MODEL.PROPOSAL_GENERATOR.MIN_SIZE
self.proposal_topk = (
cfg.DATASETS.PRECOMPUTED_PROPOSAL_TOPK_TRAIN
if is_train
else cfg.DATASETS.PRECOMPUTED_PROPOSAL_TOPK_TEST
)
self.is_train = is_train
def __call__(self, dataset_dict):
"""
Args:
dataset_dict (dict): Metadata of one image, in Detectron2 Dataset format.
Returns:
dict: a format that builtin models in detectron2 accept
"""
dataset_dict = copy.deepcopy(dataset_dict) # it will be modified by code below
image = utils.read_image(dataset_dict["file_name"], format=self.img_format)
# utils.check_image_size(dataset_dict, image)
if "sem_seg_file_name" in dataset_dict:
sem_seg_gt = utils.read_image(
dataset_dict.pop("sem_seg_file_name"), "L"
).squeeze(2)
else:
sem_seg_gt = None
aug_input = T.StandardAugInput(image, sem_seg=sem_seg_gt)
transforms = aug_input.apply_augmentations(self.augmentation)
image_weak_aug, sem_seg_gt = aug_input.image, aug_input.sem_seg
image_shape = image_weak_aug.shape[:2] # h, w
if sem_seg_gt is not None:
dataset_dict["sem_seg"] = torch.as_tensor(sem_seg_gt.astype("long"))
if self.load_proposals:
utils.transform_proposals(
dataset_dict,
image_shape,
transforms,
proposal_topk=self.proposal_topk,
min_box_size=self.proposal_min_box_size,
)
if not self.is_train:
dataset_dict.pop("annotations", None)
dataset_dict.pop("sem_seg_file_name", None)
return dataset_dict
if "annotations" in dataset_dict:
for anno in dataset_dict["annotations"]:
if not self.mask_on:
anno.pop("segmentation", None)
if not self.keypoint_on:
anno.pop("keypoints", None)
annos = [
utils.transform_instance_annotations(
obj,
transforms,
image_shape,
keypoint_hflip_indices=self.keypoint_hflip_indices,
)
for obj in dataset_dict.pop("annotations")
if obj.get("iscrowd", 0) == 0
]
instances = utils.annotations_to_instances(
annos, image_shape, mask_format=self.mask_format
)
if self.compute_tight_boxes and instances.has("gt_masks"):
instances.gt_boxes = instances.gt_masks.get_bounding_boxes()
bboxes_d2_format = utils.filter_empty_instances(instances)
dataset_dict["instances"] = bboxes_d2_format
# apply strong augmentation
        # We use torchvision augmentations, which are not compatible with
        # detectron2 (it uses numpy arrays for images), so we need to
        # convert to PIL format first.
image_pil = Image.fromarray(image_weak_aug.astype("uint8"), "RGB")
image_strong_aug = np.array(self.strong_augmentation(image_pil))
dataset_dict["image"] = torch.as_tensor(
np.ascontiguousarray(image_strong_aug.transpose(2, 0, 1))
)
dataset_dict_key = copy.deepcopy(dataset_dict)
dataset_dict_key["image"] = torch.as_tensor(
np.ascontiguousarray(image_weak_aug.transpose(2, 0, 1))
)
assert dataset_dict["image"].size(1) == dataset_dict_key["image"].size(1)
assert dataset_dict["image"].size(2) == dataset_dict_key["image"].size(2)
return (dataset_dict, dataset_dict_key)
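# --- Illustrative usage (editor's sketch; `cfg` and `dataset_dict` are assumptions
# supplied by the caller, e.g. one record from DatasetCatalog) ---
def _example_map_one_record(cfg, dataset_dict):
    mapper = DatasetMapperTwoCropSeparate(cfg, is_train=True)
    # The mapper returns a pair of dicts for the same image: the first carries the
    # strongly augmented image tensor, the second the weakly augmented one.
    strong_dict, weak_dict = mapper(dataset_dict)
    assert strong_dict["image"].shape[1:] == weak_dict["image"].shape[1:]
    return strong_dict, weak_dict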
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import logging
from detectron2.data.common import MapDataset, AspectRatioGroupedDataset
class MapDatasetTwoCrop(MapDataset):
"""
Map a function over the elements in a dataset.
This customized MapDataset transforms an image with two augmentations
as two inputs (queue and key).
Args:
dataset: a dataset where map function is applied.
map_func: a callable which maps the element in dataset. map_func is
responsible for error handling, when error happens, it needs to
return None so the MapDataset will randomly use other
elements from the dataset.
"""
def __getitem__(self, idx):
retry_count = 0
cur_idx = int(idx)
while True:
data = self._map_func(self._dataset[cur_idx])
if data is not None:
self._fallback_candidates.add(cur_idx)
return data
# _map_func fails for this idx, use a random new index from the pool
retry_count += 1
self._fallback_candidates.discard(cur_idx)
cur_idx = self._rng.sample(self._fallback_candidates, k=1)[0]
if retry_count >= 3:
logger = logging.getLogger(__name__)
logger.warning(
"Failed to apply `_map_func` for idx: {}, retry count: {}".format(
idx, retry_count
)
)
class AspectRatioGroupedDatasetTwoCrop(AspectRatioGroupedDataset):
"""
Batch data that have similar aspect ratio together.
In this implementation, images whose aspect ratio < (or >) 1 will
be batched together.
This improves training speed because the images then need less padding
to form a batch.
It assumes the underlying dataset produces dicts with "width" and "height" keys.
It will then produce a list of original dicts with length = batch_size,
all with similar aspect ratios.
"""
def __init__(self, dataset, batch_size):
"""
Args:
dataset: an iterable. Each element must be a dict with keys
"width" and "height", which will be used to batch data.
batch_size (int):
"""
self.dataset = dataset
self.batch_size = batch_size
self._buckets = [[] for _ in range(2)]
self._buckets_key = [[] for _ in range(2)]
# Hard-coded two aspect ratio groups: w > h and w < h.
# Can add support for more aspect ratio groups, but doesn't seem useful
def __iter__(self):
for d in self.dataset:
# d is a tuple with len = 2
# It's two images (same size) from the same image instance
w, h = d[0]["width"], d[0]["height"]
bucket_id = 0 if w > h else 1
# bucket = bucket for normal images
bucket = self._buckets[bucket_id]
bucket.append(d[0])
# buckets_key = bucket for augmented images
buckets_key = self._buckets_key[bucket_id]
buckets_key.append(d[1])
if len(bucket) == self.batch_size:
yield (bucket[:], buckets_key[:])
del bucket[:]
del buckets_key[:]
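# --- Illustrative toy run of the grouping logic above (editor's sketch; the toy
# records are assumptions). The wrapped iterable must yield pairs of dicts that
# carry "width" and "height"; landscape and portrait images are batched separately.
def _example_two_crop_grouping():
    toy = [({"width": 640, "height": 480}, {"width": 640, "height": 480})] * 4
    grouped = AspectRatioGroupedDatasetTwoCrop(toy, batch_size=2)
    first_batch, first_batch_key = next(iter(grouped))
    return len(first_batch), len(first_batch_key)  # (2, 2)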
class AspectRatioGroupedSemiSupDatasetTwoCrop(AspectRatioGroupedDataset):
"""
Batch data that have similar aspect ratio together.
In this implementation, images whose aspect ratio < (or >) 1 will
be batched together.
This improves training speed because the images then need less padding
to form a batch.
It assumes the underlying dataset produces dicts with "width" and "height" keys.
It will then produce a list of original dicts with length = batch_size,
all with similar aspect ratios.
"""
def __init__(self, dataset, batch_size):
"""
Args:
dataset: a tuple containing two iterable generators. (labeled and unlabeled data)
Each element must be a dict with keys "width" and "height", which will be used
to batch data.
batch_size (int):
"""
self.label_dataset, self.unlabel_dataset = dataset
self.batch_size_label = batch_size[0]
self.batch_size_unlabel = batch_size[1]
self._label_buckets = [[] for _ in range(2)]
self._label_buckets_key = [[] for _ in range(2)]
self._unlabel_buckets = [[] for _ in range(2)]
self._unlabel_buckets_key = [[] for _ in range(2)]
# Hard-coded two aspect ratio groups: w > h and w < h.
# Can add support for more aspect ratio groups, but doesn't seem useful
def __iter__(self):
label_bucket, unlabel_bucket = [], []
for d_label, d_unlabel in zip(self.label_dataset, self.unlabel_dataset):
# d is a tuple with len = 2
# It's two images (same size) from the same image instance
# d[0] is with strong augmentation, d[1] is with weak augmentation
# because we are grouping images with their aspect ratio
            # label and unlabel buckets might not hold the same number of items,
            # i.e., one could reach batch_size while the other has not yet
if len(label_bucket) != self.batch_size_label:
w, h = d_label[0]["width"], d_label[0]["height"]
label_bucket_id = 0 if w > h else 1
label_bucket = self._label_buckets[label_bucket_id]
label_bucket.append(d_label[0])
label_buckets_key = self._label_buckets_key[label_bucket_id]
label_buckets_key.append(d_label[1])
if len(unlabel_bucket) != self.batch_size_unlabel:
w, h = d_unlabel[0]["width"], d_unlabel[0]["height"]
unlabel_bucket_id = 0 if w > h else 1
unlabel_bucket = self._unlabel_buckets[unlabel_bucket_id]
unlabel_bucket.append(d_unlabel[0])
unlabel_buckets_key = self._unlabel_buckets_key[unlabel_bucket_id]
unlabel_buckets_key.append(d_unlabel[1])
            # yield the batch once both buckets are full
if (
len(label_bucket) == self.batch_size_label
and len(unlabel_bucket) == self.batch_size_unlabel
):
                # label_strong, label_weak, unlabel_strong, unlabel_weak
yield (
label_bucket[:],
label_buckets_key[:],
unlabel_bucket[:],
unlabel_buckets_key[:],
)
del label_bucket[:]
del label_buckets_key[:]
del unlabel_bucket[:]
del unlabel_buckets_key[:]
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import os
import contextlib
from detectron2.data import DatasetCatalog, MetadataCatalog
from fvcore.common.timer import Timer
# from fvcore.common.file_io import PathManager
from iopath.common.file_io import PathManager
from detectron2.data.datasets.pascal_voc import register_pascal_voc
from detectron2.data.datasets.builtin_meta import _get_builtin_metadata
from .cityscapes_foggy import load_cityscapes_instances
import io
import logging
logger = logging.getLogger(__name__)
JSON_ANNOTATIONS_DIR = ""
_SPLITS_COCO_FORMAT = {}
_SPLITS_COCO_FORMAT["coco"] = {
"coco_2017_unlabel": (
"coco/unlabeled2017",
"coco/annotations/image_info_unlabeled2017.json",
),
"coco_2017_for_voc20": (
"coco",
"coco/annotations/google/instances_unlabeledtrainval20class.json",
),
}
def register_coco_unlabel(root):
for _, splits_per_dataset in _SPLITS_COCO_FORMAT.items():
for key, (image_root, json_file) in splits_per_dataset.items():
meta = {}
register_coco_unlabel_instances(
key, meta, os.path.join(root, json_file), os.path.join(root, image_root)
)
def register_coco_unlabel_instances(name, metadata, json_file, image_root):
"""
Register a dataset in COCO's json annotation format for
instance detection, instance segmentation and keypoint detection.
(i.e., Type 1 and 2 in http://cocodataset.org/#format-data.
`instances*.json` and `person_keypoints*.json` in the dataset).
This is an example of how to register a new dataset.
You can do something similar to this function, to register new datasets.
Args:
name (str): the name that identifies a dataset, e.g. "coco_2014_train".
metadata (dict): extra metadata associated with this dataset. You can
leave it as an empty dict.
json_file (str): path to the json instance annotation file.
image_root (str or path-like): directory which contains all the images.
"""
assert isinstance(name, str), name
assert isinstance(json_file, (str, os.PathLike)), json_file
assert isinstance(image_root, (str, os.PathLike)), image_root
# 1. register a function which returns dicts
DatasetCatalog.register(
name, lambda: load_coco_unlabel_json(json_file, image_root, name)
)
# 2. Optionally, add metadata about this dataset,
# since they might be useful in evaluation, visualization or logging
MetadataCatalog.get(name).set(
json_file=json_file, image_root=image_root, evaluator_type="coco", **metadata
)
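# --- Illustrative registration of a custom unlabeled split (editor's sketch) ---
# The dataset name and paths below are placeholders, not part of the code base.
def _example_register_my_unlabel_split():
    register_coco_unlabel_instances(
        "my_unlabel_train",
        metadata={},
        json_file="datasets/my_dataset/annotations/image_info_unlabeled.json",
        image_root="datasets/my_dataset/unlabeled_images",
    )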
def load_coco_unlabel_json(
json_file, image_root, dataset_name=None, extra_annotation_keys=None
):
from pycocotools.coco import COCO
timer = Timer()
json_file = PathManager.get_local_path(json_file)
with contextlib.redirect_stdout(io.StringIO()):
coco_api = COCO(json_file)
if timer.seconds() > 1:
logger.info(
"Loading {} takes {:.2f} seconds.".format(json_file, timer.seconds())
)
id_map = None
# sort indices for reproducible results
img_ids = sorted(coco_api.imgs.keys())
imgs = coco_api.loadImgs(img_ids)
logger.info("Loaded {} images in COCO format from {}".format(len(imgs), json_file))
dataset_dicts = []
for img_dict in imgs:
record = {}
record["file_name"] = os.path.join(image_root, img_dict["file_name"])
record["height"] = img_dict["height"]
record["width"] = img_dict["width"]
image_id = record["image_id"] = img_dict["id"]
dataset_dicts.append(record)
return dataset_dicts
_root = os.getenv("DETECTRON2_DATASETS", "datasets")
register_coco_unlabel(_root)
# ==== Predefined splits for raw cityscapes foggy images ===========
_RAW_CITYSCAPES_SPLITS = {
# "cityscapes_foggy_{task}_train": ("cityscape_foggy/leftImg8bit/train/", "cityscape_foggy/gtFine/train/"),
# "cityscapes_foggy_{task}_val": ("cityscape_foggy/leftImg8bit/val/", "cityscape_foggy/gtFine/val/"),
# "cityscapes_foggy_{task}_test": ("cityscape_foggy/leftImg8bit/test/", "cityscape_foggy/gtFine/test/"),
"cityscapes_foggy_train": ("cityscapes_foggy/leftImg8bit/train/", "cityscapes_foggy/gtFine/train/"),
"cityscapes_foggy_val": ("cityscapes_foggy/leftImg8bit/val/", "cityscapes_foggy/gtFine/val/"),
"cityscapes_foggy_test": ("cityscapes_foggy/leftImg8bit/test/", "cityscapes_foggy/gtFine/test/"),
}
def register_all_cityscapes_foggy(root):
# root = "manifold://mobile_vision_dataset/tree/yujheli/dataset"
for key, (image_dir, gt_dir) in _RAW_CITYSCAPES_SPLITS.items():
meta = _get_builtin_metadata("cityscapes")
image_dir = os.path.join(root, image_dir)
gt_dir = os.path.join(root, gt_dir)
# inst_key = key.format(task="instance_seg")
inst_key = key
# DatasetCatalog.register(
# inst_key,
# lambda x=image_dir, y=gt_dir: load_cityscapes_instances(
# x, y, from_json=True, to_polygons=True
# ),
# )
DatasetCatalog.register(
inst_key,
lambda x=image_dir, y=gt_dir: load_cityscapes_instances(
x, y, from_json=False, to_polygons=False
),
)
# MetadataCatalog.get(inst_key).set(
# image_dir=image_dir, gt_dir=gt_dir, evaluator_type="cityscapes_instance", **meta
# )
# MetadataCatalog.get(inst_key).set(
# image_dir=image_dir, gt_dir=gt_dir, evaluator_type="pascal_voc", **meta
# )
MetadataCatalog.get(inst_key).set(
image_dir=image_dir, gt_dir=gt_dir, evaluator_type="coco", **meta
)
# ==== Predefined splits for Clipart (PASCAL VOC format) ===========
def register_all_clipart(root):
# root = "manifold://mobile_vision_dataset/tree/yujheli/dataset"
SPLITS = [
("Clipart1k_train", "clipart", "train"),
("Clipart1k_test", "clipart", "test"),
]
for name, dirname, split in SPLITS:
year = 2012
register_pascal_voc(name, os.path.join(root, dirname), split, year)
MetadataCatalog.get(name).evaluator_type = "pascal_voc"
# MetadataCatalog.get(name).evaluator_type = "coco"
# ==== Predefined splits for Watercolor (PASCAL VOC format) ===========
def register_all_water(root):
# root = "manifold://mobile_vision_dataset/tree/yujheli/dataset"
SPLITS = [
("Watercolor_train", "watercolor", "train"),
("Watercolor_test", "watercolor", "test"),
]
for name, dirname, split in SPLITS:
year = 2012
# register_pascal_voc(name, os.path.join(root, dirname), split, year, class_names=["person", "dog","bicycle", "bird", "car", "cat"])
register_pascal_voc(name, os.path.join(root, dirname), split, year)
MetadataCatalog.get(name).evaluator_type = "pascal_voc_water"
# MetadataCatalog.get(name).thing_classes = ["person", "dog","bike", "bird", "car", "cat"]
# MetadataCatalog.get(name).thing_classes = ["person", "dog","bicycle", "bird", "car", "cat"]
# MetadataCatalog.get(name).evaluator_type = "coco"
register_all_cityscapes_foggy(_root)
register_all_clipart(_root)
register_all_water(_root)
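# --- Illustrative config usage (editor's sketch) ---
# The splits registered above can be referenced by name from a detectron2 config,
# e.g. in YAML (key layout follows the usual detectron2 conventions):
#   DATASETS:
#     TRAIN: ("cityscapes_foggy_train",)
#     TEST: ("cityscapes_foggy_val",)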
|
# Copyright (c) Facebook, Inc. and its affiliates.
import functools
import json
import logging
import multiprocessing as mp
import numpy as np
import os
from itertools import chain
import pycocotools.mask as mask_util
from PIL import Image
from detectron2.structures import BoxMode
from detectron2.utils.comm import get_world_size
from detectron2.utils.file_io import PathManager
from detectron2.utils.logger import setup_logger
try:
import cv2 # noqa
except ImportError:
# OpenCV is an optional dependency at the moment
pass
logger = logging.getLogger(__name__)
load_only_002 = False
def _get_cityscapes_files(image_dir, gt_dir):
files = []
# scan through the directory
cities = PathManager.ls(image_dir)
logger.info(f"{len(cities)} cities found in '{image_dir}'.")
for city in cities:
city_img_dir = os.path.join(image_dir, city)
city_gt_dir = os.path.join(gt_dir, city)
for basename in PathManager.ls(city_img_dir):
if load_only_002 and '0.02.png' not in basename:
continue
image_file = os.path.join(city_img_dir, basename)
# suffix = "leftImg8bit.png"
# assert basename.endswith(suffix), basename
# basename = basename[: -len(suffix)]
suffix = 'leftImg8bit_foggy'
basename = basename.split(suffix)[0]
instance_file = os.path.join(city_gt_dir, basename + "gtFine_instanceIds.png")
label_file = os.path.join(city_gt_dir, basename + "gtFine_labelIds.png")
json_file = os.path.join(city_gt_dir, basename + "gtFine_polygons.json")
files.append((image_file, instance_file, label_file, json_file))
assert len(files), "No images found in {}".format(image_dir)
for f in files[0]:
assert PathManager.isfile(f), f
return files
def load_cityscapes_instances(image_dir, gt_dir, from_json=True, to_polygons=True):
"""
Args:
image_dir (str): path to the raw dataset. e.g., "~/cityscapes/leftImg8bit/train".
gt_dir (str): path to the raw annotations. e.g., "~/cityscapes/gtFine/train".
from_json (bool): whether to read annotations from the raw json file or the png files.
to_polygons (bool): whether to represent the segmentation as polygons
(COCO's format) instead of masks (cityscapes's format).
Returns:
list[dict]: a list of dicts in Detectron2 standard format. (See
`Using Custom Datasets </tutorials/datasets.html>`_ )
"""
if from_json:
assert to_polygons, (
"Cityscapes's json annotations are in polygon format. "
"Converting to mask format is not supported now."
)
files = _get_cityscapes_files(image_dir, gt_dir)
logger.info("Preprocessing cityscapes annotations ...")
    # This is still not fast: all workers will execute duplicate work and
    # can take up to 10 minutes on an 8-GPU server.
pool = mp.Pool(processes=max(mp.cpu_count() // get_world_size() // 2, 4))
ret = pool.map(
functools.partial(_cityscapes_files_to_dict, from_json=from_json, to_polygons=to_polygons),
files,
)
logger.info("Loaded {} images from {}".format(len(ret), image_dir))
pool.close()
# Map cityscape ids to contiguous ids
from cityscapesscripts.helpers.labels import labels
labels = [l for l in labels if l.hasInstances and not l.ignoreInEval]
dataset_id_to_contiguous_id = {l.id: idx for idx, l in enumerate(labels)}
for dict_per_image in ret:
for anno in dict_per_image["annotations"]:
anno["category_id"] = dataset_id_to_contiguous_id[anno["category_id"]]
return ret
def load_cityscapes_semantic(image_dir, gt_dir):
"""
Args:
image_dir (str): path to the raw dataset. e.g., "~/cityscapes/leftImg8bit/train".
gt_dir (str): path to the raw annotations. e.g., "~/cityscapes/gtFine/train".
Returns:
list[dict]: a list of dict, each has "file_name" and
"sem_seg_file_name".
"""
ret = []
    # gt_dir is small and contains many small files, so it makes sense to fetch it locally first
gt_dir = PathManager.get_local_path(gt_dir)
for image_file, _, label_file, json_file in _get_cityscapes_files(image_dir, gt_dir):
label_file = label_file.replace("labelIds", "labelTrainIds")
with PathManager.open(json_file, "r") as f:
jsonobj = json.load(f)
ret.append(
{
"file_name": image_file,
"sem_seg_file_name": label_file,
"height": jsonobj["imgHeight"],
"width": jsonobj["imgWidth"],
}
)
assert len(ret), f"No images found in {image_dir}!"
assert PathManager.isfile(
ret[0]["sem_seg_file_name"]
), "Please generate labelTrainIds.png with cityscapesscripts/preparation/createTrainIdLabelImgs.py" # noqa
return ret
def _cityscapes_files_to_dict(files, from_json, to_polygons):
"""
    Parse cityscapes annotation files into an instance segmentation dataset dict.
Args:
files (tuple): consists of (image_file, instance_id_file, label_id_file, json_file)
from_json (bool): whether to read annotations from the raw json file or the png files.
to_polygons (bool): whether to represent the segmentation as polygons
(COCO's format) instead of masks (cityscapes's format).
Returns:
A dict in Detectron2 Dataset format.
"""
from cityscapesscripts.helpers.labels import id2label, name2label
image_file, instance_id_file, _, json_file = files
annos = []
if from_json:
from shapely.geometry import MultiPolygon, Polygon
with PathManager.open(json_file, "r") as f:
jsonobj = json.load(f)
ret = {
"file_name": image_file,
"image_id": os.path.basename(image_file),
"height": jsonobj["imgHeight"],
"width": jsonobj["imgWidth"],
}
# `polygons_union` contains the union of all valid polygons.
polygons_union = Polygon()
# CityscapesScripts draw the polygons in sequential order
# and each polygon *overwrites* existing ones. See
# (https://github.com/mcordts/cityscapesScripts/blob/master/cityscapesscripts/preparation/json2instanceImg.py) # noqa
# We use reverse order, and each polygon *avoids* early ones.
        # This will resolve the polygon overlaps in the same way as CityscapesScripts.
for obj in jsonobj["objects"][::-1]:
if "deleted" in obj: # cityscapes data format specific
continue
label_name = obj["label"]
try:
label = name2label[label_name]
except KeyError:
if label_name.endswith("group"): # crowd area
label = name2label[label_name[: -len("group")]]
else:
raise
if label.id < 0: # cityscapes data format
continue
            # Cityscapes's raw annotations use integer coordinates,
            # therefore +0.5 here
poly_coord = np.asarray(obj["polygon"], dtype="f4") + 0.5
# CityscapesScript uses PIL.ImageDraw.polygon to rasterize
# polygons for evaluation. This function operates in integer space
# and draws each pixel whose center falls into the polygon.
# Therefore it draws a polygon which is 0.5 "fatter" in expectation.
# We therefore dilate the input polygon by 0.5 as our input.
poly = Polygon(poly_coord).buffer(0.5, resolution=4)
if not label.hasInstances or label.ignoreInEval:
# even if we won't store the polygon it still contributes to overlaps resolution
polygons_union = polygons_union.union(poly)
continue
# Take non-overlapping part of the polygon
poly_wo_overlaps = poly.difference(polygons_union)
if poly_wo_overlaps.is_empty:
continue
polygons_union = polygons_union.union(poly)
anno = {}
anno["iscrowd"] = label_name.endswith("group")
anno["category_id"] = label.id
if isinstance(poly_wo_overlaps, Polygon):
poly_list = [poly_wo_overlaps]
elif isinstance(poly_wo_overlaps, MultiPolygon):
poly_list = poly_wo_overlaps.geoms
else:
raise NotImplementedError("Unknown geometric structure {}".format(poly_wo_overlaps))
poly_coord = []
for poly_el in poly_list:
# COCO API can work only with exterior boundaries now, hence we store only them.
# TODO: store both exterior and interior boundaries once other parts of the
# codebase support holes in polygons.
poly_coord.append(list(chain(*poly_el.exterior.coords)))
anno["segmentation"] = poly_coord
(xmin, ymin, xmax, ymax) = poly_wo_overlaps.bounds
anno["bbox"] = (xmin, ymin, xmax, ymax)
anno["bbox_mode"] = BoxMode.XYXY_ABS
annos.append(anno)
else:
# See also the official annotation parsing scripts at
# https://github.com/mcordts/cityscapesScripts/blob/master/cityscapesscripts/evaluation/instances2dict.py # noqa
with PathManager.open(instance_id_file, "rb") as f:
inst_image = np.asarray(Image.open(f), order="F")
# ids < 24 are stuff labels (filtering them first is about 5% faster)
flattened_ids = np.unique(inst_image[inst_image >= 24])
ret = {
"file_name": image_file,
"image_id": os.path.basename(image_file),
"height": inst_image.shape[0],
"width": inst_image.shape[1],
}
for instance_id in flattened_ids:
# For non-crowd annotations, instance_id // 1000 is the label_id
# Crowd annotations have <1000 instance ids
label_id = instance_id // 1000 if instance_id >= 1000 else instance_id
label = id2label[label_id]
if not label.hasInstances or label.ignoreInEval:
continue
anno = {}
anno["iscrowd"] = instance_id < 1000
anno["category_id"] = label.id
mask = np.asarray(inst_image == instance_id, dtype=np.uint8, order="F")
inds = np.nonzero(mask)
ymin, ymax = inds[0].min(), inds[0].max()
xmin, xmax = inds[1].min(), inds[1].max()
anno["bbox"] = (xmin, ymin, xmax, ymax)
if xmax <= xmin or ymax <= ymin:
continue
anno["bbox_mode"] = BoxMode.XYXY_ABS
if to_polygons:
# This conversion comes from D4809743 and D5171122,
# when Mask-RCNN was first developed.
contours = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)[
-2
]
polygons = [c.reshape(-1).tolist() for c in contours if len(c) >= 3]
                # opencv can produce invalid polygons
if len(polygons) == 0:
continue
anno["segmentation"] = polygons
else:
anno["segmentation"] = mask_util.encode(mask[:, :, None])[0]
annos.append(anno)
ret["annotations"] = annos
return ret
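# --- Worked example of the reverse-order overlap resolution used above (editor's
# sketch, using shapely directly). The later-drawn polygon keeps its full area,
# while the earlier one loses the overlapping region.
def _example_overlap_resolution():
    from shapely.geometry import Polygon
    later = Polygon([(1, 1), (3, 1), (3, 3), (1, 3)])    # drawn last, wins overlaps
    earlier = Polygon([(0, 0), (2, 0), (2, 2), (0, 2)])  # drawn first, gets clipped
    polygons_union = Polygon()
    kept_later = later.difference(polygons_union)         # nothing to subtract yet
    polygons_union = polygons_union.union(later)
    kept_earlier = earlier.difference(polygons_union)     # overlap with `later` removed
    return kept_later.area, kept_earlier.area             # 4.0 and 3.0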
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import random
from PIL import ImageFilter
class GaussianBlur:
"""
Gaussian blur augmentation in SimCLR https://arxiv.org/abs/2002.05709
Adapted from MoCo:
https://github.com/facebookresearch/moco/blob/master/moco/loader.py
Note that this implementation does not seem to be exactly the same as
described in SimCLR.
"""
def __init__(self, sigma=[0.1, 2.0]):
self.sigma = sigma
def __call__(self, x):
sigma = random.uniform(self.sigma[0], self.sigma[1])
x = x.filter(ImageFilter.GaussianBlur(radius=sigma))
return x
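# --- Illustrative usage (editor's sketch; `pil_image` is any PIL.Image) ---
def _example_blur(pil_image):
    blur = GaussianBlur(sigma=[0.1, 2.0])
    return blur(pil_image)  # blurred copy with a randomly sampled sigma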
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
from detectron2.engine.hooks import HookBase
import detectron2.utils.comm as comm
import torch
import numpy as np
from contextlib import contextmanager
class LossEvalHook(HookBase):
def __init__(self, eval_period, model, data_loader, model_output, model_name=""):
self._model = model
self._period = eval_period
self._data_loader = data_loader
self._model_output = model_output
self._model_name = model_name
def _do_loss_eval(self):
record_acc_dict = {}
with inference_context(self._model), torch.no_grad():
for _, inputs in enumerate(self._data_loader):
record_dict = self._get_loss(inputs, self._model)
# accumulate the losses
for loss_type in record_dict.keys():
if loss_type not in record_acc_dict.keys():
record_acc_dict[loss_type] = record_dict[loss_type]
else:
record_acc_dict[loss_type] += record_dict[loss_type]
# average
for loss_type in record_acc_dict.keys():
record_acc_dict[loss_type] = record_acc_dict[loss_type] / len(
self._data_loader
)
        # separate losses from other metrics
loss_acc_dict = {}
for key in record_acc_dict.keys():
if key[:4] == "loss":
loss_acc_dict[key] = record_acc_dict[key]
        # only output the results on the main node
if comm.is_main_process():
total_losses_reduced = sum(loss for loss in loss_acc_dict.values())
self.trainer.storage.put_scalar(
"val_total_loss_val" + self._model_name, total_losses_reduced
)
record_acc_dict = {
"val_" + k + self._model_name: record_acc_dict[k]
for k in record_acc_dict.keys()
}
if len(record_acc_dict) > 1:
self.trainer.storage.put_scalars(**record_acc_dict)
def _get_loss(self, data, model):
if self._model_output == "loss_only":
record_dict = model(data)
elif self._model_output == "loss_proposal":
record_dict, _, _, _ = model(data, branch="val_loss", val_mode=True)
elif self._model_output == "meanteacher":
            record_dict, _, _, _, _ = model(data)
        else:
            raise NotImplementedError(
                "Unknown model_output: {!r}".format(self._model_output)
            )
metrics_dict = {
k: v.detach().cpu().item() if isinstance(v, torch.Tensor) else float(v)
for k, v in record_dict.items()
}
return metrics_dict
def _write_losses(self, metrics_dict):
# gather metrics among all workers for logging
# This assumes we do DDP-style training, which is currently the only
# supported method in detectron2.
comm.synchronize()
all_metrics_dict = comm.gather(metrics_dict, dst=0)
if comm.is_main_process():
            # average the remaining metrics
metrics_dict = {
"val_" + k: np.mean([x[k] for x in all_metrics_dict])
for k in all_metrics_dict[0].keys()
}
total_losses_reduced = sum(loss for loss in metrics_dict.values())
self.trainer.storage.put_scalar("val_total_loss_val", total_losses_reduced)
if len(metrics_dict) > 1:
self.trainer.storage.put_scalars(**metrics_dict)
def _detect_anomaly(self, losses, loss_dict):
if not torch.isfinite(losses).all():
raise FloatingPointError(
"Loss became infinite or NaN at iteration={}!\nloss_dict = {}".format(
self.trainer.iter, loss_dict
)
)
def after_step(self):
next_iter = self.trainer.iter + 1
is_final = next_iter == self.trainer.max_iter
if is_final or (self._period > 0 and next_iter % self._period == 0):
self._do_loss_eval()
@contextmanager
def inference_context(model):
"""
A context where the model is temporarily changed to eval mode,
and restored to previous mode afterwards.
Args:
model: a torch Module
"""
training_mode = model.training
model.eval()
yield
model.train(training_mode)
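# --- Illustrative hook registration (editor's sketch) ---
# `trainer`, `cfg` and `val_loader` are assumptions about how a caller would wire
# this hook into a detectron2-style trainer (TrainerBase.register_hooks).
def _example_register_loss_eval_hook(trainer, cfg, val_loader):
    hook = LossEvalHook(
        eval_period=cfg.TEST.EVAL_PERIOD,
        model=trainer.model,
        data_loader=val_loader,
        model_output="loss_only",
    )
    trainer.register_hooks([hook])
    return trainer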
|