|
|
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F

from .cfg import *
from .region_layer import RegionLayer
from .yolo_layer import YoloLayer
|
|
|
|
|
|
|
|
class MaxPoolStride1(nn.Module):
    """2x2 max pooling with stride 1 that preserves the spatial size.

    Pads one replicated pixel on the right and bottom edges so the
    output H/W match the input H/W.
    """

    def __init__(self):
        super(MaxPoolStride1, self).__init__()

    def forward(self, x):
        # Replicate-pad (left=0, right=1, top=0, bottom=1), then pool.
        padded = F.pad(x, (0, 1, 0, 1), mode='replicate')
        return F.max_pool2d(padded, 2, stride=1)
|
|
|
|
|
|
|
|
class Upsample(nn.Module):
    """Nearest-neighbour upsampling by an integer factor (default 2)."""

    def __init__(self, stride=2):
        super(Upsample, self).__init__()
        self.stride = stride

    def forward(self, x):
        assert (x.data.dim() == 4)
        factor = self.stride
        n, c, h, w = x.data.size(0), x.data.size(1), x.data.size(2), x.data.size(3)
        # Insert singleton axes after H and W, broadcast each pixel into a
        # factor x factor patch, then fold the patches back into H and W.
        out = x.view(n, c, h, 1, w, 1)
        out = out.expand(n, c, h, factor, w, factor).contiguous()
        return out.view(n, c, h * factor, w * factor)
|
|
|
|
|
|
|
|
class Reorg(nn.Module):
    """Space-to-depth reorg layer (YOLOv2 passthrough).

    Moves each stride x stride spatial patch into the channel dimension:
    (B, C, H, W) -> (B, stride*stride*C, H/stride, W/stride).
    """

    def __init__(self, stride=2):
        super(Reorg, self).__init__()
        self.stride = stride

    def forward(self, x):
        s = self.stride
        assert (x.data.dim() == 4)
        b = x.data.size(0)
        c = x.data.size(1)
        h = x.data.size(2)
        w = x.data.size(3)
        assert (h % s == 0)
        assert (w % s == 0)
        # Split H and W into (H/s, s) and (W/s, s), then move the two patch
        # axes in front of C so the channel order is (row-in-patch,
        # col-in-patch, original-channel) -- identical to darknet's layout.
        out = x.view(b, c, h // s, s, w // s, s)
        out = out.permute(0, 3, 5, 1, 2, 4).contiguous()
        return out.view(b, s * s * c, h // s, w // s)
|
|
|
|
|
|
|
|
class GlobalAvgPool2d(nn.Module):
    """Average each feature map down to one value: (N, C, H, W) -> (N, C)."""

    def __init__(self):
        super(GlobalAvgPool2d, self).__init__()

    def forward(self, x):
        n = x.data.size(0)
        c = x.data.size(1)
        h = x.data.size(2)
        w = x.data.size(3)
        # Pooling over the full spatial extent yields one value per map.
        pooled = F.avg_pool2d(x, (h, w))
        return pooled.view(n, c)
|
|
|
|
|
|
|
|
|
|
|
class EmptyModule(nn.Module):
    """Identity placeholder used for 'route' and 'shortcut' cfg blocks.

    The actual concatenation/addition is performed in Darknet.forward();
    this module only keeps the layer indices aligned with the cfg blocks.
    """

    def __init__(self):
        super(EmptyModule, self).__init__()

    def forward(self, x):
        return x
|
|
|
|
|
|
|
|
|
|
|
|
|
|
class Darknet(nn.Module):
    """A YOLO (v2/v3) detection network assembled from a darknet .cfg file.

    The network is stored as an nn.ModuleList (``self.models``) with one
    module per cfg block; 'route' and 'shortcut' blocks get EmptyModule
    placeholders and are wired up dynamically in forward().
    """

    def getLossLayers(self):
        """Return the detection/loss heads (RegionLayer or YoloLayer modules)."""
        loss_layers = []
        for m in self.models:
            if isinstance(m, (RegionLayer, YoloLayer)):
                loss_layers.append(m)
        return loss_layers

    def __init__(self, cfgfile, use_cuda=True):
        """Parse ``cfgfile`` and build the layer list.

        Args:
            cfgfile: path to a darknet .cfg network description.
            use_cuda: forwarded to the Region/Yolo loss layers.
        """
        super(Darknet, self).__init__()
        self.use_cuda = use_cuda
        self.blocks = parse_cfg(cfgfile)
        self.models = self.create_network(self.blocks)
        self.loss_layers = self.getLossLayers()

        # Expose the anchor/class configuration of the last detection head
        # as convenient top-level attributes.
        if len(self.loss_layers) > 0:
            last = len(self.loss_layers) - 1
            self.anchors = self.loss_layers[last].anchors
            self.num_anchors = self.loss_layers[last].num_anchors
            self.anchor_step = self.loss_layers[last].anchor_step
            self.num_classes = self.loss_layers[last].num_classes

        # Darknet weight-file header [major, minor, revision, seen];
        # `seen` counts training images processed so far.
        self.header = torch.IntTensor([0, 1, 0, 0])
        self.seen = 0

    def forward(self, x):
        """Run the network.

        Returns the final tensor for classification networks, or a dict of
        per-detection-head mask/box outputs when region/yolo layers exist.
        """
        ind = -2  # block index aligned with create_network's numbering
        # NOTE(review): this clobbers the loss layers collected in __init__
        # on every forward pass; training code reading self.loss_layers
        # afterwards would break. Looks unintentional -- confirm before
        # removing.
        self.loss_layers = None
        outputs = dict()    # per-layer outputs, consumed by route/shortcut
        out_boxes = dict()  # one entry per detection head
        outno = 0
        for block in self.blocks:
            ind = ind + 1

            if block['type'] == 'net':
                continue
            elif block['type'] in ['convolutional', 'maxpool', 'reorg', 'upsample', 'avgpool', 'softmax', 'connected']:
                x = self.models[ind](x)
                outputs[ind] = x
            elif block['type'] == 'route':
                layers = block['layers'].split(',')
                # Negative layer indices are relative to the current layer.
                layers = [int(i) if int(i) > 0 else int(i) + ind for i in layers]
                if len(layers) == 1:
                    x = outputs[layers[0]]
                elif len(layers) == 2:
                    x1 = outputs[layers[0]]
                    x2 = outputs[layers[1]]
                    x = torch.cat((x1, x2), 1)
                outputs[ind] = x
            elif block['type'] == 'shortcut':
                from_layer = int(block['from'])
                activation = block['activation']
                from_layer = from_layer if from_layer > 0 else from_layer + ind
                x1 = outputs[from_layer]
                x2 = outputs[ind - 1]
                x = x1 + x2
                if activation == 'leaky':
                    x = F.leaky_relu(x, 0.1, inplace=True)
                elif activation == 'relu':
                    x = F.relu(x, inplace=True)
                outputs[ind] = x
            elif block['type'] in ['region', 'yolo']:
                boxes = self.models[ind].get_mask_boxes(x)
                out_boxes[outno] = boxes
                outno += 1
                outputs[ind] = None
            elif block['type'] == 'cost':
                continue
            else:
                print('unknown type %s' % (block['type']))
        return x if outno == 0 else out_boxes

    def print_network(self):
        """Pretty-print the parsed cfg layout."""
        print_cfg(self.blocks)

    def create_network(self, blocks):
        """Translate parsed cfg blocks into an nn.ModuleList.

        Tracks the running channel count (``prev_filters``) and cumulative
        spatial stride (``prev_stride``) so route/shortcut/detection layers
        can be configured correctly; per-layer values are kept in
        ``out_filters`` / ``out_strides``.
        """
        models = nn.ModuleList()

        prev_filters = 3
        out_filters = []
        prev_stride = 1
        out_strides = []
        conv_id = 0
        ind = -2
        for block in blocks:
            ind += 1
            if block['type'] == 'net':
                prev_filters = int(block['channels'])
                self.width = int(block['width'])
                self.height = int(block['height'])
                continue
            elif block['type'] == 'convolutional':
                conv_id = conv_id + 1
                batch_normalize = int(block['batch_normalize'])
                filters = int(block['filters'])
                kernel_size = int(block['size'])
                stride = int(block['stride'])
                is_pad = int(block['pad'])
                pad = (kernel_size - 1) // 2 if is_pad else 0
                activation = block['activation']
                model = nn.Sequential()
                if batch_normalize:
                    # BatchNorm supplies the shift, so the conv is bias-free.
                    model.add_module('conv{0}'.format(conv_id),
                                     nn.Conv2d(prev_filters, filters, kernel_size, stride, pad, bias=False))
                    model.add_module('bn{0}'.format(conv_id), nn.BatchNorm2d(filters))
                else:
                    model.add_module('conv{0}'.format(conv_id),
                                     nn.Conv2d(prev_filters, filters, kernel_size, stride, pad))
                if activation == 'leaky':
                    model.add_module('leaky{0}'.format(conv_id), nn.LeakyReLU(0.1, inplace=True))
                elif activation == 'relu':
                    model.add_module('relu{0}'.format(conv_id), nn.ReLU(inplace=True))
                prev_filters = filters
                out_filters.append(prev_filters)
                prev_stride = stride * prev_stride
                out_strides.append(prev_stride)
                models.append(model)
            elif block['type'] == 'maxpool':
                pool_size = int(block['size'])
                stride = int(block['stride'])
                if stride > 1:
                    model = nn.MaxPool2d(pool_size, stride)
                else:
                    # stride-1 pooling needs edge padding to keep the size.
                    model = MaxPoolStride1()
                out_filters.append(prev_filters)
                prev_stride = stride * prev_stride
                out_strides.append(prev_stride)
                models.append(model)
            elif block['type'] == 'avgpool':
                model = GlobalAvgPool2d()
                out_filters.append(prev_filters)
                # Keep out_strides aligned with out_filters (the original
                # skipped this append, desynchronizing the two lists).
                out_strides.append(prev_stride)
                models.append(model)
            elif block['type'] == 'softmax':
                model = nn.Softmax()
                out_strides.append(prev_stride)
                out_filters.append(prev_filters)
                models.append(model)
            elif block['type'] == 'cost':
                if block['_type'] == 'sse':
                    model = nn.MSELoss(size_average=True)
                elif block['_type'] == 'L1':
                    model = nn.L1Loss(size_average=True)
                elif block['_type'] == 'smooth':
                    model = nn.SmoothL1Loss(size_average=True)
                out_filters.append(1)
                out_strides.append(prev_stride)
                models.append(model)
            elif block['type'] == 'reorg':
                stride = int(block['stride'])
                # Space-to-depth multiplies channels by stride^2.
                prev_filters = stride * stride * prev_filters
                out_filters.append(prev_filters)
                prev_stride = prev_stride * stride
                out_strides.append(prev_stride)
                models.append(Reorg(stride))
            elif block['type'] == 'upsample':
                stride = int(block['stride'])
                out_filters.append(prev_filters)
                # Integer division: keeps the cumulative stride an int
                # (true division would leak a float into YoloLayer.stride).
                prev_stride = prev_stride // stride
                out_strides.append(prev_stride)
                models.append(Upsample(stride))
            elif block['type'] == 'route':
                layers = block['layers'].split(',')
                ind = len(models)
                # Negative layer indices are relative to the current layer.
                layers = [int(i) if int(i) > 0 else int(i) + ind for i in layers]
                if len(layers) == 1:
                    prev_filters = out_filters[layers[0]]
                    prev_stride = out_strides[layers[0]]
                elif len(layers) == 2:
                    assert (layers[0] == ind - 1)
                    # Concatenation along channels adds the filter counts.
                    prev_filters = out_filters[layers[0]] + out_filters[layers[1]]
                    prev_stride = out_strides[layers[0]]
                out_filters.append(prev_filters)
                out_strides.append(prev_stride)
                models.append(EmptyModule())
            elif block['type'] == 'shortcut':
                ind = len(models)
                prev_filters = out_filters[ind - 1]
                out_filters.append(prev_filters)
                prev_stride = out_strides[ind - 1]
                out_strides.append(prev_stride)
                models.append(EmptyModule())
            elif block['type'] == 'connected':
                filters = int(block['output'])
                if block['activation'] == 'linear':
                    model = nn.Linear(prev_filters, filters)
                elif block['activation'] == 'leaky':
                    model = nn.Sequential(
                        nn.Linear(prev_filters, filters),
                        nn.LeakyReLU(0.1, inplace=True))
                elif block['activation'] == 'relu':
                    model = nn.Sequential(
                        nn.Linear(prev_filters, filters),
                        nn.ReLU(inplace=True))
                prev_filters = filters
                out_filters.append(prev_filters)
                out_strides.append(prev_stride)
                models.append(model)
            elif block['type'] == 'region':
                # YOLOv2 detection head; hyper-parameters come from the cfg.
                region_layer = RegionLayer(use_cuda=self.use_cuda)
                anchors = block['anchors'].split(',')
                region_layer.anchors = [float(i) for i in anchors]
                region_layer.num_classes = int(block['classes'])
                region_layer.num_anchors = int(block['num'])
                region_layer.anchor_step = len(region_layer.anchors) // region_layer.num_anchors
                region_layer.rescore = int(block['rescore'])
                region_layer.object_scale = float(block['object_scale'])
                region_layer.noobject_scale = float(block['noobject_scale'])
                region_layer.class_scale = float(block['class_scale'])
                region_layer.coord_scale = float(block['coord_scale'])
                region_layer.thresh = float(block['thresh'])
                out_filters.append(prev_filters)
                out_strides.append(prev_stride)
                models.append(region_layer)
            elif block['type'] == 'yolo':
                # YOLOv3 detection head; one per scale, selected by 'mask'.
                yolo_layer = YoloLayer(use_cuda=self.use_cuda)
                anchors = block['anchors'].split(',')
                anchor_mask = block['mask'].split(',')
                yolo_layer.anchor_mask = [int(i) for i in anchor_mask]
                yolo_layer.anchors = [float(i) for i in anchors]
                yolo_layer.num_classes = int(block['classes'])
                yolo_layer.num_anchors = int(block['num'])
                yolo_layer.anchor_step = len(yolo_layer.anchors) // yolo_layer.num_anchors
                try:
                    yolo_layer.rescore = int(block['rescore'])
                except KeyError:
                    pass  # 'rescore' is optional in the cfg
                yolo_layer.ignore_thresh = float(block['ignore_thresh'])
                yolo_layer.truth_thresh = float(block['truth_thresh'])
                yolo_layer.stride = prev_stride
                yolo_layer.nth_layer = ind
                yolo_layer.net_width = self.width
                yolo_layer.net_height = self.height
                out_filters.append(prev_filters)
                out_strides.append(prev_stride)
                models.append(yolo_layer)
            else:
                print('unknown type %s' % (block['type']))

        return models

    def load_binfile(self, weightfile):
        """Read a darknet .weights file and return its float32 payload.

        Side effects: records the file header in ``self.header`` and the
        'seen images' counter in ``self.seen``.
        """
        with open(weightfile, 'rb') as fp:
            version = np.fromfile(fp, count=3, dtype=np.int32)
            version = [int(i) for i in version]
            # From format version 0.2 onward, `seen` is a 64-bit integer.
            if version[0] * 10 + version[1] >= 2 and version[0] < 1000 and version[1] < 1000:
                seen = np.fromfile(fp, count=1, dtype=np.int64)
            else:
                seen = np.fromfile(fp, count=1, dtype=np.int32)
            self.header = torch.from_numpy(np.concatenate((version, seen), axis=0))
            self.seen = int(seen)
            body = np.fromfile(fp, dtype=np.float32)
        return body

    def load_weights(self, weightfile):
        """Load darknet binary weights into the matching modules.

        Only convolutional and connected layers carry weights; all other
        layer types are skipped. Stops early if the buffer is exhausted
        (supports partial/backbone weight files).
        """
        buf = self.load_binfile(weightfile)

        start = 0
        ind = -2
        for block in self.blocks:
            if start >= buf.size:
                break
            ind = ind + 1
            if block['type'] == 'net':
                continue
            elif block['type'] == 'convolutional':
                model = self.models[ind]
                batch_normalize = int(block['batch_normalize'])
                if batch_normalize:
                    start = load_conv_bn(buf, start, model[0], model[1])
                else:
                    start = load_conv(buf, start, model[0])
            elif block['type'] == 'connected':
                model = self.models[ind]
                if block['activation'] != 'linear':
                    # Sequential(Linear, activation): Linear is model[0].
                    start = load_fc(buf, start, model[0])
                else:
                    start = load_fc(buf, start, model)
            elif block['type'] in ['maxpool', 'reorg', 'upsample', 'route', 'shortcut',
                                   'region', 'yolo', 'avgpool', 'softmax', 'cost']:
                pass  # no trainable parameters to load
            else:
                print('unknown type %s' % (block['type']))

    def save_weights(self, outfile, cutoff=0):
        """Write the weights in darknet binary format.

        Args:
            outfile: destination path.
            cutoff: save only the first ``cutoff`` cfg blocks
                (0 or negative = save everything).
        """
        if cutoff <= 0:
            cutoff = len(self.blocks) - 1

        with open(outfile, 'wb') as fp:
            self.header[3] = self.seen
            header = np.array(self.header[0:3].numpy(), np.int32)
            header.tofile(fp)
            # Match the on-disk width load_binfile expects for `seen`.
            if (self.header[0] * 10 + self.header[1]) >= 2:
                seen = np.array(self.seen, np.int64)
            else:
                seen = np.array(self.seen, np.int32)
            seen.tofile(fp)

            ind = -1
            for blockId in range(1, cutoff + 1):
                ind = ind + 1
                block = self.blocks[blockId]
                if block['type'] == 'convolutional':
                    model = self.models[ind]
                    batch_normalize = int(block['batch_normalize'])
                    if batch_normalize:
                        save_conv_bn(fp, model[0], model[1])
                    else:
                        save_conv(fp, model[0])
                elif block['type'] == 'connected':
                    model = self.models[ind]
                    # Bug fix: the original wrote to the undefined name `fc`
                    # (NameError) and saved model/model[0] swapped relative
                    # to load_weights; mirror load_weights here.
                    if block['activation'] != 'linear':
                        save_fc(fp, model[0])
                    else:
                        save_fc(fp, model)
                elif block['type'] in ['maxpool', 'reorg', 'upsample', 'route', 'shortcut',
                                       'region', 'yolo', 'avgpool', 'softmax', 'cost']:
                    pass  # no trainable parameters to save
                else:
                    print('unknown type %s' % (block['type']))
|
|
|