| prompt (string, 94 to 42.6k chars) | completion (string, 6 to 120 chars) | api (string, 14 to 68 chars) |
---|---|---|
import math
import megengine as mge
import megengine.functional as F
import numpy as np
from megengine import Tensor
import pdb
def restore_bbox(rois, deltas, unnormalize=True, config = None):
assert deltas.ndim == 3
if unnormalize:
std_opr = mge.tensor(config.bbox_normalize_stds.reshape(1, 1, -1))
mean_opr = mge.tensor(config.bbox_normalize_means.reshape(1, 1, -1))
deltas = deltas * std_opr
deltas = deltas + mean_opr
# n = deltas.shape[1]
n, c = deltas.shape[0], deltas.shape[1]
all_rois = F.broadcast_to(F.expand_dims(rois, 1), (n, c, rois.shape[1])).reshape(-1, rois.shape[1])
deltas = deltas.reshape(-1, deltas.shape[2])
pred_bbox = bbox_transform_inv_opr(all_rois, deltas)
pred_bbox = pred_bbox.reshape(-1, c, pred_bbox.shape[1])
return pred_bbox
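# Illustrative usage sketch for restore_bbox: rois has shape (n, 4) and deltas
# shape (n, c, 4) with one 4-vector per roi and per class; the result has shape
# (n, c, 4). The config object is assumed to expose numpy arrays named
# bbox_normalize_stds and bbox_normalize_means (each of length 4, reshaped
# internally to (1, 1, 4)), e.g. stds [0.1, 0.1, 0.2, 0.2] and zero means.
# pred = restore_bbox(rois, deltas, unnormalize=True, config=config)  # (n, c, 4)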
def filter_boxes_opr(boxes, min_size):
"""Remove all boxes with any side smaller than min_size."""
wh = boxes[:, 2:4] - boxes[:, 0:2] + 1
keep_mask = F.prod(wh >= min_size, axis = 1).astype(np.float32)
keep_mask = keep_mask + F.equal(keep_mask.sum(), 0).astype(np.float32)
return keep_mask
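# Illustrative check of the mask semantics: filter_boxes_opr returns a float
# mask over boxes rather than a filtered subset. Boxes whose width or height
# (x2 - x1 + 1, y2 - y1 + 1) falls below min_size get 0; if every box fails,
# the fallback term turns the whole mask into ones so downstream code never
# receives an empty selection.
boxes_demo = mge.tensor(np.array([[0., 0., 10., 10.], [0., 0., 2., 2.]], dtype=np.float32))
mask_demo = filter_boxes_opr(boxes_demo, 5)
# expected mask_demo: [1., 0.]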
def clip_boxes_opr(boxes, im_info):
""" Clip the boxes into the image region."""
w = im_info[1] - 1
h = im_info[0] - 1
boxes[:, 0::4] = boxes[:, 0::4].clamp(min=0, max=w)
boxes[:, 1::4] = boxes[:, 1::4].clamp(min=0, max=h)
boxes[:, 2::4] = boxes[:, 2::4].clamp(min=0, max=w)
boxes[:, 3::4] = boxes[:, 3::4].clamp(min=0, max=h)
return boxes
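# Quick sketch: the code reads im_info as (height, width, ...), so for a
# 600x800 image x-coordinates are clamped into [0, 799] and y into [0, 599].
im_info_demo = np.array([600., 800.], dtype=np.float32)
boxes_clip_demo = mge.tensor(np.array([[-5., -5., 900., 700.]], dtype=np.float32))
boxes_clip_demo = clip_boxes_opr(boxes_clip_demo, im_info_demo)
# expected: [[0., 0., 799., 599.]]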
def bbox_transform_inv_opr(bbox, deltas):
max_delta = math.log(1000.0 / 16)
""" Transforms the learned deltas to the final bbox coordinates, the axis is 1"""
bbox_width = bbox[:, 2] - bbox[:, 0] + 1
bbox_height = bbox[:, 3] - bbox[:, 1] + 1
bbox_ctr_x = bbox[:, 0] + 0.5 * bbox_width
bbox_ctr_y = bbox[:, 1] + 0.5 * bbox_height
pred_ctr_x = bbox_ctr_x + deltas[:, 0] * bbox_width
pred_ctr_y = bbox_ctr_y + deltas[:, 1] * bbox_height
dw = deltas[:, 2]
dh = deltas[:, 3]
dw = F.minimum(dw, max_delta)
dh = F.minimum(dh, max_delta)
pred_width = bbox_width * F.exp(dw)
pred_height = bbox_height * F.exp(dh)
pred_x1 = pred_ctr_x - 0.5 * pred_width
pred_y1 = pred_ctr_y - 0.5 * pred_height
pred_x2 = pred_ctr_x + 0.5 * pred_width
pred_y2 = pred_ctr_y + 0.5 * pred_height
# pred_boxes = F.concat((pred_x1.reshape(-1, 1), pred_y1.reshape(-1, 1),
# pred_x2.reshape(-1, 1), pred_y2.reshape(-1, 1)), axis=1)
pred_boxes = F.stack([pred_x1, pred_y1, pred_x2, pred_y2], axis = 1)
return pred_boxes
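# Worked note on the decoding: widths are measured with the +1 pixel convention
# (x2 - x1 + 1) when forming the centre, but the decoder writes
# x2 = ctr + 0.5 * width without subtracting that 1, so even with zero deltas
# x2/y2 come back one pixel larger. The F.minimum clamp at log(1000/16) caps
# dw/dh from above only, bounding growth to 62.5x without limiting shrinkage.
roi_demo = mge.tensor(np.array([[10., 10., 50., 30.]], dtype=np.float32))
zero_deltas = mge.tensor(np.zeros((1, 4), dtype=np.float32))
decoded_zero = bbox_transform_inv_opr(roi_demo, zero_deltas)
# expected decoded_zero: [[10., 10., 51., 31.]]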
def bbox_transform_opr(bbox, gt):
""" Transform the bounding box and ground truth to the loss targets.
The 4 box coordinates are in axis 1"""
bbox_width = bbox[:, 2] - bbox[:, 0] + 1
bbox_height = bbox[:, 3] - bbox[:, 1] + 1
bbox_ctr_x = bbox[:, 0] + 0.5 * bbox_width
bbox_ctr_y = bbox[:, 1] + 0.5 * bbox_height
gt_width = gt[:, 2] - gt[:, 0] + 1
gt_height = gt[:, 3] - gt[:, 1] + 1
gt_ctr_x = gt[:, 0] + 0.5 * gt_width
gt_ctr_y = gt[:, 1] + 0.5 * gt_height
target_dx = (gt_ctr_x - bbox_ctr_x) / bbox_width
target_dy = (gt_ctr_y - bbox_ctr_y) / bbox_height
target_dw = F.log(gt_width / bbox_width)
target_dh = F.log(gt_height / bbox_height)  # api: megengine.functional.log
import math
import megengine as mge
import megengine.functional as F
import numpy as np
from megengine import Tensor
import pdb
def restore_bbox(rois, deltas, unnormalize=True, config = None):
assert deltas.ndim == 3
if unnormalize:
std_opr = mge.tensor(config.bbox_normalize_stds.reshape(1, 1, -1))
mean_opr = mge.tensor(config.bbox_normalize_means.reshape(1, 1, -1))
deltas = deltas * std_opr
deltas = deltas + mean_opr
# n = deltas.shape[1]
n, c = deltas.shape[0], deltas.shape[1]
all_rois = F.broadcast_to(F.expand_dims(rois, 1), (n, c, rois.shape[1])).reshape(-1, rois.shape[1])
deltas = deltas.reshape(-1, deltas.shape[2])
pred_bbox = bbox_transform_inv_opr(all_rois, deltas)
pred_bbox = pred_bbox.reshape(-1, c, pred_bbox.shape[1])
return pred_bbox
def filter_boxes_opr(boxes, min_size):
"""Remove all boxes with any side smaller than min_size."""
wh = boxes[:, 2:4] - boxes[:, 0:2] + 1
keep_mask = F.prod(wh >= min_size, axis = 1).astype(np.float32)
keep_mask = keep_mask + F.equal(keep_mask.sum(), 0).astype(np.float32)
return keep_mask
def clip_boxes_opr(boxes, im_info):
""" Clip the boxes into the image region."""
w = im_info[1] - 1
h = im_info[0] - 1
boxes[:, 0::4] = boxes[:, 0::4].clamp(min=0, max=w)
boxes[:, 1::4] = boxes[:, 1::4].clamp(min=0, max=h)
boxes[:, 2::4] = boxes[:, 2::4].clamp(min=0, max=w)
boxes[:, 3::4] = boxes[:, 3::4].clamp(min=0, max=h)
return boxes
def bbox_transform_inv_opr(bbox, deltas):
max_delta = math.log(1000.0 / 16)
""" Transforms the learned deltas to the final bbox coordinates, the axis is 1"""
bbox_width = bbox[:, 2] - bbox[:, 0] + 1
bbox_height = bbox[:, 3] - bbox[:, 1] + 1
bbox_ctr_x = bbox[:, 0] + 0.5 * bbox_width
bbox_ctr_y = bbox[:, 1] + 0.5 * bbox_height
pred_ctr_x = bbox_ctr_x + deltas[:, 0] * bbox_width
pred_ctr_y = bbox_ctr_y + deltas[:, 1] * bbox_height
dw = deltas[:, 2]
dh = deltas[:, 3]
dw = F.minimum(dw, max_delta)
dh = F.minimum(dh, max_delta)
pred_width = bbox_width * F.exp(dw)
pred_height = bbox_height * F.exp(dh)
pred_x1 = pred_ctr_x - 0.5 * pred_width
pred_y1 = pred_ctr_y - 0.5 * pred_height
pred_x2 = pred_ctr_x + 0.5 * pred_width
pred_y2 = pred_ctr_y + 0.5 * pred_height
# pred_boxes = F.concat((pred_x1.reshape(-1, 1), pred_y1.reshape(-1, 1),
# pred_x2.reshape(-1, 1), pred_y2.reshape(-1, 1)), axis=1)
pred_boxes = F.stack([pred_x1, pred_y1, pred_x2, pred_y2], axis = 1)
return pred_boxes
def bbox_transform_opr(bbox, gt):
""" Transform the bounding box and ground truth to the loss targets.
The 4 box coordinates are in axis 1"""
bbox_width = bbox[:, 2] - bbox[:, 0] + 1
bbox_height = bbox[:, 3] - bbox[:, 1] + 1
bbox_ctr_x = bbox[:, 0] + 0.5 * bbox_width
bbox_ctr_y = bbox[:, 1] + 0.5 * bbox_height
gt_width = gt[:, 2] - gt[:, 0] + 1
gt_height = gt[:, 3] - gt[:, 1] + 1
gt_ctr_x = gt[:, 0] + 0.5 * gt_width
gt_ctr_y = gt[:, 1] + 0.5 * gt_height
target_dx = (gt_ctr_x - bbox_ctr_x) / bbox_width
target_dy = (gt_ctr_y - bbox_ctr_y) / bbox_height
target_dw = F.log(gt_width / bbox_width)
target_dh = F.log(gt_height / bbox_height)
target = F.stack([target_dx, target_dy, target_dw, target_dh], axis=1)  # api: megengine.functional.stack
import math
import megengine as mge
import megengine.functional as F
import numpy as np
from megengine import Tensor
import pdb
def restore_bbox(rois, deltas, unnormalize=True, config = None):
assert deltas.ndim == 3
if unnormalize:
std_opr = mge.tensor(config.bbox_normalize_stds.reshape(1, 1, -1))
mean_opr = mge.tensor(config.bbox_normalize_means.reshape(1, 1, -1))
deltas = deltas * std_opr
deltas = deltas + mean_opr
# n = deltas.shape[1]
n, c = deltas.shape[0], deltas.shape[1]
all_rois = F.broadcast_to(F.expand_dims(rois, 1), (n, c, rois.shape[1])).reshape(-1, rois.shape[1])
deltas = deltas.reshape(-1, deltas.shape[2])
pred_bbox = bbox_transform_inv_opr(all_rois, deltas)
pred_bbox = pred_bbox.reshape(-1, c, pred_bbox.shape[1])
return pred_bbox
def filter_boxes_opr(boxes, min_size):
"""Remove all boxes with any side smaller than min_size."""
wh = boxes[:, 2:4] - boxes[:, 0:2] + 1
keep_mask = F.prod(wh >= min_size, axis = 1).astype(np.float32)
keep_mask = keep_mask + F.equal(keep_mask.sum(), 0).astype(np.float32)
return keep_mask
def clip_boxes_opr(boxes, im_info):
""" Clip the boxes into the image region."""
w = im_info[1] - 1
h = im_info[0] - 1
boxes[:, 0::4] = boxes[:, 0::4].clamp(min=0, max=w)
boxes[:, 1::4] = boxes[:, 1::4].clamp(min=0, max=h)
boxes[:, 2::4] = boxes[:, 2::4].clamp(min=0, max=w)
boxes[:, 3::4] = boxes[:, 3::4].clamp(min=0, max=h)
return boxes
def bbox_transform_inv_opr(bbox, deltas):
max_delta = math.log(1000.0 / 16)
""" Transforms the learned deltas to the final bbox coordinates, the axis is 1"""
bbox_width = bbox[:, 2] - bbox[:, 0] + 1
bbox_height = bbox[:, 3] - bbox[:, 1] + 1
bbox_ctr_x = bbox[:, 0] + 0.5 * bbox_width
bbox_ctr_y = bbox[:, 1] + 0.5 * bbox_height
pred_ctr_x = bbox_ctr_x + deltas[:, 0] * bbox_width
pred_ctr_y = bbox_ctr_y + deltas[:, 1] * bbox_height
dw = deltas[:, 2]
dh = deltas[:, 3]
dw = F.minimum(dw, max_delta)
dh = F.minimum(dh, max_delta)
pred_width = bbox_width * F.exp(dw)
pred_height = bbox_height * F.exp(dh)
pred_x1 = pred_ctr_x - 0.5 * pred_width
pred_y1 = pred_ctr_y - 0.5 * pred_height
pred_x2 = pred_ctr_x + 0.5 * pred_width
pred_y2 = pred_ctr_y + 0.5 * pred_height
# pred_boxes = F.concat((pred_x1.reshape(-1, 1), pred_y1.reshape(-1, 1),
# pred_x2.reshape(-1, 1), pred_y2.reshape(-1, 1)), axis=1)
pred_boxes = F.stack([pred_x1, pred_y1, pred_x2, pred_y2], axis = 1)
return pred_boxes
def bbox_transform_opr(bbox, gt):
""" Transform the bounding box and ground truth to the loss targets.
The 4 box coordinates are in axis 1"""
bbox_width = bbox[:, 2] - bbox[:, 0] + 1
bbox_height = bbox[:, 3] - bbox[:, 1] + 1
bbox_ctr_x = bbox[:, 0] + 0.5 * bbox_width
bbox_ctr_y = bbox[:, 1] + 0.5 * bbox_height
gt_width = gt[:, 2] - gt[:, 0] + 1
gt_height = gt[:, 3] - gt[:, 1] + 1
gt_ctr_x = gt[:, 0] + 0.5 * gt_width
gt_ctr_y = gt[:, 1] + 0.5 * gt_height
target_dx = (gt_ctr_x - bbox_ctr_x) / bbox_width
target_dy = (gt_ctr_y - bbox_ctr_y) / bbox_height
target_dw = F.log(gt_width / bbox_width)
target_dh = F.log(gt_height / bbox_height)
target = F.stack([target_dx, target_dy, target_dw, target_dh], axis=1)
return target
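# Round-trip sketch: encode a ground-truth box against a proposal, then decode.
# The centre and size are recovered exactly; x2/y2 land one pixel beyond the
# ground truth for the reason noted earlier (+1 width convention in the
# encoder, no matching -1 in the decoder).
proposal_demo = mge.tensor(np.array([[0., 0., 15., 15.]], dtype=np.float32))
gt_demo = mge.tensor(np.array([[4., 4., 19., 19.]], dtype=np.float32))
target_demo = bbox_transform_opr(proposal_demo, gt_demo)
# expected target_demo: [[0.25, 0.25, 0., 0.]]
decoded_demo = bbox_transform_inv_opr(proposal_demo, target_demo)
# expected decoded_demo: [[4., 4., 20., 20.]]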
def box_overlap_opr(box: Tensor, gt: Tensor) -> Tensor:
"""
Given two lists of boxes of size N and M,
compute the IoU (intersection over union)
between __all__ N x M pairs of boxes.
The box order must be (xmin, ymin, xmax, ymax).
Args:
boxes1,boxes2 (Boxes): two `Boxes`. Contains N & M boxes, respectively.
Returns:
Tensor: IoU, sized [N,M].
"""
# box = boxes1
# gt = boxes2
# target_shape = (boxes1.shape[0], boxes2.shape[0], 4)
N, K = box.shape[0], gt.shape[0]
b_box = F.broadcast_to(F.expand_dims(box, 1),(N, K, box.shape[1]))
b_gt = F.broadcast_to(F.expand_dims(gt, 0), (N, K, gt.shape[1]))
# b_gt = F.expand_dims(gt, 0).broadcast_to(N, K, gt.shape[1])
# b_box = F.expand_dims(boxes1, 1).broadcast(*target_shape)
# b_gt = F.expand_dims(boxes2, 0).broadcast(*target_shape)
iw = F.minimum(b_box[:, :, 2], b_gt[:, :, 2]) - F.maximum(
b_box[:, :, 0], b_gt[:, :, 0]
)
ih = F.minimum(b_box[:, :, 3], b_gt[:, :, 3]) - F.maximum(
b_box[:, :, 1], b_gt[:, :, 1]
)
inter = F.maximum(iw, 0) * F.maximum(ih, 0)
area_box = F.maximum(box[:, 2] - box[:, 0], 0) * F.maximum(box[:, 3] - box[:, 1], 0)
area_gt = F.maximum(gt[:, 2] - gt[:, 0], 0) * F.maximum(gt[:, 3] - gt[:, 1], 0)
# area_target_shape = (box.shape[0], gt.shapeof()[0])
b_area_box = F.broadcast_to(F.expand_dims(area_box, 1), (N, K))
b_area_gt = F.broadcast_to(F.expand_dims(area_gt, 0), (N, K))
# b_area_box = F.expand_dims(area_box, 1).broadcast_to(N, K)
# b_area_gt = F.expand_dims(area_gt, 0).broadcast_to(N, K)
# b_area_box = F.add_axis(area_box, 1).broadcast(*area_target_shape)
# b_area_gt = F.add_axis(area_gt, 0).broadcast(*area_target_shape)
union = b_area_box + b_area_gt - inter
overlaps = F.maximum(inter / union, 0)  # api: megengine.functional.maximum
import math
import megengine as mge
import megengine.functional as F
import numpy as np
from megengine import Tensor
import pdb
def restore_bbox(rois, deltas, unnormalize=True, config = None):
assert deltas.ndim == 3
if unnormalize:
std_opr = mge.tensor(config.bbox_normalize_stds.reshape(1, 1, -1))
mean_opr = mge.tensor(config.bbox_normalize_means.reshape(1, 1, -1))
deltas = deltas * std_opr
deltas = deltas + mean_opr
# n = deltas.shape[1]
n, c = deltas.shape[0], deltas.shape[1]
all_rois = F.broadcast_to(F.expand_dims(rois, 1), (n, c, rois.shape[1])).reshape(-1, rois.shape[1])
deltas = deltas.reshape(-1, deltas.shape[2])
pred_bbox = bbox_transform_inv_opr(all_rois, deltas)
pred_bbox = pred_bbox.reshape(-1, c, pred_bbox.shape[1])
return pred_bbox
def filter_boxes_opr(boxes, min_size):
"""Remove all boxes with any side smaller than min_size."""
wh = boxes[:, 2:4] - boxes[:, 0:2] + 1
keep_mask = F.prod(wh >= min_size, axis = 1).astype(np.float32)
keep_mask = keep_mask + F.equal(keep_mask.sum(), 0).astype(np.float32)
return keep_mask
def clip_boxes_opr(boxes, im_info):
""" Clip the boxes into the image region."""
w = im_info[1] - 1
h = im_info[0] - 1
boxes[:, 0::4] = boxes[:, 0::4].clamp(min=0, max=w)
boxes[:, 1::4] = boxes[:, 1::4].clamp(min=0, max=h)
boxes[:, 2::4] = boxes[:, 2::4].clamp(min=0, max=w)
boxes[:, 3::4] = boxes[:, 3::4].clamp(min=0, max=h)
return boxes
def bbox_transform_inv_opr(bbox, deltas):
max_delta = math.log(1000.0 / 16)
""" Transforms the learned deltas to the final bbox coordinates, the axis is 1"""
bbox_width = bbox[:, 2] - bbox[:, 0] + 1
bbox_height = bbox[:, 3] - bbox[:, 1] + 1
bbox_ctr_x = bbox[:, 0] + 0.5 * bbox_width
bbox_ctr_y = bbox[:, 1] + 0.5 * bbox_height
pred_ctr_x = bbox_ctr_x + deltas[:, 0] * bbox_width
pred_ctr_y = bbox_ctr_y + deltas[:, 1] * bbox_height
dw = deltas[:, 2]
dh = deltas[:, 3]
dw = F.minimum(dw, max_delta)
dh = F.minimum(dh, max_delta)
pred_width = bbox_width * F.exp(dw)
pred_height = bbox_height * F.exp(dh)
pred_x1 = pred_ctr_x - 0.5 * pred_width
pred_y1 = pred_ctr_y - 0.5 * pred_height
pred_x2 = pred_ctr_x + 0.5 * pred_width
pred_y2 = pred_ctr_y + 0.5 * pred_height
# pred_boxes = F.concat((pred_x1.reshape(-1, 1), pred_y1.reshape(-1, 1),
# pred_x2.reshape(-1, 1), pred_y2.reshape(-1, 1)), axis=1)
pred_boxes = F.stack([pred_x1, pred_y1, pred_x2, pred_y2], axis = 1)
return pred_boxes
def bbox_transform_opr(bbox, gt):
""" Transform the bounding box and ground truth to the loss targets.
The 4 box coordinates are in axis 1"""
bbox_width = bbox[:, 2] - bbox[:, 0] + 1
bbox_height = bbox[:, 3] - bbox[:, 1] + 1
bbox_ctr_x = bbox[:, 0] + 0.5 * bbox_width
bbox_ctr_y = bbox[:, 1] + 0.5 * bbox_height
gt_width = gt[:, 2] - gt[:, 0] + 1
gt_height = gt[:, 3] - gt[:, 1] + 1
gt_ctr_x = gt[:, 0] + 0.5 * gt_width
gt_ctr_y = gt[:, 1] + 0.5 * gt_height
target_dx = (gt_ctr_x - bbox_ctr_x) / bbox_width
target_dy = (gt_ctr_y - bbox_ctr_y) / bbox_height
target_dw = F.log(gt_width / bbox_width)
target_dh = F.log(gt_height / bbox_height)
target = F.stack([target_dx, target_dy, target_dw, target_dh], axis=1)
return target
def box_overlap_opr(box: Tensor, gt: Tensor) -> Tensor:
"""
Given two lists of boxes of size N and M,
compute the IoU (intersection over union)
between __all__ N x M pairs of boxes.
The box order must be (xmin, ymin, xmax, ymax).
Args:
boxes1,boxes2 (Boxes): two `Boxes`. Contains N & M boxes, respectively.
Returns:
Tensor: IoU, sized [N,M].
"""
# box = boxes1
# gt = boxes2
# target_shape = (boxes1.shape[0], boxes2.shape[0], 4)
N, K = box.shape[0], gt.shape[0]
b_box = F.broadcast_to(F.expand_dims(box, 1),(N, K, box.shape[1]))
b_gt = F.broadcast_to(F.expand_dims(gt, 0), (N, K, gt.shape[1]))
# b_gt = F.expand_dims(gt, 0).broadcast_to(N, K, gt.shape[1])
# b_box = F.expand_dims(boxes1, 1).broadcast(*target_shape)
# b_gt = F.expand_dims(boxes2, 0).broadcast(*target_shape)
iw = F.minimum(b_box[:, :, 2], b_gt[:, :, 2]) - F.maximum(
b_box[:, :, 0], b_gt[:, :, 0]
)
ih = F.minimum(b_box[:, :, 3], b_gt[:, :, 3]) - F.maximum(
b_box[:, :, 1], b_gt[:, :, 1]
)
inter = F.maximum(iw, 0) * F.maximum(ih, 0)
area_box = F.maximum(box[:, 2] - box[:, 0], 0) * F.maximum(box[:, 3] - box[:, 1], 0)
area_gt = F.maximum(gt[:, 2] - gt[:, 0], 0) * F.maximum(gt[:, 3] - gt[:, 1], 0)
# area_target_shape = (box.shape[0], gt.shapeof()[0])
b_area_box = F.broadcast_to(F.expand_dims(area_box, 1), (N, K))
b_area_gt = F.broadcast_to(F.expand_dims(area_gt, 0), (N, K))
# b_area_box = F.expand_dims(area_box, 1).broadcast_to(N, K)
# b_area_gt = F.expand_dims(area_gt, 0).broadcast_to(N, K)
# b_area_box = F.add_axis(area_box, 1).broadcast(*area_target_shape)
# b_area_gt = F.add_axis(area_gt, 0).broadcast(*area_target_shape)
union = b_area_box + b_area_gt - inter
overlaps = F.maximum(inter / union, 0)
return overlaps
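# Hand-checkable example: identical boxes give IoU 1.0, while [0, 0, 10, 10]
# against [5, 5, 15, 15] intersects in a 5x5 patch, so IoU = 25 / (100 + 100 - 25),
# roughly 0.143. Note this routine measures width as x2 - x1 (no +1), unlike
# the bbox transform helpers above.
boxes_a_demo = mge.tensor(np.array([[0., 0., 10., 10.]], dtype=np.float32))
boxes_b_demo = mge.tensor(np.array([[0., 0., 10., 10.], [5., 5., 15., 15.]], dtype=np.float32))
iou_demo = box_overlap_opr(boxes_a_demo, boxes_b_demo)
# expected iou_demo, shape (N, K) = (1, 2): [[1.0, 0.1429]]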
def box_overlap_ignore_opr(box: Tensor, gt: Tensor, ignore_label=-1) -> Tensor:
"""
Given two lists of boxes of size N and M,
compute the IoU (intersection over union)
between __all__ N x M pairs of boxes.
The box order must be (xmin, ymin, xmax, ymax).
Args:
boxes1,boxes2 (Boxes): two `Boxes`. Contains N & M boxes, respectively.
Returns:
Tensor: IoU, sized [N,M].
"""
# box = boxes1
# gt = boxes2
# target_shape = (boxes1.shapeof()[0], boxes2.shapeof()[0], 4)
eps = 1e-5
N, K = box.shape[0], gt.shape[0]
b_box = F.broadcast_to(F.expand_dims(box, 1), (N, K, box.shape[1]))
b_gt = F.broadcast_to(F.expand_dims(gt, 0), (N, K, gt.shape[1]))
# b_box = F.add_axis(boxes1, 1).broadcast(*target_shape)
# b_gt = F.add_axis(boxes2[:, :4], 0).broadcast(*target_shape)
iw = F.minimum(b_box[:, :, 2], b_gt[:, :, 2]) - F.maximum(
b_box[:, :, 0], b_gt[:, :, 0]
)
ih = F.minimum(b_box[:, :, 3], b_gt[:, :, 3]) - F.maximum(
b_box[:, :, 1], b_gt[:, :, 1]
)
inter = F.maximum(iw, 0) * F.maximum(ih, 0)
area_box = F.maximum(box[:, 2] - box[:, 0], 0) * F.maximum(box[:, 3] - box[:, 1], 0)
area_gt = F.maximum(gt[:, 2] - gt[:, 0], 0) * F.maximum(gt[:, 3] - gt[:, 1], 0)
# area_target_shape = (box.shapeof()[0], gt.shapeof()[0])
# b_area_box = F.add_axis(area_box, 1).broadcast(*area_target_shape)
# b_area_gt = F.add_axis(area_gt, 0).broadcast(*area_target_shape)
b_area_box = F.broadcast_to(F.expand_dims(area_box, 1), (N, K)) + eps
b_area_gt = F.broadcast_to(F.expand_dims(area_gt, 0), (N, K))
union = b_area_box + b_area_gt - inter + eps
overlaps_normal = F.maximum(inter / union, 0)  # api: megengine.functional.maximum
import math
import megengine as mge
import megengine.functional as F
import numpy as np
from megengine import Tensor
import pdb
def restore_bbox(rois, deltas, unnormalize=True, config = None):
assert deltas.ndim == 3
if unnormalize:
std_opr = mge.tensor(config.bbox_normalize_stds.reshape(1, 1, -1))
mean_opr = mge.tensor(config.bbox_normalize_means.reshape(1, 1, -1))
deltas = deltas * std_opr
deltas = deltas + mean_opr
# n = deltas.shape[1]
n, c = deltas.shape[0], deltas.shape[1]
all_rois = F.broadcast_to(F.expand_dims(rois, 1), (n, c, rois.shape[1])).reshape(-1, rois.shape[1])
deltas = deltas.reshape(-1, deltas.shape[2])
pred_bbox = bbox_transform_inv_opr(all_rois, deltas)
pred_bbox = pred_bbox.reshape(-1, c, pred_bbox.shape[1])
return pred_bbox
def filter_boxes_opr(boxes, min_size):
"""Remove all boxes with any side smaller than min_size."""
wh = boxes[:, 2:4] - boxes[:, 0:2] + 1
keep_mask = F.prod(wh >= min_size, axis = 1).astype(np.float32)
keep_mask = keep_mask + F.equal(keep_mask.sum(), 0).astype(np.float32)
return keep_mask
def clip_boxes_opr(boxes, im_info):
""" Clip the boxes into the image region."""
w = im_info[1] - 1
h = im_info[0] - 1
boxes[:, 0::4] = boxes[:, 0::4].clamp(min=0, max=w)
boxes[:, 1::4] = boxes[:, 1::4].clamp(min=0, max=h)
boxes[:, 2::4] = boxes[:, 2::4].clamp(min=0, max=w)
boxes[:, 3::4] = boxes[:, 3::4].clamp(min=0, max=h)
return boxes
def bbox_transform_inv_opr(bbox, deltas):
max_delta = math.log(1000.0 / 16)
""" Transforms the learned deltas to the final bbox coordinates, the axis is 1"""
bbox_width = bbox[:, 2] - bbox[:, 0] + 1
bbox_height = bbox[:, 3] - bbox[:, 1] + 1
bbox_ctr_x = bbox[:, 0] + 0.5 * bbox_width
bbox_ctr_y = bbox[:, 1] + 0.5 * bbox_height
pred_ctr_x = bbox_ctr_x + deltas[:, 0] * bbox_width
pred_ctr_y = bbox_ctr_y + deltas[:, 1] * bbox_height
dw = deltas[:, 2]
dh = deltas[:, 3]
dw = F.minimum(dw, max_delta)
dh = F.minimum(dh, max_delta)
pred_width = bbox_width * F.exp(dw)
pred_height = bbox_height * F.exp(dh)
pred_x1 = pred_ctr_x - 0.5 * pred_width
pred_y1 = pred_ctr_y - 0.5 * pred_height
pred_x2 = pred_ctr_x + 0.5 * pred_width
pred_y2 = pred_ctr_y + 0.5 * pred_height
# pred_boxes = F.concat((pred_x1.reshape(-1, 1), pred_y1.reshape(-1, 1),
# pred_x2.reshape(-1, 1), pred_y2.reshape(-1, 1)), axis=1)
pred_boxes = F.stack([pred_x1, pred_y1, pred_x2, pred_y2], axis = 1)
return pred_boxes
def bbox_transform_opr(bbox, gt):
""" Transform the bounding box and ground truth to the loss targets.
The 4 box coordinates are in axis 1"""
bbox_width = bbox[:, 2] - bbox[:, 0] + 1
bbox_height = bbox[:, 3] - bbox[:, 1] + 1
bbox_ctr_x = bbox[:, 0] + 0.5 * bbox_width
bbox_ctr_y = bbox[:, 1] + 0.5 * bbox_height
gt_width = gt[:, 2] - gt[:, 0] + 1
gt_height = gt[:, 3] - gt[:, 1] + 1
gt_ctr_x = gt[:, 0] + 0.5 * gt_width
gt_ctr_y = gt[:, 1] + 0.5 * gt_height
target_dx = (gt_ctr_x - bbox_ctr_x) / bbox_width
target_dy = (gt_ctr_y - bbox_ctr_y) / bbox_height
target_dw = F.log(gt_width / bbox_width)
target_dh = F.log(gt_height / bbox_height)
target = F.stack([target_dx, target_dy, target_dw, target_dh], axis=1)
return target
def box_overlap_opr(box: Tensor, gt: Tensor) -> Tensor:
"""
Given two lists of boxes of size N and M,
compute the IoU (intersection over union)
between __all__ N x M pairs of boxes.
The box order must be (xmin, ymin, xmax, ymax).
Args:
boxes1,boxes2 (Boxes): two `Boxes`. Contains N & M boxes, respectively.
Returns:
Tensor: IoU, sized [N,M].
"""
# box = boxes1
# gt = boxes2
# target_shape = (boxes1.shape[0], boxes2.shape[0], 4)
N, K = box.shape[0], gt.shape[0]
b_box = F.broadcast_to(F.expand_dims(box, 1),(N, K, box.shape[1]))
b_gt = F.broadcast_to(F.expand_dims(gt, 0), (N, K, gt.shape[1]))
# b_gt = F.expand_dims(gt, 0).broadcast_to(N, K, gt.shape[1])
# b_box = F.expand_dims(boxes1, 1).broadcast(*target_shape)
# b_gt = F.expand_dims(boxes2, 0).broadcast(*target_shape)
iw = F.minimum(b_box[:, :, 2], b_gt[:, :, 2]) - F.maximum(
b_box[:, :, 0], b_gt[:, :, 0]
)
ih = F.minimum(b_box[:, :, 3], b_gt[:, :, 3]) - F.maximum(
b_box[:, :, 1], b_gt[:, :, 1]
)
inter = F.maximum(iw, 0) * F.maximum(ih, 0)
area_box = F.maximum(box[:, 2] - box[:, 0], 0) * F.maximum(box[:, 3] - box[:, 1], 0)
area_gt = F.maximum(gt[:, 2] - gt[:, 0], 0) * F.maximum(gt[:, 3] - gt[:, 1], 0)
# area_target_shape = (box.shape[0], gt.shapeof()[0])
b_area_box = F.broadcast_to(F.expand_dims(area_box, 1), (N, K))
b_area_gt = F.broadcast_to(F.expand_dims(area_gt, 0), (N, K))
# b_area_box = F.expand_dims(area_box, 1).broadcast_to(N, K)
# b_area_gt = F.expand_dims(area_gt, 0).broadcast_to(N, K)
# b_area_box = F.add_axis(area_box, 1).broadcast(*area_target_shape)
# b_area_gt = F.add_axis(area_gt, 0).broadcast(*area_target_shape)
union = b_area_box + b_area_gt - inter
overlaps = F.maximum(inter / union, 0)
return overlaps
def box_overlap_ignore_opr(box: Tensor, gt: Tensor, ignore_label=-1) -> Tensor:
"""
Given two lists of boxes of size N and M,
compute the IoU (intersection over union)
between __all__ N x M pairs of boxes.
The box order must be (xmin, ymin, xmax, ymax).
Args:
boxes1,boxes2 (Boxes): two `Boxes`. Contains N & M boxes, respectively.
Returns:
Tensor: IoU, sized [N,M].
"""
# box = boxes1
# gt = boxes2
# target_shape = (boxes1.shapeof()[0], boxes2.shapeof()[0], 4)
eps = 1e-5
N, K = box.shape[0], gt.shape[0]
b_box = F.broadcast_to(F.expand_dims(box, 1), (N, K, box.shape[1]))
b_gt = F.broadcast_to(F.expand_dims(gt, 0), (N, K, gt.shape[1]))
# b_box = F.add_axis(boxes1, 1).broadcast(*target_shape)
# b_gt = F.add_axis(boxes2[:, :4], 0).broadcast(*target_shape)
iw = F.minimum(b_box[:, :, 2], b_gt[:, :, 2]) - F.maximum(
b_box[:, :, 0], b_gt[:, :, 0]
)
ih = F.minimum(b_box[:, :, 3], b_gt[:, :, 3]) - F.maximum(
b_box[:, :, 1], b_gt[:, :, 1]
)
inter = F.maximum(iw, 0) * F.maximum(ih, 0)
area_box = F.maximum(box[:, 2] - box[:, 0], 0) * F.maximum(box[:, 3] - box[:, 1], 0)
area_gt = F.maximum(gt[:, 2] - gt[:, 0], 0) * F.maximum(gt[:, 3] - gt[:, 1], 0)
# area_target_shape = (box.shapeof()[0], gt.shapeof()[0])
# b_area_box = F.add_axis(area_box, 1).broadcast(*area_target_shape)
# b_area_gt = F.add_axis(area_gt, 0).broadcast(*area_target_shape)
b_area_box = F.broadcast_to(F.expand_dims(area_box, 1), (N, K)) + eps
b_area_gt = F.broadcast_to(F.expand_dims(area_gt, 0), (N, K))
union = b_area_box + b_area_gt - inter + eps
overlaps_normal = F.maximum(inter / union, 0)
overlaps_ignore = F.maximum(inter / b_area_box, 0)  # api: megengine.functional.maximum
import math
import megengine as mge
import megengine.functional as F
import numpy as np
from megengine import Tensor
import pdb
def restore_bbox(rois, deltas, unnormalize=True, config = None):
assert deltas.ndim == 3
if unnormalize:
std_opr = mge.tensor(config.bbox_normalize_stds.reshape(1, 1, -1))
mean_opr = mge.tensor(config.bbox_normalize_means.reshape(1, 1, -1))
deltas = deltas * std_opr
deltas = deltas + mean_opr
# n = deltas.shape[1]
n, c = deltas.shape[0], deltas.shape[1]
all_rois = F.broadcast_to(F.expand_dims(rois, 1), (n, c, rois.shape[1])).reshape(-1, rois.shape[1])
deltas = deltas.reshape(-1, deltas.shape[2])
pred_bbox = bbox_transform_inv_opr(all_rois, deltas)
pred_bbox = pred_bbox.reshape(-1, c, pred_bbox.shape[1])
return pred_bbox
def filter_boxes_opr(boxes, min_size):
"""Remove all boxes with any side smaller than min_size."""
wh = boxes[:, 2:4] - boxes[:, 0:2] + 1
keep_mask = F.prod(wh >= min_size, axis = 1).astype(np.float32)
keep_mask = keep_mask + F.equal(keep_mask.sum(), 0).astype(np.float32)
return keep_mask
def clip_boxes_opr(boxes, im_info):
""" Clip the boxes into the image region."""
w = im_info[1] - 1
h = im_info[0] - 1
boxes[:, 0::4] = boxes[:, 0::4].clamp(min=0, max=w)
boxes[:, 1::4] = boxes[:, 1::4].clamp(min=0, max=h)
boxes[:, 2::4] = boxes[:, 2::4].clamp(min=0, max=w)
boxes[:, 3::4] = boxes[:, 3::4].clamp(min=0, max=h)
return boxes
def bbox_transform_inv_opr(bbox, deltas):
max_delta = math.log(1000.0 / 16)
""" Transforms the learned deltas to the final bbox coordinates, the axis is 1"""
bbox_width = bbox[:, 2] - bbox[:, 0] + 1
bbox_height = bbox[:, 3] - bbox[:, 1] + 1
bbox_ctr_x = bbox[:, 0] + 0.5 * bbox_width
bbox_ctr_y = bbox[:, 1] + 0.5 * bbox_height
pred_ctr_x = bbox_ctr_x + deltas[:, 0] * bbox_width
pred_ctr_y = bbox_ctr_y + deltas[:, 1] * bbox_height
dw = deltas[:, 2]
dh = deltas[:, 3]
dw = F.minimum(dw, max_delta)
dh = F.minimum(dh, max_delta)
pred_width = bbox_width * F.exp(dw)
pred_height = bbox_height * F.exp(dh)
pred_x1 = pred_ctr_x - 0.5 * pred_width
pred_y1 = pred_ctr_y - 0.5 * pred_height
pred_x2 = pred_ctr_x + 0.5 * pred_width
pred_y2 = pred_ctr_y + 0.5 * pred_height
# pred_boxes = F.concat((pred_x1.reshape(-1, 1), pred_y1.reshape(-1, 1),
# pred_x2.reshape(-1, 1), pred_y2.reshape(-1, 1)), axis=1)
pred_boxes = F.stack([pred_x1, pred_y1, pred_x2, pred_y2], axis = 1)
return pred_boxes
def bbox_transform_opr(bbox, gt):
""" Transform the bounding box and ground truth to the loss targets.
The 4 box coordinates are in axis 1"""
bbox_width = bbox[:, 2] - bbox[:, 0] + 1
bbox_height = bbox[:, 3] - bbox[:, 1] + 1
bbox_ctr_x = bbox[:, 0] + 0.5 * bbox_width
bbox_ctr_y = bbox[:, 1] + 0.5 * bbox_height
gt_width = gt[:, 2] - gt[:, 0] + 1
gt_height = gt[:, 3] - gt[:, 1] + 1
gt_ctr_x = gt[:, 0] + 0.5 * gt_width
gt_ctr_y = gt[:, 1] + 0.5 * gt_height
target_dx = (gt_ctr_x - bbox_ctr_x) / bbox_width
target_dy = (gt_ctr_y - bbox_ctr_y) / bbox_height
target_dw = F.log(gt_width / bbox_width)
target_dh = F.log(gt_height / bbox_height)
target = F.stack([target_dx, target_dy, target_dw, target_dh], axis=1)
return target
def box_overlap_opr(box: Tensor, gt: Tensor) -> Tensor:
"""
Given two lists of boxes of size N and M,
compute the IoU (intersection over union)
between __all__ N x M pairs of boxes.
The box order must be (xmin, ymin, xmax, ymax).
Args:
boxes1,boxes2 (Boxes): two `Boxes`. Contains N & M boxes, respectively.
Returns:
Tensor: IoU, sized [N,M].
"""
# box = boxes1
# gt = boxes2
# target_shape = (boxes1.shape[0], boxes2.shape[0], 4)
N, K = box.shape[0], gt.shape[0]
b_box = F.broadcast_to(F.expand_dims(box, 1),(N, K, box.shape[1]))
b_gt = F.broadcast_to(F.expand_dims(gt, 0), (N, K, gt.shape[1]))
# b_gt = F.expand_dims(gt, 0).broadcast_to(N, K, gt.shape[1])
# b_box = F.expand_dims(boxes1, 1).broadcast(*target_shape)
# b_gt = F.expand_dims(boxes2, 0).broadcast(*target_shape)
iw = F.minimum(b_box[:, :, 2], b_gt[:, :, 2]) - F.maximum(
b_box[:, :, 0], b_gt[:, :, 0]
)
ih = F.minimum(b_box[:, :, 3], b_gt[:, :, 3]) - F.maximum(
b_box[:, :, 1], b_gt[:, :, 1]
)
inter = F.maximum(iw, 0) * F.maximum(ih, 0)
area_box = F.maximum(box[:, 2] - box[:, 0], 0) * F.maximum(box[:, 3] - box[:, 1], 0)
area_gt = F.maximum(gt[:, 2] - gt[:, 0], 0) * F.maximum(gt[:, 3] - gt[:, 1], 0)
# area_target_shape = (box.shape[0], gt.shapeof()[0])
b_area_box = F.broadcast_to(F.expand_dims(area_box, 1), (N, K))
b_area_gt = F.broadcast_to(F.expand_dims(area_gt, 0), (N, K))
# b_area_box = F.expand_dims(area_box, 1).broadcast_to(N, K)
# b_area_gt = F.expand_dims(area_gt, 0).broadcast_to(N, K)
# b_area_box = F.add_axis(area_box, 1).broadcast(*area_target_shape)
# b_area_gt = F.add_axis(area_gt, 0).broadcast(*area_target_shape)
union = b_area_box + b_area_gt - inter
overlaps = F.maximum(inter / union, 0)
return overlaps
def box_overlap_ignore_opr(box: Tensor, gt: Tensor, ignore_label=-1) -> Tensor:
"""
Given two lists of boxes of size N and M,
compute the IoU (intersection over union)
between __all__ N x M pairs of boxes.
The box order must be (xmin, ymin, xmax, ymax).
Args:
boxes1,boxes2 (Boxes): two `Boxes`. Contains N & M boxes, respectively.
Returns:
Tensor: IoU, sized [N,M].
"""
# box = boxes1
# gt = boxes2
# target_shape = (boxes1.shapeof()[0], boxes2.shapeof()[0], 4)
eps = 1e-5
N, K = box.shape[0], gt.shape[0]
b_box = F.broadcast_to(F.expand_dims(box, 1), (N, K, box.shape[1]))
b_gt = F.broadcast_to(F.expand_dims(gt, 0), (N, K, gt.shape[1]))
# b_box = F.add_axis(boxes1, 1).broadcast(*target_shape)
# b_gt = F.add_axis(boxes2[:, :4], 0).broadcast(*target_shape)
iw = F.minimum(b_box[:, :, 2], b_gt[:, :, 2]) - F.maximum(
b_box[:, :, 0], b_gt[:, :, 0]
)
ih = F.minimum(b_box[:, :, 3], b_gt[:, :, 3]) - F.maximum(
b_box[:, :, 1], b_gt[:, :, 1]
)
inter = F.maximum(iw, 0) * F.maximum(ih, 0)
area_box = F.maximum(box[:, 2] - box[:, 0], 0) * F.maximum(box[:, 3] - box[:, 1], 0)
area_gt = F.maximum(gt[:, 2] - gt[:, 0], 0) * F.maximum(gt[:, 3] - gt[:, 1], 0)
# area_target_shape = (box.shapeof()[0], gt.shapeof()[0])
# b_area_box = F.add_axis(area_box, 1).broadcast(*area_target_shape)
# b_area_gt = F.add_axis(area_gt, 0).broadcast(*area_target_shape)
b_area_box = F.broadcast_to(F.expand_dims(area_box, 1), (N, K)) + eps
b_area_gt = F.broadcast_to(F.expand_dims(area_gt, 0), (N, K))
union = b_area_box + b_area_gt - inter + eps
overlaps_normal = F.maximum(inter / union, 0)
overlaps_ignore = F.maximum(inter / b_area_box, 0)
overlaps = F.maximum(inter / union, 0)  # api: megengine.functional.maximum
import math
import megengine as mge
import megengine.functional as F
import numpy as np
from megengine import Tensor
import pdb
def restore_bbox(rois, deltas, unnormalize=True, config = None):
assert deltas.ndim == 3
if unnormalize:
std_opr = mge.tensor(config.bbox_normalize_stds.reshape(1, 1, -1))
mean_opr = mge.tensor(config.bbox_normalize_means.reshape(1, 1, -1))
deltas = deltas * std_opr
deltas = deltas + mean_opr
# n = deltas.shape[1]
n, c = deltas.shape[0], deltas.shape[1]
all_rois = F.broadcast_to(F.expand_dims(rois, 1), (n, c, rois.shape[1])).reshape(-1, rois.shape[1])
deltas = deltas.reshape(-1, deltas.shape[2])
pred_bbox = bbox_transform_inv_opr(all_rois, deltas)
pred_bbox = pred_bbox.reshape(-1, c, pred_bbox.shape[1])
return pred_bbox
def filter_boxes_opr(boxes, min_size):
"""Remove all boxes with any side smaller than min_size."""
wh = boxes[:, 2:4] - boxes[:, 0:2] + 1
keep_mask = F.prod(wh >= min_size, axis = 1).astype(np.float32)
keep_mask = keep_mask + F.equal(keep_mask.sum(), 0).astype(np.float32)
return keep_mask
def clip_boxes_opr(boxes, im_info):
""" Clip the boxes into the image region."""
w = im_info[1] - 1
h = im_info[0] - 1
boxes[:, 0::4] = boxes[:, 0::4].clamp(min=0, max=w)
boxes[:, 1::4] = boxes[:, 1::4].clamp(min=0, max=h)
boxes[:, 2::4] = boxes[:, 2::4].clamp(min=0, max=w)
boxes[:, 3::4] = boxes[:, 3::4].clamp(min=0, max=h)
return boxes
def bbox_transform_inv_opr(bbox, deltas):
max_delta = math.log(1000.0 / 16)
""" Transforms the learned deltas to the final bbox coordinates, the axis is 1"""
bbox_width = bbox[:, 2] - bbox[:, 0] + 1
bbox_height = bbox[:, 3] - bbox[:, 1] + 1
bbox_ctr_x = bbox[:, 0] + 0.5 * bbox_width
bbox_ctr_y = bbox[:, 1] + 0.5 * bbox_height
pred_ctr_x = bbox_ctr_x + deltas[:, 0] * bbox_width
pred_ctr_y = bbox_ctr_y + deltas[:, 1] * bbox_height
dw = deltas[:, 2]
dh = deltas[:, 3]
dw = F.minimum(dw, max_delta)
dh = F.minimum(dh, max_delta)
pred_width = bbox_width * F.exp(dw)
pred_height = bbox_height * F.exp(dh)
pred_x1 = pred_ctr_x - 0.5 * pred_width
pred_y1 = pred_ctr_y - 0.5 * pred_height
pred_x2 = pred_ctr_x + 0.5 * pred_width
pred_y2 = pred_ctr_y + 0.5 * pred_height
# pred_boxes = F.concat((pred_x1.reshape(-1, 1), pred_y1.reshape(-1, 1),
# pred_x2.reshape(-1, 1), pred_y2.reshape(-1, 1)), axis=1)
pred_boxes = F.stack([pred_x1, pred_y1, pred_x2, pred_y2], axis = 1)
return pred_boxes
def bbox_transform_opr(bbox, gt):
""" Transform the bounding box and ground truth to the loss targets.
The 4 box coordinates are in axis 1"""
bbox_width = bbox[:, 2] - bbox[:, 0] + 1
bbox_height = bbox[:, 3] - bbox[:, 1] + 1
bbox_ctr_x = bbox[:, 0] + 0.5 * bbox_width
bbox_ctr_y = bbox[:, 1] + 0.5 * bbox_height
gt_width = gt[:, 2] - gt[:, 0] + 1
gt_height = gt[:, 3] - gt[:, 1] + 1
gt_ctr_x = gt[:, 0] + 0.5 * gt_width
gt_ctr_y = gt[:, 1] + 0.5 * gt_height
target_dx = (gt_ctr_x - bbox_ctr_x) / bbox_width
target_dy = (gt_ctr_y - bbox_ctr_y) / bbox_height
target_dw = F.log(gt_width / bbox_width)
target_dh = F.log(gt_height / bbox_height)
target = F.stack([target_dx, target_dy, target_dw, target_dh], axis=1)
return target
def box_overlap_opr(box: Tensor, gt: Tensor) -> Tensor:
"""
Given two lists of boxes of size N and M,
compute the IoU (intersection over union)
between __all__ N x M pairs of boxes.
The box order must be (xmin, ymin, xmax, ymax).
Args:
boxes1,boxes2 (Boxes): two `Boxes`. Contains N & M boxes, respectively.
Returns:
Tensor: IoU, sized [N,M].
"""
# box = boxes1
# gt = boxes2
# target_shape = (boxes1.shape[0], boxes2.shape[0], 4)
N, K = box.shape[0], gt.shape[0]
b_box = F.broadcast_to(F.expand_dims(box, 1),(N, K, box.shape[1]))
b_gt = F.broadcast_to(F.expand_dims(gt, 0), (N, K, gt.shape[1]))
# b_gt = F.expand_dims(gt, 0).broadcast_to(N, K, gt.shape[1])
# b_box = F.expand_dims(boxes1, 1).broadcast(*target_shape)
# b_gt = F.expand_dims(boxes2, 0).broadcast(*target_shape)
iw = F.minimum(b_box[:, :, 2], b_gt[:, :, 2]) - F.maximum(
b_box[:, :, 0], b_gt[:, :, 0]
)
ih = F.minimum(b_box[:, :, 3], b_gt[:, :, 3]) - F.maximum(
b_box[:, :, 1], b_gt[:, :, 1]
)
inter = F.maximum(iw, 0) * F.maximum(ih, 0)
area_box = F.maximum(box[:, 2] - box[:, 0], 0) * F.maximum(box[:, 3] - box[:, 1], 0)
area_gt = F.maximum(gt[:, 2] - gt[:, 0], 0) * F.maximum(gt[:, 3] - gt[:, 1], 0)
# area_target_shape = (box.shape[0], gt.shapeof()[0])
b_area_box = F.broadcast_to(F.expand_dims(area_box, 1), (N, K))
b_area_gt = F.broadcast_to(F.expand_dims(area_gt, 0), (N, K))
# b_area_box = F.expand_dims(area_box, 1).broadcast_to(N, K)
# b_area_gt = F.expand_dims(area_gt, 0).broadcast_to(N, K)
# b_area_box = F.add_axis(area_box, 1).broadcast(*area_target_shape)
# b_area_gt = F.add_axis(area_gt, 0).broadcast(*area_target_shape)
union = b_area_box + b_area_gt - inter
overlaps = F.maximum(inter / union, 0)
return overlaps
def box_overlap_ignore_opr(box: Tensor, gt: Tensor, ignore_label=-1) -> Tensor:
"""
Given two lists of boxes of size N and M,
compute the IoU (intersection over union)
between __all__ N x M pairs of boxes.
The box order must be (xmin, ymin, xmax, ymax).
Args:
boxes1,boxes2 (Boxes): two `Boxes`. Contains N & M boxes, respectively.
Returns:
Tensor: IoU, sized [N,M].
"""
# box = boxes1
# gt = boxes2
# target_shape = (boxes1.shapeof()[0], boxes2.shapeof()[0], 4)
eps = 1e-5
N, K = box.shape[0], gt.shape[0]
b_box = F.broadcast_to(F.expand_dims(box, 1), (N, K, box.shape[1]))
b_gt = F.broadcast_to(F.expand_dims(gt, 0), (N, K, gt.shape[1]))
# b_box = F.add_axis(boxes1, 1).broadcast(*target_shape)
# b_gt = F.add_axis(boxes2[:, :4], 0).broadcast(*target_shape)
iw = F.minimum(b_box[:, :, 2], b_gt[:, :, 2]) - F.maximum(
b_box[:, :, 0], b_gt[:, :, 0]
)
ih = F.minimum(b_box[:, :, 3], b_gt[:, :, 3]) - F.maximum(
b_box[:, :, 1], b_gt[:, :, 1]
)
inter = F.maximum(iw, 0) * F.maximum(ih, 0)
area_box = F.maximum(box[:, 2] - box[:, 0], 0) * F.maximum(box[:, 3] - box[:, 1], 0)
area_gt = F.maximum(gt[:, 2] - gt[:, 0], 0) * F.maximum(gt[:, 3] - gt[:, 1], 0)
# area_target_shape = (box.shapeof()[0], gt.shapeof()[0])
# b_area_box = F.add_axis(area_box, 1).broadcast(*area_target_shape)
# b_area_gt = F.add_axis(area_gt, 0).broadcast(*area_target_shape)
b_area_box = F.broadcast_to(F.expand_dims(area_box, 1), (N, K)) + eps
b_area_gt = F.broadcast_to(F.expand_dims(area_gt, 0), (N, K))
union = b_area_box + b_area_gt - inter + eps
overlaps_normal = F.maximum(inter / union, 0)
overlaps_ignore = F.maximum(inter / b_area_box, 0)
overlaps = F.maximum(inter / union, 0)
# gt_ignore_mask = F.add_axis(F.equal(gt[:, 4], ignore_label), 0).broadcast(*area_target_shape)
ignore_mask = F.equal(gt[:, 4], ignore_label)  # api: megengine.functional.equal
import math
import megengine as mge
import megengine.functional as F
import numpy as np
from megengine import Tensor
import pdb
def restore_bbox(rois, deltas, unnormalize=True, config = None):
assert deltas.ndim == 3
if unnormalize:
std_opr = mge.tensor(config.bbox_normalize_stds.reshape(1, 1, -1))
mean_opr = mge.tensor(config.bbox_normalize_means.reshape(1, 1, -1))
deltas = deltas * std_opr
deltas = deltas + mean_opr
# n = deltas.shape[1]
n, c = deltas.shape[0], deltas.shape[1]
all_rois = F.broadcast_to(F.expand_dims(rois, 1), (n, c, rois.shape[1])).reshape(-1, rois.shape[1])
deltas = deltas.reshape(-1, deltas.shape[2])
pred_bbox = bbox_transform_inv_opr(all_rois, deltas)
pred_bbox = pred_bbox.reshape(-1, c, pred_bbox.shape[1])
return pred_bbox
def filter_boxes_opr(boxes, min_size):
"""Remove all boxes with any side smaller than min_size."""
wh = boxes[:, 2:4] - boxes[:, 0:2] + 1
keep_mask = F.prod(wh >= min_size, axis = 1).astype(np.float32)
keep_mask = keep_mask + F.equal(keep_mask.sum(), 0).astype(np.float32)
return keep_mask
def clip_boxes_opr(boxes, im_info):
""" Clip the boxes into the image region."""
w = im_info[1] - 1
h = im_info[0] - 1
boxes[:, 0::4] = boxes[:, 0::4].clamp(min=0, max=w)
boxes[:, 1::4] = boxes[:, 1::4].clamp(min=0, max=h)
boxes[:, 2::4] = boxes[:, 2::4].clamp(min=0, max=w)
boxes[:, 3::4] = boxes[:, 3::4].clamp(min=0, max=h)
return boxes
def bbox_transform_inv_opr(bbox, deltas):
max_delta = math.log(1000.0 / 16)
""" Transforms the learned deltas to the final bbox coordinates, the axis is 1"""
bbox_width = bbox[:, 2] - bbox[:, 0] + 1
bbox_height = bbox[:, 3] - bbox[:, 1] + 1
bbox_ctr_x = bbox[:, 0] + 0.5 * bbox_width
bbox_ctr_y = bbox[:, 1] + 0.5 * bbox_height
pred_ctr_x = bbox_ctr_x + deltas[:, 0] * bbox_width
pred_ctr_y = bbox_ctr_y + deltas[:, 1] * bbox_height
dw = deltas[:, 2]
dh = deltas[:, 3]
dw = F.minimum(dw, max_delta)
dh = F.minimum(dh, max_delta)
pred_width = bbox_width * F.exp(dw)
pred_height = bbox_height * F.exp(dh)
pred_x1 = pred_ctr_x - 0.5 * pred_width
pred_y1 = pred_ctr_y - 0.5 * pred_height
pred_x2 = pred_ctr_x + 0.5 * pred_width
pred_y2 = pred_ctr_y + 0.5 * pred_height
# pred_boxes = F.concat((pred_x1.reshape(-1, 1), pred_y1.reshape(-1, 1),
# pred_x2.reshape(-1, 1), pred_y2.reshape(-1, 1)), axis=1)
pred_boxes = F.stack([pred_x1, pred_y1, pred_x2, pred_y2], axis = 1)
return pred_boxes
def bbox_transform_opr(bbox, gt):
""" Transform the bounding box and ground truth to the loss targets.
The 4 box coordinates are in axis 1"""
bbox_width = bbox[:, 2] - bbox[:, 0] + 1
bbox_height = bbox[:, 3] - bbox[:, 1] + 1
bbox_ctr_x = bbox[:, 0] + 0.5 * bbox_width
bbox_ctr_y = bbox[:, 1] + 0.5 * bbox_height
gt_width = gt[:, 2] - gt[:, 0] + 1
gt_height = gt[:, 3] - gt[:, 1] + 1
gt_ctr_x = gt[:, 0] + 0.5 * gt_width
gt_ctr_y = gt[:, 1] + 0.5 * gt_height
target_dx = (gt_ctr_x - bbox_ctr_x) / bbox_width
target_dy = (gt_ctr_y - bbox_ctr_y) / bbox_height
target_dw = F.log(gt_width / bbox_width)
target_dh = F.log(gt_height / bbox_height)
target = F.stack([target_dx, target_dy, target_dw, target_dh], axis=1)
return target
def box_overlap_opr(box: Tensor, gt: Tensor) -> Tensor:
"""
Given two lists of boxes of size N and M,
compute the IoU (intersection over union)
between __all__ N x M pairs of boxes.
The box order must be (xmin, ymin, xmax, ymax).
Args:
boxes1,boxes2 (Boxes): two `Boxes`. Contains N & M boxes, respectively.
Returns:
Tensor: IoU, sized [N,M].
"""
# box = boxes1
# gt = boxes2
# target_shape = (boxes1.shape[0], boxes2.shape[0], 4)
N, K = box.shape[0], gt.shape[0]
b_box = F.broadcast_to(F.expand_dims(box, 1),(N, K, box.shape[1]))
b_gt = F.broadcast_to(F.expand_dims(gt, 0), (N, K, gt.shape[1]))
# b_gt = F.expand_dims(gt, 0).broadcast_to(N, K, gt.shape[1])
# b_box = F.expand_dims(boxes1, 1).broadcast(*target_shape)
# b_gt = F.expand_dims(boxes2, 0).broadcast(*target_shape)
iw = F.minimum(b_box[:, :, 2], b_gt[:, :, 2]) - F.maximum(
b_box[:, :, 0], b_gt[:, :, 0]
)
ih = F.minimum(b_box[:, :, 3], b_gt[:, :, 3]) - F.maximum(
b_box[:, :, 1], b_gt[:, :, 1]
)
inter = F.maximum(iw, 0) * F.maximum(ih, 0)
area_box = F.maximum(box[:, 2] - box[:, 0], 0) * F.maximum(box[:, 3] - box[:, 1], 0)
area_gt = F.maximum(gt[:, 2] - gt[:, 0], 0) * F.maximum(gt[:, 3] - gt[:, 1], 0)
# area_target_shape = (box.shape[0], gt.shapeof()[0])
b_area_box = F.broadcast_to(F.expand_dims(area_box, 1), (N, K))
b_area_gt = F.broadcast_to(F.expand_dims(area_gt, 0), (N, K))
# b_area_box = F.expand_dims(area_box, 1).broadcast_to(N, K)
# b_area_gt = F.expand_dims(area_gt, 0).broadcast_to(N, K)
# b_area_box = F.add_axis(area_box, 1).broadcast(*area_target_shape)
# b_area_gt = F.add_axis(area_gt, 0).broadcast(*area_target_shape)
union = b_area_box + b_area_gt - inter
overlaps = F.maximum(inter / union, 0)
return overlaps
def box_overlap_ignore_opr(box: Tensor, gt: Tensor, ignore_label=-1) -> Tensor:
"""
Given two lists of boxes of size N and M,
compute the IoU (intersection over union)
between __all__ N x M pairs of boxes.
The box order must be (xmin, ymin, xmax, ymax).
Args:
boxes1,boxes2 (Boxes): two `Boxes`. Contains N & M boxes, respectively.
Returns:
Tensor: IoU, sized [N,M].
"""
# box = boxes1
# gt = boxes2
# target_shape = (boxes1.shapeof()[0], boxes2.shapeof()[0], 4)
eps = 1e-5
N, K = box.shape[0], gt.shape[0]
b_box = F.broadcast_to(F.expand_dims(box, 1), (N, K, box.shape[1]))
b_gt = F.broadcast_to(F.expand_dims(gt, 0), (N, K, gt.shape[1]))
# b_box = F.add_axis(boxes1, 1).broadcast(*target_shape)
# b_gt = F.add_axis(boxes2[:, :4], 0).broadcast(*target_shape)
iw = F.minimum(b_box[:, :, 2], b_gt[:, :, 2]) - F.maximum(
b_box[:, :, 0], b_gt[:, :, 0]
)
ih = F.minimum(b_box[:, :, 3], b_gt[:, :, 3]) - F.maximum(
b_box[:, :, 1], b_gt[:, :, 1]
)
inter = F.maximum(iw, 0) * F.maximum(ih, 0)
area_box = F.maximum(box[:, 2] - box[:, 0], 0) * F.maximum(box[:, 3] - box[:, 1], 0)
area_gt = F.maximum(gt[:, 2] - gt[:, 0], 0) * F.maximum(gt[:, 3] - gt[:, 1], 0)
# area_target_shape = (box.shapeof()[0], gt.shapeof()[0])
# b_area_box = F.add_axis(area_box, 1).broadcast(*area_target_shape)
# b_area_gt = F.add_axis(area_gt, 0).broadcast(*area_target_shape)
b_area_box = F.broadcast_to(F.expand_dims(area_box, 1), (N, K)) + eps
b_area_gt = F.broadcast_to(F.expand_dims(area_gt, 0), (N, K))
union = b_area_box + b_area_gt - inter + eps
overlaps_normal = F.maximum(inter / union, 0)
overlaps_ignore = F.maximum(inter / b_area_box, 0)
overlaps = F.maximum(inter / union, 0)
# gt_ignore_mask = F.add_axis(F.equal(gt[:, 4], ignore_label), 0).broadcast(*area_target_shape)
ignore_mask = F.equal(gt[:, 4], ignore_label)
gt_ignore_mask = F.expand_dims(ignore_mask, 0)  # api: megengine.functional.expand_dims
import math
import megengine as mge
import megengine.functional as F
import numpy as np
from megengine import Tensor
import pdb
def restore_bbox(rois, deltas, unnormalize=True, config = None):
assert deltas.ndim == 3
if unnormalize:
std_opr = mge.tensor(config.bbox_normalize_stds.reshape(1, 1, -1))
mean_opr = mge.tensor(config.bbox_normalize_means.reshape(1, 1, -1))
deltas = deltas * std_opr
deltas = deltas + mean_opr
# n = deltas.shape[1]
n, c = deltas.shape[0], deltas.shape[1]
all_rois = F.broadcast_to(F.expand_dims(rois, 1), (n, c, rois.shape[1])).reshape(-1, rois.shape[1])
deltas = deltas.reshape(-1, deltas.shape[2])
pred_bbox = bbox_transform_inv_opr(all_rois, deltas)
pred_bbox = pred_bbox.reshape(-1, c, pred_bbox.shape[1])
return pred_bbox
def filter_boxes_opr(boxes, min_size):
"""Remove all boxes with any side smaller than min_size."""
wh = boxes[:, 2:4] - boxes[:, 0:2] + 1
keep_mask = F.prod(wh >= min_size, axis = 1).astype(np.float32)
keep_mask = keep_mask + F.equal(keep_mask.sum(), 0).astype(np.float32)
return keep_mask
def clip_boxes_opr(boxes, im_info):
""" Clip the boxes into the image region."""
w = im_info[1] - 1
h = im_info[0] - 1
boxes[:, 0::4] = boxes[:, 0::4].clamp(min=0, max=w)
boxes[:, 1::4] = boxes[:, 1::4].clamp(min=0, max=h)
boxes[:, 2::4] = boxes[:, 2::4].clamp(min=0, max=w)
boxes[:, 3::4] = boxes[:, 3::4].clamp(min=0, max=h)
return boxes
def bbox_transform_inv_opr(bbox, deltas):
max_delta = math.log(1000.0 / 16)
""" Transforms the learned deltas to the final bbox coordinates, the axis is 1"""
bbox_width = bbox[:, 2] - bbox[:, 0] + 1
bbox_height = bbox[:, 3] - bbox[:, 1] + 1
bbox_ctr_x = bbox[:, 0] + 0.5 * bbox_width
bbox_ctr_y = bbox[:, 1] + 0.5 * bbox_height
pred_ctr_x = bbox_ctr_x + deltas[:, 0] * bbox_width
pred_ctr_y = bbox_ctr_y + deltas[:, 1] * bbox_height
dw = deltas[:, 2]
dh = deltas[:, 3]
dw = F.minimum(dw, max_delta)
dh = F.minimum(dh, max_delta)
pred_width = bbox_width * F.exp(dw)  # api: megengine.functional.exp
import math
import megengine as mge
import megengine.functional as F
import numpy as np
from megengine import Tensor
import pdb
def restore_bbox(rois, deltas, unnormalize=True, config = None):
assert deltas.ndim == 3
if unnormalize:
std_opr = mge.tensor(config.bbox_normalize_stds.reshape(1, 1, -1))
mean_opr = mge.tensor(config.bbox_normalize_means.reshape(1, 1, -1))
deltas = deltas * std_opr
deltas = deltas + mean_opr
# n = deltas.shape[1]
n, c = deltas.shape[0], deltas.shape[1]
all_rois = F.broadcast_to(F.expand_dims(rois, 1), (n, c, rois.shape[1])).reshape(-1, rois.shape[1])
deltas = deltas.reshape(-1, deltas.shape[2])
pred_bbox = bbox_transform_inv_opr(all_rois, deltas)
pred_bbox = pred_bbox.reshape(-1, c, pred_bbox.shape[1])
return pred_bbox
def filter_boxes_opr(boxes, min_size):
"""Remove all boxes with any side smaller than min_size."""
wh = boxes[:, 2:4] - boxes[:, 0:2] + 1
keep_mask = F.prod(wh >= min_size, axis = 1).astype(np.float32)
keep_mask = keep_mask + F.equal(keep_mask.sum(), 0).astype(np.float32)
return keep_mask
def clip_boxes_opr(boxes, im_info):
""" Clip the boxes into the image region."""
w = im_info[1] - 1
h = im_info[0] - 1
boxes[:, 0::4] = boxes[:, 0::4].clamp(min=0, max=w)
boxes[:, 1::4] = boxes[:, 1::4].clamp(min=0, max=h)
boxes[:, 2::4] = boxes[:, 2::4].clamp(min=0, max=w)
boxes[:, 3::4] = boxes[:, 3::4].clamp(min=0, max=h)
return boxes
def bbox_transform_inv_opr(bbox, deltas):
max_delta = math.log(1000.0 / 16)
""" Transforms the learned deltas to the final bbox coordinates, the axis is 1"""
bbox_width = bbox[:, 2] - bbox[:, 0] + 1
bbox_height = bbox[:, 3] - bbox[:, 1] + 1
bbox_ctr_x = bbox[:, 0] + 0.5 * bbox_width
bbox_ctr_y = bbox[:, 1] + 0.5 * bbox_height
pred_ctr_x = bbox_ctr_x + deltas[:, 0] * bbox_width
pred_ctr_y = bbox_ctr_y + deltas[:, 1] * bbox_height
dw = deltas[:, 2]
dh = deltas[:, 3]
dw = F.minimum(dw, max_delta)
dh = F.minimum(dh, max_delta)
pred_width = bbox_width * F.exp(dw)
pred_height = bbox_height * F.exp(dh)  # api: megengine.functional.exp
import math
import megengine as mge
import megengine.functional as F
import numpy as np
from megengine import Tensor
import pdb
def restore_bbox(rois, deltas, unnormalize=True, config = None):
assert deltas.ndim == 3
if unnormalize:
std_opr = mge.tensor(config.bbox_normalize_stds.reshape(1, 1, -1))
mean_opr = mge.tensor(config.bbox_normalize_means.reshape(1, 1, -1))
deltas = deltas * std_opr
deltas = deltas + mean_opr
# n = deltas.shape[1]
n, c = deltas.shape[0], deltas.shape[1]
all_rois = F.broadcast_to(F.expand_dims(rois, 1), (n, c, rois.shape[1])).reshape(-1, rois.shape[1])
deltas = deltas.reshape(-1, deltas.shape[2])
pred_bbox = bbox_transform_inv_opr(all_rois, deltas)
pred_bbox = pred_bbox.reshape(-1, c, pred_bbox.shape[1])
return pred_bbox
def filter_boxes_opr(boxes, min_size):
"""Remove all boxes with any side smaller than min_size."""
wh = boxes[:, 2:4] - boxes[:, 0:2] + 1
keep_mask = F.prod(wh >= min_size, axis = 1).astype(np.float32)
keep_mask = keep_mask + F.equal(keep_mask.sum(), 0).astype(np.float32)
return keep_mask
def clip_boxes_opr(boxes, im_info):
""" Clip the boxes into the image region."""
w = im_info[1] - 1
h = im_info[0] - 1
boxes[:, 0::4] = boxes[:, 0::4].clamp(min=0, max=w)
boxes[:, 1::4] = boxes[:, 1::4].clamp(min=0, max=h)
boxes[:, 2::4] = boxes[:, 2::4].clamp(min=0, max=w)
boxes[:, 3::4] = boxes[:, 3::4].clamp(min=0, max=h)
return boxes
def bbox_transform_inv_opr(bbox, deltas):
max_delta = math.log(1000.0 / 16)
""" Transforms the learned deltas to the final bbox coordinates, the axis is 1"""
bbox_width = bbox[:, 2] - bbox[:, 0] + 1
bbox_height = bbox[:, 3] - bbox[:, 1] + 1
bbox_ctr_x = bbox[:, 0] + 0.5 * bbox_width
bbox_ctr_y = bbox[:, 1] + 0.5 * bbox_height
pred_ctr_x = bbox_ctr_x + deltas[:, 0] * bbox_width
pred_ctr_y = bbox_ctr_y + deltas[:, 1] * bbox_height
dw = deltas[:, 2]
dh = deltas[:, 3]
dw = F.minimum(dw, max_delta)
dh = F.minimum(dh, max_delta)
pred_width = bbox_width * F.exp(dw)
pred_height = bbox_height * F.exp(dh)
pred_x1 = pred_ctr_x - 0.5 * pred_width
pred_y1 = pred_ctr_y - 0.5 * pred_height
pred_x2 = pred_ctr_x + 0.5 * pred_width
pred_y2 = pred_ctr_y + 0.5 * pred_height
# pred_boxes = F.concat((pred_x1.reshape(-1, 1), pred_y1.reshape(-1, 1),
# pred_x2.reshape(-1, 1), pred_y2.reshape(-1, 1)), axis=1)
pred_boxes = F.stack([pred_x1, pred_y1, pred_x2, pred_y2], axis = 1)
return pred_boxes
def bbox_transform_opr(bbox, gt):
""" Transform the bounding box and ground truth to the loss targets.
The 4 box coordinates are in axis 1"""
bbox_width = bbox[:, 2] - bbox[:, 0] + 1
bbox_height = bbox[:, 3] - bbox[:, 1] + 1
bbox_ctr_x = bbox[:, 0] + 0.5 * bbox_width
bbox_ctr_y = bbox[:, 1] + 0.5 * bbox_height
gt_width = gt[:, 2] - gt[:, 0] + 1
gt_height = gt[:, 3] - gt[:, 1] + 1
gt_ctr_x = gt[:, 0] + 0.5 * gt_width
gt_ctr_y = gt[:, 1] + 0.5 * gt_height
target_dx = (gt_ctr_x - bbox_ctr_x) / bbox_width
target_dy = (gt_ctr_y - bbox_ctr_y) / bbox_height
target_dw = F.log(gt_width / bbox_width)
target_dh = F.log(gt_height / bbox_height)
target = F.stack([target_dx, target_dy, target_dw, target_dh], axis=1)
return target
def box_overlap_opr(box: Tensor, gt: Tensor) -> Tensor:
"""
Given two lists of boxes of size N and M,
compute the IoU (intersection over union)
between __all__ N x M pairs of boxes.
The box order must be (xmin, ymin, xmax, ymax).
Args:
boxes1,boxes2 (Boxes): two `Boxes`. Contains N & M boxes, respectively.
Returns:
Tensor: IoU, sized [N,M].
"""
# box = boxes1
# gt = boxes2
# target_shape = (boxes1.shape[0], boxes2.shape[0], 4)
N, K = box.shape[0], gt.shape[0]
b_box = F.broadcast_to(F.expand_dims(box, 1), (N, K, box.shape[1]))  # api: megengine.functional.expand_dims
import math
import megengine as mge
import megengine.functional as F
import numpy as np
from megengine import Tensor
import pdb
def restore_bbox(rois, deltas, unnormalize=True, config = None):
assert deltas.ndim == 3
if unnormalize:
std_opr = mge.tensor(config.bbox_normalize_stds.reshape(1, 1, -1))
mean_opr = mge.tensor(config.bbox_normalize_means.reshape(1, 1, -1))
deltas = deltas * std_opr
deltas = deltas + mean_opr
# n = deltas.shape[1]
n, c = deltas.shape[0], deltas.shape[1]
all_rois = F.broadcast_to(F.expand_dims(rois, 1), (n, c, rois.shape[1])).reshape(-1, rois.shape[1])
deltas = deltas.reshape(-1, deltas.shape[2])
pred_bbox = bbox_transform_inv_opr(all_rois, deltas)
pred_bbox = pred_bbox.reshape(-1, c, pred_bbox.shape[1])
return pred_bbox
def filter_boxes_opr(boxes, min_size):
"""Remove all boxes with any side smaller than min_size."""
wh = boxes[:, 2:4] - boxes[:, 0:2] + 1
keep_mask = F.prod(wh >= min_size, axis = 1).astype(np.float32)
keep_mask = keep_mask + F.equal(keep_mask.sum(), 0).astype(np.float32)
return keep_mask
def clip_boxes_opr(boxes, im_info):
""" Clip the boxes into the image region."""
w = im_info[1] - 1
h = im_info[0] - 1
boxes[:, 0::4] = boxes[:, 0::4].clamp(min=0, max=w)
boxes[:, 1::4] = boxes[:, 1::4].clamp(min=0, max=h)
boxes[:, 2::4] = boxes[:, 2::4].clamp(min=0, max=w)
boxes[:, 3::4] = boxes[:, 3::4].clamp(min=0, max=h)
return boxes
def bbox_transform_inv_opr(bbox, deltas):
max_delta = math.log(1000.0 / 16)
""" Transforms the learned deltas to the final bbox coordinates, the axis is 1"""
bbox_width = bbox[:, 2] - bbox[:, 0] + 1
bbox_height = bbox[:, 3] - bbox[:, 1] + 1
bbox_ctr_x = bbox[:, 0] + 0.5 * bbox_width
bbox_ctr_y = bbox[:, 1] + 0.5 * bbox_height
pred_ctr_x = bbox_ctr_x + deltas[:, 0] * bbox_width
pred_ctr_y = bbox_ctr_y + deltas[:, 1] * bbox_height
dw = deltas[:, 2]
dh = deltas[:, 3]
dw = F.minimum(dw, max_delta)
dh = F.minimum(dh, max_delta)
pred_width = bbox_width * F.exp(dw)
pred_height = bbox_height * F.exp(dh)
pred_x1 = pred_ctr_x - 0.5 * pred_width
pred_y1 = pred_ctr_y - 0.5 * pred_height
pred_x2 = pred_ctr_x + 0.5 * pred_width
pred_y2 = pred_ctr_y + 0.5 * pred_height
# pred_boxes = F.concat((pred_x1.reshape(-1, 1), pred_y1.reshape(-1, 1),
# pred_x2.reshape(-1, 1), pred_y2.reshape(-1, 1)), axis=1)
pred_boxes = F.stack([pred_x1, pred_y1, pred_x2, pred_y2], axis = 1)
return pred_boxes
def bbox_transform_opr(bbox, gt):
""" Transform the bounding box and ground truth to the loss targets.
The 4 box coordinates are in axis 1"""
bbox_width = bbox[:, 2] - bbox[:, 0] + 1
bbox_height = bbox[:, 3] - bbox[:, 1] + 1
bbox_ctr_x = bbox[:, 0] + 0.5 * bbox_width
bbox_ctr_y = bbox[:, 1] + 0.5 * bbox_height
gt_width = gt[:, 2] - gt[:, 0] + 1
gt_height = gt[:, 3] - gt[:, 1] + 1
gt_ctr_x = gt[:, 0] + 0.5 * gt_width
gt_ctr_y = gt[:, 1] + 0.5 * gt_height
target_dx = (gt_ctr_x - bbox_ctr_x) / bbox_width
target_dy = (gt_ctr_y - bbox_ctr_y) / bbox_height
target_dw = F.log(gt_width / bbox_width)
target_dh = F.log(gt_height / bbox_height)
target = F.stack([target_dx, target_dy, target_dw, target_dh], axis=1)
return target
def box_overlap_opr(box: Tensor, gt: Tensor) -> Tensor:
"""
Given two lists of boxes of size N and M,
compute the IoU (intersection over union)
between __all__ N x M pairs of boxes.
The box order must be (xmin, ymin, xmax, ymax).
Args:
boxes1,boxes2 (Boxes): two `Boxes`. Contains N & M boxes, respectively.
Returns:
Tensor: IoU, sized [N,M].
"""
# box = boxes1
# gt = boxes2
# target_shape = (boxes1.shape[0], boxes2.shape[0], 4)
N, K = box.shape[0], gt.shape[0]
b_box = F.broadcast_to(F.expand_dims(box, 1),(N, K, box.shape[1]))
b_gt = F.broadcast_to(F.expand_dims(gt, 0), (N, K, gt.shape[1]))  # api: megengine.functional.expand_dims
import math
import megengine as mge
import megengine.functional as F
import numpy as np
from megengine import Tensor
import pdb
def restore_bbox(rois, deltas, unnormalize=True, config = None):
assert deltas.ndim == 3
if unnormalize:
std_opr = mge.tensor(config.bbox_normalize_stds.reshape(1, 1, -1))
mean_opr = mge.tensor(config.bbox_normalize_means.reshape(1, 1, -1))
deltas = deltas * std_opr
deltas = deltas + mean_opr
# n = deltas.shape[1]
n, c = deltas.shape[0], deltas.shape[1]
all_rois = F.broadcast_to(F.expand_dims(rois, 1), (n, c, rois.shape[1])).reshape(-1, rois.shape[1])
deltas = deltas.reshape(-1, deltas.shape[2])
pred_bbox = bbox_transform_inv_opr(all_rois, deltas)
pred_bbox = pred_bbox.reshape(-1, c, pred_bbox.shape[1])
return pred_bbox
def filter_boxes_opr(boxes, min_size):
"""Remove all boxes with any side smaller than min_size."""
wh = boxes[:, 2:4] - boxes[:, 0:2] + 1
keep_mask = F.prod(wh >= min_size, axis = 1).astype(np.float32)
keep_mask = keep_mask + F.equal(keep_mask.sum(), 0).astype(np.float32)
return keep_mask
def clip_boxes_opr(boxes, im_info):
""" Clip the boxes into the image region."""
w = im_info[1] - 1
h = im_info[0] - 1
boxes[:, 0::4] = boxes[:, 0::4].clamp(min=0, max=w)
boxes[:, 1::4] = boxes[:, 1::4].clamp(min=0, max=h)
boxes[:, 2::4] = boxes[:, 2::4].clamp(min=0, max=w)
boxes[:, 3::4] = boxes[:, 3::4].clamp(min=0, max=h)
return boxes
def bbox_transform_inv_opr(bbox, deltas):
max_delta = math.log(1000.0 / 16)
""" Transforms the learned deltas to the final bbox coordinates, the axis is 1"""
bbox_width = bbox[:, 2] - bbox[:, 0] + 1
bbox_height = bbox[:, 3] - bbox[:, 1] + 1
bbox_ctr_x = bbox[:, 0] + 0.5 * bbox_width
bbox_ctr_y = bbox[:, 1] + 0.5 * bbox_height
pred_ctr_x = bbox_ctr_x + deltas[:, 0] * bbox_width
pred_ctr_y = bbox_ctr_y + deltas[:, 1] * bbox_height
dw = deltas[:, 2]
dh = deltas[:, 3]
dw = F.minimum(dw, max_delta)
dh = F.minimum(dh, max_delta)
pred_width = bbox_width * F.exp(dw)
pred_height = bbox_height * F.exp(dh)
pred_x1 = pred_ctr_x - 0.5 * pred_width
pred_y1 = pred_ctr_y - 0.5 * pred_height
pred_x2 = pred_ctr_x + 0.5 * pred_width
pred_y2 = pred_ctr_y + 0.5 * pred_height
# pred_boxes = F.concat((pred_x1.reshape(-1, 1), pred_y1.reshape(-1, 1),
# pred_x2.reshape(-1, 1), pred_y2.reshape(-1, 1)), axis=1)
pred_boxes = F.stack([pred_x1, pred_y1, pred_x2, pred_y2], axis = 1)
return pred_boxes
def bbox_transform_opr(bbox, gt):
""" Transform the bounding box and ground truth to the loss targets.
The 4 box coordinates are in axis 1"""
bbox_width = bbox[:, 2] - bbox[:, 0] + 1
bbox_height = bbox[:, 3] - bbox[:, 1] + 1
bbox_ctr_x = bbox[:, 0] + 0.5 * bbox_width
bbox_ctr_y = bbox[:, 1] + 0.5 * bbox_height
gt_width = gt[:, 2] - gt[:, 0] + 1
gt_height = gt[:, 3] - gt[:, 1] + 1
gt_ctr_x = gt[:, 0] + 0.5 * gt_width
gt_ctr_y = gt[:, 1] + 0.5 * gt_height
target_dx = (gt_ctr_x - bbox_ctr_x) / bbox_width
target_dy = (gt_ctr_y - bbox_ctr_y) / bbox_height
target_dw = F.log(gt_width / bbox_width)
target_dh = F.log(gt_height / bbox_height)
target = F.stack([target_dx, target_dy, target_dw, target_dh], axis=1)
return target
def box_overlap_opr(box: Tensor, gt: Tensor) -> Tensor:
"""
Given two lists of boxes of size N and M,
compute the IoU (intersection over union)
between __all__ N x M pairs of boxes.
The box order must be (xmin, ymin, xmax, ymax).
Args:
boxes1,boxes2 (Boxes): two `Boxes`. Contains N & M boxes, respectively.
Returns:
Tensor: IoU, sized [N,M].
"""
# box = boxes1
# gt = boxes2
# target_shape = (boxes1.shape[0], boxes2.shape[0], 4)
N, K = box.shape[0], gt.shape[0]
b_box = F.broadcast_to(F.expand_dims(box, 1),(N, K, box.shape[1]))
b_gt = F.broadcast_to(F.expand_dims(gt, 0), (N, K, gt.shape[1]))
# b_gt = F.expand_dims(gt, 0).broadcast_to(N, K, gt.shape[1])
# b_box = F.expand_dims(boxes1, 1).broadcast(*target_shape)
# b_gt = F.expand_dims(boxes2, 0).broadcast(*target_shape)
iw = | F.minimum(b_box[:, :, 2], b_gt[:, :, 2]) | megengine.functional.minimum |
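Editorial note, not part of the dataset rows: `filter_boxes_opr` above does not simply threshold on size; after building the per-box mask it adds `F.equal(keep_mask.sum(), 0)`, so that when no box passes the min-size test every entry becomes 1 and downstream code never receives an empty selection. A tiny NumPy illustration of that fallback, with hypothetical values:

```python
import numpy as np

keep_mask = np.array([0.0, 0.0, 0.0])                # no box survived the size test
keep_mask = keep_mask + float(keep_mask.sum() == 0)  # the (sum == 0) fallback kicks in
print(keep_mask)                                     # [1. 1. 1.] -> all boxes are kept
```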
import math
import megengine as mge
import megengine.functional as F
import numpy as np
from megengine import Tensor
import pdb
def restore_bbox(rois, deltas, unnormalize=True, config = None):
assert deltas.ndim == 3
if unnormalize:
std_opr = mge.tensor(config.bbox_normalize_stds.reshape(1, 1, -1))
mean_opr = mge.tensor(config.bbox_normalize_means.reshape(1, 1, -1))
deltas = deltas * std_opr
deltas = deltas + mean_opr
# n = deltas.shape[1]
n, c = deltas.shape[0], deltas.shape[1]
all_rois = F.broadcast_to(F.expand_dims(rois, 1), (n, c, rois.shape[1])).reshape(-1, rois.shape[1])
deltas = deltas.reshape(-1, deltas.shape[2])
pred_bbox = bbox_transform_inv_opr(all_rois, deltas)
pred_bbox = pred_bbox.reshape(-1, c, pred_bbox.shape[1])
return pred_bbox
def filter_boxes_opr(boxes, min_size):
"""Remove all boxes with any side smaller than min_size."""
wh = boxes[:, 2:4] - boxes[:, 0:2] + 1
keep_mask = F.prod(wh >= min_size, axis = 1).astype(np.float32)
keep_mask = keep_mask + F.equal(keep_mask.sum(), 0).astype(np.float32)
    return keep_mask
def clip_boxes_opr(boxes, im_info):
""" Clip the boxes into the image region."""
w = im_info[1] - 1
h = im_info[0] - 1
boxes[:, 0::4] = boxes[:, 0::4].clamp(min=0, max=w)
boxes[:, 1::4] = boxes[:, 1::4].clamp(min=0, max=h)
boxes[:, 2::4] = boxes[:, 2::4].clamp(min=0, max=w)
boxes[:, 3::4] = boxes[:, 3::4].clamp(min=0, max=h)
return boxes
def bbox_transform_inv_opr(bbox, deltas):
max_delta = math.log(1000.0 / 16)
""" Transforms the learned deltas to the final bbox coordinates, the axis is 1"""
bbox_width = bbox[:, 2] - bbox[:, 0] + 1
bbox_height = bbox[:, 3] - bbox[:, 1] + 1
bbox_ctr_x = bbox[:, 0] + 0.5 * bbox_width
bbox_ctr_y = bbox[:, 1] + 0.5 * bbox_height
pred_ctr_x = bbox_ctr_x + deltas[:, 0] * bbox_width
pred_ctr_y = bbox_ctr_y + deltas[:, 1] * bbox_height
dw = deltas[:, 2]
dh = deltas[:, 3]
dw = F.minimum(dw, max_delta)
dh = F.minimum(dh, max_delta)
pred_width = bbox_width * F.exp(dw)
pred_height = bbox_height * F.exp(dh)
pred_x1 = pred_ctr_x - 0.5 * pred_width
pred_y1 = pred_ctr_y - 0.5 * pred_height
pred_x2 = pred_ctr_x + 0.5 * pred_width
pred_y2 = pred_ctr_y + 0.5 * pred_height
# pred_boxes = F.concat((pred_x1.reshape(-1, 1), pred_y1.reshape(-1, 1),
# pred_x2.reshape(-1, 1), pred_y2.reshape(-1, 1)), axis=1)
pred_boxes = F.stack([pred_x1, pred_y1, pred_x2, pred_y2], axis = 1)
return pred_boxes
def bbox_transform_opr(bbox, gt):
""" Transform the bounding box and ground truth to the loss targets.
The 4 box coordinates are in axis 1"""
bbox_width = bbox[:, 2] - bbox[:, 0] + 1
bbox_height = bbox[:, 3] - bbox[:, 1] + 1
bbox_ctr_x = bbox[:, 0] + 0.5 * bbox_width
bbox_ctr_y = bbox[:, 1] + 0.5 * bbox_height
gt_width = gt[:, 2] - gt[:, 0] + 1
gt_height = gt[:, 3] - gt[:, 1] + 1
gt_ctr_x = gt[:, 0] + 0.5 * gt_width
gt_ctr_y = gt[:, 1] + 0.5 * gt_height
target_dx = (gt_ctr_x - bbox_ctr_x) / bbox_width
target_dy = (gt_ctr_y - bbox_ctr_y) / bbox_height
target_dw = F.log(gt_width / bbox_width)
target_dh = F.log(gt_height / bbox_height)
target = F.stack([target_dx, target_dy, target_dw, target_dh], axis=1)
return target
def box_overlap_opr(box: Tensor, gt: Tensor) -> Tensor:
"""
Given two lists of boxes of size N and M,
compute the IoU (intersection over union)
between __all__ N x M pairs of boxes.
The box order must be (xmin, ymin, xmax, ymax).
Args:
boxes1,boxes2 (Boxes): two `Boxes`. Contains N & M boxes, respectively.
Returns:
Tensor: IoU, sized [N,M].
"""
# box = boxes1
# gt = boxes2
# target_shape = (boxes1.shape[0], boxes2.shape[0], 4)
N, K = box.shape[0], gt.shape[0]
b_box = F.broadcast_to(F.expand_dims(box, 1),(N, K, box.shape[1]))
b_gt = F.broadcast_to(F.expand_dims(gt, 0), (N, K, gt.shape[1]))
# b_gt = F.expand_dims(gt, 0).broadcast_to(N, K, gt.shape[1])
# b_box = F.expand_dims(boxes1, 1).broadcast(*target_shape)
# b_gt = F.expand_dims(boxes2, 0).broadcast(*target_shape)
iw = F.minimum(b_box[:, :, 2], b_gt[:, :, 2]) - F.maximum(
b_box[:, :, 0], b_gt[:, :, 0]
)
ih = | F.minimum(b_box[:, :, 3], b_gt[:, :, 3]) | megengine.functional.minimum |
import math
import megengine as mge
import megengine.functional as F
import numpy as np
from megengine import Tensor
import pdb
def restore_bbox(rois, deltas, unnormalize=True, config = None):
assert deltas.ndim == 3
if unnormalize:
std_opr = mge.tensor(config.bbox_normalize_stds.reshape(1, 1, -1))
mean_opr = mge.tensor(config.bbox_normalize_means.reshape(1, 1, -1))
deltas = deltas * std_opr
deltas = deltas + mean_opr
# n = deltas.shape[1]
n, c = deltas.shape[0], deltas.shape[1]
all_rois = F.broadcast_to(F.expand_dims(rois, 1), (n, c, rois.shape[1])).reshape(-1, rois.shape[1])
deltas = deltas.reshape(-1, deltas.shape[2])
pred_bbox = bbox_transform_inv_opr(all_rois, deltas)
pred_bbox = pred_bbox.reshape(-1, c, pred_bbox.shape[1])
return pred_bbox
def filter_boxes_opr(boxes, min_size):
"""Remove all boxes with any side smaller than min_size."""
wh = boxes[:, 2:4] - boxes[:, 0:2] + 1
keep_mask = F.prod(wh >= min_size, axis = 1).astype(np.float32)
keep_mask = keep_mask + F.equal(keep_mask.sum(), 0).astype(np.float32)
    return keep_mask
def clip_boxes_opr(boxes, im_info):
""" Clip the boxes into the image region."""
w = im_info[1] - 1
h = im_info[0] - 1
boxes[:, 0::4] = boxes[:, 0::4].clamp(min=0, max=w)
boxes[:, 1::4] = boxes[:, 1::4].clamp(min=0, max=h)
boxes[:, 2::4] = boxes[:, 2::4].clamp(min=0, max=w)
boxes[:, 3::4] = boxes[:, 3::4].clamp(min=0, max=h)
return boxes
def bbox_transform_inv_opr(bbox, deltas):
max_delta = math.log(1000.0 / 16)
""" Transforms the learned deltas to the final bbox coordinates, the axis is 1"""
bbox_width = bbox[:, 2] - bbox[:, 0] + 1
bbox_height = bbox[:, 3] - bbox[:, 1] + 1
bbox_ctr_x = bbox[:, 0] + 0.5 * bbox_width
bbox_ctr_y = bbox[:, 1] + 0.5 * bbox_height
pred_ctr_x = bbox_ctr_x + deltas[:, 0] * bbox_width
pred_ctr_y = bbox_ctr_y + deltas[:, 1] * bbox_height
dw = deltas[:, 2]
dh = deltas[:, 3]
dw = F.minimum(dw, max_delta)
dh = F.minimum(dh, max_delta)
pred_width = bbox_width * F.exp(dw)
pred_height = bbox_height * F.exp(dh)
pred_x1 = pred_ctr_x - 0.5 * pred_width
pred_y1 = pred_ctr_y - 0.5 * pred_height
pred_x2 = pred_ctr_x + 0.5 * pred_width
pred_y2 = pred_ctr_y + 0.5 * pred_height
# pred_boxes = F.concat((pred_x1.reshape(-1, 1), pred_y1.reshape(-1, 1),
# pred_x2.reshape(-1, 1), pred_y2.reshape(-1, 1)), axis=1)
pred_boxes = F.stack([pred_x1, pred_y1, pred_x2, pred_y2], axis = 1)
return pred_boxes
def bbox_transform_opr(bbox, gt):
""" Transform the bounding box and ground truth to the loss targets.
The 4 box coordinates are in axis 1"""
bbox_width = bbox[:, 2] - bbox[:, 0] + 1
bbox_height = bbox[:, 3] - bbox[:, 1] + 1
bbox_ctr_x = bbox[:, 0] + 0.5 * bbox_width
bbox_ctr_y = bbox[:, 1] + 0.5 * bbox_height
gt_width = gt[:, 2] - gt[:, 0] + 1
gt_height = gt[:, 3] - gt[:, 1] + 1
gt_ctr_x = gt[:, 0] + 0.5 * gt_width
gt_ctr_y = gt[:, 1] + 0.5 * gt_height
target_dx = (gt_ctr_x - bbox_ctr_x) / bbox_width
target_dy = (gt_ctr_y - bbox_ctr_y) / bbox_height
target_dw = F.log(gt_width / bbox_width)
target_dh = F.log(gt_height / bbox_height)
target = F.stack([target_dx, target_dy, target_dw, target_dh], axis=1)
return target
def box_overlap_opr(box: Tensor, gt: Tensor) -> Tensor:
"""
Given two lists of boxes of size N and M,
compute the IoU (intersection over union)
between __all__ N x M pairs of boxes.
The box order must be (xmin, ymin, xmax, ymax).
Args:
boxes1,boxes2 (Boxes): two `Boxes`. Contains N & M boxes, respectively.
Returns:
Tensor: IoU, sized [N,M].
"""
# box = boxes1
# gt = boxes2
# target_shape = (boxes1.shape[0], boxes2.shape[0], 4)
N, K = box.shape[0], gt.shape[0]
b_box = F.broadcast_to(F.expand_dims(box, 1),(N, K, box.shape[1]))
b_gt = F.broadcast_to(F.expand_dims(gt, 0), (N, K, gt.shape[1]))
# b_gt = F.expand_dims(gt, 0).broadcast_to(N, K, gt.shape[1])
# b_box = F.expand_dims(boxes1, 1).broadcast(*target_shape)
# b_gt = F.expand_dims(boxes2, 0).broadcast(*target_shape)
iw = F.minimum(b_box[:, :, 2], b_gt[:, :, 2]) - F.maximum(
b_box[:, :, 0], b_gt[:, :, 0]
)
ih = F.minimum(b_box[:, :, 3], b_gt[:, :, 3]) - F.maximum(
b_box[:, :, 1], b_gt[:, :, 1]
)
inter = | F.maximum(iw, 0) | megengine.functional.maximum |
import math
import megengine as mge
import megengine.functional as F
import numpy as np
from megengine import Tensor
import pdb
def restore_bbox(rois, deltas, unnormalize=True, config = None):
assert deltas.ndim == 3
if unnormalize:
std_opr = mge.tensor(config.bbox_normalize_stds.reshape(1, 1, -1))
mean_opr = mge.tensor(config.bbox_normalize_means.reshape(1, 1, -1))
deltas = deltas * std_opr
deltas = deltas + mean_opr
# n = deltas.shape[1]
n, c = deltas.shape[0], deltas.shape[1]
all_rois = F.broadcast_to(F.expand_dims(rois, 1), (n, c, rois.shape[1])).reshape(-1, rois.shape[1])
deltas = deltas.reshape(-1, deltas.shape[2])
pred_bbox = bbox_transform_inv_opr(all_rois, deltas)
pred_bbox = pred_bbox.reshape(-1, c, pred_bbox.shape[1])
return pred_bbox
def filter_boxes_opr(boxes, min_size):
"""Remove all boxes with any side smaller than min_size."""
wh = boxes[:, 2:4] - boxes[:, 0:2] + 1
keep_mask = F.prod(wh >= min_size, axis = 1).astype(np.float32)
keep_mask = keep_mask + F.equal(keep_mask.sum(), 0).astype(np.float32)
    return keep_mask
def clip_boxes_opr(boxes, im_info):
""" Clip the boxes into the image region."""
w = im_info[1] - 1
h = im_info[0] - 1
boxes[:, 0::4] = boxes[:, 0::4].clamp(min=0, max=w)
boxes[:, 1::4] = boxes[:, 1::4].clamp(min=0, max=h)
boxes[:, 2::4] = boxes[:, 2::4].clamp(min=0, max=w)
boxes[:, 3::4] = boxes[:, 3::4].clamp(min=0, max=h)
return boxes
def bbox_transform_inv_opr(bbox, deltas):
max_delta = math.log(1000.0 / 16)
""" Transforms the learned deltas to the final bbox coordinates, the axis is 1"""
bbox_width = bbox[:, 2] - bbox[:, 0] + 1
bbox_height = bbox[:, 3] - bbox[:, 1] + 1
bbox_ctr_x = bbox[:, 0] + 0.5 * bbox_width
bbox_ctr_y = bbox[:, 1] + 0.5 * bbox_height
pred_ctr_x = bbox_ctr_x + deltas[:, 0] * bbox_width
pred_ctr_y = bbox_ctr_y + deltas[:, 1] * bbox_height
dw = deltas[:, 2]
dh = deltas[:, 3]
dw = F.minimum(dw, max_delta)
dh = F.minimum(dh, max_delta)
pred_width = bbox_width * F.exp(dw)
pred_height = bbox_height * F.exp(dh)
pred_x1 = pred_ctr_x - 0.5 * pred_width
pred_y1 = pred_ctr_y - 0.5 * pred_height
pred_x2 = pred_ctr_x + 0.5 * pred_width
pred_y2 = pred_ctr_y + 0.5 * pred_height
# pred_boxes = F.concat((pred_x1.reshape(-1, 1), pred_y1.reshape(-1, 1),
# pred_x2.reshape(-1, 1), pred_y2.reshape(-1, 1)), axis=1)
pred_boxes = F.stack([pred_x1, pred_y1, pred_x2, pred_y2], axis = 1)
return pred_boxes
def bbox_transform_opr(bbox, gt):
""" Transform the bounding box and ground truth to the loss targets.
The 4 box coordinates are in axis 1"""
bbox_width = bbox[:, 2] - bbox[:, 0] + 1
bbox_height = bbox[:, 3] - bbox[:, 1] + 1
bbox_ctr_x = bbox[:, 0] + 0.5 * bbox_width
bbox_ctr_y = bbox[:, 1] + 0.5 * bbox_height
gt_width = gt[:, 2] - gt[:, 0] + 1
gt_height = gt[:, 3] - gt[:, 1] + 1
gt_ctr_x = gt[:, 0] + 0.5 * gt_width
gt_ctr_y = gt[:, 1] + 0.5 * gt_height
target_dx = (gt_ctr_x - bbox_ctr_x) / bbox_width
target_dy = (gt_ctr_y - bbox_ctr_y) / bbox_height
target_dw = F.log(gt_width / bbox_width)
target_dh = F.log(gt_height / bbox_height)
target = F.stack([target_dx, target_dy, target_dw, target_dh], axis=1)
return target
def box_overlap_opr(box: Tensor, gt: Tensor) -> Tensor:
"""
Given two lists of boxes of size N and M,
compute the IoU (intersection over union)
between __all__ N x M pairs of boxes.
The box order must be (xmin, ymin, xmax, ymax).
Args:
boxes1,boxes2 (Boxes): two `Boxes`. Contains N & M boxes, respectively.
Returns:
Tensor: IoU, sized [N,M].
"""
# box = boxes1
# gt = boxes2
# target_shape = (boxes1.shape[0], boxes2.shape[0], 4)
N, K = box.shape[0], gt.shape[0]
b_box = F.broadcast_to(F.expand_dims(box, 1),(N, K, box.shape[1]))
b_gt = F.broadcast_to(F.expand_dims(gt, 0), (N, K, gt.shape[1]))
# b_gt = F.expand_dims(gt, 0).broadcast_to(N, K, gt.shape[1])
# b_box = F.expand_dims(boxes1, 1).broadcast(*target_shape)
# b_gt = F.expand_dims(boxes2, 0).broadcast(*target_shape)
iw = F.minimum(b_box[:, :, 2], b_gt[:, :, 2]) - F.maximum(
b_box[:, :, 0], b_gt[:, :, 0]
)
ih = F.minimum(b_box[:, :, 3], b_gt[:, :, 3]) - F.maximum(
b_box[:, :, 1], b_gt[:, :, 1]
)
inter = F.maximum(iw, 0) * | F.maximum(ih, 0) | megengine.functional.maximum |
import math
import megengine as mge
import megengine.functional as F
import numpy as np
from megengine import Tensor
import pdb
def restore_bbox(rois, deltas, unnormalize=True, config = None):
assert deltas.ndim == 3
if unnormalize:
std_opr = mge.tensor(config.bbox_normalize_stds.reshape(1, 1, -1))
mean_opr = mge.tensor(config.bbox_normalize_means.reshape(1, 1, -1))
deltas = deltas * std_opr
deltas = deltas + mean_opr
# n = deltas.shape[1]
n, c = deltas.shape[0], deltas.shape[1]
all_rois = F.broadcast_to(F.expand_dims(rois, 1), (n, c, rois.shape[1])).reshape(-1, rois.shape[1])
deltas = deltas.reshape(-1, deltas.shape[2])
pred_bbox = bbox_transform_inv_opr(all_rois, deltas)
pred_bbox = pred_bbox.reshape(-1, c, pred_bbox.shape[1])
return pred_bbox
def filter_boxes_opr(boxes, min_size):
"""Remove all boxes with any side smaller than min_size."""
wh = boxes[:, 2:4] - boxes[:, 0:2] + 1
keep_mask = F.prod(wh >= min_size, axis = 1).astype(np.float32)
keep_mask = keep_mask + F.equal(keep_mask.sum(), 0).astype(np.float32)
    return keep_mask
def clip_boxes_opr(boxes, im_info):
""" Clip the boxes into the image region."""
w = im_info[1] - 1
h = im_info[0] - 1
boxes[:, 0::4] = boxes[:, 0::4].clamp(min=0, max=w)
boxes[:, 1::4] = boxes[:, 1::4].clamp(min=0, max=h)
boxes[:, 2::4] = boxes[:, 2::4].clamp(min=0, max=w)
boxes[:, 3::4] = boxes[:, 3::4].clamp(min=0, max=h)
return boxes
def bbox_transform_inv_opr(bbox, deltas):
max_delta = math.log(1000.0 / 16)
""" Transforms the learned deltas to the final bbox coordinates, the axis is 1"""
bbox_width = bbox[:, 2] - bbox[:, 0] + 1
bbox_height = bbox[:, 3] - bbox[:, 1] + 1
bbox_ctr_x = bbox[:, 0] + 0.5 * bbox_width
bbox_ctr_y = bbox[:, 1] + 0.5 * bbox_height
pred_ctr_x = bbox_ctr_x + deltas[:, 0] * bbox_width
pred_ctr_y = bbox_ctr_y + deltas[:, 1] * bbox_height
dw = deltas[:, 2]
dh = deltas[:, 3]
dw = F.minimum(dw, max_delta)
dh = F.minimum(dh, max_delta)
pred_width = bbox_width * F.exp(dw)
pred_height = bbox_height * F.exp(dh)
pred_x1 = pred_ctr_x - 0.5 * pred_width
pred_y1 = pred_ctr_y - 0.5 * pred_height
pred_x2 = pred_ctr_x + 0.5 * pred_width
pred_y2 = pred_ctr_y + 0.5 * pred_height
# pred_boxes = F.concat((pred_x1.reshape(-1, 1), pred_y1.reshape(-1, 1),
# pred_x2.reshape(-1, 1), pred_y2.reshape(-1, 1)), axis=1)
pred_boxes = F.stack([pred_x1, pred_y1, pred_x2, pred_y2], axis = 1)
return pred_boxes
def bbox_transform_opr(bbox, gt):
""" Transform the bounding box and ground truth to the loss targets.
The 4 box coordinates are in axis 1"""
bbox_width = bbox[:, 2] - bbox[:, 0] + 1
bbox_height = bbox[:, 3] - bbox[:, 1] + 1
bbox_ctr_x = bbox[:, 0] + 0.5 * bbox_width
bbox_ctr_y = bbox[:, 1] + 0.5 * bbox_height
gt_width = gt[:, 2] - gt[:, 0] + 1
gt_height = gt[:, 3] - gt[:, 1] + 1
gt_ctr_x = gt[:, 0] + 0.5 * gt_width
gt_ctr_y = gt[:, 1] + 0.5 * gt_height
target_dx = (gt_ctr_x - bbox_ctr_x) / bbox_width
target_dy = (gt_ctr_y - bbox_ctr_y) / bbox_height
target_dw = F.log(gt_width / bbox_width)
target_dh = F.log(gt_height / bbox_height)
target = F.stack([target_dx, target_dy, target_dw, target_dh], axis=1)
return target
def box_overlap_opr(box: Tensor, gt: Tensor) -> Tensor:
"""
Given two lists of boxes of size N and M,
compute the IoU (intersection over union)
between __all__ N x M pairs of boxes.
The box order must be (xmin, ymin, xmax, ymax).
Args:
boxes1,boxes2 (Boxes): two `Boxes`. Contains N & M boxes, respectively.
Returns:
Tensor: IoU, sized [N,M].
"""
# box = boxes1
# gt = boxes2
# target_shape = (boxes1.shape[0], boxes2.shape[0], 4)
N, K = box.shape[0], gt.shape[0]
b_box = F.broadcast_to(F.expand_dims(box, 1),(N, K, box.shape[1]))
b_gt = F.broadcast_to(F.expand_dims(gt, 0), (N, K, gt.shape[1]))
# b_gt = F.expand_dims(gt, 0).broadcast_to(N, K, gt.shape[1])
# b_box = F.expand_dims(boxes1, 1).broadcast(*target_shape)
# b_gt = F.expand_dims(boxes2, 0).broadcast(*target_shape)
iw = F.minimum(b_box[:, :, 2], b_gt[:, :, 2]) - F.maximum(
b_box[:, :, 0], b_gt[:, :, 0]
)
ih = F.minimum(b_box[:, :, 3], b_gt[:, :, 3]) - F.maximum(
b_box[:, :, 1], b_gt[:, :, 1]
)
inter = F.maximum(iw, 0) * F.maximum(ih, 0)
area_box = | F.maximum(box[:, 2] - box[:, 0], 0) | megengine.functional.maximum |
import math
import megengine as mge
import megengine.functional as F
import numpy as np
from megengine import Tensor
import pdb
def restore_bbox(rois, deltas, unnormalize=True, config = None):
assert deltas.ndim == 3
if unnormalize:
std_opr = mge.tensor(config.bbox_normalize_stds.reshape(1, 1, -1))
mean_opr = mge.tensor(config.bbox_normalize_means.reshape(1, 1, -1))
deltas = deltas * std_opr
deltas = deltas + mean_opr
# n = deltas.shape[1]
n, c = deltas.shape[0], deltas.shape[1]
all_rois = F.broadcast_to(F.expand_dims(rois, 1), (n, c, rois.shape[1])).reshape(-1, rois.shape[1])
deltas = deltas.reshape(-1, deltas.shape[2])
pred_bbox = bbox_transform_inv_opr(all_rois, deltas)
pred_bbox = pred_bbox.reshape(-1, c, pred_bbox.shape[1])
return pred_bbox
def filter_boxes_opr(boxes, min_size):
"""Remove all boxes with any side smaller than min_size."""
wh = boxes[:, 2:4] - boxes[:, 0:2] + 1
keep_mask = F.prod(wh >= min_size, axis = 1).astype(np.float32)
keep_mask = keep_mask + F.equal(keep_mask.sum(), 0).astype(np.float32)
    return keep_mask
def clip_boxes_opr(boxes, im_info):
""" Clip the boxes into the image region."""
w = im_info[1] - 1
h = im_info[0] - 1
boxes[:, 0::4] = boxes[:, 0::4].clamp(min=0, max=w)
boxes[:, 1::4] = boxes[:, 1::4].clamp(min=0, max=h)
boxes[:, 2::4] = boxes[:, 2::4].clamp(min=0, max=w)
boxes[:, 3::4] = boxes[:, 3::4].clamp(min=0, max=h)
return boxes
def bbox_transform_inv_opr(bbox, deltas):
max_delta = math.log(1000.0 / 16)
""" Transforms the learned deltas to the final bbox coordinates, the axis is 1"""
bbox_width = bbox[:, 2] - bbox[:, 0] + 1
bbox_height = bbox[:, 3] - bbox[:, 1] + 1
bbox_ctr_x = bbox[:, 0] + 0.5 * bbox_width
bbox_ctr_y = bbox[:, 1] + 0.5 * bbox_height
pred_ctr_x = bbox_ctr_x + deltas[:, 0] * bbox_width
pred_ctr_y = bbox_ctr_y + deltas[:, 1] * bbox_height
dw = deltas[:, 2]
dh = deltas[:, 3]
dw = F.minimum(dw, max_delta)
dh = F.minimum(dh, max_delta)
pred_width = bbox_width * F.exp(dw)
pred_height = bbox_height * F.exp(dh)
pred_x1 = pred_ctr_x - 0.5 * pred_width
pred_y1 = pred_ctr_y - 0.5 * pred_height
pred_x2 = pred_ctr_x + 0.5 * pred_width
pred_y2 = pred_ctr_y + 0.5 * pred_height
# pred_boxes = F.concat((pred_x1.reshape(-1, 1), pred_y1.reshape(-1, 1),
# pred_x2.reshape(-1, 1), pred_y2.reshape(-1, 1)), axis=1)
pred_boxes = F.stack([pred_x1, pred_y1, pred_x2, pred_y2], axis = 1)
return pred_boxes
def bbox_transform_opr(bbox, gt):
""" Transform the bounding box and ground truth to the loss targets.
The 4 box coordinates are in axis 1"""
bbox_width = bbox[:, 2] - bbox[:, 0] + 1
bbox_height = bbox[:, 3] - bbox[:, 1] + 1
bbox_ctr_x = bbox[:, 0] + 0.5 * bbox_width
bbox_ctr_y = bbox[:, 1] + 0.5 * bbox_height
gt_width = gt[:, 2] - gt[:, 0] + 1
gt_height = gt[:, 3] - gt[:, 1] + 1
gt_ctr_x = gt[:, 0] + 0.5 * gt_width
gt_ctr_y = gt[:, 1] + 0.5 * gt_height
target_dx = (gt_ctr_x - bbox_ctr_x) / bbox_width
target_dy = (gt_ctr_y - bbox_ctr_y) / bbox_height
target_dw = F.log(gt_width / bbox_width)
target_dh = F.log(gt_height / bbox_height)
target = F.stack([target_dx, target_dy, target_dw, target_dh], axis=1)
return target
def box_overlap_opr(box: Tensor, gt: Tensor) -> Tensor:
"""
Given two lists of boxes of size N and M,
compute the IoU (intersection over union)
between __all__ N x M pairs of boxes.
The box order must be (xmin, ymin, xmax, ymax).
Args:
boxes1,boxes2 (Boxes): two `Boxes`. Contains N & M boxes, respectively.
Returns:
Tensor: IoU, sized [N,M].
"""
# box = boxes1
# gt = boxes2
# target_shape = (boxes1.shape[0], boxes2.shape[0], 4)
N, K = box.shape[0], gt.shape[0]
b_box = F.broadcast_to(F.expand_dims(box, 1),(N, K, box.shape[1]))
b_gt = F.broadcast_to(F.expand_dims(gt, 0), (N, K, gt.shape[1]))
# b_gt = F.expand_dims(gt, 0).broadcast_to(N, K, gt.shape[1])
# b_box = F.expand_dims(boxes1, 1).broadcast(*target_shape)
# b_gt = F.expand_dims(boxes2, 0).broadcast(*target_shape)
iw = F.minimum(b_box[:, :, 2], b_gt[:, :, 2]) - F.maximum(
b_box[:, :, 0], b_gt[:, :, 0]
)
ih = F.minimum(b_box[:, :, 3], b_gt[:, :, 3]) - F.maximum(
b_box[:, :, 1], b_gt[:, :, 1]
)
inter = F.maximum(iw, 0) * F.maximum(ih, 0)
area_box = F.maximum(box[:, 2] - box[:, 0], 0) * | F.maximum(box[:, 3] - box[:, 1], 0) | megengine.functional.maximum |
import math
import megengine as mge
import megengine.functional as F
import numpy as np
from megengine import Tensor
import pdb
def restore_bbox(rois, deltas, unnormalize=True, config = None):
assert deltas.ndim == 3
if unnormalize:
std_opr = mge.tensor(config.bbox_normalize_stds.reshape(1, 1, -1))
mean_opr = mge.tensor(config.bbox_normalize_means.reshape(1, 1, -1))
deltas = deltas * std_opr
deltas = deltas + mean_opr
# n = deltas.shape[1]
n, c = deltas.shape[0], deltas.shape[1]
all_rois = F.broadcast_to(F.expand_dims(rois, 1), (n, c, rois.shape[1])).reshape(-1, rois.shape[1])
deltas = deltas.reshape(-1, deltas.shape[2])
pred_bbox = bbox_transform_inv_opr(all_rois, deltas)
pred_bbox = pred_bbox.reshape(-1, c, pred_bbox.shape[1])
return pred_bbox
def filter_boxes_opr(boxes, min_size):
"""Remove all boxes with any side smaller than min_size."""
wh = boxes[:, 2:4] - boxes[:, 0:2] + 1
keep_mask = F.prod(wh >= min_size, axis = 1).astype(np.float32)
keep_mask = keep_mask + F.equal(keep_mask.sum(), 0).astype(np.float32)
    return keep_mask
def clip_boxes_opr(boxes, im_info):
""" Clip the boxes into the image region."""
w = im_info[1] - 1
h = im_info[0] - 1
boxes[:, 0::4] = boxes[:, 0::4].clamp(min=0, max=w)
boxes[:, 1::4] = boxes[:, 1::4].clamp(min=0, max=h)
boxes[:, 2::4] = boxes[:, 2::4].clamp(min=0, max=w)
boxes[:, 3::4] = boxes[:, 3::4].clamp(min=0, max=h)
return boxes
def bbox_transform_inv_opr(bbox, deltas):
max_delta = math.log(1000.0 / 16)
""" Transforms the learned deltas to the final bbox coordinates, the axis is 1"""
bbox_width = bbox[:, 2] - bbox[:, 0] + 1
bbox_height = bbox[:, 3] - bbox[:, 1] + 1
bbox_ctr_x = bbox[:, 0] + 0.5 * bbox_width
bbox_ctr_y = bbox[:, 1] + 0.5 * bbox_height
pred_ctr_x = bbox_ctr_x + deltas[:, 0] * bbox_width
pred_ctr_y = bbox_ctr_y + deltas[:, 1] * bbox_height
dw = deltas[:, 2]
dh = deltas[:, 3]
dw = F.minimum(dw, max_delta)
dh = F.minimum(dh, max_delta)
pred_width = bbox_width * F.exp(dw)
pred_height = bbox_height * F.exp(dh)
pred_x1 = pred_ctr_x - 0.5 * pred_width
pred_y1 = pred_ctr_y - 0.5 * pred_height
pred_x2 = pred_ctr_x + 0.5 * pred_width
pred_y2 = pred_ctr_y + 0.5 * pred_height
# pred_boxes = F.concat((pred_x1.reshape(-1, 1), pred_y1.reshape(-1, 1),
# pred_x2.reshape(-1, 1), pred_y2.reshape(-1, 1)), axis=1)
pred_boxes = F.stack([pred_x1, pred_y1, pred_x2, pred_y2], axis = 1)
return pred_boxes
def bbox_transform_opr(bbox, gt):
""" Transform the bounding box and ground truth to the loss targets.
The 4 box coordinates are in axis 1"""
bbox_width = bbox[:, 2] - bbox[:, 0] + 1
bbox_height = bbox[:, 3] - bbox[:, 1] + 1
bbox_ctr_x = bbox[:, 0] + 0.5 * bbox_width
bbox_ctr_y = bbox[:, 1] + 0.5 * bbox_height
gt_width = gt[:, 2] - gt[:, 0] + 1
gt_height = gt[:, 3] - gt[:, 1] + 1
gt_ctr_x = gt[:, 0] + 0.5 * gt_width
gt_ctr_y = gt[:, 1] + 0.5 * gt_height
target_dx = (gt_ctr_x - bbox_ctr_x) / bbox_width
target_dy = (gt_ctr_y - bbox_ctr_y) / bbox_height
target_dw = F.log(gt_width / bbox_width)
target_dh = F.log(gt_height / bbox_height)
target = F.stack([target_dx, target_dy, target_dw, target_dh], axis=1)
return target
def box_overlap_opr(box: Tensor, gt: Tensor) -> Tensor:
"""
Given two lists of boxes of size N and M,
compute the IoU (intersection over union)
between __all__ N x M pairs of boxes.
The box order must be (xmin, ymin, xmax, ymax).
Args:
boxes1,boxes2 (Boxes): two `Boxes`. Contains N & M boxes, respectively.
Returns:
Tensor: IoU, sized [N,M].
"""
# box = boxes1
# gt = boxes2
# target_shape = (boxes1.shape[0], boxes2.shape[0], 4)
N, K = box.shape[0], gt.shape[0]
b_box = F.broadcast_to(F.expand_dims(box, 1),(N, K, box.shape[1]))
b_gt = F.broadcast_to(F.expand_dims(gt, 0), (N, K, gt.shape[1]))
# b_gt = F.expand_dims(gt, 0).broadcast_to(N, K, gt.shape[1])
# b_box = F.expand_dims(boxes1, 1).broadcast(*target_shape)
# b_gt = F.expand_dims(boxes2, 0).broadcast(*target_shape)
iw = F.minimum(b_box[:, :, 2], b_gt[:, :, 2]) - F.maximum(
b_box[:, :, 0], b_gt[:, :, 0]
)
ih = F.minimum(b_box[:, :, 3], b_gt[:, :, 3]) - F.maximum(
b_box[:, :, 1], b_gt[:, :, 1]
)
inter = F.maximum(iw, 0) * F.maximum(ih, 0)
area_box = F.maximum(box[:, 2] - box[:, 0], 0) * F.maximum(box[:, 3] - box[:, 1], 0)
area_gt = | F.maximum(gt[:, 2] - gt[:, 0], 0) | megengine.functional.maximum |
import math
import megengine as mge
import megengine.functional as F
import numpy as np
from megengine import Tensor
import pdb
def restore_bbox(rois, deltas, unnormalize=True, config = None):
assert deltas.ndim == 3
if unnormalize:
std_opr = mge.tensor(config.bbox_normalize_stds.reshape(1, 1, -1))
mean_opr = mge.tensor(config.bbox_normalize_means.reshape(1, 1, -1))
deltas = deltas * std_opr
deltas = deltas + mean_opr
# n = deltas.shape[1]
n, c = deltas.shape[0], deltas.shape[1]
all_rois = F.broadcast_to(F.expand_dims(rois, 1), (n, c, rois.shape[1])).reshape(-1, rois.shape[1])
deltas = deltas.reshape(-1, deltas.shape[2])
pred_bbox = bbox_transform_inv_opr(all_rois, deltas)
pred_bbox = pred_bbox.reshape(-1, c, pred_bbox.shape[1])
return pred_bbox
def filter_boxes_opr(boxes, min_size):
"""Remove all boxes with any side smaller than min_size."""
wh = boxes[:, 2:4] - boxes[:, 0:2] + 1
keep_mask = F.prod(wh >= min_size, axis = 1).astype(np.float32)
keep_mask = keep_mask + F.equal(keep_mask.sum(), 0).astype(np.float32)
    return keep_mask
def clip_boxes_opr(boxes, im_info):
""" Clip the boxes into the image region."""
w = im_info[1] - 1
h = im_info[0] - 1
boxes[:, 0::4] = boxes[:, 0::4].clamp(min=0, max=w)
boxes[:, 1::4] = boxes[:, 1::4].clamp(min=0, max=h)
boxes[:, 2::4] = boxes[:, 2::4].clamp(min=0, max=w)
boxes[:, 3::4] = boxes[:, 3::4].clamp(min=0, max=h)
return boxes
def bbox_transform_inv_opr(bbox, deltas):
max_delta = math.log(1000.0 / 16)
""" Transforms the learned deltas to the final bbox coordinates, the axis is 1"""
bbox_width = bbox[:, 2] - bbox[:, 0] + 1
bbox_height = bbox[:, 3] - bbox[:, 1] + 1
bbox_ctr_x = bbox[:, 0] + 0.5 * bbox_width
bbox_ctr_y = bbox[:, 1] + 0.5 * bbox_height
pred_ctr_x = bbox_ctr_x + deltas[:, 0] * bbox_width
pred_ctr_y = bbox_ctr_y + deltas[:, 1] * bbox_height
dw = deltas[:, 2]
dh = deltas[:, 3]
dw = F.minimum(dw, max_delta)
dh = F.minimum(dh, max_delta)
pred_width = bbox_width * F.exp(dw)
pred_height = bbox_height * F.exp(dh)
pred_x1 = pred_ctr_x - 0.5 * pred_width
pred_y1 = pred_ctr_y - 0.5 * pred_height
pred_x2 = pred_ctr_x + 0.5 * pred_width
pred_y2 = pred_ctr_y + 0.5 * pred_height
# pred_boxes = F.concat((pred_x1.reshape(-1, 1), pred_y1.reshape(-1, 1),
# pred_x2.reshape(-1, 1), pred_y2.reshape(-1, 1)), axis=1)
pred_boxes = F.stack([pred_x1, pred_y1, pred_x2, pred_y2], axis = 1)
return pred_boxes
def bbox_transform_opr(bbox, gt):
""" Transform the bounding box and ground truth to the loss targets.
The 4 box coordinates are in axis 1"""
bbox_width = bbox[:, 2] - bbox[:, 0] + 1
bbox_height = bbox[:, 3] - bbox[:, 1] + 1
bbox_ctr_x = bbox[:, 0] + 0.5 * bbox_width
bbox_ctr_y = bbox[:, 1] + 0.5 * bbox_height
gt_width = gt[:, 2] - gt[:, 0] + 1
gt_height = gt[:, 3] - gt[:, 1] + 1
gt_ctr_x = gt[:, 0] + 0.5 * gt_width
gt_ctr_y = gt[:, 1] + 0.5 * gt_height
target_dx = (gt_ctr_x - bbox_ctr_x) / bbox_width
target_dy = (gt_ctr_y - bbox_ctr_y) / bbox_height
target_dw = F.log(gt_width / bbox_width)
target_dh = F.log(gt_height / bbox_height)
target = F.stack([target_dx, target_dy, target_dw, target_dh], axis=1)
return target
def box_overlap_opr(box: Tensor, gt: Tensor) -> Tensor:
"""
Given two lists of boxes of size N and M,
compute the IoU (intersection over union)
between __all__ N x M pairs of boxes.
The box order must be (xmin, ymin, xmax, ymax).
Args:
boxes1,boxes2 (Boxes): two `Boxes`. Contains N & M boxes, respectively.
Returns:
Tensor: IoU, sized [N,M].
"""
# box = boxes1
# gt = boxes2
# target_shape = (boxes1.shape[0], boxes2.shape[0], 4)
N, K = box.shape[0], gt.shape[0]
b_box = F.broadcast_to(F.expand_dims(box, 1),(N, K, box.shape[1]))
b_gt = F.broadcast_to(F.expand_dims(gt, 0), (N, K, gt.shape[1]))
# b_gt = F.expand_dims(gt, 0).broadcast_to(N, K, gt.shape[1])
# b_box = F.expand_dims(boxes1, 1).broadcast(*target_shape)
# b_gt = F.expand_dims(boxes2, 0).broadcast(*target_shape)
iw = F.minimum(b_box[:, :, 2], b_gt[:, :, 2]) - F.maximum(
b_box[:, :, 0], b_gt[:, :, 0]
)
ih = F.minimum(b_box[:, :, 3], b_gt[:, :, 3]) - F.maximum(
b_box[:, :, 1], b_gt[:, :, 1]
)
inter = F.maximum(iw, 0) * F.maximum(ih, 0)
area_box = F.maximum(box[:, 2] - box[:, 0], 0) * F.maximum(box[:, 3] - box[:, 1], 0)
area_gt = F.maximum(gt[:, 2] - gt[:, 0], 0) * | F.maximum(gt[:, 3] - gt[:, 1], 0) | megengine.functional.maximum |
import math
import megengine as mge
import megengine.functional as F
import numpy as np
from megengine import Tensor
import pdb
def restore_bbox(rois, deltas, unnormalize=True, config = None):
assert deltas.ndim == 3
if unnormalize:
std_opr = mge.tensor(config.bbox_normalize_stds.reshape(1, 1, -1))
mean_opr = mge.tensor(config.bbox_normalize_means.reshape(1, 1, -1))
deltas = deltas * std_opr
deltas = deltas + mean_opr
# n = deltas.shape[1]
n, c = deltas.shape[0], deltas.shape[1]
all_rois = F.broadcast_to(F.expand_dims(rois, 1), (n, c, rois.shape[1])).reshape(-1, rois.shape[1])
deltas = deltas.reshape(-1, deltas.shape[2])
pred_bbox = bbox_transform_inv_opr(all_rois, deltas)
pred_bbox = pred_bbox.reshape(-1, c, pred_bbox.shape[1])
return pred_bbox
def filter_boxes_opr(boxes, min_size):
"""Remove all boxes with any side smaller than min_size."""
wh = boxes[:, 2:4] - boxes[:, 0:2] + 1
keep_mask = F.prod(wh >= min_size, axis = 1).astype(np.float32)
keep_mask = keep_mask + F.equal(keep_mask.sum(), 0).astype(np.float32)
    return keep_mask
def clip_boxes_opr(boxes, im_info):
""" Clip the boxes into the image region."""
w = im_info[1] - 1
h = im_info[0] - 1
boxes[:, 0::4] = boxes[:, 0::4].clamp(min=0, max=w)
boxes[:, 1::4] = boxes[:, 1::4].clamp(min=0, max=h)
boxes[:, 2::4] = boxes[:, 2::4].clamp(min=0, max=w)
boxes[:, 3::4] = boxes[:, 3::4].clamp(min=0, max=h)
return boxes
def bbox_transform_inv_opr(bbox, deltas):
max_delta = math.log(1000.0 / 16)
""" Transforms the learned deltas to the final bbox coordinates, the axis is 1"""
bbox_width = bbox[:, 2] - bbox[:, 0] + 1
bbox_height = bbox[:, 3] - bbox[:, 1] + 1
bbox_ctr_x = bbox[:, 0] + 0.5 * bbox_width
bbox_ctr_y = bbox[:, 1] + 0.5 * bbox_height
pred_ctr_x = bbox_ctr_x + deltas[:, 0] * bbox_width
pred_ctr_y = bbox_ctr_y + deltas[:, 1] * bbox_height
dw = deltas[:, 2]
dh = deltas[:, 3]
dw = F.minimum(dw, max_delta)
dh = F.minimum(dh, max_delta)
pred_width = bbox_width * F.exp(dw)
pred_height = bbox_height * F.exp(dh)
pred_x1 = pred_ctr_x - 0.5 * pred_width
pred_y1 = pred_ctr_y - 0.5 * pred_height
pred_x2 = pred_ctr_x + 0.5 * pred_width
pred_y2 = pred_ctr_y + 0.5 * pred_height
# pred_boxes = F.concat((pred_x1.reshape(-1, 1), pred_y1.reshape(-1, 1),
# pred_x2.reshape(-1, 1), pred_y2.reshape(-1, 1)), axis=1)
pred_boxes = F.stack([pred_x1, pred_y1, pred_x2, pred_y2], axis = 1)
return pred_boxes
def bbox_transform_opr(bbox, gt):
""" Transform the bounding box and ground truth to the loss targets.
The 4 box coordinates are in axis 1"""
bbox_width = bbox[:, 2] - bbox[:, 0] + 1
bbox_height = bbox[:, 3] - bbox[:, 1] + 1
bbox_ctr_x = bbox[:, 0] + 0.5 * bbox_width
bbox_ctr_y = bbox[:, 1] + 0.5 * bbox_height
gt_width = gt[:, 2] - gt[:, 0] + 1
gt_height = gt[:, 3] - gt[:, 1] + 1
gt_ctr_x = gt[:, 0] + 0.5 * gt_width
gt_ctr_y = gt[:, 1] + 0.5 * gt_height
target_dx = (gt_ctr_x - bbox_ctr_x) / bbox_width
target_dy = (gt_ctr_y - bbox_ctr_y) / bbox_height
target_dw = F.log(gt_width / bbox_width)
target_dh = F.log(gt_height / bbox_height)
target = F.stack([target_dx, target_dy, target_dw, target_dh], axis=1)
return target
def box_overlap_opr(box: Tensor, gt: Tensor) -> Tensor:
"""
Given two lists of boxes of size N and M,
compute the IoU (intersection over union)
between __all__ N x M pairs of boxes.
The box order must be (xmin, ymin, xmax, ymax).
Args:
boxes1,boxes2 (Boxes): two `Boxes`. Contains N & M boxes, respectively.
Returns:
Tensor: IoU, sized [N,M].
"""
# box = boxes1
# gt = boxes2
# target_shape = (boxes1.shape[0], boxes2.shape[0], 4)
N, K = box.shape[0], gt.shape[0]
b_box = F.broadcast_to(F.expand_dims(box, 1),(N, K, box.shape[1]))
b_gt = F.broadcast_to(F.expand_dims(gt, 0), (N, K, gt.shape[1]))
# b_gt = F.expand_dims(gt, 0).broadcast_to(N, K, gt.shape[1])
# b_box = F.expand_dims(boxes1, 1).broadcast(*target_shape)
# b_gt = F.expand_dims(boxes2, 0).broadcast(*target_shape)
iw = F.minimum(b_box[:, :, 2], b_gt[:, :, 2]) - F.maximum(
b_box[:, :, 0], b_gt[:, :, 0]
)
ih = F.minimum(b_box[:, :, 3], b_gt[:, :, 3]) - F.maximum(
b_box[:, :, 1], b_gt[:, :, 1]
)
inter = F.maximum(iw, 0) * F.maximum(ih, 0)
area_box = F.maximum(box[:, 2] - box[:, 0], 0) * F.maximum(box[:, 3] - box[:, 1], 0)
area_gt = F.maximum(gt[:, 2] - gt[:, 0], 0) * F.maximum(gt[:, 3] - gt[:, 1], 0)
# area_target_shape = (box.shape[0], gt.shapeof()[0])
b_area_box = F.broadcast_to( | F.expand_dims(area_box, 1) | megengine.functional.expand_dims |
import math
import megengine as mge
import megengine.functional as F
import numpy as np
from megengine import Tensor
import pdb
def restore_bbox(rois, deltas, unnormalize=True, config = None):
assert deltas.ndim == 3
if unnormalize:
std_opr = mge.tensor(config.bbox_normalize_stds.reshape(1, 1, -1))
mean_opr = mge.tensor(config.bbox_normalize_means.reshape(1, 1, -1))
deltas = deltas * std_opr
deltas = deltas + mean_opr
# n = deltas.shape[1]
n, c = deltas.shape[0], deltas.shape[1]
all_rois = F.broadcast_to(F.expand_dims(rois, 1), (n, c, rois.shape[1])).reshape(-1, rois.shape[1])
deltas = deltas.reshape(-1, deltas.shape[2])
pred_bbox = bbox_transform_inv_opr(all_rois, deltas)
pred_bbox = pred_bbox.reshape(-1, c, pred_bbox.shape[1])
return pred_bbox
def filter_boxes_opr(boxes, min_size):
"""Remove all boxes with any side smaller than min_size."""
wh = boxes[:, 2:4] - boxes[:, 0:2] + 1
keep_mask = F.prod(wh >= min_size, axis = 1).astype(np.float32)
keep_mask = keep_mask + F.equal(keep_mask.sum(), 0).astype(np.float32)
    return keep_mask
def clip_boxes_opr(boxes, im_info):
""" Clip the boxes into the image region."""
w = im_info[1] - 1
h = im_info[0] - 1
boxes[:, 0::4] = boxes[:, 0::4].clamp(min=0, max=w)
boxes[:, 1::4] = boxes[:, 1::4].clamp(min=0, max=h)
boxes[:, 2::4] = boxes[:, 2::4].clamp(min=0, max=w)
boxes[:, 3::4] = boxes[:, 3::4].clamp(min=0, max=h)
return boxes
def bbox_transform_inv_opr(bbox, deltas):
max_delta = math.log(1000.0 / 16)
""" Transforms the learned deltas to the final bbox coordinates, the axis is 1"""
bbox_width = bbox[:, 2] - bbox[:, 0] + 1
bbox_height = bbox[:, 3] - bbox[:, 1] + 1
bbox_ctr_x = bbox[:, 0] + 0.5 * bbox_width
bbox_ctr_y = bbox[:, 1] + 0.5 * bbox_height
pred_ctr_x = bbox_ctr_x + deltas[:, 0] * bbox_width
pred_ctr_y = bbox_ctr_y + deltas[:, 1] * bbox_height
dw = deltas[:, 2]
dh = deltas[:, 3]
dw = F.minimum(dw, max_delta)
dh = F.minimum(dh, max_delta)
pred_width = bbox_width * F.exp(dw)
pred_height = bbox_height * F.exp(dh)
pred_x1 = pred_ctr_x - 0.5 * pred_width
pred_y1 = pred_ctr_y - 0.5 * pred_height
pred_x2 = pred_ctr_x + 0.5 * pred_width
pred_y2 = pred_ctr_y + 0.5 * pred_height
# pred_boxes = F.concat((pred_x1.reshape(-1, 1), pred_y1.reshape(-1, 1),
# pred_x2.reshape(-1, 1), pred_y2.reshape(-1, 1)), axis=1)
pred_boxes = F.stack([pred_x1, pred_y1, pred_x2, pred_y2], axis = 1)
return pred_boxes
def bbox_transform_opr(bbox, gt):
""" Transform the bounding box and ground truth to the loss targets.
The 4 box coordinates are in axis 1"""
bbox_width = bbox[:, 2] - bbox[:, 0] + 1
bbox_height = bbox[:, 3] - bbox[:, 1] + 1
bbox_ctr_x = bbox[:, 0] + 0.5 * bbox_width
bbox_ctr_y = bbox[:, 1] + 0.5 * bbox_height
gt_width = gt[:, 2] - gt[:, 0] + 1
gt_height = gt[:, 3] - gt[:, 1] + 1
gt_ctr_x = gt[:, 0] + 0.5 * gt_width
gt_ctr_y = gt[:, 1] + 0.5 * gt_height
target_dx = (gt_ctr_x - bbox_ctr_x) / bbox_width
target_dy = (gt_ctr_y - bbox_ctr_y) / bbox_height
target_dw = F.log(gt_width / bbox_width)
target_dh = F.log(gt_height / bbox_height)
target = F.stack([target_dx, target_dy, target_dw, target_dh], axis=1)
return target
def box_overlap_opr(box: Tensor, gt: Tensor) -> Tensor:
"""
Given two lists of boxes of size N and M,
compute the IoU (intersection over union)
between __all__ N x M pairs of boxes.
The box order must be (xmin, ymin, xmax, ymax).
Args:
boxes1,boxes2 (Boxes): two `Boxes`. Contains N & M boxes, respectively.
Returns:
Tensor: IoU, sized [N,M].
"""
# box = boxes1
# gt = boxes2
# target_shape = (boxes1.shape[0], boxes2.shape[0], 4)
N, K = box.shape[0], gt.shape[0]
b_box = F.broadcast_to(F.expand_dims(box, 1),(N, K, box.shape[1]))
b_gt = F.broadcast_to(F.expand_dims(gt, 0), (N, K, gt.shape[1]))
# b_gt = F.expand_dims(gt, 0).broadcast_to(N, K, gt.shape[1])
# b_box = F.expand_dims(boxes1, 1).broadcast(*target_shape)
# b_gt = F.expand_dims(boxes2, 0).broadcast(*target_shape)
iw = F.minimum(b_box[:, :, 2], b_gt[:, :, 2]) - F.maximum(
b_box[:, :, 0], b_gt[:, :, 0]
)
ih = F.minimum(b_box[:, :, 3], b_gt[:, :, 3]) - F.maximum(
b_box[:, :, 1], b_gt[:, :, 1]
)
inter = F.maximum(iw, 0) * F.maximum(ih, 0)
area_box = F.maximum(box[:, 2] - box[:, 0], 0) * F.maximum(box[:, 3] - box[:, 1], 0)
area_gt = F.maximum(gt[:, 2] - gt[:, 0], 0) * F.maximum(gt[:, 3] - gt[:, 1], 0)
# area_target_shape = (box.shape[0], gt.shapeof()[0])
b_area_box = F.broadcast_to(F.expand_dims(area_box, 1), (N, K))
b_area_gt = F.broadcast_to( | F.expand_dims(area_gt, 0) | megengine.functional.expand_dims |
import math
import megengine as mge
import megengine.functional as F
import numpy as np
from megengine import Tensor
import pdb
def restore_bbox(rois, deltas, unnormalize=True, config = None):
assert deltas.ndim == 3
if unnormalize:
std_opr = mge.tensor(config.bbox_normalize_stds.reshape(1, 1, -1))
mean_opr = mge.tensor(config.bbox_normalize_means.reshape(1, 1, -1))
deltas = deltas * std_opr
deltas = deltas + mean_opr
# n = deltas.shape[1]
n, c = deltas.shape[0], deltas.shape[1]
all_rois = F.broadcast_to(F.expand_dims(rois, 1), (n, c, rois.shape[1])).reshape(-1, rois.shape[1])
deltas = deltas.reshape(-1, deltas.shape[2])
pred_bbox = bbox_transform_inv_opr(all_rois, deltas)
pred_bbox = pred_bbox.reshape(-1, c, pred_bbox.shape[1])
return pred_bbox
def filter_boxes_opr(boxes, min_size):
"""Remove all boxes with any side smaller than min_size."""
wh = boxes[:, 2:4] - boxes[:, 0:2] + 1
keep_mask = F.prod(wh >= min_size, axis = 1).astype(np.float32)
keep_mask = keep_mask + F.equal(keep_mask.sum(), 0).astype(np.float32)
    return keep_mask
def clip_boxes_opr(boxes, im_info):
""" Clip the boxes into the image region."""
w = im_info[1] - 1
h = im_info[0] - 1
boxes[:, 0::4] = boxes[:, 0::4].clamp(min=0, max=w)
boxes[:, 1::4] = boxes[:, 1::4].clamp(min=0, max=h)
boxes[:, 2::4] = boxes[:, 2::4].clamp(min=0, max=w)
boxes[:, 3::4] = boxes[:, 3::4].clamp(min=0, max=h)
return boxes
def bbox_transform_inv_opr(bbox, deltas):
max_delta = math.log(1000.0 / 16)
""" Transforms the learned deltas to the final bbox coordinates, the axis is 1"""
bbox_width = bbox[:, 2] - bbox[:, 0] + 1
bbox_height = bbox[:, 3] - bbox[:, 1] + 1
bbox_ctr_x = bbox[:, 0] + 0.5 * bbox_width
bbox_ctr_y = bbox[:, 1] + 0.5 * bbox_height
pred_ctr_x = bbox_ctr_x + deltas[:, 0] * bbox_width
pred_ctr_y = bbox_ctr_y + deltas[:, 1] * bbox_height
dw = deltas[:, 2]
dh = deltas[:, 3]
dw = F.minimum(dw, max_delta)
dh = F.minimum(dh, max_delta)
pred_width = bbox_width * F.exp(dw)
pred_height = bbox_height * F.exp(dh)
pred_x1 = pred_ctr_x - 0.5 * pred_width
pred_y1 = pred_ctr_y - 0.5 * pred_height
pred_x2 = pred_ctr_x + 0.5 * pred_width
pred_y2 = pred_ctr_y + 0.5 * pred_height
# pred_boxes = F.concat((pred_x1.reshape(-1, 1), pred_y1.reshape(-1, 1),
# pred_x2.reshape(-1, 1), pred_y2.reshape(-1, 1)), axis=1)
pred_boxes = F.stack([pred_x1, pred_y1, pred_x2, pred_y2], axis = 1)
return pred_boxes
def bbox_transform_opr(bbox, gt):
""" Transform the bounding box and ground truth to the loss targets.
The 4 box coordinates are in axis 1"""
bbox_width = bbox[:, 2] - bbox[:, 0] + 1
bbox_height = bbox[:, 3] - bbox[:, 1] + 1
bbox_ctr_x = bbox[:, 0] + 0.5 * bbox_width
bbox_ctr_y = bbox[:, 1] + 0.5 * bbox_height
gt_width = gt[:, 2] - gt[:, 0] + 1
gt_height = gt[:, 3] - gt[:, 1] + 1
gt_ctr_x = gt[:, 0] + 0.5 * gt_width
gt_ctr_y = gt[:, 1] + 0.5 * gt_height
target_dx = (gt_ctr_x - bbox_ctr_x) / bbox_width
target_dy = (gt_ctr_y - bbox_ctr_y) / bbox_height
target_dw = F.log(gt_width / bbox_width)
target_dh = F.log(gt_height / bbox_height)
target = F.stack([target_dx, target_dy, target_dw, target_dh], axis=1)
return target
def box_overlap_opr(box: Tensor, gt: Tensor) -> Tensor:
"""
Given two lists of boxes of size N and M,
compute the IoU (intersection over union)
between __all__ N x M pairs of boxes.
The box order must be (xmin, ymin, xmax, ymax).
Args:
boxes1,boxes2 (Boxes): two `Boxes`. Contains N & M boxes, respectively.
Returns:
Tensor: IoU, sized [N,M].
"""
# box = boxes1
# gt = boxes2
# target_shape = (boxes1.shape[0], boxes2.shape[0], 4)
N, K = box.shape[0], gt.shape[0]
b_box = F.broadcast_to(F.expand_dims(box, 1),(N, K, box.shape[1]))
b_gt = F.broadcast_to(F.expand_dims(gt, 0), (N, K, gt.shape[1]))
# b_gt = F.expand_dims(gt, 0).broadcast_to(N, K, gt.shape[1])
# b_box = F.expand_dims(boxes1, 1).broadcast(*target_shape)
# b_gt = F.expand_dims(boxes2, 0).broadcast(*target_shape)
iw = F.minimum(b_box[:, :, 2], b_gt[:, :, 2]) - F.maximum(
b_box[:, :, 0], b_gt[:, :, 0]
)
ih = F.minimum(b_box[:, :, 3], b_gt[:, :, 3]) - F.maximum(
b_box[:, :, 1], b_gt[:, :, 1]
)
inter = F.maximum(iw, 0) * F.maximum(ih, 0)
area_box = F.maximum(box[:, 2] - box[:, 0], 0) * F.maximum(box[:, 3] - box[:, 1], 0)
area_gt = F.maximum(gt[:, 2] - gt[:, 0], 0) * F.maximum(gt[:, 3] - gt[:, 1], 0)
# area_target_shape = (box.shape[0], gt.shapeof()[0])
b_area_box = F.broadcast_to(F.expand_dims(area_box, 1), (N, K))
b_area_gt = F.broadcast_to(F.expand_dims(area_gt, 0), (N, K))
# b_area_box = F.expand_dims(area_box, 1).broadcast_to(N, K)
# b_area_gt = F.expand_dims(area_gt, 0).broadcast_to(N, K)
# b_area_box = F.add_axis(area_box, 1).broadcast(*area_target_shape)
# b_area_gt = F.add_axis(area_gt, 0).broadcast(*area_target_shape)
union = b_area_box + b_area_gt - inter
overlaps = F.maximum(inter / union, 0)
return overlaps
def box_overlap_ignore_opr(box: Tensor, gt: Tensor, ignore_label=-1) -> Tensor:
"""
Given two lists of boxes of size N and M,
compute the IoU (intersection over union)
between __all__ N x M pairs of boxes.
The box order must be (xmin, ymin, xmax, ymax).
Args:
boxes1,boxes2 (Boxes): two `Boxes`. Contains N & M boxes, respectively.
Returns:
Tensor: IoU, sized [N,M].
"""
# box = boxes1
# gt = boxes2
# target_shape = (boxes1.shapeof()[0], boxes2.shapeof()[0], 4)
eps = 1e-5
N, K = box.shape[0], gt.shape[0]
b_box = F.broadcast_to( | F.expand_dims(box, 1) | megengine.functional.expand_dims |
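Editorial sketch, not part of the dataset rows: a hypothetical call to the `box_overlap_opr` defined above (assuming it and MegEngine are importable in the same scope), with the IoU worked out by hand from the same formulas.

```python
import numpy as np
import megengine as mge

boxes = mge.tensor(np.array([[0.0, 0.0, 10.0, 10.0]], dtype=np.float32))
gts   = mge.tensor(np.array([[5.0, 5.0, 15.0, 15.0]], dtype=np.float32))
# iw = ih = 5, inter = 25, both areas = 100, union = 175, so IoU = 25 / 175 ≈ 0.143
overlaps = box_overlap_opr(boxes, gts)   # Tensor of shape (1, 1), value ≈ 0.1429
```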
import math
import megengine as mge
import megengine.functional as F
import numpy as np
from megengine import Tensor
import pdb
def restore_bbox(rois, deltas, unnormalize=True, config = None):
assert deltas.ndim == 3
if unnormalize:
std_opr = mge.tensor(config.bbox_normalize_stds.reshape(1, 1, -1))
mean_opr = mge.tensor(config.bbox_normalize_means.reshape(1, 1, -1))
deltas = deltas * std_opr
deltas = deltas + mean_opr
# n = deltas.shape[1]
n, c = deltas.shape[0], deltas.shape[1]
all_rois = F.broadcast_to(F.expand_dims(rois, 1), (n, c, rois.shape[1])).reshape(-1, rois.shape[1])
deltas = deltas.reshape(-1, deltas.shape[2])
pred_bbox = bbox_transform_inv_opr(all_rois, deltas)
pred_bbox = pred_bbox.reshape(-1, c, pred_bbox.shape[1])
return pred_bbox
def filter_boxes_opr(boxes, min_size):
"""Remove all boxes with any side smaller than min_size."""
wh = boxes[:, 2:4] - boxes[:, 0:2] + 1
keep_mask = F.prod(wh >= min_size, axis = 1).astype(np.float32)
keep_mask = keep_mask + F.equal(keep_mask.sum(), 0).astype(np.float32)
    return keep_mask
def clip_boxes_opr(boxes, im_info):
""" Clip the boxes into the image region."""
w = im_info[1] - 1
h = im_info[0] - 1
boxes[:, 0::4] = boxes[:, 0::4].clamp(min=0, max=w)
boxes[:, 1::4] = boxes[:, 1::4].clamp(min=0, max=h)
boxes[:, 2::4] = boxes[:, 2::4].clamp(min=0, max=w)
boxes[:, 3::4] = boxes[:, 3::4].clamp(min=0, max=h)
return boxes
def bbox_transform_inv_opr(bbox, deltas):
max_delta = math.log(1000.0 / 16)
""" Transforms the learned deltas to the final bbox coordinates, the axis is 1"""
bbox_width = bbox[:, 2] - bbox[:, 0] + 1
bbox_height = bbox[:, 3] - bbox[:, 1] + 1
bbox_ctr_x = bbox[:, 0] + 0.5 * bbox_width
bbox_ctr_y = bbox[:, 1] + 0.5 * bbox_height
pred_ctr_x = bbox_ctr_x + deltas[:, 0] * bbox_width
pred_ctr_y = bbox_ctr_y + deltas[:, 1] * bbox_height
dw = deltas[:, 2]
dh = deltas[:, 3]
dw = F.minimum(dw, max_delta)
dh = F.minimum(dh, max_delta)
pred_width = bbox_width * F.exp(dw)
pred_height = bbox_height * F.exp(dh)
pred_x1 = pred_ctr_x - 0.5 * pred_width
pred_y1 = pred_ctr_y - 0.5 * pred_height
pred_x2 = pred_ctr_x + 0.5 * pred_width
pred_y2 = pred_ctr_y + 0.5 * pred_height
# pred_boxes = F.concat((pred_x1.reshape(-1, 1), pred_y1.reshape(-1, 1),
# pred_x2.reshape(-1, 1), pred_y2.reshape(-1, 1)), axis=1)
pred_boxes = F.stack([pred_x1, pred_y1, pred_x2, pred_y2], axis = 1)
return pred_boxes
def bbox_transform_opr(bbox, gt):
""" Transform the bounding box and ground truth to the loss targets.
The 4 box coordinates are in axis 1"""
bbox_width = bbox[:, 2] - bbox[:, 0] + 1
bbox_height = bbox[:, 3] - bbox[:, 1] + 1
bbox_ctr_x = bbox[:, 0] + 0.5 * bbox_width
bbox_ctr_y = bbox[:, 1] + 0.5 * bbox_height
gt_width = gt[:, 2] - gt[:, 0] + 1
gt_height = gt[:, 3] - gt[:, 1] + 1
gt_ctr_x = gt[:, 0] + 0.5 * gt_width
gt_ctr_y = gt[:, 1] + 0.5 * gt_height
target_dx = (gt_ctr_x - bbox_ctr_x) / bbox_width
target_dy = (gt_ctr_y - bbox_ctr_y) / bbox_height
target_dw = F.log(gt_width / bbox_width)
target_dh = F.log(gt_height / bbox_height)
target = F.stack([target_dx, target_dy, target_dw, target_dh], axis=1)
return target
def box_overlap_opr(box: Tensor, gt: Tensor) -> Tensor:
"""
Given two lists of boxes of size N and M,
compute the IoU (intersection over union)
between __all__ N x M pairs of boxes.
The box order must be (xmin, ymin, xmax, ymax).
Args:
boxes1,boxes2 (Boxes): two `Boxes`. Contains N & M boxes, respectively.
Returns:
Tensor: IoU, sized [N,M].
"""
# box = boxes1
# gt = boxes2
# target_shape = (boxes1.shape[0], boxes2.shape[0], 4)
N, K = box.shape[0], gt.shape[0]
b_box = F.broadcast_to(F.expand_dims(box, 1),(N, K, box.shape[1]))
b_gt = F.broadcast_to(F.expand_dims(gt, 0), (N, K, gt.shape[1]))
# b_gt = F.expand_dims(gt, 0).broadcast_to(N, K, gt.shape[1])
# b_box = F.expand_dims(boxes1, 1).broadcast(*target_shape)
# b_gt = F.expand_dims(boxes2, 0).broadcast(*target_shape)
iw = F.minimum(b_box[:, :, 2], b_gt[:, :, 2]) - F.maximum(
b_box[:, :, 0], b_gt[:, :, 0]
)
ih = F.minimum(b_box[:, :, 3], b_gt[:, :, 3]) - F.maximum(
b_box[:, :, 1], b_gt[:, :, 1]
)
inter = F.maximum(iw, 0) * F.maximum(ih, 0)
area_box = F.maximum(box[:, 2] - box[:, 0], 0) * F.maximum(box[:, 3] - box[:, 1], 0)
area_gt = F.maximum(gt[:, 2] - gt[:, 0], 0) * F.maximum(gt[:, 3] - gt[:, 1], 0)
# area_target_shape = (box.shape[0], gt.shapeof()[0])
b_area_box = F.broadcast_to(F.expand_dims(area_box, 1), (N, K))
b_area_gt = F.broadcast_to(F.expand_dims(area_gt, 0), (N, K))
# b_area_box = F.expand_dims(area_box, 1).broadcast_to(N, K)
# b_area_gt = F.expand_dims(area_gt, 0).broadcast_to(N, K)
# b_area_box = F.add_axis(area_box, 1).broadcast(*area_target_shape)
# b_area_gt = F.add_axis(area_gt, 0).broadcast(*area_target_shape)
union = b_area_box + b_area_gt - inter
overlaps = F.maximum(inter / union, 0)
return overlaps
def box_overlap_ignore_opr(box: Tensor, gt: Tensor, ignore_label=-1) -> Tensor:
"""
Given two lists of boxes of size N and M,
compute the IoU (intersection over union)
between __all__ N x M pairs of boxes.
The box order must be (xmin, ymin, xmax, ymax).
Args:
boxes1,boxes2 (Boxes): two `Boxes`. Contains N & M boxes, respectively.
Returns:
Tensor: IoU, sized [N,M].
"""
# box = boxes1
# gt = boxes2
# target_shape = (boxes1.shapeof()[0], boxes2.shapeof()[0], 4)
eps = 1e-5
N, K = box.shape[0], gt.shape[0]
b_box = F.broadcast_to(F.expand_dims(box, 1), (N, K, box.shape[1]))
b_gt = F.broadcast_to( | F.expand_dims(gt, 0) | megengine.functional.expand_dims |
import math
import megengine as mge
import megengine.functional as F
import numpy as np
from megengine import Tensor
import pdb
def restore_bbox(rois, deltas, unnormalize=True, config = None):
assert deltas.ndim == 3
if unnormalize:
std_opr = mge.tensor(config.bbox_normalize_stds.reshape(1, 1, -1))
mean_opr = mge.tensor(config.bbox_normalize_means.reshape(1, 1, -1))
deltas = deltas * std_opr
deltas = deltas + mean_opr
# n = deltas.shape[1]
n, c = deltas.shape[0], deltas.shape[1]
all_rois = F.broadcast_to(F.expand_dims(rois, 1), (n, c, rois.shape[1])).reshape(-1, rois.shape[1])
deltas = deltas.reshape(-1, deltas.shape[2])
pred_bbox = bbox_transform_inv_opr(all_rois, deltas)
pred_bbox = pred_bbox.reshape(-1, c, pred_bbox.shape[1])
return pred_bbox
def filter_boxes_opr(boxes, min_size):
"""Remove all boxes with any side smaller than min_size."""
wh = boxes[:, 2:4] - boxes[:, 0:2] + 1
keep_mask = F.prod(wh >= min_size, axis = 1).astype(np.float32)
keep_mask = keep_mask + F.equal(keep_mask.sum(), 0).astype(np.float32)
    return keep_mask
def clip_boxes_opr(boxes, im_info):
""" Clip the boxes into the image region."""
w = im_info[1] - 1
h = im_info[0] - 1
boxes[:, 0::4] = boxes[:, 0::4].clamp(min=0, max=w)
boxes[:, 1::4] = boxes[:, 1::4].clamp(min=0, max=h)
boxes[:, 2::4] = boxes[:, 2::4].clamp(min=0, max=w)
boxes[:, 3::4] = boxes[:, 3::4].clamp(min=0, max=h)
return boxes
def bbox_transform_inv_opr(bbox, deltas):
max_delta = math.log(1000.0 / 16)
""" Transforms the learned deltas to the final bbox coordinates, the axis is 1"""
bbox_width = bbox[:, 2] - bbox[:, 0] + 1
bbox_height = bbox[:, 3] - bbox[:, 1] + 1
bbox_ctr_x = bbox[:, 0] + 0.5 * bbox_width
bbox_ctr_y = bbox[:, 1] + 0.5 * bbox_height
pred_ctr_x = bbox_ctr_x + deltas[:, 0] * bbox_width
pred_ctr_y = bbox_ctr_y + deltas[:, 1] * bbox_height
dw = deltas[:, 2]
dh = deltas[:, 3]
dw = F.minimum(dw, max_delta)
dh = F.minimum(dh, max_delta)
pred_width = bbox_width * F.exp(dw)
pred_height = bbox_height * F.exp(dh)
pred_x1 = pred_ctr_x - 0.5 * pred_width
pred_y1 = pred_ctr_y - 0.5 * pred_height
pred_x2 = pred_ctr_x + 0.5 * pred_width
pred_y2 = pred_ctr_y + 0.5 * pred_height
# pred_boxes = F.concat((pred_x1.reshape(-1, 1), pred_y1.reshape(-1, 1),
# pred_x2.reshape(-1, 1), pred_y2.reshape(-1, 1)), axis=1)
pred_boxes = F.stack([pred_x1, pred_y1, pred_x2, pred_y2], axis = 1)
return pred_boxes
def bbox_transform_opr(bbox, gt):
""" Transform the bounding box and ground truth to the loss targets.
The 4 box coordinates are in axis 1"""
bbox_width = bbox[:, 2] - bbox[:, 0] + 1
bbox_height = bbox[:, 3] - bbox[:, 1] + 1
bbox_ctr_x = bbox[:, 0] + 0.5 * bbox_width
bbox_ctr_y = bbox[:, 1] + 0.5 * bbox_height
gt_width = gt[:, 2] - gt[:, 0] + 1
gt_height = gt[:, 3] - gt[:, 1] + 1
gt_ctr_x = gt[:, 0] + 0.5 * gt_width
gt_ctr_y = gt[:, 1] + 0.5 * gt_height
target_dx = (gt_ctr_x - bbox_ctr_x) / bbox_width
target_dy = (gt_ctr_y - bbox_ctr_y) / bbox_height
target_dw = F.log(gt_width / bbox_width)
target_dh = F.log(gt_height / bbox_height)
target = F.stack([target_dx, target_dy, target_dw, target_dh], axis=1)
return target
def box_overlap_opr(box: Tensor, gt: Tensor) -> Tensor:
"""
Given two lists of boxes of size N and M,
compute the IoU (intersection over union)
between __all__ N x M pairs of boxes.
The box order must be (xmin, ymin, xmax, ymax).
Args:
boxes1,boxes2 (Boxes): two `Boxes`. Contains N & M boxes, respectively.
Returns:
Tensor: IoU, sized [N,M].
"""
# box = boxes1
# gt = boxes2
# target_shape = (boxes1.shape[0], boxes2.shape[0], 4)
N, K = box.shape[0], gt.shape[0]
b_box = F.broadcast_to(F.expand_dims(box, 1),(N, K, box.shape[1]))
b_gt = F.broadcast_to(F.expand_dims(gt, 0), (N, K, gt.shape[1]))
# b_gt = F.expand_dims(gt, 0).broadcast_to(N, K, gt.shape[1])
# b_box = F.expand_dims(boxes1, 1).broadcast(*target_shape)
# b_gt = F.expand_dims(boxes2, 0).broadcast(*target_shape)
iw = F.minimum(b_box[:, :, 2], b_gt[:, :, 2]) - F.maximum(
b_box[:, :, 0], b_gt[:, :, 0]
)
ih = F.minimum(b_box[:, :, 3], b_gt[:, :, 3]) - F.maximum(
b_box[:, :, 1], b_gt[:, :, 1]
)
inter = F.maximum(iw, 0) * F.maximum(ih, 0)
area_box = F.maximum(box[:, 2] - box[:, 0], 0) * F.maximum(box[:, 3] - box[:, 1], 0)
area_gt = F.maximum(gt[:, 2] - gt[:, 0], 0) * F.maximum(gt[:, 3] - gt[:, 1], 0)
# area_target_shape = (box.shape[0], gt.shapeof()[0])
b_area_box = F.broadcast_to(F.expand_dims(area_box, 1), (N, K))
b_area_gt = F.broadcast_to(F.expand_dims(area_gt, 0), (N, K))
# b_area_box = F.expand_dims(area_box, 1).broadcast_to(N, K)
# b_area_gt = F.expand_dims(area_gt, 0).broadcast_to(N, K)
# b_area_box = F.add_axis(area_box, 1).broadcast(*area_target_shape)
# b_area_gt = F.add_axis(area_gt, 0).broadcast(*area_target_shape)
union = b_area_box + b_area_gt - inter
overlaps = F.maximum(inter / union, 0)
return overlaps
def box_overlap_ignore_opr(box: Tensor, gt: Tensor, ignore_label=-1) -> Tensor:
"""
Given two lists of boxes of size N and M,
compute the IoU (intersection over union)
between __all__ N x M pairs of boxes.
The box order must be (xmin, ymin, xmax, ymax).
Args:
boxes1,boxes2 (Boxes): two `Boxes`. Contains N & M boxes, respectively.
Returns:
Tensor: IoU, sized [N,M].
"""
# box = boxes1
# gt = boxes2
# target_shape = (boxes1.shapeof()[0], boxes2.shapeof()[0], 4)
eps = 1e-5
N, K = box.shape[0], gt.shape[0]
b_box = F.broadcast_to(F.expand_dims(box, 1), (N, K, box.shape[1]))
b_gt = F.broadcast_to(F.expand_dims(gt, 0), (N, K, gt.shape[1]))
# b_box = F.add_axis(boxes1, 1).broadcast(*target_shape)
# b_gt = F.add_axis(boxes2[:, :4], 0).broadcast(*target_shape)
iw = | F.minimum(b_box[:, :, 2], b_gt[:, :, 2]) | megengine.functional.minimum |
import math
import megengine as mge
import megengine.functional as F
import numpy as np
from megengine import Tensor
import pdb
def restore_bbox(rois, deltas, unnormalize=True, config = None):
assert deltas.ndim == 3
if unnormalize:
std_opr = mge.tensor(config.bbox_normalize_stds.reshape(1, 1, -1))
mean_opr = mge.tensor(config.bbox_normalize_means.reshape(1, 1, -1))
deltas = deltas * std_opr
deltas = deltas + mean_opr
# n = deltas.shape[1]
n, c = deltas.shape[0], deltas.shape[1]
all_rois = F.broadcast_to(F.expand_dims(rois, 1), (n, c, rois.shape[1])).reshape(-1, rois.shape[1])
deltas = deltas.reshape(-1, deltas.shape[2])
pred_bbox = bbox_transform_inv_opr(all_rois, deltas)
pred_bbox = pred_bbox.reshape(-1, c, pred_bbox.shape[1])
return pred_bbox
def filter_boxes_opr(boxes, min_size):
"""Remove all boxes with any side smaller than min_size."""
wh = boxes[:, 2:4] - boxes[:, 0:2] + 1
keep_mask = F.prod(wh >= min_size, axis = 1).astype(np.float32)
keep_mask = keep_mask + F.equal(keep_mask.sum(), 0).astype(np.float32)
    return keep_mask
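# Note: filter_boxes_opr returns a 0/1 float mask rather than indices, and the
# second line above is a fallback that keeps every box when the mask would be
# all zeros, so downstream sorting/NMS never sees an empty candidate set.
# Minimal sketch (pixel coordinates, min_size in pixels):
#
#   boxes = mge.tensor(np.array([[0., 0., 4., 4.], [0., 0., 40., 40.]], dtype=np.float32))
#   mask = filter_boxes_opr(boxes, min_size=16)   # -> [0., 1.]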
def clip_boxes_opr(boxes, im_info):
""" Clip the boxes into the image region."""
w = im_info[1] - 1
h = im_info[0] - 1
boxes[:, 0::4] = boxes[:, 0::4].clamp(min=0, max=w)
boxes[:, 1::4] = boxes[:, 1::4].clamp(min=0, max=h)
boxes[:, 2::4] = boxes[:, 2::4].clamp(min=0, max=w)
boxes[:, 3::4] = boxes[:, 3::4].clamp(min=0, max=h)
return boxes
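# Note: im_info is ordered (height, width, ...), so index 0 is the image height
# and index 1 the width; boxes are clamped to [0, w - 1] x [0, h - 1] and returned.
# Minimal sketch for a 600 x 800 (h, w) image:
#
#   im_info = np.array([600, 800], dtype=np.float32)
#   boxes = mge.tensor(np.array([[-5., 10., 900., 650.]], dtype=np.float32))
#   clipped = clip_boxes_opr(boxes, im_info)   # expected -> [[0., 10., 799., 599.]]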
def bbox_transform_inv_opr(bbox, deltas):
max_delta = math.log(1000.0 / 16)
""" Transforms the learned deltas to the final bbox coordinates, the axis is 1"""
bbox_width = bbox[:, 2] - bbox[:, 0] + 1
bbox_height = bbox[:, 3] - bbox[:, 1] + 1
bbox_ctr_x = bbox[:, 0] + 0.5 * bbox_width
bbox_ctr_y = bbox[:, 1] + 0.5 * bbox_height
pred_ctr_x = bbox_ctr_x + deltas[:, 0] * bbox_width
pred_ctr_y = bbox_ctr_y + deltas[:, 1] * bbox_height
dw = deltas[:, 2]
dh = deltas[:, 3]
dw = F.minimum(dw, max_delta)
dh = F.minimum(dh, max_delta)
pred_width = bbox_width * F.exp(dw)
pred_height = bbox_height * F.exp(dh)
pred_x1 = pred_ctr_x - 0.5 * pred_width
pred_y1 = pred_ctr_y - 0.5 * pred_height
pred_x2 = pred_ctr_x + 0.5 * pred_width
pred_y2 = pred_ctr_y + 0.5 * pred_height
# pred_boxes = F.concat((pred_x1.reshape(-1, 1), pred_y1.reshape(-1, 1),
# pred_x2.reshape(-1, 1), pred_y2.reshape(-1, 1)), axis=1)
pred_boxes = F.stack([pred_x1, pred_y1, pred_x2, pred_y2], axis = 1)
return pred_boxes
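# Note: the max_delta = log(1000 / 16) clamp above bounds exp(dw) and exp(dh) at
# 1000 / 16 = 62.5, so an unbounded predicted delta cannot blow a 16-pixel anchor
# up beyond roughly 1000 pixels (and cannot overflow exp in float32).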
def bbox_transform_opr(bbox, gt):
""" Transform the bounding box and ground truth to the loss targets.
The 4 box coordinates are in axis 1"""
bbox_width = bbox[:, 2] - bbox[:, 0] + 1
bbox_height = bbox[:, 3] - bbox[:, 1] + 1
bbox_ctr_x = bbox[:, 0] + 0.5 * bbox_width
bbox_ctr_y = bbox[:, 1] + 0.5 * bbox_height
gt_width = gt[:, 2] - gt[:, 0] + 1
gt_height = gt[:, 3] - gt[:, 1] + 1
gt_ctr_x = gt[:, 0] + 0.5 * gt_width
gt_ctr_y = gt[:, 1] + 0.5 * gt_height
target_dx = (gt_ctr_x - bbox_ctr_x) / bbox_width
target_dy = (gt_ctr_y - bbox_ctr_y) / bbox_height
target_dw = F.log(gt_width / bbox_width)
target_dh = F.log(gt_height / bbox_height)
target = F.stack([target_dx, target_dy, target_dw, target_dh], axis=1)
return target
def box_overlap_opr(box: Tensor, gt: Tensor) -> Tensor:
"""
Given two lists of boxes of size N and M,
compute the IoU (intersection over union)
between __all__ N x M pairs of boxes.
The box order must be (xmin, ymin, xmax, ymax).
Args:
boxes1,boxes2 (Boxes): two `Boxes`. Contains N & M boxes, respectively.
Returns:
Tensor: IoU, sized [N,M].
"""
# box = boxes1
# gt = boxes2
# target_shape = (boxes1.shape[0], boxes2.shape[0], 4)
N, K = box.shape[0], gt.shape[0]
b_box = F.broadcast_to(F.expand_dims(box, 1),(N, K, box.shape[1]))
b_gt = F.broadcast_to(F.expand_dims(gt, 0), (N, K, gt.shape[1]))
# b_gt = F.expand_dims(gt, 0).broadcast_to(N, K, gt.shape[1])
# b_box = F.expand_dims(boxes1, 1).broadcast(*target_shape)
# b_gt = F.expand_dims(boxes2, 0).broadcast(*target_shape)
iw = F.minimum(b_box[:, :, 2], b_gt[:, :, 2]) - F.maximum(
b_box[:, :, 0], b_gt[:, :, 0]
)
ih = F.minimum(b_box[:, :, 3], b_gt[:, :, 3]) - F.maximum(
b_box[:, :, 1], b_gt[:, :, 1]
)
inter = F.maximum(iw, 0) * F.maximum(ih, 0)
area_box = F.maximum(box[:, 2] - box[:, 0], 0) * F.maximum(box[:, 3] - box[:, 1], 0)
area_gt = F.maximum(gt[:, 2] - gt[:, 0], 0) * F.maximum(gt[:, 3] - gt[:, 1], 0)
# area_target_shape = (box.shape[0], gt.shapeof()[0])
b_area_box = F.broadcast_to(F.expand_dims(area_box, 1), (N, K))
b_area_gt = F.broadcast_to(F.expand_dims(area_gt, 0), (N, K))
# b_area_box = F.expand_dims(area_box, 1).broadcast_to(N, K)
# b_area_gt = F.expand_dims(area_gt, 0).broadcast_to(N, K)
# b_area_box = F.add_axis(area_box, 1).broadcast(*area_target_shape)
# b_area_gt = F.add_axis(area_gt, 0).broadcast(*area_target_shape)
union = b_area_box + b_area_gt - inter
overlaps = F.maximum(inter / union, 0)
return overlaps
def box_overlap_ignore_opr(box: Tensor, gt: Tensor, ignore_label=-1) -> Tensor:
"""
Given two lists of boxes of size N and M,
compute the IoU (intersection over union)
between __all__ N x M pairs of boxes.
The box order must be (xmin, ymin, xmax, ymax).
Args:
boxes1,boxes2 (Boxes): two `Boxes`. Contains N & M boxes, respectively.
Returns:
Tensor: IoU, sized [N,M].
"""
# box = boxes1
# gt = boxes2
# target_shape = (boxes1.shapeof()[0], boxes2.shapeof()[0], 4)
eps = 1e-5
N, K = box.shape[0], gt.shape[0]
b_box = F.broadcast_to(F.expand_dims(box, 1), (N, K, box.shape[1]))
b_gt = F.broadcast_to(F.expand_dims(gt, 0), (N, K, gt.shape[1]))
# b_box = F.add_axis(boxes1, 1).broadcast(*target_shape)
# b_gt = F.add_axis(boxes2[:, :4], 0).broadcast(*target_shape)
iw = F.minimum(b_box[:, :, 2], b_gt[:, :, 2]) - F.maximum(
b_box[:, :, 0], b_gt[:, :, 0]
)
ih = | F.minimum(b_box[:, :, 3], b_gt[:, :, 3]) | megengine.functional.minimum |
import math
import megengine as mge
import megengine.functional as F
import numpy as np
from megengine import Tensor
import pdb
def restore_bbox(rois, deltas, unnormalize=True, config = None):
assert deltas.ndim == 3
if unnormalize:
std_opr = mge.tensor(config.bbox_normalize_stds.reshape(1, 1, -1))
mean_opr = mge.tensor(config.bbox_normalize_means.reshape(1, 1, -1))
deltas = deltas * std_opr
deltas = deltas + mean_opr
# n = deltas.shape[1]
n, c = deltas.shape[0], deltas.shape[1]
all_rois = F.broadcast_to(F.expand_dims(rois, 1), (n, c, rois.shape[1])).reshape(-1, rois.shape[1])
deltas = deltas.reshape(-1, deltas.shape[2])
pred_bbox = bbox_transform_inv_opr(all_rois, deltas)
pred_bbox = pred_bbox.reshape(-1, c, pred_bbox.shape[1])
return pred_bbox
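# Shape sketch for restore_bbox (config is assumed to expose numpy arrays
# bbox_normalize_stds / bbox_normalize_means; the sizes below are hypothetical):
# rois is (n, 4) and deltas is (n, c, 4) with one delta set per class, so the
# decoded result is (n, c, 4).
#
#   rois = mge.tensor(np.zeros((128, 4), dtype=np.float32))
#   deltas = mge.tensor(np.zeros((128, 2, 4), dtype=np.float32))
#   pred = restore_bbox(rois, deltas, unnormalize=True, config=config)   # (128, 2, 4)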
def filter_boxes_opr(boxes, min_size):
"""Remove all boxes with any side smaller than min_size."""
wh = boxes[:, 2:4] - boxes[:, 0:2] + 1
keep_mask = F.prod(wh >= min_size, axis = 1).astype(np.float32)
keep_mask = keep_mask + F.equal(keep_mask.sum(), 0).astype(np.float32)
    return keep_mask
def clip_boxes_opr(boxes, im_info):
""" Clip the boxes into the image region."""
w = im_info[1] - 1
h = im_info[0] - 1
boxes[:, 0::4] = boxes[:, 0::4].clamp(min=0, max=w)
boxes[:, 1::4] = boxes[:, 1::4].clamp(min=0, max=h)
boxes[:, 2::4] = boxes[:, 2::4].clamp(min=0, max=w)
boxes[:, 3::4] = boxes[:, 3::4].clamp(min=0, max=h)
return boxes
def bbox_transform_inv_opr(bbox, deltas):
max_delta = math.log(1000.0 / 16)
""" Transforms the learned deltas to the final bbox coordinates, the axis is 1"""
bbox_width = bbox[:, 2] - bbox[:, 0] + 1
bbox_height = bbox[:, 3] - bbox[:, 1] + 1
bbox_ctr_x = bbox[:, 0] + 0.5 * bbox_width
bbox_ctr_y = bbox[:, 1] + 0.5 * bbox_height
pred_ctr_x = bbox_ctr_x + deltas[:, 0] * bbox_width
pred_ctr_y = bbox_ctr_y + deltas[:, 1] * bbox_height
dw = deltas[:, 2]
dh = deltas[:, 3]
dw = F.minimum(dw, max_delta)
dh = F.minimum(dh, max_delta)
pred_width = bbox_width * F.exp(dw)
pred_height = bbox_height * F.exp(dh)
pred_x1 = pred_ctr_x - 0.5 * pred_width
pred_y1 = pred_ctr_y - 0.5 * pred_height
pred_x2 = pred_ctr_x + 0.5 * pred_width
pred_y2 = pred_ctr_y + 0.5 * pred_height
# pred_boxes = F.concat((pred_x1.reshape(-1, 1), pred_y1.reshape(-1, 1),
# pred_x2.reshape(-1, 1), pred_y2.reshape(-1, 1)), axis=1)
pred_boxes = F.stack([pred_x1, pred_y1, pred_x2, pred_y2], axis = 1)
return pred_boxes
def bbox_transform_opr(bbox, gt):
""" Transform the bounding box and ground truth to the loss targets.
The 4 box coordinates are in axis 1"""
bbox_width = bbox[:, 2] - bbox[:, 0] + 1
bbox_height = bbox[:, 3] - bbox[:, 1] + 1
bbox_ctr_x = bbox[:, 0] + 0.5 * bbox_width
bbox_ctr_y = bbox[:, 1] + 0.5 * bbox_height
gt_width = gt[:, 2] - gt[:, 0] + 1
gt_height = gt[:, 3] - gt[:, 1] + 1
gt_ctr_x = gt[:, 0] + 0.5 * gt_width
gt_ctr_y = gt[:, 1] + 0.5 * gt_height
target_dx = (gt_ctr_x - bbox_ctr_x) / bbox_width
target_dy = (gt_ctr_y - bbox_ctr_y) / bbox_height
target_dw = F.log(gt_width / bbox_width)
target_dh = F.log(gt_height / bbox_height)
target = F.stack([target_dx, target_dy, target_dw, target_dh], axis=1)
return target
def box_overlap_opr(box: Tensor, gt: Tensor) -> Tensor:
"""
Given two lists of boxes of size N and M,
compute the IoU (intersection over union)
between __all__ N x M pairs of boxes.
The box order must be (xmin, ymin, xmax, ymax).
Args:
boxes1,boxes2 (Boxes): two `Boxes`. Contains N & M boxes, respectively.
Returns:
Tensor: IoU, sized [N,M].
"""
# box = boxes1
# gt = boxes2
# target_shape = (boxes1.shape[0], boxes2.shape[0], 4)
N, K = box.shape[0], gt.shape[0]
b_box = F.broadcast_to(F.expand_dims(box, 1),(N, K, box.shape[1]))
b_gt = F.broadcast_to(F.expand_dims(gt, 0), (N, K, gt.shape[1]))
# b_gt = F.expand_dims(gt, 0).broadcast_to(N, K, gt.shape[1])
# b_box = F.expand_dims(boxes1, 1).broadcast(*target_shape)
# b_gt = F.expand_dims(boxes2, 0).broadcast(*target_shape)
iw = F.minimum(b_box[:, :, 2], b_gt[:, :, 2]) - F.maximum(
b_box[:, :, 0], b_gt[:, :, 0]
)
ih = F.minimum(b_box[:, :, 3], b_gt[:, :, 3]) - F.maximum(
b_box[:, :, 1], b_gt[:, :, 1]
)
inter = F.maximum(iw, 0) * F.maximum(ih, 0)
area_box = F.maximum(box[:, 2] - box[:, 0], 0) * F.maximum(box[:, 3] - box[:, 1], 0)
area_gt = F.maximum(gt[:, 2] - gt[:, 0], 0) * F.maximum(gt[:, 3] - gt[:, 1], 0)
# area_target_shape = (box.shape[0], gt.shapeof()[0])
b_area_box = F.broadcast_to(F.expand_dims(area_box, 1), (N, K))
b_area_gt = F.broadcast_to(F.expand_dims(area_gt, 0), (N, K))
# b_area_box = F.expand_dims(area_box, 1).broadcast_to(N, K)
# b_area_gt = F.expand_dims(area_gt, 0).broadcast_to(N, K)
# b_area_box = F.add_axis(area_box, 1).broadcast(*area_target_shape)
# b_area_gt = F.add_axis(area_gt, 0).broadcast(*area_target_shape)
union = b_area_box + b_area_gt - inter
overlaps = F.maximum(inter / union, 0)
return overlaps
def box_overlap_ignore_opr(box: Tensor, gt: Tensor, ignore_label=-1) -> Tensor:
"""
Given two lists of boxes of size N and M,
compute the IoU (intersection over union)
between __all__ N x M pairs of boxes.
The box order must be (xmin, ymin, xmax, ymax).
Args:
boxes1,boxes2 (Boxes): two `Boxes`. Contains N & M boxes, respectively.
Returns:
Tensor: IoU, sized [N,M].
"""
# box = boxes1
# gt = boxes2
# target_shape = (boxes1.shapeof()[0], boxes2.shapeof()[0], 4)
eps = 1e-5
N, K = box.shape[0], gt.shape[0]
b_box = F.broadcast_to(F.expand_dims(box, 1), (N, K, box.shape[1]))
b_gt = F.broadcast_to(F.expand_dims(gt, 0), (N, K, gt.shape[1]))
# b_box = F.add_axis(boxes1, 1).broadcast(*target_shape)
# b_gt = F.add_axis(boxes2[:, :4], 0).broadcast(*target_shape)
iw = F.minimum(b_box[:, :, 2], b_gt[:, :, 2]) - F.maximum(
b_box[:, :, 0], b_gt[:, :, 0]
)
ih = F.minimum(b_box[:, :, 3], b_gt[:, :, 3]) - F.maximum(
b_box[:, :, 1], b_gt[:, :, 1]
)
inter = | F.maximum(iw, 0) | megengine.functional.maximum |
import math
import megengine as mge
import megengine.functional as F
import numpy as np
from megengine import Tensor
import pdb
def restore_bbox(rois, deltas, unnormalize=True, config = None):
assert deltas.ndim == 3
if unnormalize:
std_opr = mge.tensor(config.bbox_normalize_stds.reshape(1, 1, -1))
mean_opr = mge.tensor(config.bbox_normalize_means.reshape(1, 1, -1))
deltas = deltas * std_opr
deltas = deltas + mean_opr
# n = deltas.shape[1]
n, c = deltas.shape[0], deltas.shape[1]
all_rois = F.broadcast_to(F.expand_dims(rois, 1), (n, c, rois.shape[1])).reshape(-1, rois.shape[1])
deltas = deltas.reshape(-1, deltas.shape[2])
pred_bbox = bbox_transform_inv_opr(all_rois, deltas)
pred_bbox = pred_bbox.reshape(-1, c, pred_bbox.shape[1])
return pred_bbox
def filter_boxes_opr(boxes, min_size):
"""Remove all boxes with any side smaller than min_size."""
wh = boxes[:, 2:4] - boxes[:, 0:2] + 1
keep_mask = F.prod(wh >= min_size, axis = 1).astype(np.float32)
keep_mask = keep_mask + F.equal(keep_mask.sum(), 0).astype(np.float32)
    return keep_mask
def clip_boxes_opr(boxes, im_info):
""" Clip the boxes into the image region."""
w = im_info[1] - 1
h = im_info[0] - 1
boxes[:, 0::4] = boxes[:, 0::4].clamp(min=0, max=w)
boxes[:, 1::4] = boxes[:, 1::4].clamp(min=0, max=h)
boxes[:, 2::4] = boxes[:, 2::4].clamp(min=0, max=w)
boxes[:, 3::4] = boxes[:, 3::4].clamp(min=0, max=h)
return boxes
def bbox_transform_inv_opr(bbox, deltas):
max_delta = math.log(1000.0 / 16)
""" Transforms the learned deltas to the final bbox coordinates, the axis is 1"""
bbox_width = bbox[:, 2] - bbox[:, 0] + 1
bbox_height = bbox[:, 3] - bbox[:, 1] + 1
bbox_ctr_x = bbox[:, 0] + 0.5 * bbox_width
bbox_ctr_y = bbox[:, 1] + 0.5 * bbox_height
pred_ctr_x = bbox_ctr_x + deltas[:, 0] * bbox_width
pred_ctr_y = bbox_ctr_y + deltas[:, 1] * bbox_height
dw = deltas[:, 2]
dh = deltas[:, 3]
dw = F.minimum(dw, max_delta)
dh = F.minimum(dh, max_delta)
pred_width = bbox_width * F.exp(dw)
pred_height = bbox_height * F.exp(dh)
pred_x1 = pred_ctr_x - 0.5 * pred_width
pred_y1 = pred_ctr_y - 0.5 * pred_height
pred_x2 = pred_ctr_x + 0.5 * pred_width
pred_y2 = pred_ctr_y + 0.5 * pred_height
# pred_boxes = F.concat((pred_x1.reshape(-1, 1), pred_y1.reshape(-1, 1),
# pred_x2.reshape(-1, 1), pred_y2.reshape(-1, 1)), axis=1)
pred_boxes = F.stack([pred_x1, pred_y1, pred_x2, pred_y2], axis = 1)
return pred_boxes
def bbox_transform_opr(bbox, gt):
""" Transform the bounding box and ground truth to the loss targets.
The 4 box coordinates are in axis 1"""
bbox_width = bbox[:, 2] - bbox[:, 0] + 1
bbox_height = bbox[:, 3] - bbox[:, 1] + 1
bbox_ctr_x = bbox[:, 0] + 0.5 * bbox_width
bbox_ctr_y = bbox[:, 1] + 0.5 * bbox_height
gt_width = gt[:, 2] - gt[:, 0] + 1
gt_height = gt[:, 3] - gt[:, 1] + 1
gt_ctr_x = gt[:, 0] + 0.5 * gt_width
gt_ctr_y = gt[:, 1] + 0.5 * gt_height
target_dx = (gt_ctr_x - bbox_ctr_x) / bbox_width
target_dy = (gt_ctr_y - bbox_ctr_y) / bbox_height
target_dw = F.log(gt_width / bbox_width)
target_dh = F.log(gt_height / bbox_height)
target = F.stack([target_dx, target_dy, target_dw, target_dh], axis=1)
return target
def box_overlap_opr(box: Tensor, gt: Tensor) -> Tensor:
"""
Given two lists of boxes of size N and M,
compute the IoU (intersection over union)
between __all__ N x M pairs of boxes.
The box order must be (xmin, ymin, xmax, ymax).
Args:
boxes1,boxes2 (Boxes): two `Boxes`. Contains N & M boxes, respectively.
Returns:
Tensor: IoU, sized [N,M].
"""
# box = boxes1
# gt = boxes2
# target_shape = (boxes1.shape[0], boxes2.shape[0], 4)
N, K = box.shape[0], gt.shape[0]
b_box = F.broadcast_to(F.expand_dims(box, 1),(N, K, box.shape[1]))
b_gt = F.broadcast_to(F.expand_dims(gt, 0), (N, K, gt.shape[1]))
# b_gt = F.expand_dims(gt, 0).broadcast_to(N, K, gt.shape[1])
# b_box = F.expand_dims(boxes1, 1).broadcast(*target_shape)
# b_gt = F.expand_dims(boxes2, 0).broadcast(*target_shape)
iw = F.minimum(b_box[:, :, 2], b_gt[:, :, 2]) - F.maximum(
b_box[:, :, 0], b_gt[:, :, 0]
)
ih = F.minimum(b_box[:, :, 3], b_gt[:, :, 3]) - F.maximum(
b_box[:, :, 1], b_gt[:, :, 1]
)
inter = F.maximum(iw, 0) * F.maximum(ih, 0)
area_box = F.maximum(box[:, 2] - box[:, 0], 0) * F.maximum(box[:, 3] - box[:, 1], 0)
area_gt = F.maximum(gt[:, 2] - gt[:, 0], 0) * F.maximum(gt[:, 3] - gt[:, 1], 0)
# area_target_shape = (box.shape[0], gt.shapeof()[0])
b_area_box = F.broadcast_to(F.expand_dims(area_box, 1), (N, K))
b_area_gt = F.broadcast_to(F.expand_dims(area_gt, 0), (N, K))
# b_area_box = F.expand_dims(area_box, 1).broadcast_to(N, K)
# b_area_gt = F.expand_dims(area_gt, 0).broadcast_to(N, K)
# b_area_box = F.add_axis(area_box, 1).broadcast(*area_target_shape)
# b_area_gt = F.add_axis(area_gt, 0).broadcast(*area_target_shape)
union = b_area_box + b_area_gt - inter
overlaps = F.maximum(inter / union, 0)
return overlaps
def box_overlap_ignore_opr(box: Tensor, gt: Tensor, ignore_label=-1) -> Tensor:
"""
Given two lists of boxes of size N and M,
compute the IoU (intersection over union)
between __all__ N x M pairs of boxes.
The box order must be (xmin, ymin, xmax, ymax).
Args:
boxes1,boxes2 (Boxes): two `Boxes`. Contains N & M boxes, respectively.
Returns:
Tensor: IoU, sized [N,M].
"""
# box = boxes1
# gt = boxes2
# target_shape = (boxes1.shapeof()[0], boxes2.shapeof()[0], 4)
eps = 1e-5
N, K = box.shape[0], gt.shape[0]
b_box = F.broadcast_to(F.expand_dims(box, 1), (N, K, box.shape[1]))
b_gt = F.broadcast_to(F.expand_dims(gt, 0), (N, K, gt.shape[1]))
# b_box = F.add_axis(boxes1, 1).broadcast(*target_shape)
# b_gt = F.add_axis(boxes2[:, :4], 0).broadcast(*target_shape)
iw = F.minimum(b_box[:, :, 2], b_gt[:, :, 2]) - F.maximum(
b_box[:, :, 0], b_gt[:, :, 0]
)
ih = F.minimum(b_box[:, :, 3], b_gt[:, :, 3]) - F.maximum(
b_box[:, :, 1], b_gt[:, :, 1]
)
inter = F.maximum(iw, 0) * | F.maximum(ih, 0) | megengine.functional.maximum |
import math
import megengine as mge
import megengine.functional as F
import numpy as np
from megengine import Tensor
import pdb
def restore_bbox(rois, deltas, unnormalize=True, config = None):
assert deltas.ndim == 3
if unnormalize:
std_opr = mge.tensor(config.bbox_normalize_stds.reshape(1, 1, -1))
mean_opr = mge.tensor(config.bbox_normalize_means.reshape(1, 1, -1))
deltas = deltas * std_opr
deltas = deltas + mean_opr
# n = deltas.shape[1]
n, c = deltas.shape[0], deltas.shape[1]
all_rois = F.broadcast_to(F.expand_dims(rois, 1), (n, c, rois.shape[1])).reshape(-1, rois.shape[1])
deltas = deltas.reshape(-1, deltas.shape[2])
pred_bbox = bbox_transform_inv_opr(all_rois, deltas)
pred_bbox = pred_bbox.reshape(-1, c, pred_bbox.shape[1])
return pred_bbox
def filter_boxes_opr(boxes, min_size):
"""Remove all boxes with any side smaller than min_size."""
wh = boxes[:, 2:4] - boxes[:, 0:2] + 1
keep_mask = F.prod(wh >= min_size, axis = 1).astype(np.float32)
keep_mask = keep_mask + F.equal(keep_mask.sum(), 0).astype(np.float32)
    return keep_mask
def clip_boxes_opr(boxes, im_info):
""" Clip the boxes into the image region."""
w = im_info[1] - 1
h = im_info[0] - 1
boxes[:, 0::4] = boxes[:, 0::4].clamp(min=0, max=w)
boxes[:, 1::4] = boxes[:, 1::4].clamp(min=0, max=h)
boxes[:, 2::4] = boxes[:, 2::4].clamp(min=0, max=w)
boxes[:, 3::4] = boxes[:, 3::4].clamp(min=0, max=h)
return boxes
def bbox_transform_inv_opr(bbox, deltas):
max_delta = math.log(1000.0 / 16)
""" Transforms the learned deltas to the final bbox coordinates, the axis is 1"""
bbox_width = bbox[:, 2] - bbox[:, 0] + 1
bbox_height = bbox[:, 3] - bbox[:, 1] + 1
bbox_ctr_x = bbox[:, 0] + 0.5 * bbox_width
bbox_ctr_y = bbox[:, 1] + 0.5 * bbox_height
pred_ctr_x = bbox_ctr_x + deltas[:, 0] * bbox_width
pred_ctr_y = bbox_ctr_y + deltas[:, 1] * bbox_height
dw = deltas[:, 2]
dh = deltas[:, 3]
dw = F.minimum(dw, max_delta)
dh = F.minimum(dh, max_delta)
pred_width = bbox_width * F.exp(dw)
pred_height = bbox_height * F.exp(dh)
pred_x1 = pred_ctr_x - 0.5 * pred_width
pred_y1 = pred_ctr_y - 0.5 * pred_height
pred_x2 = pred_ctr_x + 0.5 * pred_width
pred_y2 = pred_ctr_y + 0.5 * pred_height
# pred_boxes = F.concat((pred_x1.reshape(-1, 1), pred_y1.reshape(-1, 1),
# pred_x2.reshape(-1, 1), pred_y2.reshape(-1, 1)), axis=1)
pred_boxes = F.stack([pred_x1, pred_y1, pred_x2, pred_y2], axis = 1)
return pred_boxes
def bbox_transform_opr(bbox, gt):
""" Transform the bounding box and ground truth to the loss targets.
The 4 box coordinates are in axis 1"""
bbox_width = bbox[:, 2] - bbox[:, 0] + 1
bbox_height = bbox[:, 3] - bbox[:, 1] + 1
bbox_ctr_x = bbox[:, 0] + 0.5 * bbox_width
bbox_ctr_y = bbox[:, 1] + 0.5 * bbox_height
gt_width = gt[:, 2] - gt[:, 0] + 1
gt_height = gt[:, 3] - gt[:, 1] + 1
gt_ctr_x = gt[:, 0] + 0.5 * gt_width
gt_ctr_y = gt[:, 1] + 0.5 * gt_height
target_dx = (gt_ctr_x - bbox_ctr_x) / bbox_width
target_dy = (gt_ctr_y - bbox_ctr_y) / bbox_height
target_dw = F.log(gt_width / bbox_width)
target_dh = F.log(gt_height / bbox_height)
target = F.stack([target_dx, target_dy, target_dw, target_dh], axis=1)
return target
def box_overlap_opr(box: Tensor, gt: Tensor) -> Tensor:
"""
Given two lists of boxes of size N and M,
compute the IoU (intersection over union)
between __all__ N x M pairs of boxes.
The box order must be (xmin, ymin, xmax, ymax).
Args:
boxes1,boxes2 (Boxes): two `Boxes`. Contains N & M boxes, respectively.
Returns:
Tensor: IoU, sized [N,M].
"""
# box = boxes1
# gt = boxes2
# target_shape = (boxes1.shape[0], boxes2.shape[0], 4)
N, K = box.shape[0], gt.shape[0]
b_box = F.broadcast_to(F.expand_dims(box, 1),(N, K, box.shape[1]))
b_gt = F.broadcast_to(F.expand_dims(gt, 0), (N, K, gt.shape[1]))
# b_gt = F.expand_dims(gt, 0).broadcast_to(N, K, gt.shape[1])
# b_box = F.expand_dims(boxes1, 1).broadcast(*target_shape)
# b_gt = F.expand_dims(boxes2, 0).broadcast(*target_shape)
iw = F.minimum(b_box[:, :, 2], b_gt[:, :, 2]) - F.maximum(
b_box[:, :, 0], b_gt[:, :, 0]
)
ih = F.minimum(b_box[:, :, 3], b_gt[:, :, 3]) - F.maximum(
b_box[:, :, 1], b_gt[:, :, 1]
)
inter = F.maximum(iw, 0) * F.maximum(ih, 0)
area_box = F.maximum(box[:, 2] - box[:, 0], 0) * F.maximum(box[:, 3] - box[:, 1], 0)
area_gt = F.maximum(gt[:, 2] - gt[:, 0], 0) * F.maximum(gt[:, 3] - gt[:, 1], 0)
# area_target_shape = (box.shape[0], gt.shapeof()[0])
b_area_box = F.broadcast_to(F.expand_dims(area_box, 1), (N, K))
b_area_gt = F.broadcast_to(F.expand_dims(area_gt, 0), (N, K))
# b_area_box = F.expand_dims(area_box, 1).broadcast_to(N, K)
# b_area_gt = F.expand_dims(area_gt, 0).broadcast_to(N, K)
# b_area_box = F.add_axis(area_box, 1).broadcast(*area_target_shape)
# b_area_gt = F.add_axis(area_gt, 0).broadcast(*area_target_shape)
union = b_area_box + b_area_gt - inter
overlaps = F.maximum(inter / union, 0)
return overlaps
def box_overlap_ignore_opr(box: Tensor, gt: Tensor, ignore_label=-1) -> Tensor:
"""
Given two lists of boxes of size N and M,
compute the IoU (intersection over union)
between __all__ N x M pairs of boxes.
The box order must be (xmin, ymin, xmax, ymax).
Args:
boxes1,boxes2 (Boxes): two `Boxes`. Contains N & M boxes, respectively.
Returns:
Tensor: IoU, sized [N,M].
"""
# box = boxes1
# gt = boxes2
# target_shape = (boxes1.shapeof()[0], boxes2.shapeof()[0], 4)
eps = 1e-5
N, K = box.shape[0], gt.shape[0]
b_box = F.broadcast_to(F.expand_dims(box, 1), (N, K, box.shape[1]))
b_gt = F.broadcast_to(F.expand_dims(gt, 0), (N, K, gt.shape[1]))
# b_box = F.add_axis(boxes1, 1).broadcast(*target_shape)
# b_gt = F.add_axis(boxes2[:, :4], 0).broadcast(*target_shape)
iw = F.minimum(b_box[:, :, 2], b_gt[:, :, 2]) - F.maximum(
b_box[:, :, 0], b_gt[:, :, 0]
)
ih = F.minimum(b_box[:, :, 3], b_gt[:, :, 3]) - F.maximum(
b_box[:, :, 1], b_gt[:, :, 1]
)
inter = F.maximum(iw, 0) * F.maximum(ih, 0)
area_box = | F.maximum(box[:, 2] - box[:, 0], 0) | megengine.functional.maximum |
import math
import megengine as mge
import megengine.functional as F
import numpy as np
from megengine import Tensor
import pdb
def restore_bbox(rois, deltas, unnormalize=True, config = None):
assert deltas.ndim == 3
if unnormalize:
std_opr = mge.tensor(config.bbox_normalize_stds.reshape(1, 1, -1))
mean_opr = mge.tensor(config.bbox_normalize_means.reshape(1, 1, -1))
deltas = deltas * std_opr
deltas = deltas + mean_opr
# n = deltas.shape[1]
n, c = deltas.shape[0], deltas.shape[1]
all_rois = F.broadcast_to(F.expand_dims(rois, 1), (n, c, rois.shape[1])).reshape(-1, rois.shape[1])
deltas = deltas.reshape(-1, deltas.shape[2])
pred_bbox = bbox_transform_inv_opr(all_rois, deltas)
pred_bbox = pred_bbox.reshape(-1, c, pred_bbox.shape[1])
return pred_bbox
def filter_boxes_opr(boxes, min_size):
"""Remove all boxes with any side smaller than min_size."""
wh = boxes[:, 2:4] - boxes[:, 0:2] + 1
keep_mask = F.prod(wh >= min_size, axis = 1).astype(np.float32)
keep_mask = keep_mask + F.equal(keep_mask.sum(), 0).astype(np.float32)
    return keep_mask
def clip_boxes_opr(boxes, im_info):
""" Clip the boxes into the image region."""
w = im_info[1] - 1
h = im_info[0] - 1
boxes[:, 0::4] = boxes[:, 0::4].clamp(min=0, max=w)
boxes[:, 1::4] = boxes[:, 1::4].clamp(min=0, max=h)
boxes[:, 2::4] = boxes[:, 2::4].clamp(min=0, max=w)
boxes[:, 3::4] = boxes[:, 3::4].clamp(min=0, max=h)
return boxes
def bbox_transform_inv_opr(bbox, deltas):
max_delta = math.log(1000.0 / 16)
""" Transforms the learned deltas to the final bbox coordinates, the axis is 1"""
bbox_width = bbox[:, 2] - bbox[:, 0] + 1
bbox_height = bbox[:, 3] - bbox[:, 1] + 1
bbox_ctr_x = bbox[:, 0] + 0.5 * bbox_width
bbox_ctr_y = bbox[:, 1] + 0.5 * bbox_height
pred_ctr_x = bbox_ctr_x + deltas[:, 0] * bbox_width
pred_ctr_y = bbox_ctr_y + deltas[:, 1] * bbox_height
dw = deltas[:, 2]
dh = deltas[:, 3]
dw = F.minimum(dw, max_delta)
dh = F.minimum(dh, max_delta)
pred_width = bbox_width * F.exp(dw)
pred_height = bbox_height * F.exp(dh)
pred_x1 = pred_ctr_x - 0.5 * pred_width
pred_y1 = pred_ctr_y - 0.5 * pred_height
pred_x2 = pred_ctr_x + 0.5 * pred_width
pred_y2 = pred_ctr_y + 0.5 * pred_height
# pred_boxes = F.concat((pred_x1.reshape(-1, 1), pred_y1.reshape(-1, 1),
# pred_x2.reshape(-1, 1), pred_y2.reshape(-1, 1)), axis=1)
pred_boxes = F.stack([pred_x1, pred_y1, pred_x2, pred_y2], axis = 1)
return pred_boxes
def bbox_transform_opr(bbox, gt):
""" Transform the bounding box and ground truth to the loss targets.
The 4 box coordinates are in axis 1"""
bbox_width = bbox[:, 2] - bbox[:, 0] + 1
bbox_height = bbox[:, 3] - bbox[:, 1] + 1
bbox_ctr_x = bbox[:, 0] + 0.5 * bbox_width
bbox_ctr_y = bbox[:, 1] + 0.5 * bbox_height
gt_width = gt[:, 2] - gt[:, 0] + 1
gt_height = gt[:, 3] - gt[:, 1] + 1
gt_ctr_x = gt[:, 0] + 0.5 * gt_width
gt_ctr_y = gt[:, 1] + 0.5 * gt_height
target_dx = (gt_ctr_x - bbox_ctr_x) / bbox_width
target_dy = (gt_ctr_y - bbox_ctr_y) / bbox_height
target_dw = F.log(gt_width / bbox_width)
target_dh = F.log(gt_height / bbox_height)
target = F.stack([target_dx, target_dy, target_dw, target_dh], axis=1)
return target
def box_overlap_opr(box: Tensor, gt: Tensor) -> Tensor:
"""
Given two lists of boxes of size N and M,
compute the IoU (intersection over union)
between __all__ N x M pairs of boxes.
The box order must be (xmin, ymin, xmax, ymax).
Args:
boxes1,boxes2 (Boxes): two `Boxes`. Contains N & M boxes, respectively.
Returns:
Tensor: IoU, sized [N,M].
"""
# box = boxes1
# gt = boxes2
# target_shape = (boxes1.shape[0], boxes2.shape[0], 4)
N, K = box.shape[0], gt.shape[0]
b_box = F.broadcast_to(F.expand_dims(box, 1),(N, K, box.shape[1]))
b_gt = F.broadcast_to(F.expand_dims(gt, 0), (N, K, gt.shape[1]))
# b_gt = F.expand_dims(gt, 0).broadcast_to(N, K, gt.shape[1])
# b_box = F.expand_dims(boxes1, 1).broadcast(*target_shape)
# b_gt = F.expand_dims(boxes2, 0).broadcast(*target_shape)
iw = F.minimum(b_box[:, :, 2], b_gt[:, :, 2]) - F.maximum(
b_box[:, :, 0], b_gt[:, :, 0]
)
ih = F.minimum(b_box[:, :, 3], b_gt[:, :, 3]) - F.maximum(
b_box[:, :, 1], b_gt[:, :, 1]
)
inter = F.maximum(iw, 0) * F.maximum(ih, 0)
area_box = F.maximum(box[:, 2] - box[:, 0], 0) * F.maximum(box[:, 3] - box[:, 1], 0)
area_gt = F.maximum(gt[:, 2] - gt[:, 0], 0) * F.maximum(gt[:, 3] - gt[:, 1], 0)
# area_target_shape = (box.shape[0], gt.shapeof()[0])
b_area_box = F.broadcast_to(F.expand_dims(area_box, 1), (N, K))
b_area_gt = F.broadcast_to(F.expand_dims(area_gt, 0), (N, K))
# b_area_box = F.expand_dims(area_box, 1).broadcast_to(N, K)
# b_area_gt = F.expand_dims(area_gt, 0).broadcast_to(N, K)
# b_area_box = F.add_axis(area_box, 1).broadcast(*area_target_shape)
# b_area_gt = F.add_axis(area_gt, 0).broadcast(*area_target_shape)
union = b_area_box + b_area_gt - inter
overlaps = F.maximum(inter / union, 0)
return overlaps
def box_overlap_ignore_opr(box: Tensor, gt: Tensor, ignore_label=-1) -> Tensor:
"""
Given two lists of boxes of size N and M,
compute the IoU (intersection over union)
between __all__ N x M pairs of boxes.
The box order must be (xmin, ymin, xmax, ymax).
Args:
boxes1,boxes2 (Boxes): two `Boxes`. Contains N & M boxes, respectively.
Returns:
Tensor: IoU, sized [N,M].
"""
# box = boxes1
# gt = boxes2
# target_shape = (boxes1.shapeof()[0], boxes2.shapeof()[0], 4)
eps = 1e-5
N, K = box.shape[0], gt.shape[0]
b_box = F.broadcast_to(F.expand_dims(box, 1), (N, K, box.shape[1]))
b_gt = F.broadcast_to(F.expand_dims(gt, 0), (N, K, gt.shape[1]))
# b_box = F.add_axis(boxes1, 1).broadcast(*target_shape)
# b_gt = F.add_axis(boxes2[:, :4], 0).broadcast(*target_shape)
iw = F.minimum(b_box[:, :, 2], b_gt[:, :, 2]) - F.maximum(
b_box[:, :, 0], b_gt[:, :, 0]
)
ih = F.minimum(b_box[:, :, 3], b_gt[:, :, 3]) - F.maximum(
b_box[:, :, 1], b_gt[:, :, 1]
)
inter = F.maximum(iw, 0) * F.maximum(ih, 0)
area_box = F.maximum(box[:, 2] - box[:, 0], 0) * | F.maximum(box[:, 3] - box[:, 1], 0) | megengine.functional.maximum |
import math
import megengine as mge
import megengine.functional as F
import numpy as np
from megengine import Tensor
import pdb
def restore_bbox(rois, deltas, unnormalize=True, config = None):
assert deltas.ndim == 3
if unnormalize:
std_opr = mge.tensor(config.bbox_normalize_stds.reshape(1, 1, -1))
mean_opr = mge.tensor(config.bbox_normalize_means.reshape(1, 1, -1))
deltas = deltas * std_opr
deltas = deltas + mean_opr
# n = deltas.shape[1]
n, c = deltas.shape[0], deltas.shape[1]
all_rois = F.broadcast_to(F.expand_dims(rois, 1), (n, c, rois.shape[1])).reshape(-1, rois.shape[1])
deltas = deltas.reshape(-1, deltas.shape[2])
pred_bbox = bbox_transform_inv_opr(all_rois, deltas)
pred_bbox = pred_bbox.reshape(-1, c, pred_bbox.shape[1])
return pred_bbox
def filter_boxes_opr(boxes, min_size):
"""Remove all boxes with any side smaller than min_size."""
wh = boxes[:, 2:4] - boxes[:, 0:2] + 1
keep_mask = F.prod(wh >= min_size, axis = 1).astype(np.float32)
keep_mask = keep_mask + F.equal(keep_mask.sum(), 0).astype(np.float32)
    return keep_mask
def clip_boxes_opr(boxes, im_info):
""" Clip the boxes into the image region."""
w = im_info[1] - 1
h = im_info[0] - 1
boxes[:, 0::4] = boxes[:, 0::4].clamp(min=0, max=w)
boxes[:, 1::4] = boxes[:, 1::4].clamp(min=0, max=h)
boxes[:, 2::4] = boxes[:, 2::4].clamp(min=0, max=w)
boxes[:, 3::4] = boxes[:, 3::4].clamp(min=0, max=h)
return boxes
def bbox_transform_inv_opr(bbox, deltas):
max_delta = math.log(1000.0 / 16)
""" Transforms the learned deltas to the final bbox coordinates, the axis is 1"""
bbox_width = bbox[:, 2] - bbox[:, 0] + 1
bbox_height = bbox[:, 3] - bbox[:, 1] + 1
bbox_ctr_x = bbox[:, 0] + 0.5 * bbox_width
bbox_ctr_y = bbox[:, 1] + 0.5 * bbox_height
pred_ctr_x = bbox_ctr_x + deltas[:, 0] * bbox_width
pred_ctr_y = bbox_ctr_y + deltas[:, 1] * bbox_height
dw = deltas[:, 2]
dh = deltas[:, 3]
dw = F.minimum(dw, max_delta)
dh = F.minimum(dh, max_delta)
pred_width = bbox_width * F.exp(dw)
pred_height = bbox_height * F.exp(dh)
pred_x1 = pred_ctr_x - 0.5 * pred_width
pred_y1 = pred_ctr_y - 0.5 * pred_height
pred_x2 = pred_ctr_x + 0.5 * pred_width
pred_y2 = pred_ctr_y + 0.5 * pred_height
# pred_boxes = F.concat((pred_x1.reshape(-1, 1), pred_y1.reshape(-1, 1),
# pred_x2.reshape(-1, 1), pred_y2.reshape(-1, 1)), axis=1)
pred_boxes = F.stack([pred_x1, pred_y1, pred_x2, pred_y2], axis = 1)
return pred_boxes
def bbox_transform_opr(bbox, gt):
""" Transform the bounding box and ground truth to the loss targets.
The 4 box coordinates are in axis 1"""
bbox_width = bbox[:, 2] - bbox[:, 0] + 1
bbox_height = bbox[:, 3] - bbox[:, 1] + 1
bbox_ctr_x = bbox[:, 0] + 0.5 * bbox_width
bbox_ctr_y = bbox[:, 1] + 0.5 * bbox_height
gt_width = gt[:, 2] - gt[:, 0] + 1
gt_height = gt[:, 3] - gt[:, 1] + 1
gt_ctr_x = gt[:, 0] + 0.5 * gt_width
gt_ctr_y = gt[:, 1] + 0.5 * gt_height
target_dx = (gt_ctr_x - bbox_ctr_x) / bbox_width
target_dy = (gt_ctr_y - bbox_ctr_y) / bbox_height
target_dw = F.log(gt_width / bbox_width)
target_dh = F.log(gt_height / bbox_height)
target = F.stack([target_dx, target_dy, target_dw, target_dh], axis=1)
return target
def box_overlap_opr(box: Tensor, gt: Tensor) -> Tensor:
"""
Given two lists of boxes of size N and M,
compute the IoU (intersection over union)
between __all__ N x M pairs of boxes.
The box order must be (xmin, ymin, xmax, ymax).
Args:
boxes1,boxes2 (Boxes): two `Boxes`. Contains N & M boxes, respectively.
Returns:
Tensor: IoU, sized [N,M].
"""
# box = boxes1
# gt = boxes2
# target_shape = (boxes1.shape[0], boxes2.shape[0], 4)
N, K = box.shape[0], gt.shape[0]
b_box = F.broadcast_to(F.expand_dims(box, 1),(N, K, box.shape[1]))
b_gt = F.broadcast_to(F.expand_dims(gt, 0), (N, K, gt.shape[1]))
# b_gt = F.expand_dims(gt, 0).broadcast_to(N, K, gt.shape[1])
# b_box = F.expand_dims(boxes1, 1).broadcast(*target_shape)
# b_gt = F.expand_dims(boxes2, 0).broadcast(*target_shape)
iw = F.minimum(b_box[:, :, 2], b_gt[:, :, 2]) - F.maximum(
b_box[:, :, 0], b_gt[:, :, 0]
)
ih = F.minimum(b_box[:, :, 3], b_gt[:, :, 3]) - F.maximum(
b_box[:, :, 1], b_gt[:, :, 1]
)
inter = F.maximum(iw, 0) * F.maximum(ih, 0)
area_box = F.maximum(box[:, 2] - box[:, 0], 0) * F.maximum(box[:, 3] - box[:, 1], 0)
area_gt = F.maximum(gt[:, 2] - gt[:, 0], 0) * F.maximum(gt[:, 3] - gt[:, 1], 0)
# area_target_shape = (box.shape[0], gt.shapeof()[0])
b_area_box = F.broadcast_to(F.expand_dims(area_box, 1), (N, K))
b_area_gt = F.broadcast_to(F.expand_dims(area_gt, 0), (N, K))
# b_area_box = F.expand_dims(area_box, 1).broadcast_to(N, K)
# b_area_gt = F.expand_dims(area_gt, 0).broadcast_to(N, K)
# b_area_box = F.add_axis(area_box, 1).broadcast(*area_target_shape)
# b_area_gt = F.add_axis(area_gt, 0).broadcast(*area_target_shape)
union = b_area_box + b_area_gt - inter
overlaps = F.maximum(inter / union, 0)
return overlaps
def box_overlap_ignore_opr(box: Tensor, gt: Tensor, ignore_label=-1) -> Tensor:
"""
Given two lists of boxes of size N and M,
compute the IoU (intersection over union)
between __all__ N x M pairs of boxes.
The box order must be (xmin, ymin, xmax, ymax).
Args:
boxes1,boxes2 (Boxes): two `Boxes`. Contains N & M boxes, respectively.
Returns:
Tensor: IoU, sized [N,M].
"""
# box = boxes1
# gt = boxes2
# target_shape = (boxes1.shapeof()[0], boxes2.shapeof()[0], 4)
eps = 1e-5
N, K = box.shape[0], gt.shape[0]
b_box = F.broadcast_to(F.expand_dims(box, 1), (N, K, box.shape[1]))
b_gt = F.broadcast_to(F.expand_dims(gt, 0), (N, K, gt.shape[1]))
# b_box = F.add_axis(boxes1, 1).broadcast(*target_shape)
# b_gt = F.add_axis(boxes2[:, :4], 0).broadcast(*target_shape)
iw = F.minimum(b_box[:, :, 2], b_gt[:, :, 2]) - F.maximum(
b_box[:, :, 0], b_gt[:, :, 0]
)
ih = F.minimum(b_box[:, :, 3], b_gt[:, :, 3]) - F.maximum(
b_box[:, :, 1], b_gt[:, :, 1]
)
inter = F.maximum(iw, 0) * F.maximum(ih, 0)
area_box = F.maximum(box[:, 2] - box[:, 0], 0) * F.maximum(box[:, 3] - box[:, 1], 0)
area_gt = | F.maximum(gt[:, 2] - gt[:, 0], 0) | megengine.functional.maximum |
import math
import megengine as mge
import megengine.functional as F
import numpy as np
from megengine import Tensor
import pdb
def restore_bbox(rois, deltas, unnormalize=True, config = None):
assert deltas.ndim == 3
if unnormalize:
std_opr = mge.tensor(config.bbox_normalize_stds.reshape(1, 1, -1))
mean_opr = mge.tensor(config.bbox_normalize_means.reshape(1, 1, -1))
deltas = deltas * std_opr
deltas = deltas + mean_opr
# n = deltas.shape[1]
n, c = deltas.shape[0], deltas.shape[1]
all_rois = F.broadcast_to(F.expand_dims(rois, 1), (n, c, rois.shape[1])).reshape(-1, rois.shape[1])
deltas = deltas.reshape(-1, deltas.shape[2])
pred_bbox = bbox_transform_inv_opr(all_rois, deltas)
pred_bbox = pred_bbox.reshape(-1, c, pred_bbox.shape[1])
return pred_bbox
def filter_boxes_opr(boxes, min_size):
"""Remove all boxes with any side smaller than min_size."""
wh = boxes[:, 2:4] - boxes[:, 0:2] + 1
keep_mask = F.prod(wh >= min_size, axis = 1).astype(np.float32)
keep_mask = keep_mask + F.equal(keep_mask.sum(), 0).astype(np.float32)
    return keep_mask
def clip_boxes_opr(boxes, im_info):
""" Clip the boxes into the image region."""
w = im_info[1] - 1
h = im_info[0] - 1
boxes[:, 0::4] = boxes[:, 0::4].clamp(min=0, max=w)
boxes[:, 1::4] = boxes[:, 1::4].clamp(min=0, max=h)
boxes[:, 2::4] = boxes[:, 2::4].clamp(min=0, max=w)
boxes[:, 3::4] = boxes[:, 3::4].clamp(min=0, max=h)
return boxes
def bbox_transform_inv_opr(bbox, deltas):
max_delta = math.log(1000.0 / 16)
""" Transforms the learned deltas to the final bbox coordinates, the axis is 1"""
bbox_width = bbox[:, 2] - bbox[:, 0] + 1
bbox_height = bbox[:, 3] - bbox[:, 1] + 1
bbox_ctr_x = bbox[:, 0] + 0.5 * bbox_width
bbox_ctr_y = bbox[:, 1] + 0.5 * bbox_height
pred_ctr_x = bbox_ctr_x + deltas[:, 0] * bbox_width
pred_ctr_y = bbox_ctr_y + deltas[:, 1] * bbox_height
dw = deltas[:, 2]
dh = deltas[:, 3]
dw = F.minimum(dw, max_delta)
dh = F.minimum(dh, max_delta)
pred_width = bbox_width * F.exp(dw)
pred_height = bbox_height * F.exp(dh)
pred_x1 = pred_ctr_x - 0.5 * pred_width
pred_y1 = pred_ctr_y - 0.5 * pred_height
pred_x2 = pred_ctr_x + 0.5 * pred_width
pred_y2 = pred_ctr_y + 0.5 * pred_height
# pred_boxes = F.concat((pred_x1.reshape(-1, 1), pred_y1.reshape(-1, 1),
# pred_x2.reshape(-1, 1), pred_y2.reshape(-1, 1)), axis=1)
pred_boxes = F.stack([pred_x1, pred_y1, pred_x2, pred_y2], axis = 1)
return pred_boxes
def bbox_transform_opr(bbox, gt):
""" Transform the bounding box and ground truth to the loss targets.
The 4 box coordinates are in axis 1"""
bbox_width = bbox[:, 2] - bbox[:, 0] + 1
bbox_height = bbox[:, 3] - bbox[:, 1] + 1
bbox_ctr_x = bbox[:, 0] + 0.5 * bbox_width
bbox_ctr_y = bbox[:, 1] + 0.5 * bbox_height
gt_width = gt[:, 2] - gt[:, 0] + 1
gt_height = gt[:, 3] - gt[:, 1] + 1
gt_ctr_x = gt[:, 0] + 0.5 * gt_width
gt_ctr_y = gt[:, 1] + 0.5 * gt_height
target_dx = (gt_ctr_x - bbox_ctr_x) / bbox_width
target_dy = (gt_ctr_y - bbox_ctr_y) / bbox_height
target_dw = F.log(gt_width / bbox_width)
target_dh = F.log(gt_height / bbox_height)
target = F.stack([target_dx, target_dy, target_dw, target_dh], axis=1)
return target
def box_overlap_opr(box: Tensor, gt: Tensor) -> Tensor:
"""
Given two lists of boxes of size N and M,
compute the IoU (intersection over union)
between __all__ N x M pairs of boxes.
The box order must be (xmin, ymin, xmax, ymax).
Args:
boxes1,boxes2 (Boxes): two `Boxes`. Contains N & M boxes, respectively.
Returns:
Tensor: IoU, sized [N,M].
"""
# box = boxes1
# gt = boxes2
# target_shape = (boxes1.shape[0], boxes2.shape[0], 4)
N, K = box.shape[0], gt.shape[0]
b_box = F.broadcast_to(F.expand_dims(box, 1),(N, K, box.shape[1]))
b_gt = F.broadcast_to(F.expand_dims(gt, 0), (N, K, gt.shape[1]))
# b_gt = F.expand_dims(gt, 0).broadcast_to(N, K, gt.shape[1])
# b_box = F.expand_dims(boxes1, 1).broadcast(*target_shape)
# b_gt = F.expand_dims(boxes2, 0).broadcast(*target_shape)
iw = F.minimum(b_box[:, :, 2], b_gt[:, :, 2]) - F.maximum(
b_box[:, :, 0], b_gt[:, :, 0]
)
ih = F.minimum(b_box[:, :, 3], b_gt[:, :, 3]) - F.maximum(
b_box[:, :, 1], b_gt[:, :, 1]
)
inter = F.maximum(iw, 0) * F.maximum(ih, 0)
area_box = F.maximum(box[:, 2] - box[:, 0], 0) * F.maximum(box[:, 3] - box[:, 1], 0)
area_gt = F.maximum(gt[:, 2] - gt[:, 0], 0) * F.maximum(gt[:, 3] - gt[:, 1], 0)
# area_target_shape = (box.shape[0], gt.shapeof()[0])
b_area_box = F.broadcast_to(F.expand_dims(area_box, 1), (N, K))
b_area_gt = F.broadcast_to(F.expand_dims(area_gt, 0), (N, K))
# b_area_box = F.expand_dims(area_box, 1).broadcast_to(N, K)
# b_area_gt = F.expand_dims(area_gt, 0).broadcast_to(N, K)
# b_area_box = F.add_axis(area_box, 1).broadcast(*area_target_shape)
# b_area_gt = F.add_axis(area_gt, 0).broadcast(*area_target_shape)
union = b_area_box + b_area_gt - inter
overlaps = F.maximum(inter / union, 0)
return overlaps
def box_overlap_ignore_opr(box: Tensor, gt: Tensor, ignore_label=-1) -> Tensor:
"""
Given two lists of boxes of size N and M,
compute the IoU (intersection over union)
between __all__ N x M pairs of boxes.
The box order must be (xmin, ymin, xmax, ymax).
Args:
boxes1,boxes2 (Boxes): two `Boxes`. Contains N & M boxes, respectively.
Returns:
Tensor: IoU, sized [N,M].
"""
# box = boxes1
# gt = boxes2
# target_shape = (boxes1.shapeof()[0], boxes2.shapeof()[0], 4)
eps = 1e-5
N, K = box.shape[0], gt.shape[0]
b_box = F.broadcast_to(F.expand_dims(box, 1), (N, K, box.shape[1]))
b_gt = F.broadcast_to(F.expand_dims(gt, 0), (N, K, gt.shape[1]))
# b_box = F.add_axis(boxes1, 1).broadcast(*target_shape)
# b_gt = F.add_axis(boxes2[:, :4], 0).broadcast(*target_shape)
iw = F.minimum(b_box[:, :, 2], b_gt[:, :, 2]) - F.maximum(
b_box[:, :, 0], b_gt[:, :, 0]
)
ih = F.minimum(b_box[:, :, 3], b_gt[:, :, 3]) - F.maximum(
b_box[:, :, 1], b_gt[:, :, 1]
)
inter = F.maximum(iw, 0) * F.maximum(ih, 0)
area_box = F.maximum(box[:, 2] - box[:, 0], 0) * F.maximum(box[:, 3] - box[:, 1], 0)
area_gt = F.maximum(gt[:, 2] - gt[:, 0], 0) * | F.maximum(gt[:, 3] - gt[:, 1], 0) | megengine.functional.maximum |
import math
import megengine as mge
import megengine.functional as F
import numpy as np
from megengine import Tensor
import pdb
def restore_bbox(rois, deltas, unnormalize=True, config = None):
assert deltas.ndim == 3
if unnormalize:
std_opr = mge.tensor(config.bbox_normalize_stds.reshape(1, 1, -1))
mean_opr = mge.tensor(config.bbox_normalize_means.reshape(1, 1, -1))
deltas = deltas * std_opr
deltas = deltas + mean_opr
# n = deltas.shape[1]
n, c = deltas.shape[0], deltas.shape[1]
all_rois = F.broadcast_to(F.expand_dims(rois, 1), (n, c, rois.shape[1])).reshape(-1, rois.shape[1])
deltas = deltas.reshape(-1, deltas.shape[2])
pred_bbox = bbox_transform_inv_opr(all_rois, deltas)
pred_bbox = pred_bbox.reshape(-1, c, pred_bbox.shape[1])
return pred_bbox
def filter_boxes_opr(boxes, min_size):
"""Remove all boxes with any side smaller than min_size."""
wh = boxes[:, 2:4] - boxes[:, 0:2] + 1
keep_mask = F.prod(wh >= min_size, axis = 1).astype(np.float32)
keep_mask = keep_mask + F.equal(keep_mask.sum(), 0).astype(np.float32)
    return keep_mask
def clip_boxes_opr(boxes, im_info):
""" Clip the boxes into the image region."""
w = im_info[1] - 1
h = im_info[0] - 1
boxes[:, 0::4] = boxes[:, 0::4].clamp(min=0, max=w)
boxes[:, 1::4] = boxes[:, 1::4].clamp(min=0, max=h)
boxes[:, 2::4] = boxes[:, 2::4].clamp(min=0, max=w)
boxes[:, 3::4] = boxes[:, 3::4].clamp(min=0, max=h)
return boxes
def bbox_transform_inv_opr(bbox, deltas):
max_delta = math.log(1000.0 / 16)
""" Transforms the learned deltas to the final bbox coordinates, the axis is 1"""
bbox_width = bbox[:, 2] - bbox[:, 0] + 1
bbox_height = bbox[:, 3] - bbox[:, 1] + 1
bbox_ctr_x = bbox[:, 0] + 0.5 * bbox_width
bbox_ctr_y = bbox[:, 1] + 0.5 * bbox_height
pred_ctr_x = bbox_ctr_x + deltas[:, 0] * bbox_width
pred_ctr_y = bbox_ctr_y + deltas[:, 1] * bbox_height
dw = deltas[:, 2]
dh = deltas[:, 3]
dw = F.minimum(dw, max_delta)
dh = F.minimum(dh, max_delta)
pred_width = bbox_width * F.exp(dw)
pred_height = bbox_height * F.exp(dh)
pred_x1 = pred_ctr_x - 0.5 * pred_width
pred_y1 = pred_ctr_y - 0.5 * pred_height
pred_x2 = pred_ctr_x + 0.5 * pred_width
pred_y2 = pred_ctr_y + 0.5 * pred_height
# pred_boxes = F.concat((pred_x1.reshape(-1, 1), pred_y1.reshape(-1, 1),
# pred_x2.reshape(-1, 1), pred_y2.reshape(-1, 1)), axis=1)
pred_boxes = F.stack([pred_x1, pred_y1, pred_x2, pred_y2], axis = 1)
return pred_boxes
def bbox_transform_opr(bbox, gt):
""" Transform the bounding box and ground truth to the loss targets.
The 4 box coordinates are in axis 1"""
bbox_width = bbox[:, 2] - bbox[:, 0] + 1
bbox_height = bbox[:, 3] - bbox[:, 1] + 1
bbox_ctr_x = bbox[:, 0] + 0.5 * bbox_width
bbox_ctr_y = bbox[:, 1] + 0.5 * bbox_height
gt_width = gt[:, 2] - gt[:, 0] + 1
gt_height = gt[:, 3] - gt[:, 1] + 1
gt_ctr_x = gt[:, 0] + 0.5 * gt_width
gt_ctr_y = gt[:, 1] + 0.5 * gt_height
target_dx = (gt_ctr_x - bbox_ctr_x) / bbox_width
target_dy = (gt_ctr_y - bbox_ctr_y) / bbox_height
target_dw = F.log(gt_width / bbox_width)
target_dh = F.log(gt_height / bbox_height)
target = F.stack([target_dx, target_dy, target_dw, target_dh], axis=1)
return target
def box_overlap_opr(box: Tensor, gt: Tensor) -> Tensor:
"""
Given two lists of boxes of size N and M,
compute the IoU (intersection over union)
between __all__ N x M pairs of boxes.
The box order must be (xmin, ymin, xmax, ymax).
Args:
boxes1,boxes2 (Boxes): two `Boxes`. Contains N & M boxes, respectively.
Returns:
Tensor: IoU, sized [N,M].
"""
# box = boxes1
# gt = boxes2
# target_shape = (boxes1.shape[0], boxes2.shape[0], 4)
N, K = box.shape[0], gt.shape[0]
b_box = F.broadcast_to(F.expand_dims(box, 1),(N, K, box.shape[1]))
b_gt = F.broadcast_to(F.expand_dims(gt, 0), (N, K, gt.shape[1]))
# b_gt = F.expand_dims(gt, 0).broadcast_to(N, K, gt.shape[1])
# b_box = F.expand_dims(boxes1, 1).broadcast(*target_shape)
# b_gt = F.expand_dims(boxes2, 0).broadcast(*target_shape)
iw = F.minimum(b_box[:, :, 2], b_gt[:, :, 2]) - F.maximum(
b_box[:, :, 0], b_gt[:, :, 0]
)
ih = F.minimum(b_box[:, :, 3], b_gt[:, :, 3]) - F.maximum(
b_box[:, :, 1], b_gt[:, :, 1]
)
inter = F.maximum(iw, 0) * F.maximum(ih, 0)
area_box = F.maximum(box[:, 2] - box[:, 0], 0) * F.maximum(box[:, 3] - box[:, 1], 0)
area_gt = F.maximum(gt[:, 2] - gt[:, 0], 0) * F.maximum(gt[:, 3] - gt[:, 1], 0)
# area_target_shape = (box.shape[0], gt.shapeof()[0])
b_area_box = F.broadcast_to(F.expand_dims(area_box, 1), (N, K))
b_area_gt = F.broadcast_to(F.expand_dims(area_gt, 0), (N, K))
# b_area_box = F.expand_dims(area_box, 1).broadcast_to(N, K)
# b_area_gt = F.expand_dims(area_gt, 0).broadcast_to(N, K)
# b_area_box = F.add_axis(area_box, 1).broadcast(*area_target_shape)
# b_area_gt = F.add_axis(area_gt, 0).broadcast(*area_target_shape)
union = b_area_box + b_area_gt - inter
overlaps = F.maximum(inter / union, 0)
return overlaps
def box_overlap_ignore_opr(box: Tensor, gt: Tensor, ignore_label=-1) -> Tensor:
"""
Given two lists of boxes of size N and M,
compute the IoU (intersection over union)
between __all__ N x M pairs of boxes.
The box order must be (xmin, ymin, xmax, ymax).
Args:
boxes1,boxes2 (Boxes): two `Boxes`. Contains N & M boxes, respectively.
Returns:
Tensor: IoU, sized [N,M].
"""
# box = boxes1
# gt = boxes2
# target_shape = (boxes1.shapeof()[0], boxes2.shapeof()[0], 4)
eps = 1e-5
N, K = box.shape[0], gt.shape[0]
b_box = F.broadcast_to(F.expand_dims(box, 1), (N, K, box.shape[1]))
b_gt = F.broadcast_to(F.expand_dims(gt, 0), (N, K, gt.shape[1]))
# b_box = F.add_axis(boxes1, 1).broadcast(*target_shape)
# b_gt = F.add_axis(boxes2[:, :4], 0).broadcast(*target_shape)
iw = F.minimum(b_box[:, :, 2], b_gt[:, :, 2]) - F.maximum(
b_box[:, :, 0], b_gt[:, :, 0]
)
ih = F.minimum(b_box[:, :, 3], b_gt[:, :, 3]) - F.maximum(
b_box[:, :, 1], b_gt[:, :, 1]
)
inter = F.maximum(iw, 0) * F.maximum(ih, 0)
area_box = F.maximum(box[:, 2] - box[:, 0], 0) * F.maximum(box[:, 3] - box[:, 1], 0)
area_gt = F.maximum(gt[:, 2] - gt[:, 0], 0) * F.maximum(gt[:, 3] - gt[:, 1], 0)
# area_target_shape = (box.shapeof()[0], gt.shapeof()[0])
# b_area_box = F.add_axis(area_box, 1).broadcast(*area_target_shape)
# b_area_gt = F.add_axis(area_gt, 0).broadcast(*area_target_shape)
b_area_box = F.broadcast_to(F.expand_dims(area_box, 1), (N, K)) + eps
b_area_gt = F.broadcast_to( | F.expand_dims(area_gt, 0) | megengine.functional.expand_dims |
import math
import megengine as mge
import megengine.functional as F
import numpy as np
from megengine import Tensor
import pdb
def restore_bbox(rois, deltas, unnormalize=True, config = None):
assert deltas.ndim == 3
if unnormalize:
std_opr = mge.tensor(config.bbox_normalize_stds.reshape(1, 1, -1))
mean_opr = mge.tensor(config.bbox_normalize_means.reshape(1, 1, -1))
deltas = deltas * std_opr
deltas = deltas + mean_opr
# n = deltas.shape[1]
n, c = deltas.shape[0], deltas.shape[1]
all_rois = F.broadcast_to(F.expand_dims(rois, 1), (n, c, rois.shape[1])).reshape(-1, rois.shape[1])
deltas = deltas.reshape(-1, deltas.shape[2])
pred_bbox = bbox_transform_inv_opr(all_rois, deltas)
pred_bbox = pred_bbox.reshape(-1, c, pred_bbox.shape[1])
return pred_bbox
def filter_boxes_opr(boxes, min_size):
"""Remove all boxes with any side smaller than min_size."""
wh = boxes[:, 2:4] - boxes[:, 0:2] + 1
keep_mask = F.prod(wh >= min_size, axis = 1).astype(np.float32)
keep_mask = keep_mask + F.equal(keep_mask.sum(), 0).astype(np.float32)
    return keep_mask
def clip_boxes_opr(boxes, im_info):
""" Clip the boxes into the image region."""
w = im_info[1] - 1
h = im_info[0] - 1
boxes[:, 0::4] = boxes[:, 0::4].clamp(min=0, max=w)
boxes[:, 1::4] = boxes[:, 1::4].clamp(min=0, max=h)
boxes[:, 2::4] = boxes[:, 2::4].clamp(min=0, max=w)
boxes[:, 3::4] = boxes[:, 3::4].clamp(min=0, max=h)
return boxes
def bbox_transform_inv_opr(bbox, deltas):
max_delta = math.log(1000.0 / 16)
""" Transforms the learned deltas to the final bbox coordinates, the axis is 1"""
bbox_width = bbox[:, 2] - bbox[:, 0] + 1
bbox_height = bbox[:, 3] - bbox[:, 1] + 1
bbox_ctr_x = bbox[:, 0] + 0.5 * bbox_width
bbox_ctr_y = bbox[:, 1] + 0.5 * bbox_height
pred_ctr_x = bbox_ctr_x + deltas[:, 0] * bbox_width
pred_ctr_y = bbox_ctr_y + deltas[:, 1] * bbox_height
dw = deltas[:, 2]
dh = deltas[:, 3]
dw = F.minimum(dw, max_delta)
dh = F.minimum(dh, max_delta)
pred_width = bbox_width * F.exp(dw)
pred_height = bbox_height * F.exp(dh)
pred_x1 = pred_ctr_x - 0.5 * pred_width
pred_y1 = pred_ctr_y - 0.5 * pred_height
pred_x2 = pred_ctr_x + 0.5 * pred_width
pred_y2 = pred_ctr_y + 0.5 * pred_height
# pred_boxes = F.concat((pred_x1.reshape(-1, 1), pred_y1.reshape(-1, 1),
# pred_x2.reshape(-1, 1), pred_y2.reshape(-1, 1)), axis=1)
pred_boxes = F.stack([pred_x1, pred_y1, pred_x2, pred_y2], axis = 1)
return pred_boxes
def bbox_transform_opr(bbox, gt):
""" Transform the bounding box and ground truth to the loss targets.
The 4 box coordinates are in axis 1"""
bbox_width = bbox[:, 2] - bbox[:, 0] + 1
bbox_height = bbox[:, 3] - bbox[:, 1] + 1
bbox_ctr_x = bbox[:, 0] + 0.5 * bbox_width
bbox_ctr_y = bbox[:, 1] + 0.5 * bbox_height
gt_width = gt[:, 2] - gt[:, 0] + 1
gt_height = gt[:, 3] - gt[:, 1] + 1
gt_ctr_x = gt[:, 0] + 0.5 * gt_width
gt_ctr_y = gt[:, 1] + 0.5 * gt_height
target_dx = (gt_ctr_x - bbox_ctr_x) / bbox_width
target_dy = (gt_ctr_y - bbox_ctr_y) / bbox_height
target_dw = F.log(gt_width / bbox_width)
target_dh = F.log(gt_height / bbox_height)
target = F.stack([target_dx, target_dy, target_dw, target_dh], axis=1)
return target
def box_overlap_opr(box: Tensor, gt: Tensor) -> Tensor:
"""
Given two lists of boxes of size N and M,
compute the IoU (intersection over union)
between __all__ N x M pairs of boxes.
The box order must be (xmin, ymin, xmax, ymax).
Args:
boxes1,boxes2 (Boxes): two `Boxes`. Contains N & M boxes, respectively.
Returns:
Tensor: IoU, sized [N,M].
"""
# box = boxes1
# gt = boxes2
# target_shape = (boxes1.shape[0], boxes2.shape[0], 4)
N, K = box.shape[0], gt.shape[0]
b_box = F.broadcast_to(F.expand_dims(box, 1),(N, K, box.shape[1]))
b_gt = F.broadcast_to(F.expand_dims(gt, 0), (N, K, gt.shape[1]))
# b_gt = F.expand_dims(gt, 0).broadcast_to(N, K, gt.shape[1])
# b_box = F.expand_dims(boxes1, 1).broadcast(*target_shape)
# b_gt = F.expand_dims(boxes2, 0).broadcast(*target_shape)
iw = F.minimum(b_box[:, :, 2], b_gt[:, :, 2]) - F.maximum(
b_box[:, :, 0], b_gt[:, :, 0]
)
ih = F.minimum(b_box[:, :, 3], b_gt[:, :, 3]) - F.maximum(
b_box[:, :, 1], b_gt[:, :, 1]
)
inter = F.maximum(iw, 0) * F.maximum(ih, 0)
area_box = F.maximum(box[:, 2] - box[:, 0], 0) * F.maximum(box[:, 3] - box[:, 1], 0)
area_gt = F.maximum(gt[:, 2] - gt[:, 0], 0) * F.maximum(gt[:, 3] - gt[:, 1], 0)
# area_target_shape = (box.shape[0], gt.shapeof()[0])
b_area_box = F.broadcast_to(F.expand_dims(area_box, 1), (N, K))
b_area_gt = F.broadcast_to(F.expand_dims(area_gt, 0), (N, K))
# b_area_box = F.expand_dims(area_box, 1).broadcast_to(N, K)
# b_area_gt = F.expand_dims(area_gt, 0).broadcast_to(N, K)
# b_area_box = F.add_axis(area_box, 1).broadcast(*area_target_shape)
# b_area_gt = F.add_axis(area_gt, 0).broadcast(*area_target_shape)
union = b_area_box + b_area_gt - inter
overlaps = F.maximum(inter / union, 0)
return overlaps
def box_overlap_ignore_opr(box: Tensor, gt: Tensor, ignore_label=-1) -> Tensor:
"""
Given two lists of boxes of size N and M,
compute the IoU (intersection over union)
between __all__ N x M pairs of boxes.
The box order must be (xmin, ymin, xmax, ymax).
Args:
boxes1,boxes2 (Boxes): two `Boxes`. Contains N & M boxes, respectively.
Returns:
Tensor: IoU, sized [N,M].
"""
# box = boxes1
# gt = boxes2
# target_shape = (boxes1.shapeof()[0], boxes2.shapeof()[0], 4)
eps = 1e-5
N, K = box.shape[0], gt.shape[0]
b_box = F.broadcast_to(F.expand_dims(box, 1), (N, K, box.shape[1]))
b_gt = F.broadcast_to(F.expand_dims(gt, 0), (N, K, gt.shape[1]))
# b_box = F.add_axis(boxes1, 1).broadcast(*target_shape)
# b_gt = F.add_axis(boxes2[:, :4], 0).broadcast(*target_shape)
iw = F.minimum(b_box[:, :, 2], b_gt[:, :, 2]) - F.maximum(
b_box[:, :, 0], b_gt[:, :, 0]
)
ih = F.minimum(b_box[:, :, 3], b_gt[:, :, 3]) - F.maximum(
b_box[:, :, 1], b_gt[:, :, 1]
)
inter = F.maximum(iw, 0) * F.maximum(ih, 0)
area_box = F.maximum(box[:, 2] - box[:, 0], 0) * F.maximum(box[:, 3] - box[:, 1], 0)
area_gt = F.maximum(gt[:, 2] - gt[:, 0], 0) * F.maximum(gt[:, 3] - gt[:, 1], 0)
# area_target_shape = (box.shapeof()[0], gt.shapeof()[0])
# b_area_box = F.add_axis(area_box, 1).broadcast(*area_target_shape)
# b_area_gt = F.add_axis(area_gt, 0).broadcast(*area_target_shape)
b_area_box = F.broadcast_to( | F.expand_dims(area_box, 1) | megengine.functional.expand_dims |
import math
import megengine as mge
import megengine.functional as F
import numpy as np
from megengine import Tensor
import pdb
def restore_bbox(rois, deltas, unnormalize=True, config = None):
assert deltas.ndim == 3
if unnormalize:
std_opr = mge.tensor(config.bbox_normalize_stds.reshape(1, 1, -1))
mean_opr = mge.tensor(config.bbox_normalize_means.reshape(1, 1, -1))
deltas = deltas * std_opr
deltas = deltas + mean_opr
# n = deltas.shape[1]
n, c = deltas.shape[0], deltas.shape[1]
all_rois = F.broadcast_to( | F.expand_dims(rois, 1) | megengine.functional.expand_dims |
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
from collections import OrderedDict
from enum import Enum
from functools import cmp_to_key
from typing import Set # pylint: disable=unused-import
from typing import Callable, Dict, Sequence
import numpy as np
from megengine import Tensor
from megengine.functional import sqrt
from ..converter_ir.ir_graph import IRGraph
from .ir_op import (
AddOpr,
Conv2dOpr,
ConvRelu2dOpr,
Deconv2dOpr,
DropoutOpr,
ExpOpr,
FlattenOpr,
FuseMulAdd3Opr,
GetSubTensorOpr,
HardSigmoidOpr,
HardSwishOpr,
IdentityOpr,
LeakyReluOpr,
MulOpr,
OpBase,
PadOpr,
ReduceOpr,
ReluOpr,
ReshapeOpr,
ResizeOpr,
SoftmaxOpr,
SqueezeOpr,
SubOpr,
TanHOpr,
TransposeOpr,
TrueDivOpr,
_PoolOpr,
)
from .ir_tensor import AxisOrder, IRTensor
class TransformerRule(Enum):
# general rules
NOPE = 1
# for TFLite
REDUCE_AXIS_AS_INPUT = 100
REMOVE_RESHAPE_INPUT = 101
# FUSE_FOR_RELU6 pass should happen before FUSE_ACTIVATION
FUSE_FOR_RELU6 = 102 ##
EXPAND_CONVRELU = 102.1
CONV_ADD_ZERO_BIAS = 103
FUSE_FOR_CONV_BIAS = 103.1
FUSE_CONV_BN = 104
DECONV_ADD_ZERO_BIAS = 105
    # DEPTHWISE_CONV_RESHAPE_WEIGHT requires RESHAPE_BIAS_TO_1DIM
DEPTHWISE_CONV_RESHAPE_WEIGHT = 106
FUSE_SOFTMAX = 107
# RESHAPE_BIAS_TO_1DIM should happen before DECONV_SHAPE_AS_INPUT
RESHAPE_BIAS_TO_1DIM = 108
DECONV_SHAPE_AS_INPUT = 109
FUSE_ASTYPE = 110 ##
PADDING_FOR_CONV_AND_POOLING = 111
TRANSPOSE_PATTERN_AS_INPUT = 112
# FUSE_FOR_LEAKY_RELU should happen before EXPAND_MUL_ADD3
FUSE_FOR_LEAKY_RELU = 113
EXPAND_MUL_ADD3 = 114
EXPAND_ADD_SIGMOID = 115 ##
FUSE_FOR_DECONV_BIAS = 117
FUSE_FOR_FULLY_CONNECTED = 118 ##
# for TFLite Converter
SLICE_PARAMS_AS_INPUTS_AND_MAKE_SQUEEZE = 119
RESIZE_PARAMS_AS_INPUT = 120
REPLACE_FLATTEN_TO_RESHAPE = 120.1
# remove reshape
REMOVE_RESHAPE_REALTED_OP = 121
REMOVE_DROPOUT = 122
FUSE_ACTIVATION = 123
REMOVE_IDENTITY = 124
REMOVE_RELU = 125
REMOVE_UNRELATED_IROP = 130
ADD_FAKE_HSIGMOID_OUT = 131
RENAME_CAFFE_LAYER_TENSOR = 132
def cmp_rules(a, b):
if a.value < b.value:
return -1
if a.value > b.value:
return 1
return 0
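# Note: cmp_rules compares the numeric enum values above, and IRTransform sorts
# the requested rules with cmp_to_key(cmp_rules), which is what enforces ordering
# constraints such as RESHAPE_BIAS_TO_1DIM (108) running before
# DECONV_SHAPE_AS_INPUT (109).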
class IRTransform:
def __init__(self, transformer_options):
if not isinstance(transformer_options, Sequence):
transformer_options = [
transformer_options,
]
# bias of depthwise_conv must be 1 dim
if TransformerRule.DEPTHWISE_CONV_RESHAPE_WEIGHT in transformer_options:
if TransformerRule.RESHAPE_BIAS_TO_1DIM not in transformer_options:
transformer_options.append(TransformerRule.RESHAPE_BIAS_TO_1DIM)
self.trans_options = sorted(transformer_options, key=cmp_to_key(cmp_rules))
def transform(self, ir_graph):
for option in self.trans_options:
TRANSFORMMAP[option](ir_graph)
return ir_graph
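# Hypothetical usage sketch (the rule list below is illustrative, not an official
# preset): pick the rules a target format needs, let the constructor sort them,
# then run them over an IRGraph.
#
#   transformer = IRTransform([
#       TransformerRule.REMOVE_RESHAPE_INPUT,
#       TransformerRule.TRANSPOSE_PATTERN_AS_INPUT,
#       TransformerRule.PADDING_FOR_CONV_AND_POOLING,
#   ])
#   transformed_graph = transformer.transform(ir_graph)   # ir_graph: an IRGraph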
TRANSFORMMAP: Dict[Enum, Callable] = {}
def _register_tranformation_rule(transformer_option):
def callback(impl):
TRANSFORMMAP[transformer_option] = impl
return callback
def cal_pad_mode(tm_opr):
out_shape = tm_opr.out_tensors[0].shape
inp_shape = tm_opr.inp_tensors[0].shape
if out_shape[2:] == inp_shape[2:]:
return "SAME"
else:
return "VALID"
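# Heuristic: if the trailing (spatial) dims of input and output match, the op is
# treated as TFLite-style "SAME" padding, otherwise "VALID". E.g. a stride-1 conv
# mapping (1, 16, 224, 224) -> (1, 32, 224, 224) yields "SAME", while a stride-2
# conv producing (1, 32, 112, 112) yields "VALID" (shapes here are illustrative).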
@_register_tranformation_rule(TransformerRule.REMOVE_RESHAPE_INPUT)
def _remove_reshape_input(net):
for op in net.all_oprs:
if not isinstance(op, ReshapeOpr):
continue
if len(op.inp_tensors) == 2:
del op.inp_tensors[1]
@_register_tranformation_rule(TransformerRule.TRANSPOSE_PATTERN_AS_INPUT)
def _transpose_pattern_as_input(net):
for op in net.all_oprs:
if not isinstance(op, TransposeOpr):
continue
perm_tensor = IRTensor(
name=op.inp_tensors[0].name + "_perm",
shape=np.array(op.pattern).shape,
dtype=np.int32,
np_data=np.array(op.pattern, dtype=np.int32),
owner_opr=op,
q_type=np.int32,
axis=None,
)
op.add_inp_tensors(perm_tensor)
@_register_tranformation_rule(TransformerRule.REDUCE_AXIS_AS_INPUT)
def _reduce_axis_as_input(net):
for op in net.all_oprs:
if not isinstance(op, ReduceOpr):
continue
axis_tensor = IRTensor(
name=op.inp_tensors[0].name + "_axis",
shape=[1],
dtype=np.int32,
np_data=np.array(op.axis, dtype=np.int32),
owner_opr=op,
q_type=np.int32,
axis=None,
)
op.add_inp_tensors(axis_tensor)
@_register_tranformation_rule(TransformerRule.PADDING_FOR_CONV_AND_POOLING)
def _make_padding(net: IRGraph):
def have_padding(opr):
if isinstance(opr, Conv2dOpr):
if cal_pad_mode(opr) == "SAME":
return False
if hasattr(opr, "padding") and (opr.padding[0] > 0 or opr.padding[1] > 0):
return True
return False
insert_intended = OrderedDict() # type: OrderedDict
for op in net.all_oprs:
if not isinstance(op, (Conv2dOpr, _PoolOpr)):
continue
if have_padding(op):
assert op.inp_tensors[0].ndim == 4, "ERROR: unsupported padding mode"
np_data = np.array(
[
0,
0,
op.padding[0],
op.padding[0],
op.padding[1],
op.padding[1],
0,
0,
],
dtype=np.int32,
)
new_tensor_id = max(net._tensor_ids) + 1
pad_in_tensor = IRTensor(
name=op.inp_tensors[0].name + "_paddings",
shape=[4, 2],
dtype=np.int32,
owner_opr=None,
np_data=np_data,
q_type=np.int32,
axis=None,
)
net.add_tensor(new_tensor_id, pad_in_tensor)
shape = list(op.inp_tensors[0].shape)
new_tensor_id = max(net._tensor_ids) + 1
pad_out_tensor = IRTensor(
name=op.inp_tensors[0].name + "_pad_out",
shape=[
shape[0],
shape[1],
shape[2] + op.padding[0] * 2,
shape[3] + op.padding[1] * 2,
],
dtype=op.inp_tensors[0].dtype,
)
if (
hasattr(op.inp_tensors[0], "scale")
and op.inp_tensors[0].scale is not None
):
pad_out_tensor.scale = op.inp_tensors[0].scale
pad_out_tensor.q_dtype = op.inp_tensors[0].q_dtype
if hasattr(op.inp_tensors[0], "zero_point"):
pad_out_tensor.zero_point = op.inp_tensors[0].zero_point
net.add_tensor(new_tensor_id, pad_out_tensor)
pad_opr = PadOpr()
pad_opr.inp_tensors = [op.inp_tensors[0], pad_in_tensor]
index = op.inp_tensors[0].user_opr.index(op)
op.inp_tensors[0].user_opr[index] = pad_opr
pad_opr.out_tensors = [pad_out_tensor]
pad_out_tensor.owner_opr = pad_opr
op.inp_tensors = [pad_out_tensor] + op.inp_tensors[1:]
pad_out_tensor.user_opr.append(op)
index = net._opr_ids.index(id(op))
insert_intended[index] = (id(pad_opr), pad_opr)
for index, generated_pair in list(insert_intended.items())[::-1]:
net._opr_ids.insert(index, generated_pair[0])
net.all_oprs.insert(index, generated_pair[1])
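# Layout note for the paddings tensor built above (an assumption read off the flat
# array [0, 0, pad_h, pad_h, pad_w, pad_w, 0, 0] reshaped to (4, 2)): each row is
# a (before, after) pair per dimension in N, H, W, C order, i.e.
#
#     [[0, 0], [pad_h, pad_h], [pad_w, pad_w], [0, 0]]
#
# which matches the NHWC ordering a TFLite PAD operator consumes, while the padded
# output shape itself is still tracked in NCHW ([n, c, h + 2*pad_h, w + 2*pad_w]).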
@_register_tranformation_rule(TransformerRule.DECONV_SHAPE_AS_INPUT)
def _deconv_shape_as_input(net: IRGraph):
for op in net.all_oprs:
if not isinstance(op, Deconv2dOpr):
continue
result_shape = op.out_tensors[0].shape
np_data = np.array(
[result_shape[0], result_shape[2], result_shape[3], result_shape[1],],
dtype=np.int32,
)
new_tensor_id = max(net._tensor_ids) + 1
shape_symvar = IRTensor(
name=op.inp_tensors[0].name + "_deconv_out_shape",
shape=[4],
dtype=np.int32,
owner_opr=op,
np_data=np_data,
q_type=np.int32,
axis=None,
)
shape_tensor = net.get_tensor(new_tensor_id, shape_symvar)
if len(op.inp_tensors) == 2:
op.inp_tensors = [
shape_tensor,
op.inp_tensors[1],
op.inp_tensors[0],
]
else:
op.inp_tensors = [
shape_tensor,
op.inp_tensors[1],
op.inp_tensors[0],
op.inp_tensors[2],
]
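# After this pass a Deconv2d's inputs are reordered to [output_shape, weight,
# input] (plus bias if present), with output_shape stored as an int32 vector in
# (N, H, W, C) order. This presumably mirrors TFLite's TRANSPOSE_CONV, which takes
# the output shape as its first operand.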
@_register_tranformation_rule(TransformerRule.RESIZE_PARAMS_AS_INPUT)
def _resize_params_as_input(net):
for op in net.all_oprs:
if not isinstance(op, ResizeOpr):
continue
if len(op.inp_tensors) == 2:
continue
out_size_tensor = IRTensor(
name=op.inp_tensors[0].name + "_out_size",
shape=(2,),
dtype=np.int32,
np_data=np.array(op.out_size, dtype=np.int32),
q_type=np.int32,
axis=None,
)
op.add_inp_tensors(out_size_tensor)
@_register_tranformation_rule(TransformerRule.CONV_ADD_ZERO_BIAS)
def _add_bias_for_conv(net: IRGraph):
for op in net.all_oprs:
if not isinstance(op, Conv2dOpr):
continue
if len(op.inp_tensors) == 3:
continue
weight_shape = op.inp_tensors[1].shape
bias_shape = (
weight_shape[0]
if len(weight_shape) == 4
else weight_shape[0] * weight_shape[1]
)
bias_shape = (1, bias_shape, 1, 1)
bias = np.zeros(bias_shape, dtype=np.float32)
bias_tensor = IRTensor(
name=op.inp_tensors[0].name + "_bias",
shape=bias_shape,
dtype=np.float32,
np_data=bias,
axis=AxisOrder.NCHW,
)
if op.inp_tensors[0].scale and op.inp_tensors[1].scale:
bias_tensor.set_qparams(
op.inp_tensors[0].scale * op.inp_tensors[1].scale, 0
)
bias_tensor.q_dtype = "int32"
op.inp_tensors.append(bias_tensor)
@_register_tranformation_rule(TransformerRule.DECONV_ADD_ZERO_BIAS)
def _add_bias_for_deconv(net: IRGraph):
for op in net.all_oprs:
if not isinstance(op, Deconv2dOpr):
continue
if len(op.inp_tensors) == 3:
continue
weight_shape = op.inp_tensors[1].shape
bias_shape = (
weight_shape[1]
if len(weight_shape) == 4
else weight_shape[0] * weight_shape[2]
)
bias_shape = (1, bias_shape, 1, 1)
bias = np.zeros(bias_shape, dtype=np.float32)
bias_tensor = IRTensor(
name=op.inp_tensors[0].name + "_bias",
shape=bias_shape,
dtype=np.float32,
np_data=bias,
axis=AxisOrder.NCHW,
)
if op.inp_tensors[0].scale and op.inp_tensors[1].scale:
bias_tensor.set_qparams(
op.inp_tensors[0].scale * op.inp_tensors[1].scale, 0
)
bias_tensor.q_dtype = "int32"
op.inp_tensors.append(bias_tensor)
@_register_tranformation_rule(TransformerRule.RESHAPE_BIAS_TO_1DIM)
def _reshape_bias_to_1dim(net: IRGraph):
for op in net.all_oprs:
if not isinstance(op, (Deconv2dOpr, Conv2dOpr)):
continue
if len(op.inp_tensors) == 2:
continue
bias = op.inp_tensors[2]
if bias.ndim == 4:
bias.shape = (bias.shape[1],)
bias.np_data = bias.np_data.reshape(-1)
@_register_tranformation_rule(TransformerRule.DEPTHWISE_CONV_RESHAPE_WEIGHT)
def _depthwise_conv_reshape_weight(net: IRGraph):
# general group conv is not supported for TFLite
for op in net.all_oprs:
if not isinstance(op, Conv2dOpr):
continue
if op.groups == 1:
continue
weight = op.inp_tensors[1] # G, oc/G, ic/G, kh, kw
ic, cm = weight.shape[1] * op.groups, weight.shape[2]
h, w = weight.shape[3:5]
weight.shape = (ic, cm, h, w) # oc, ic/G, kh, kw
weight.np_data = weight.np_data.reshape(ic, cm, h, w)
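# Reshape example with hypothetical sizes: a grouped weight of shape
# (G, oc/G, ic/G, kh, kw) = (8, 1, 1, 3, 3) -- a depthwise conv over 8 channels --
# becomes (oc, ic/G, kh, kw) = (8, 1, 3, 3), matching the layout noted in the
# comments above.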
@_register_tranformation_rule(TransformerRule.FUSE_ACTIVATION)
def _fuse_activation(net):
delete_intended = []
for op_id, op in zip(net._opr_ids, net.all_oprs):
if isinstance(op, (ReluOpr, TanHOpr)):
prev_ops = net.find_inp_oprs(op)
if len(prev_ops) == 0:
continue
prev_op = prev_ops[0]
if not isinstance(prev_op, OpBase):
continue
if prev_op.activation != "IDENTITY" or prev_op.name == "Deconv2d":
continue
activation = op.name.upper()
prev_op.activation = activation
prev_op.out_tensors = op.out_tensors
for t in prev_op.out_tensors:
t.owner_opr = prev_op
delete_intended.append(net._opr_ids.index(op_id))
for delete_idx in delete_intended[::-1]:
net.delete_ops(delete_idx)
@_register_tranformation_rule(TransformerRule.SLICE_PARAMS_AS_INPUTS_AND_MAKE_SQUEEZE)
def _make_slice_as_inputs(net: IRGraph):
for op in net.all_oprs:
if not isinstance(op, GetSubTensorOpr):
continue
ndim = op.inp_tensors[0].ndim
def make_input(axis, param, init_value):
# make inputs: begin, end and step.
ret = [init_value] * ndim # pylint:disable=cell-var-from-loop
for k, v in zip(axis, param):
ret[k] = v
ret = IRTensor(
name=op.name + "_fake_input", # pylint:disable=cell-var-from-loop
shape=[len(ret)],
dtype=np.int32,
np_data=np.array(ret, dtype=np.int32),
owner_opr=op, # pylint:disable=cell-var-from-loop
q_type=np.int32,
)
return ret
begins_tensor = make_input(op.axis, op.begin_params, 0)
ends_tensor = make_input(op.axis, op.end_params, np.iinfo(np.int32).max)
steps_tensor = make_input(op.axis, op.step_params, 1)
op.inp_tensors = [op.inp_tensors[0], begins_tensor, ends_tensor, steps_tensor]
        # TFLite slice does not support squeeze axis, so insert a squeeze opr here.
# infer actual output shape of tflite slice
desired_out_shape = op.out_tensors[0].shape
actual_out_shape = [1] * ndim
idx = 0
for i in range(ndim):
if i in op.squeeze_axis:
continue
actual_out_shape[i] = desired_out_shape[idx]
idx += 1
slice_out_tensor = IRTensor(
name=op.name + "fake_output",
shape=actual_out_shape,
dtype=op.out_tensors[0].dtype,
q_type=op.out_tensors[0].q_dtype,
owner_opr=op,
)
old_out = op.out_tensors
op.out_tensors = [slice_out_tensor]
squeeze = SqueezeOpr(op.squeeze_axis)
squeeze.inp_tensors = [slice_out_tensor]
squeeze.out_tensors = old_out
idx = net._opr_ids.index(id(op)) + 1
net.add_op(squeeze, idx)
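# In short: a GetSubTensor op becomes a strided slice with explicit begin/end/step
# input tensors (one int32 entry per input dim, with int32 max standing in for an
# unspecified end), followed by a Squeeze that drops the axes MegEngine indexed
# away. E.g. x[:, 3] on a (2, 4) tensor turns into
# slice(begin=[0, 3], end=[INT32_MAX, 4], step=[1, 1]) -> shape (2, 1), then
# squeeze(axis=1) -> shape (2,). (Example values are illustrative.)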
# caffe transformer rules
class PatternNode:
def __init__(self, type, is_output=False, const_value=None):
self.op = None
self.type = type
self.inp_oprs = []
self.inp_const = []
self.inp_tensors = []
self.is_output = is_output
self.const_value = const_value
def check_const_value(self, op):
inp_tensors = [v.np_data for v in op.inp_tensors]
for const in self.const_value:
idx = const[0]
if idx == -1:
find = False
for index, v in enumerate(inp_tensors):
if np.array_equal(const[1], v):
find = True
del inp_tensors[index]
break
if not find:
return False
elif not np.array_equal(const[1], inp_tensors[idx]):
return False
return True
get_type = lambda op: type(op).__name__
def match(node, opr):
node_queue = [node]
opr_queue = [opr]
matched_opr = set()
matched_node = set()
while len(node_queue) != 0:
cur_node = node_queue.pop(0)
cur_opr = opr_queue.pop(0)
        if (cur_node.type != get_type(cur_opr) and cur_node.type != "*") or cur_opr.skip:
            return False
        if cur_node.op is None:
            cur_node.op = cur_opr
            if cur_node.const_value is not None:
if not cur_node.check_const_value(cur_opr):
return False
elif cur_node.op != cur_opr:
return False
matched_opr.add(cur_opr)
matched_node.add(cur_node)
for i, var in enumerate(cur_opr.inp_tensors):
if var.np_data is not None:
cur_node.inp_const.append([i, var.np_data])
else:
cur_node.inp_tensors.append([i, var])
if len(cur_node.inp_oprs) == 0:
continue
if len(cur_node.inp_oprs) != len(cur_opr.inp_oprs):
return False
for i, j in zip(cur_node.inp_oprs, cur_opr.inp_oprs):
node_queue.append(i)
opr_queue.append(j)
for n in matched_node:
if n.is_output:
continue
for op in n.op.out_oprs:
if op not in matched_opr:
return False
return True
def get_softmax_axis(ndim: int) -> int:
if ndim in (0, 1, 3):
return 0
return 1
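# For 2-D (N, C) and 4-D (N, C, H, W) inputs this picks axis 1 (the channel
# axis); for 1-D and 3-D inputs it picks axis 0. The fused softmax pattern below
# is only recognised when both reduce ops operate over exactly this axis.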
@_register_tranformation_rule(TransformerRule.FUSE_SOFTMAX)
def _fuse_softmax(net: IRGraph):
matches = OrderedDict() # type: OrderedDict
for op in net.all_oprs:
if not isinstance(op, TrueDivOpr):
continue
try:
prev_op = net.find_inp_oprs(op)[1]
cur_index = net._opr_ids.index(id(op))
if (
not isinstance(prev_op, ReduceOpr)
or prev_op.mode != "SUM"
or prev_op.axis != get_softmax_axis(prev_op.inp_tensors[0].ndim)
or net._opr_ids.index(id(prev_op)) != cur_index - 1
):
continue
prev_op = net.find_inp_oprs(op)[0]
if (
not isinstance(prev_op, ExpOpr)
or net._opr_ids.index(id(prev_op)) != cur_index - 2
):
continue
prev_op = net.find_inp_oprs(prev_op)[0]
if (
not isinstance(prev_op, SubOpr)
or net._opr_ids.index(id(prev_op)) != cur_index - 3
):
continue
prev_op = net.find_inp_oprs(prev_op)[1]
if (
not isinstance(prev_op, ReduceOpr)
or prev_op.mode != "MAX"
or prev_op.axis != get_softmax_axis(prev_op.inp_tensors[0].ndim)
or net._opr_ids.index(id(prev_op)) != cur_index - 4
):
continue
except IndexError: # doesn't match
continue
softmax_opr = SoftmaxOpr(axis=get_softmax_axis(prev_op.inp_tensors[0].ndim))
softmax_opr.beta = 1
softmax_opr.inp_tensors = prev_op.inp_tensors[:1]
for i in softmax_opr.inp_tensors:
i.user_opr.append(softmax_opr)
softmax_opr.out_tensors = op.out_tensors
softmax_out_oprs = net.find_out_oprs(op)
matches[id(prev_op)] = (id(prev_op), softmax_opr, softmax_out_oprs)
for original_id, generated_pair in list(matches.items())[::-1]:
index = net._opr_ids.index(original_id)
for out_op in generated_pair[2]:
generated_pair[1].out_tensors[0].user_opr.append(out_op)
del net._opr_ids[index : index + 5]
del net.all_oprs[index : index + 5]
net._opr_ids.insert(index, generated_pair[0])
net.all_oprs.insert(index, generated_pair[1])
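# The five-op chain matched above is the numerically stable softmax expansion:
#
#     m = reduce_max(x, axis)    # ReduceOpr(MAX)
#     e = exp(x - m)             # SubOpr + ExpOpr
#     s = reduce_sum(e, axis)    # ReduceOpr(SUM)
#     y = e / s                  # TrueDivOpr
#
# i.e. y = softmax(x, axis); the whole chain is replaced by a single SoftmaxOpr
# reading the original input tensor.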
@_register_tranformation_rule(TransformerRule.FUSE_FOR_LEAKY_RELU)
def _fuse_leaky_relu(net: IRGraph):
"""
Elemwise(ADD) + Elemwise(MUL) + Elemwise(MAX) + Elemwise(MIN) -> LeakyRelu
"""
for opr in net.all_oprs:
if (
opr.name == "Add"
and len(net.find_inp_oprs(opr)) == 2
and net.find_inp_oprs(opr)[0].name == "Max"
and net.find_inp_oprs(opr)[1].name == "Mul"
):
max_op = net.find_inp_oprs(opr)[0]
mul_op = net.find_inp_oprs(opr)[1]
if not mul_op.inp_tensors[1].shape == (1,):
continue
if not max_op.inp_tensors[1].shape == (1,):
continue
if (
len(net.find_inp_oprs(mul_op)) != 1
or net.find_inp_oprs(mul_op)[0].name != "Min"
or net.find_inp_oprs(mul_op)[0].inp_tensors[1].shape != (1,)
):
continue
min_op = net.find_inp_oprs(mul_op)[0]
if not min_op.inp_tensors[1].shape == (1,):
continue
if max_op.inp_tensors[0] != min_op.inp_tensors[0]:
continue
leaky_relu = LeakyReluOpr(
negative_slope=float(mul_op.inp_tensors[1].np_data)
)
leaky_relu.inp_tensors = [max_op.inp_tensors[0]]
max_op.inp_tensors[0].user_opr.remove(max_op)
max_op.inp_tensors[0].user_opr.remove(min_op)
max_op.inp_tensors[0].user_opr.append(leaky_relu)
leaky_relu.out_tensors = opr.out_tensors
opr.out_tensors[0].owner_opr = leaky_relu
index = net.all_oprs.index(max_op)
del net.all_oprs[index : index + 4]
del net._opr_ids[index : index + 4]
net.add_op(leaky_relu, index)
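# The matched expression is max(x, c1) + slope * min(x, c2), which equals
# LeakyReLU when the scalar operands c1 and c2 are 0 (the pass only checks their
# shapes; 0 is assumed here): y = x for x >= 0 and y = slope * x for x < 0.
# Quick check with illustrative values: x = -2.0, slope = 0.1 gives
# max(x, 0) + slope * min(x, 0) = 0 + 0.1 * (-2.0) = -0.2 = slope * x.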
@_register_tranformation_rule(TransformerRule.FUSE_FOR_CONV_BIAS)
def _fuse_for_conv_bias(net: IRGraph):
"""
ConvolutionForward + Elemwise(ADD) -> ConvForwardBias
"""
for opr in net.all_oprs:
if (
opr.name == "Conv2d"
and len(net.find_out_oprs(opr)) == 1
and net.find_out_oprs(opr)[0].name == "Add"
):
bias_op = net.find_out_oprs(opr)[0]
if not (
(
bias_op.inp_tensors[1].np_data is not None
and len(bias_op.inp_tensors[1].np_data.reshape(-1))
== opr.inp_tensors[1].shape[0]
)
or (
(
bias_op.inp_tensors[0].np_data is not None
and len(bias_op.inp_tensors[0].np_data.reshape(-1))
== opr.inp_tensors[1].shape[0]
)
)
):
continue
bias_idx = 0 if bias_op.inp_tensors[0].np_data is not None else 1
if len(opr.inp_tensors) == 2:
opr.inp_tensors.append(bias_op.inp_tensors[bias_idx])
else:
bias_shape = opr.inp_tensors[2].np_data.shape
add_tensor = bias_op.inp_tensors[bias_idx].np_data
if add_tensor.shape != bias_shape:
add_tensor = add_tensor.reshape(bias_shape)
opr.inp_tensors[2].np_data += add_tensor
if bias_op in opr.out_tensors[0].user_opr:
opr.out_tensors[0].user_opr.remove(bias_op)
bias_out_op = net.find_out_oprs(bias_op)
if len(bias_out_op) > 0:
for op in bias_out_op:
op.inp_tensors[0] = opr.out_tensors[0]
opr.out_tensors[0].user_opr.append(op)
else:
# last op of the graph
assert bias_op.out_tensors[0] in net.graph_outputs
index = net.graph_outputs.index(bias_op.out_tensors[0])
net.graph_outputs[index] = opr.out_tensors[0]
opr.activation = bias_op.activation
index = net.all_oprs.index(bias_op)
del net.all_oprs[index]
del net._opr_ids[index]
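# Net effect: the Add's constant operand (one value per conv output channel) is
# absorbed into the conv's bias -- appended if the conv had none, summed into the
# existing bias otherwise -- the Add node is deleted, its consumers (or the graph
# output) are rewired to the conv output, and its activation is carried over.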
@_register_tranformation_rule(TransformerRule.FUSE_FOR_DECONV_BIAS)
def _fuse_for_deconv_bias(net: IRGraph):
for opr in net.all_oprs:
if (
opr.name == "Deconv2d"
and len(net.find_out_oprs(opr)) == 1
and net.find_out_oprs(opr)[0].name == "Add"
):
bias_op = net.find_out_oprs(opr)[0]
if not (
(
bias_op.inp_tensors[1].np_data is not None
and len(bias_op.inp_tensors[1].np_data.reshape(-1))
== opr.inp_tensors[1].shape[1]
)
or (
(
bias_op.inp_tensors[0].np_data is not None
and len(bias_op.inp_tensors[0].np_data.reshape(-1))
== opr.inp_tensors[1].shape[1]
)
)
):
continue
bias_idx = 0 if bias_op.inp_tensors[0].np_data is not None else 1
            if len(opr.inp_tensors) == 3:  # inp_tensors are [shape, weight, input]; append bias as the 4th
opr.inp_tensors.append(bias_op.inp_tensors[bias_idx])
else:
bias_shape = opr.inp_tensors[3].np_data.shape
add_tensor = bias_op.inp_tensors[bias_idx].np_data
if add_tensor.shape != bias_shape:
add_tensor = add_tensor.reshape(bias_shape)
opr.inp_tensors[3].np_data += add_tensor
if bias_op in opr.out_tensors[0].user_opr:
opr.out_tensors[0].user_opr.remove(bias_op)
bias_out_op = net.find_out_oprs(bias_op)
if len(bias_out_op) > 0:
for op in bias_out_op:
op.inp_tensors[0] = opr.out_tensors[0]
opr.out_tensors[0].user_opr.append(op)
else:
# last op of the graph
assert bias_op.out_tensors[0] in net.graph_outputs
index = net.graph_outputs.index(bias_op.out_tensors[0])
net.graph_outputs[index] = opr.out_tensors[0]
opr.activation = bias_op.activation
index = net.all_oprs.index(bias_op)
del net.all_oprs[index]
del net._opr_ids[index]
@_register_tranformation_rule(TransformerRule.EXPAND_MUL_ADD3)
def _expand_mul_add3(net: IRGraph):
for op in net.all_oprs:
if not isinstance(op, FuseMulAdd3Opr):
continue
last_op = net.find_inp_oprs(op)
assert len(last_op) == 1
mul_out_tensor = IRTensor(
name=op.inp_tensors[0].name + "_mul_out",
shape=op.inp_tensors[0].shape,
dtype=op.inp_tensors[0].dtype,
)
new_tensor_id = max(net._tensor_ids) + 1
net.add_tensor(new_tensor_id, mul_out_tensor)
mul_op = MulOpr()
mul_out_tensor.owner_opr = mul_op
mul_op.inp_tensors = op.inp_tensors[:2]
for o in mul_op.inp_tensors:
index = o.user_opr.index(op)
o.user_opr[index] = mul_op
mul_op.out_tensors = [mul_out_tensor]
add_op = AddOpr()
add_op.inp_tensors = [mul_out_tensor, op.inp_tensors[2]]
mul_out_tensor.user_opr.append(add_op)
add_op.out_tensors = op.out_tensors
index = net._opr_ids.index(id(op))
net.delete_ops(index)
net.add_op(mul_op, index)
net.add_op(add_op, index + 1)
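# FuseMulAdd3 computes a * b + c as one op; this pass expands it into an explicit
# Mul followed by an Add through a new intermediate "_mul_out" tensor, so that
# backends without a fused multiply-add operator can consume the graph.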
@_register_tranformation_rule(TransformerRule.REPLACE_FLATTEN_TO_RESHAPE)
def _replace_flatten_to_reshape(net: IRGraph):
for opr in net.all_oprs:
if isinstance(opr, FlattenOpr):
out_shape = tuple(list(opr.inp_tensors[0].shape[: opr.start_axis]) + [-1])
reshape_op = ReshapeOpr(out_shape=out_shape)
reshape_op.inp_tensors = opr.inp_tensors
for t in reshape_op.inp_tensors:
idx = t.user_opr.index(opr)
t.user_opr[idx] = reshape_op
reshape_op.out_tensors = opr.out_tensors
for t in reshape_op.out_tensors:
t.owner_opr = reshape_op
net.replace_op(opr, reshape_op)
@_register_tranformation_rule(TransformerRule.REMOVE_RESHAPE_REALTED_OP)
def _remove_reshape_tensors(net: IRGraph):
for opr in net.all_oprs:
if isinstance(opr, ReshapeOpr) and len(opr.inp_tensors) > 1:
opr.inp_tensors = opr.inp_tensors[:1]
@_register_tranformation_rule(TransformerRule.REMOVE_DROPOUT)
def _remove_dropout(net: IRGraph):
for opr in net.all_oprs:
for idx, inp in enumerate(opr.inp_tensors):
owner_opr = inp.owner_opr
if isinstance(owner_opr, DropoutOpr) and owner_opr.drop_prob == 0:
opr.inp_tensors[idx] = owner_opr.inp_tensors[0]
for idx, out in enumerate(net.graph_outputs):
owner_opr = out.owner_opr
if isinstance(owner_opr, DropoutOpr) and owner_opr.drop_prob == 0:
net.graph_outputs[idx] = owner_opr.inp_tensors[0]
@_register_tranformation_rule(TransformerRule.REMOVE_RELU)
def _remove_relu(net: IRGraph):
for opr in net.all_oprs:
for idx, inp in enumerate(opr.inp_tensors):
owner_opr = inp.owner_opr
if isinstance(owner_opr, ReluOpr):
opr.inp_tensors[idx] = owner_opr.inp_tensors[0]
for idx, out in enumerate(net.graph_outputs):
owner_opr = out.owner_opr
if isinstance(owner_opr, ReluOpr):
net.graph_outputs[idx] = owner_opr.inp_tensors[0]
visited_tensor = set() # type: set
def _dfs_recursive(op_set, tensor):
owner_opr = tensor.owner_opr
op_set.add(owner_opr)
if tensor in visited_tensor:
return
visited_tensor.add(tensor)
if isinstance(owner_opr, IRGraph) or owner_opr is None:
return
for tt in owner_opr.inp_tensors:
_dfs_recursive(op_set, tt)
@_register_tranformation_rule(TransformerRule.REMOVE_UNRELATED_IROP)
def _remove_unrelated_op(net: IRGraph):
match_sets = set() # type: Set[OpBase]
for out_tensor in net.graph_outputs:
_dfs_recursive(match_sets, out_tensor)
remove_idx = []
for opr in net.all_oprs:
if opr not in match_sets:
index = net._opr_ids.index(id(opr))
remove_idx.append(index)
for i in remove_idx[::-1]:
net.delete_ops(i)
@_register_tranformation_rule(TransformerRule.ADD_FAKE_HSIGMOID_OUT)
def _add_fake_hsigmoid_tensor(net: IRGraph):
for opr in net.all_oprs:
if isinstance(opr, (HardSwishOpr, HardSigmoidOpr)):
add_3_out_tensor = IRTensor(
opr.out_tensors[0].name + "_fake_add3_out",
opr.inp_tensors[0].shape,
opr.inp_tensors[0].dtype,
q_type=opr.inp_tensors[0].q_dtype,
scale=opr.inp_tensors[0].scale,
zero_point=opr.inp_tensors[0].zero_point,
)
opr.add_inp_tensors(add_3_out_tensor)
relu6_out_tensor = IRTensor(
opr.out_tensors[0].name + "_relu6_out",
opr.inp_tensors[0].shape,
opr.inp_tensors[0].dtype,
q_type=opr.inp_tensors[0].q_dtype,
scale=opr.inp_tensors[0].scale,
zero_point=opr.inp_tensors[0].zero_point,
)
opr.add_inp_tensors(relu6_out_tensor)
if isinstance(opr, HardSwishOpr):
div6_out_tensor = IRTensor(
opr.out_tensors[0].name + "_div_out",
opr.inp_tensors[0].shape,
opr.inp_tensors[0].dtype,
q_type=opr.inp_tensors[0].q_dtype,
scale=opr.inp_tensors[0].scale,
zero_point=opr.inp_tensors[0].zero_point,
)
opr.add_inp_tensors(div6_out_tensor)
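# Background: hardsigmoid(x) = relu6(x + 3) / 6 and hardswish(x) = x * relu6(x + 3) / 6.
# Converters that expand these ops into an add / relu6 / div (/ mul) chain need
# tensors for the intermediate results, so fake "_fake_add3_out", "_relu6_out"
# (and, for hardswish, "_div_out") tensors carrying the input's quantization
# params are attached here.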
def fold_conv_bn(
conv_weight, conv_bias, conv_groups, gamma, beta, bn_mean, bn_var, eps
):
conv_bias = conv_bias.reshape(1, -1, 1, 1)
gamma = gamma.reshape(1, -1, 1, 1)
beta = beta.reshape(1, -1, 1, 1)
bn_mean = bn_mean.reshape(1, -1, 1, 1)
bn_var = bn_var.reshape(1, -1, 1, 1)
# bn_istd = 1 / bn_std
bn_istd = 1.0 / | sqrt(bn_var + eps) | megengine.functional.sqrt |
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
from collections import OrderedDict
from enum import Enum
from functools import cmp_to_key
from typing import Set # pylint: disable=unused-import
from typing import Callable, Dict, Sequence
import numpy as np
from megengine import Tensor
from megengine.functional import sqrt
from ..converter_ir.ir_graph import IRGraph
from .ir_op import (
AddOpr,
Conv2dOpr,
ConvRelu2dOpr,
Deconv2dOpr,
DropoutOpr,
ExpOpr,
FlattenOpr,
FuseMulAdd3Opr,
GetSubTensorOpr,
HardSigmoidOpr,
HardSwishOpr,
IdentityOpr,
LeakyReluOpr,
MulOpr,
OpBase,
PadOpr,
ReduceOpr,
ReluOpr,
ReshapeOpr,
ResizeOpr,
SoftmaxOpr,
SqueezeOpr,
SubOpr,
TanHOpr,
TransposeOpr,
TrueDivOpr,
_PoolOpr,
)
from .ir_tensor import AxisOrder, IRTensor
class TransformerRule(Enum):
# general rules
NOPE = 1
# for TFLite
REDUCE_AXIS_AS_INPUT = 100
REMOVE_RESHAPE_INPUT = 101
# FUSE_FOR_RELU6 pass should happen before FUSE_ACTIVATION
FUSE_FOR_RELU6 = 102 ##
EXPAND_CONVRELU = 102.1
CONV_ADD_ZERO_BIAS = 103
FUSE_FOR_CONV_BIAS = 103.1
FUSE_CONV_BN = 104
DECONV_ADD_ZERO_BIAS = 105
    # DEPTHWISE_CONV_RESHAPE_WEIGHT requires RESHAPE_BIAS_TO_1DIM
DEPTHWISE_CONV_RESHAPE_WEIGHT = 106
FUSE_SOFTMAX = 107
# RESHAPE_BIAS_TO_1DIM should happen before DECONV_SHAPE_AS_INPUT
RESHAPE_BIAS_TO_1DIM = 108
DECONV_SHAPE_AS_INPUT = 109
FUSE_ASTYPE = 110 ##
PADDING_FOR_CONV_AND_POOLING = 111
TRANSPOSE_PATTERN_AS_INPUT = 112
# FUSE_FOR_LEAKY_RELU should happen before EXPAND_MUL_ADD3
FUSE_FOR_LEAKY_RELU = 113
EXPAND_MUL_ADD3 = 114
EXPAND_ADD_SIGMOID = 115 ##
FUSE_FOR_DECONV_BIAS = 117
FUSE_FOR_FULLY_CONNECTED = 118 ##
# for TFLite Converter
SLICE_PARAMS_AS_INPUTS_AND_MAKE_SQUEEZE = 119
RESIZE_PARAMS_AS_INPUT = 120
REPLACE_FLATTEN_TO_RESHAPE = 120.1
# remove reshape
REMOVE_RESHAPE_REALTED_OP = 121
REMOVE_DROPOUT = 122
FUSE_ACTIVATION = 123
REMOVE_IDENTITY = 124
REMOVE_RELU = 125
REMOVE_UNRELATED_IROP = 130
ADD_FAKE_HSIGMOID_OUT = 131
RENAME_CAFFE_LAYER_TENSOR = 132
def cmp_rules(a, b):
if a.value < b.value:
return -1
if a.value > b.value:
return 1
return 0
class IRTransform:
def __init__(self, transformer_options):
if not isinstance(transformer_options, Sequence):
transformer_options = [
transformer_options,
]
# bias of depthwise_conv must be 1 dim
if TransformerRule.DEPTHWISE_CONV_RESHAPE_WEIGHT in transformer_options:
if TransformerRule.RESHAPE_BIAS_TO_1DIM not in transformer_options:
transformer_options.append(TransformerRule.RESHAPE_BIAS_TO_1DIM)
self.trans_options = sorted(transformer_options, key=cmp_to_key(cmp_rules))
def transform(self, ir_graph):
for option in self.trans_options:
TRANSFORMMAP[option](ir_graph)
return ir_graph
TRANSFORMMAP: Dict[Enum, Callable] = {}
def _register_tranformation_rule(transformer_option):
def callback(impl):
TRANSFORMMAP[transformer_option] = impl
return callback
def cal_pad_mode(tm_opr):
out_shape = tm_opr.out_tensors[0].shape
inp_shape = tm_opr.inp_tensors[0].shape
if out_shape[2:] == inp_shape[2:]:
return "SAME"
else:
return "VALID"
@_register_tranformation_rule(TransformerRule.REMOVE_RESHAPE_INPUT)
def _remove_reshape_input(net):
for op in net.all_oprs:
if not isinstance(op, ReshapeOpr):
continue
if len(op.inp_tensors) == 2:
del op.inp_tensors[1]
@_register_tranformation_rule(TransformerRule.TRANSPOSE_PATTERN_AS_INPUT)
def _transpose_pattern_as_input(net):
for op in net.all_oprs:
if not isinstance(op, TransposeOpr):
continue
perm_tensor = IRTensor(
name=op.inp_tensors[0].name + "_perm",
shape=np.array(op.pattern).shape,
dtype=np.int32,
np_data=np.array(op.pattern, dtype=np.int32),
owner_opr=op,
q_type=np.int32,
axis=None,
)
op.add_inp_tensors(perm_tensor)
@_register_tranformation_rule(TransformerRule.REDUCE_AXIS_AS_INPUT)
def _reduce_axis_as_input(net):
for op in net.all_oprs:
if not isinstance(op, ReduceOpr):
continue
axis_tensor = IRTensor(
name=op.inp_tensors[0].name + "_axis",
shape=[1],
dtype=np.int32,
np_data=np.array(op.axis, dtype=np.int32),
owner_opr=op,
q_type=np.int32,
axis=None,
)
op.add_inp_tensors(axis_tensor)
@_register_tranformation_rule(TransformerRule.PADDING_FOR_CONV_AND_POOLING)
def _make_padding(net: IRGraph):
def have_padding(opr):
if isinstance(opr, Conv2dOpr):
if cal_pad_mode(opr) == "SAME":
return False
if hasattr(opr, "padding") and (opr.padding[0] > 0 or opr.padding[1] > 0):
return True
return False
insert_intended = OrderedDict() # type: OrderedDict
for op in net.all_oprs:
if not isinstance(op, (Conv2dOpr, _PoolOpr)):
continue
if have_padding(op):
assert op.inp_tensors[0].ndim == 4, "ERROR: unsupported padding mode"
np_data = np.array(
[
0,
0,
op.padding[0],
op.padding[0],
op.padding[1],
op.padding[1],
0,
0,
],
dtype=np.int32,
)
new_tensor_id = max(net._tensor_ids) + 1
pad_in_tensor = IRTensor(
name=op.inp_tensors[0].name + "_paddings",
shape=[4, 2],
dtype=np.int32,
owner_opr=None,
np_data=np_data,
q_type=np.int32,
axis=None,
)
net.add_tensor(new_tensor_id, pad_in_tensor)
shape = list(op.inp_tensors[0].shape)
new_tensor_id = max(net._tensor_ids) + 1
pad_out_tensor = IRTensor(
name=op.inp_tensors[0].name + "_pad_out",
shape=[
shape[0],
shape[1],
shape[2] + op.padding[0] * 2,
shape[3] + op.padding[1] * 2,
],
dtype=op.inp_tensors[0].dtype,
)
if (
hasattr(op.inp_tensors[0], "scale")
and op.inp_tensors[0].scale is not None
):
pad_out_tensor.scale = op.inp_tensors[0].scale
pad_out_tensor.q_dtype = op.inp_tensors[0].q_dtype
if hasattr(op.inp_tensors[0], "zero_point"):
pad_out_tensor.zero_point = op.inp_tensors[0].zero_point
net.add_tensor(new_tensor_id, pad_out_tensor)
pad_opr = PadOpr()
pad_opr.inp_tensors = [op.inp_tensors[0], pad_in_tensor]
index = op.inp_tensors[0].user_opr.index(op)
op.inp_tensors[0].user_opr[index] = pad_opr
pad_opr.out_tensors = [pad_out_tensor]
pad_out_tensor.owner_opr = pad_opr
op.inp_tensors = [pad_out_tensor] + op.inp_tensors[1:]
pad_out_tensor.user_opr.append(op)
index = net._opr_ids.index(id(op))
insert_intended[index] = (id(pad_opr), pad_opr)
for index, generated_pair in list(insert_intended.items())[::-1]:
net._opr_ids.insert(index, generated_pair[0])
net.all_oprs.insert(index, generated_pair[1])
@_register_tranformation_rule(TransformerRule.DECONV_SHAPE_AS_INPUT)
def _deconv_shape_as_input(net: IRGraph):
for op in net.all_oprs:
if not isinstance(op, Deconv2dOpr):
continue
result_shape = op.out_tensors[0].shape
np_data = np.array(
[result_shape[0], result_shape[2], result_shape[3], result_shape[1],],
dtype=np.int32,
)
new_tensor_id = max(net._tensor_ids) + 1
shape_symvar = IRTensor(
name=op.inp_tensors[0].name + "_deconv_out_shape",
shape=[4],
dtype=np.int32,
owner_opr=op,
np_data=np_data,
q_type=np.int32,
axis=None,
)
shape_tensor = net.get_tensor(new_tensor_id, shape_symvar)
if len(op.inp_tensors) == 2:
op.inp_tensors = [
shape_tensor,
op.inp_tensors[1],
op.inp_tensors[0],
]
else:
op.inp_tensors = [
shape_tensor,
op.inp_tensors[1],
op.inp_tensors[0],
op.inp_tensors[2],
]
@_register_tranformation_rule(TransformerRule.RESIZE_PARAMS_AS_INPUT)
def _resize_params_as_input(net):
for op in net.all_oprs:
if not isinstance(op, ResizeOpr):
continue
if len(op.inp_tensors) == 2:
continue
out_size_tensor = IRTensor(
name=op.inp_tensors[0].name + "_out_size",
shape=(2,),
dtype=np.int32,
np_data=np.array(op.out_size, dtype=np.int32),
q_type=np.int32,
axis=None,
)
op.add_inp_tensors(out_size_tensor)
@_register_tranformation_rule(TransformerRule.CONV_ADD_ZERO_BIAS)
def _add_bias_for_conv(net: IRGraph):
for op in net.all_oprs:
if not isinstance(op, Conv2dOpr):
continue
if len(op.inp_tensors) == 3:
continue
weight_shape = op.inp_tensors[1].shape
bias_shape = (
weight_shape[0]
if len(weight_shape) == 4
else weight_shape[0] * weight_shape[1]
)
bias_shape = (1, bias_shape, 1, 1)
bias = np.zeros(bias_shape, dtype=np.float32)
bias_tensor = IRTensor(
name=op.inp_tensors[0].name + "_bias",
shape=bias_shape,
dtype=np.float32,
np_data=bias,
axis=AxisOrder.NCHW,
)
if op.inp_tensors[0].scale and op.inp_tensors[1].scale:
bias_tensor.set_qparams(
op.inp_tensors[0].scale * op.inp_tensors[1].scale, 0
)
bias_tensor.q_dtype = "int32"
op.inp_tensors.append(bias_tensor)
@_register_tranformation_rule(TransformerRule.DECONV_ADD_ZERO_BIAS)
def _add_bias_for_deconv(net: IRGraph):
for op in net.all_oprs:
if not isinstance(op, Deconv2dOpr):
continue
if len(op.inp_tensors) == 3:
continue
weight_shape = op.inp_tensors[1].shape
bias_shape = (
weight_shape[1]
if len(weight_shape) == 4
else weight_shape[0] * weight_shape[2]
)
bias_shape = (1, bias_shape, 1, 1)
bias = np.zeros(bias_shape, dtype=np.float32)
bias_tensor = IRTensor(
name=op.inp_tensors[0].name + "_bias",
shape=bias_shape,
dtype=np.float32,
np_data=bias,
axis=AxisOrder.NCHW,
)
if op.inp_tensors[0].scale and op.inp_tensors[1].scale:
bias_tensor.set_qparams(
op.inp_tensors[0].scale * op.inp_tensors[1].scale, 0
)
bias_tensor.q_dtype = "int32"
op.inp_tensors.append(bias_tensor)
@_register_tranformation_rule(TransformerRule.RESHAPE_BIAS_TO_1DIM)
def _reshape_bias_to_1dim(net: IRGraph):
for op in net.all_oprs:
if not isinstance(op, (Deconv2dOpr, Conv2dOpr)):
continue
if len(op.inp_tensors) == 2:
continue
bias = op.inp_tensors[2]
if bias.ndim == 4:
bias.shape = (bias.shape[1],)
bias.np_data = bias.np_data.reshape(-1)
@_register_tranformation_rule(TransformerRule.DEPTHWISE_CONV_RESHAPE_WEIGHT)
def _depthwise_conv_reshape_weight(net: IRGraph):
# general group conv is not supported for TFLite
for op in net.all_oprs:
if not isinstance(op, Conv2dOpr):
continue
if op.groups == 1:
continue
weight = op.inp_tensors[1] # G, oc/G, ic/G, kh, kw
ic, cm = weight.shape[1] * op.groups, weight.shape[2]
h, w = weight.shape[3:5]
weight.shape = (ic, cm, h, w) # oc, ic/G, kh, kw
weight.np_data = weight.np_data.reshape(ic, cm, h, w)
@_register_tranformation_rule(TransformerRule.FUSE_ACTIVATION)
def _fuse_activation(net):
delete_intended = []
for op_id, op in zip(net._opr_ids, net.all_oprs):
if isinstance(op, (ReluOpr, TanHOpr)):
prev_ops = net.find_inp_oprs(op)
if len(prev_ops) == 0:
continue
prev_op = prev_ops[0]
if not isinstance(prev_op, OpBase):
continue
if prev_op.activation != "IDENTITY" or prev_op.name == "Deconv2d":
continue
activation = op.name.upper()
prev_op.activation = activation
prev_op.out_tensors = op.out_tensors
for t in prev_op.out_tensors:
t.owner_opr = prev_op
delete_intended.append(net._opr_ids.index(op_id))
for delete_idx in delete_intended[::-1]:
net.delete_ops(delete_idx)
@_register_tranformation_rule(TransformerRule.SLICE_PARAMS_AS_INPUTS_AND_MAKE_SQUEEZE)
def _make_slice_as_inputs(net: IRGraph):
for op in net.all_oprs:
if not isinstance(op, GetSubTensorOpr):
continue
ndim = op.inp_tensors[0].ndim
def make_input(axis, param, init_value):
# make inputs: begin, end and step.
ret = [init_value] * ndim # pylint:disable=cell-var-from-loop
for k, v in zip(axis, param):
ret[k] = v
ret = IRTensor(
name=op.name + "_fake_input", # pylint:disable=cell-var-from-loop
shape=[len(ret)],
dtype=np.int32,
np_data=np.array(ret, dtype=np.int32),
owner_opr=op, # pylint:disable=cell-var-from-loop
q_type=np.int32,
)
return ret
begins_tensor = make_input(op.axis, op.begin_params, 0)
ends_tensor = make_input(op.axis, op.end_params, np.iinfo(np.int32).max)
steps_tensor = make_input(op.axis, op.step_params, 1)
op.inp_tensors = [op.inp_tensors[0], begins_tensor, ends_tensor, steps_tensor]
        # TFLite slice does not support squeeze axis, so insert a squeeze opr here.
# infer actual output shape of tflite slice
desired_out_shape = op.out_tensors[0].shape
actual_out_shape = [1] * ndim
idx = 0
for i in range(ndim):
if i in op.squeeze_axis:
continue
actual_out_shape[i] = desired_out_shape[idx]
idx += 1
slice_out_tensor = IRTensor(
name=op.name + "fake_output",
shape=actual_out_shape,
dtype=op.out_tensors[0].dtype,
q_type=op.out_tensors[0].q_dtype,
owner_opr=op,
)
old_out = op.out_tensors
op.out_tensors = [slice_out_tensor]
squeeze = SqueezeOpr(op.squeeze_axis)
squeeze.inp_tensors = [slice_out_tensor]
squeeze.out_tensors = old_out
idx = net._opr_ids.index(id(op)) + 1
net.add_op(squeeze, idx)
# caffe transformer rules
class PatternNode:
def __init__(self, type, is_output=False, const_value=None):
self.op = None
self.type = type
self.inp_oprs = []
self.inp_const = []
self.inp_tensors = []
self.is_output = is_output
self.const_value = const_value
def check_const_value(self, op):
inp_tensors = [v.np_data for v in op.inp_tensors]
for const in self.const_value:
idx = const[0]
if idx == -1:
find = False
for index, v in enumerate(inp_tensors):
if np.array_equal(const[1], v):
find = True
del inp_tensors[index]
break
if not find:
return False
elif not np.array_equal(const[1], inp_tensors[idx]):
return False
return True
get_type = lambda op: type(op).__name__
def match(node, opr):
node_queue = [node]
opr_queue = [opr]
matched_opr = set()
matched_node = set()
while len(node_queue) != 0:
cur_node = node_queue.pop(0)
cur_opr = opr_queue.pop(0)
        if (cur_node.type != get_type(cur_opr) and cur_node.type != "*") or cur_opr.skip:
            return False
        if cur_node.op is None:
            cur_node.op = cur_opr
            if cur_node.const_value is not None:
if not cur_node.check_const_value(cur_opr):
return False
elif cur_node.op != cur_opr:
return False
matched_opr.add(cur_opr)
matched_node.add(cur_node)
for i, var in enumerate(cur_opr.inp_tensors):
if var.np_data is not None:
cur_node.inp_const.append([i, var.np_data])
else:
cur_node.inp_tensors.append([i, var])
if len(cur_node.inp_oprs) == 0:
continue
if len(cur_node.inp_oprs) != len(cur_opr.inp_oprs):
return False
for i, j in zip(cur_node.inp_oprs, cur_opr.inp_oprs):
node_queue.append(i)
opr_queue.append(j)
for n in matched_node:
if n.is_output:
continue
for op in n.op.out_oprs:
if op not in matched_opr:
return False
return True
def get_softmax_axis(ndim: int) -> int:
if ndim in (0, 1, 3):
return 0
return 1
@_register_tranformation_rule(TransformerRule.FUSE_SOFTMAX)
def _fuse_softmax(net: IRGraph):
matches = OrderedDict() # type: OrderedDict
for op in net.all_oprs:
if not isinstance(op, TrueDivOpr):
continue
try:
prev_op = net.find_inp_oprs(op)[1]
cur_index = net._opr_ids.index(id(op))
if (
not isinstance(prev_op, ReduceOpr)
or prev_op.mode != "SUM"
or prev_op.axis != get_softmax_axis(prev_op.inp_tensors[0].ndim)
or net._opr_ids.index(id(prev_op)) != cur_index - 1
):
continue
prev_op = net.find_inp_oprs(op)[0]
if (
not isinstance(prev_op, ExpOpr)
or net._opr_ids.index(id(prev_op)) != cur_index - 2
):
continue
prev_op = net.find_inp_oprs(prev_op)[0]
if (
not isinstance(prev_op, SubOpr)
or net._opr_ids.index(id(prev_op)) != cur_index - 3
):
continue
prev_op = net.find_inp_oprs(prev_op)[1]
if (
not isinstance(prev_op, ReduceOpr)
or prev_op.mode != "MAX"
or prev_op.axis != get_softmax_axis(prev_op.inp_tensors[0].ndim)
or net._opr_ids.index(id(prev_op)) != cur_index - 4
):
continue
except IndexError: # doesn't match
continue
softmax_opr = SoftmaxOpr(axis=get_softmax_axis(prev_op.inp_tensors[0].ndim))
softmax_opr.beta = 1
softmax_opr.inp_tensors = prev_op.inp_tensors[:1]
for i in softmax_opr.inp_tensors:
i.user_opr.append(softmax_opr)
softmax_opr.out_tensors = op.out_tensors
softmax_out_oprs = net.find_out_oprs(op)
matches[id(prev_op)] = (id(prev_op), softmax_opr, softmax_out_oprs)
for original_id, generated_pair in list(matches.items())[::-1]:
index = net._opr_ids.index(original_id)
for out_op in generated_pair[2]:
generated_pair[1].out_tensors[0].user_opr.append(out_op)
del net._opr_ids[index : index + 5]
del net.all_oprs[index : index + 5]
net._opr_ids.insert(index, generated_pair[0])
net.all_oprs.insert(index, generated_pair[1])
@_register_tranformation_rule(TransformerRule.FUSE_FOR_LEAKY_RELU)
def _fuse_leaky_relu(net: IRGraph):
"""
Elemwise(ADD) + Elemwise(MUL) + Elemwise(MAX) + Elemwise(MIN) -> LeakyRelu
"""
for opr in net.all_oprs:
if (
opr.name == "Add"
and len(net.find_inp_oprs(opr)) == 2
and net.find_inp_oprs(opr)[0].name == "Max"
and net.find_inp_oprs(opr)[1].name == "Mul"
):
max_op = net.find_inp_oprs(opr)[0]
mul_op = net.find_inp_oprs(opr)[1]
if not mul_op.inp_tensors[1].shape == (1,):
continue
if not max_op.inp_tensors[1].shape == (1,):
continue
if (
len(net.find_inp_oprs(mul_op)) != 1
or net.find_inp_oprs(mul_op)[0].name != "Min"
or net.find_inp_oprs(mul_op)[0].inp_tensors[1].shape != (1,)
):
continue
min_op = net.find_inp_oprs(mul_op)[0]
if not min_op.inp_tensors[1].shape == (1,):
continue
if max_op.inp_tensors[0] != min_op.inp_tensors[0]:
continue
leaky_relu = LeakyReluOpr(
negative_slope=float(mul_op.inp_tensors[1].np_data)
)
leaky_relu.inp_tensors = [max_op.inp_tensors[0]]
max_op.inp_tensors[0].user_opr.remove(max_op)
max_op.inp_tensors[0].user_opr.remove(min_op)
max_op.inp_tensors[0].user_opr.append(leaky_relu)
leaky_relu.out_tensors = opr.out_tensors
opr.out_tensors[0].owner_opr = leaky_relu
index = net.all_oprs.index(max_op)
del net.all_oprs[index : index + 4]
del net._opr_ids[index : index + 4]
net.add_op(leaky_relu, index)
@_register_tranformation_rule(TransformerRule.FUSE_FOR_CONV_BIAS)
def _fuse_for_conv_bias(net: IRGraph):
"""
ConvolutionForward + Elemwise(ADD) -> ConvForwardBias
"""
for opr in net.all_oprs:
if (
opr.name == "Conv2d"
and len(net.find_out_oprs(opr)) == 1
and net.find_out_oprs(opr)[0].name == "Add"
):
bias_op = net.find_out_oprs(opr)[0]
if not (
(
bias_op.inp_tensors[1].np_data is not None
and len(bias_op.inp_tensors[1].np_data.reshape(-1))
== opr.inp_tensors[1].shape[0]
)
or (
(
bias_op.inp_tensors[0].np_data is not None
and len(bias_op.inp_tensors[0].np_data.reshape(-1))
== opr.inp_tensors[1].shape[0]
)
)
):
continue
bias_idx = 0 if bias_op.inp_tensors[0].np_data is not None else 1
if len(opr.inp_tensors) == 2:
opr.inp_tensors.append(bias_op.inp_tensors[bias_idx])
else:
bias_shape = opr.inp_tensors[2].np_data.shape
add_tensor = bias_op.inp_tensors[bias_idx].np_data
if add_tensor.shape != bias_shape:
add_tensor = add_tensor.reshape(bias_shape)
opr.inp_tensors[2].np_data += add_tensor
if bias_op in opr.out_tensors[0].user_opr:
opr.out_tensors[0].user_opr.remove(bias_op)
bias_out_op = net.find_out_oprs(bias_op)
if len(bias_out_op) > 0:
for op in bias_out_op:
op.inp_tensors[0] = opr.out_tensors[0]
opr.out_tensors[0].user_opr.append(op)
else:
# last op of the graph
assert bias_op.out_tensors[0] in net.graph_outputs
index = net.graph_outputs.index(bias_op.out_tensors[0])
net.graph_outputs[index] = opr.out_tensors[0]
opr.activation = bias_op.activation
index = net.all_oprs.index(bias_op)
del net.all_oprs[index]
del net._opr_ids[index]
@_register_tranformation_rule(TransformerRule.FUSE_FOR_DECONV_BIAS)
def _fuse_for_deconv_bias(net: IRGraph):
for opr in net.all_oprs:
if (
opr.name == "Deconv2d"
and len(net.find_out_oprs(opr)) == 1
and net.find_out_oprs(opr)[0].name == "Add"
):
bias_op = net.find_out_oprs(opr)[0]
if not (
(
bias_op.inp_tensors[1].np_data is not None
and len(bias_op.inp_tensors[1].np_data.reshape(-1))
== opr.inp_tensors[1].shape[1]
)
or (
(
bias_op.inp_tensors[0].np_data is not None
and len(bias_op.inp_tensors[0].np_data.reshape(-1))
== opr.inp_tensors[1].shape[1]
)
)
):
continue
bias_idx = 0 if bias_op.inp_tensors[0].np_data is not None else 1
            if len(opr.inp_tensors) == 3:  # inp_tensors are [shape, weight, input]; append bias as the 4th
opr.inp_tensors.append(bias_op.inp_tensors[bias_idx])
else:
bias_shape = opr.inp_tensors[3].np_data.shape
add_tensor = bias_op.inp_tensors[bias_idx].np_data
if add_tensor.shape != bias_shape:
add_tensor = add_tensor.reshape(bias_shape)
opr.inp_tensors[3].np_data += add_tensor
if bias_op in opr.out_tensors[0].user_opr:
opr.out_tensors[0].user_opr.remove(bias_op)
bias_out_op = net.find_out_oprs(bias_op)
if len(bias_out_op) > 0:
for op in bias_out_op:
op.inp_tensors[0] = opr.out_tensors[0]
opr.out_tensors[0].user_opr.append(op)
else:
# last op of the graph
assert bias_op.out_tensors[0] in net.graph_outputs
index = net.graph_outputs.index(bias_op.out_tensors[0])
net.graph_outputs[index] = opr.out_tensors[0]
opr.activation = bias_op.activation
index = net.all_oprs.index(bias_op)
del net.all_oprs[index]
del net._opr_ids[index]
@_register_tranformation_rule(TransformerRule.EXPAND_MUL_ADD3)
def _expand_mul_add3(net: IRGraph):
for op in net.all_oprs:
if not isinstance(op, FuseMulAdd3Opr):
continue
last_op = net.find_inp_oprs(op)
assert len(last_op) == 1
mul_out_tensor = IRTensor(
name=op.inp_tensors[0].name + "_mul_out",
shape=op.inp_tensors[0].shape,
dtype=op.inp_tensors[0].dtype,
)
new_tensor_id = max(net._tensor_ids) + 1
net.add_tensor(new_tensor_id, mul_out_tensor)
mul_op = MulOpr()
mul_out_tensor.owner_opr = mul_op
mul_op.inp_tensors = op.inp_tensors[:2]
for o in mul_op.inp_tensors:
index = o.user_opr.index(op)
o.user_opr[index] = mul_op
mul_op.out_tensors = [mul_out_tensor]
add_op = AddOpr()
add_op.inp_tensors = [mul_out_tensor, op.inp_tensors[2]]
mul_out_tensor.user_opr.append(add_op)
add_op.out_tensors = op.out_tensors
index = net._opr_ids.index(id(op))
net.delete_ops(index)
net.add_op(mul_op, index)
net.add_op(add_op, index + 1)
@_register_tranformation_rule(TransformerRule.REPLACE_FLATTEN_TO_RESHAPE)
def _replace_flatten_to_reshape(net: IRGraph):
for opr in net.all_oprs:
if isinstance(opr, FlattenOpr):
out_shape = tuple(list(opr.inp_tensors[0].shape[: opr.start_axis]) + [-1])
reshape_op = ReshapeOpr(out_shape=out_shape)
reshape_op.inp_tensors = opr.inp_tensors
for t in reshape_op.inp_tensors:
idx = t.user_opr.index(opr)
t.user_opr[idx] = reshape_op
reshape_op.out_tensors = opr.out_tensors
for t in reshape_op.out_tensors:
t.owner_opr = reshape_op
net.replace_op(opr, reshape_op)
@_register_tranformation_rule(TransformerRule.REMOVE_RESHAPE_REALTED_OP)
def _remove_reshape_tensors(net: IRGraph):
for opr in net.all_oprs:
if isinstance(opr, ReshapeOpr) and len(opr.inp_tensors) > 1:
opr.inp_tensors = opr.inp_tensors[:1]
@_register_tranformation_rule(TransformerRule.REMOVE_DROPOUT)
def _remove_dropout(net: IRGraph):
for opr in net.all_oprs:
for idx, inp in enumerate(opr.inp_tensors):
owner_opr = inp.owner_opr
if isinstance(owner_opr, DropoutOpr) and owner_opr.drop_prob == 0:
opr.inp_tensors[idx] = owner_opr.inp_tensors[0]
for idx, out in enumerate(net.graph_outputs):
owner_opr = out.owner_opr
if isinstance(owner_opr, DropoutOpr) and owner_opr.drop_prob == 0:
net.graph_outputs[idx] = owner_opr.inp_tensors[0]
@_register_tranformation_rule(TransformerRule.REMOVE_RELU)
def _remove_relu(net: IRGraph):
for opr in net.all_oprs:
for idx, inp in enumerate(opr.inp_tensors):
owner_opr = inp.owner_opr
if isinstance(owner_opr, ReluOpr):
opr.inp_tensors[idx] = owner_opr.inp_tensors[0]
for idx, out in enumerate(net.graph_outputs):
owner_opr = out.owner_opr
if isinstance(owner_opr, ReluOpr):
net.graph_outputs[idx] = owner_opr.inp_tensors[0]
visited_tensor = set() # type: set
def _dfs_recursive(op_set, tensor):
owner_opr = tensor.owner_opr
op_set.add(owner_opr)
if tensor in visited_tensor:
return
visited_tensor.add(tensor)
if isinstance(owner_opr, IRGraph) or owner_opr is None:
return
for tt in owner_opr.inp_tensors:
_dfs_recursive(op_set, tt)
@_register_tranformation_rule(TransformerRule.REMOVE_UNRELATED_IROP)
def _remove_unrelated_op(net: IRGraph):
match_sets = set() # type: Set[OpBase]
for out_tensor in net.graph_outputs:
_dfs_recursive(match_sets, out_tensor)
remove_idx = []
for opr in net.all_oprs:
if opr not in match_sets:
index = net._opr_ids.index(id(opr))
remove_idx.append(index)
for i in remove_idx[::-1]:
net.delete_ops(i)
@_register_tranformation_rule(TransformerRule.ADD_FAKE_HSIGMOID_OUT)
def _add_fake_hsigmoid_tensor(net: IRGraph):
for opr in net.all_oprs:
if isinstance(opr, (HardSwishOpr, HardSigmoidOpr)):
add_3_out_tensor = IRTensor(
opr.out_tensors[0].name + "_fake_add3_out",
opr.inp_tensors[0].shape,
opr.inp_tensors[0].dtype,
q_type=opr.inp_tensors[0].q_dtype,
scale=opr.inp_tensors[0].scale,
zero_point=opr.inp_tensors[0].zero_point,
)
opr.add_inp_tensors(add_3_out_tensor)
relu6_out_tensor = IRTensor(
opr.out_tensors[0].name + "_relu6_out",
opr.inp_tensors[0].shape,
opr.inp_tensors[0].dtype,
q_type=opr.inp_tensors[0].q_dtype,
scale=opr.inp_tensors[0].scale,
zero_point=opr.inp_tensors[0].zero_point,
)
opr.add_inp_tensors(relu6_out_tensor)
if isinstance(opr, HardSwishOpr):
div6_out_tensor = IRTensor(
opr.out_tensors[0].name + "_div_out",
opr.inp_tensors[0].shape,
opr.inp_tensors[0].dtype,
q_type=opr.inp_tensors[0].q_dtype,
scale=opr.inp_tensors[0].scale,
zero_point=opr.inp_tensors[0].zero_point,
)
opr.add_inp_tensors(div6_out_tensor)
def fold_conv_bn(
conv_weight, conv_bias, conv_groups, gamma, beta, bn_mean, bn_var, eps
):
conv_bias = conv_bias.reshape(1, -1, 1, 1)
gamma = gamma.reshape(1, -1, 1, 1)
beta = beta.reshape(1, -1, 1, 1)
bn_mean = bn_mean.reshape(1, -1, 1, 1)
bn_var = bn_var.reshape(1, -1, 1, 1)
# bn_istd = 1 / bn_std
bn_istd = 1.0 / sqrt(bn_var + eps) # type: ignore[attr-defined]
# w_fold = gamma / bn_std * W
scale_factor = gamma * bn_istd
if conv_groups == 1:
w_fold = conv_weight * scale_factor.reshape(-1, 1, 1, 1)
else:
w_fold = conv_weight * scale_factor.reshape(conv_groups, -1, 1, 1, 1)
# b_fold = gamma * (b - bn_mean) / bn_std + beta
b_fold = beta + gamma * (conv_bias - bn_mean) * bn_istd
return w_fold, b_fold
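# Folding math used above (standard conv-BN fusion), with bn_istd = 1 / sqrt(var + eps):
#
#     y = gamma * (conv(x, W) + b - mean) * bn_istd + beta
#       = conv(x, W * gamma * bn_istd) + (beta + gamma * (b - mean) * bn_istd)
#       = conv(x, w_fold) + b_fold
#
# For grouped convs the per-output-channel scale is reshaped to
# (groups, oc_per_group, 1, 1, 1) so it broadcasts over the
# (G, oc/G, ic/G, kh, kw) weight layout.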
@_register_tranformation_rule(TransformerRule.FUSE_CONV_BN)
def _fuse_conv_bn(net: IRGraph):
for opr in net.all_oprs:
if (
opr.name == "BatchNormalization"
and len(net.find_inp_oprs(opr)) == 1
and net.find_inp_oprs(opr)[0].name == "Conv2d"
and len(net.find_out_oprs(net.find_inp_oprs(opr)[0])) == 1
and net.find_out_oprs(net.find_inp_oprs(opr)[0])[0] == opr
):
gamma = (
| Tensor(opr.weight) | megengine.Tensor |
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
from collections import OrderedDict
from enum import Enum
from functools import cmp_to_key
from typing import Set # pylint: disable=unused-import
from typing import Callable, Dict, Sequence
import numpy as np
from megengine import Tensor
from megengine.functional import sqrt
from ..converter_ir.ir_graph import IRGraph
from .ir_op import (
AddOpr,
Conv2dOpr,
ConvRelu2dOpr,
Deconv2dOpr,
DropoutOpr,
ExpOpr,
FlattenOpr,
FuseMulAdd3Opr,
GetSubTensorOpr,
HardSigmoidOpr,
HardSwishOpr,
IdentityOpr,
LeakyReluOpr,
MulOpr,
OpBase,
PadOpr,
ReduceOpr,
ReluOpr,
ReshapeOpr,
ResizeOpr,
SoftmaxOpr,
SqueezeOpr,
SubOpr,
TanHOpr,
TransposeOpr,
TrueDivOpr,
_PoolOpr,
)
from .ir_tensor import AxisOrder, IRTensor
class TransformerRule(Enum):
# general rules
NOPE = 1
# for TFLite
REDUCE_AXIS_AS_INPUT = 100
REMOVE_RESHAPE_INPUT = 101
# FUSE_FOR_RELU6 pass should happen before FUSE_ACTIVATION
FUSE_FOR_RELU6 = 102 ##
EXPAND_CONVRELU = 102.1
CONV_ADD_ZERO_BIAS = 103
FUSE_FOR_CONV_BIAS = 103.1
FUSE_CONV_BN = 104
DECONV_ADD_ZERO_BIAS = 105
    # DEPTHWISE_CONV_RESHAPE_WEIGHT requires RESHAPE_BIAS_TO_1DIM
DEPTHWISE_CONV_RESHAPE_WEIGHT = 106
FUSE_SOFTMAX = 107
# RESHAPE_BIAS_TO_1DIM should happen before DECONV_SHAPE_AS_INPUT
RESHAPE_BIAS_TO_1DIM = 108
DECONV_SHAPE_AS_INPUT = 109
FUSE_ASTYPE = 110 ##
PADDING_FOR_CONV_AND_POOLING = 111
TRANSPOSE_PATTERN_AS_INPUT = 112
# FUSE_FOR_LEAKY_RELU should happen before EXPAND_MUL_ADD3
FUSE_FOR_LEAKY_RELU = 113
EXPAND_MUL_ADD3 = 114
EXPAND_ADD_SIGMOID = 115 ##
FUSE_FOR_DECONV_BIAS = 117
FUSE_FOR_FULLY_CONNECTED = 118 ##
# for TFLite Converter
SLICE_PARAMS_AS_INPUTS_AND_MAKE_SQUEEZE = 119
RESIZE_PARAMS_AS_INPUT = 120
REPLACE_FLATTEN_TO_RESHAPE = 120.1
# remove reshape
REMOVE_RESHAPE_REALTED_OP = 121
REMOVE_DROPOUT = 122
FUSE_ACTIVATION = 123
REMOVE_IDENTITY = 124
REMOVE_RELU = 125
REMOVE_UNRELATED_IROP = 130
ADD_FAKE_HSIGMOID_OUT = 131
RENAME_CAFFE_LAYER_TENSOR = 132
def cmp_rules(a, b):
if a.value < b.value:
return -1
if a.value > b.value:
return 1
return 0
class IRTransform:
def __init__(self, transformer_options):
if not isinstance(transformer_options, Sequence):
transformer_options = [
transformer_options,
]
# bias of depthwise_conv must be 1 dim
if TransformerRule.DEPTHWISE_CONV_RESHAPE_WEIGHT in transformer_options:
if TransformerRule.RESHAPE_BIAS_TO_1DIM not in transformer_options:
transformer_options.append(TransformerRule.RESHAPE_BIAS_TO_1DIM)
self.trans_options = sorted(transformer_options, key=cmp_to_key(cmp_rules))
def transform(self, ir_graph):
for option in self.trans_options:
TRANSFORMMAP[option](ir_graph)
return ir_graph
TRANSFORMMAP: Dict[Enum, Callable] = {}
def _register_tranformation_rule(transformer_option):
def callback(impl):
TRANSFORMMAP[transformer_option] = impl
return callback
def cal_pad_mode(tm_opr):
out_shape = tm_opr.out_tensors[0].shape
inp_shape = tm_opr.inp_tensors[0].shape
if out_shape[2:] == inp_shape[2:]:
return "SAME"
else:
return "VALID"
@_register_tranformation_rule(TransformerRule.REMOVE_RESHAPE_INPUT)
def _remove_reshape_input(net):
for op in net.all_oprs:
if not isinstance(op, ReshapeOpr):
continue
if len(op.inp_tensors) == 2:
del op.inp_tensors[1]
@_register_tranformation_rule(TransformerRule.TRANSPOSE_PATTERN_AS_INPUT)
def _transpose_pattern_as_input(net):
for op in net.all_oprs:
if not isinstance(op, TransposeOpr):
continue
perm_tensor = IRTensor(
name=op.inp_tensors[0].name + "_perm",
shape=np.array(op.pattern).shape,
dtype=np.int32,
np_data=np.array(op.pattern, dtype=np.int32),
owner_opr=op,
q_type=np.int32,
axis=None,
)
op.add_inp_tensors(perm_tensor)
@_register_tranformation_rule(TransformerRule.REDUCE_AXIS_AS_INPUT)
def _reduce_axis_as_input(net):
for op in net.all_oprs:
if not isinstance(op, ReduceOpr):
continue
axis_tensor = IRTensor(
name=op.inp_tensors[0].name + "_axis",
shape=[1],
dtype=np.int32,
np_data=np.array(op.axis, dtype=np.int32),
owner_opr=op,
q_type=np.int32,
axis=None,
)
op.add_inp_tensors(axis_tensor)
@_register_tranformation_rule(TransformerRule.PADDING_FOR_CONV_AND_POOLING)
def _make_padding(net: IRGraph):
def have_padding(opr):
if isinstance(opr, Conv2dOpr):
if cal_pad_mode(opr) == "SAME":
return False
if hasattr(opr, "padding") and (opr.padding[0] > 0 or opr.padding[1] > 0):
return True
return False
insert_intended = OrderedDict() # type: OrderedDict
for op in net.all_oprs:
if not isinstance(op, (Conv2dOpr, _PoolOpr)):
continue
if have_padding(op):
assert op.inp_tensors[0].ndim == 4, "ERROR: unsupported padding mode"
np_data = np.array(
[
0,
0,
op.padding[0],
op.padding[0],
op.padding[1],
op.padding[1],
0,
0,
],
dtype=np.int32,
)
new_tensor_id = max(net._tensor_ids) + 1
pad_in_tensor = IRTensor(
name=op.inp_tensors[0].name + "_paddings",
shape=[4, 2],
dtype=np.int32,
owner_opr=None,
np_data=np_data,
q_type=np.int32,
axis=None,
)
net.add_tensor(new_tensor_id, pad_in_tensor)
shape = list(op.inp_tensors[0].shape)
new_tensor_id = max(net._tensor_ids) + 1
pad_out_tensor = IRTensor(
name=op.inp_tensors[0].name + "_pad_out",
shape=[
shape[0],
shape[1],
shape[2] + op.padding[0] * 2,
shape[3] + op.padding[1] * 2,
],
dtype=op.inp_tensors[0].dtype,
)
if (
hasattr(op.inp_tensors[0], "scale")
and op.inp_tensors[0].scale is not None
):
pad_out_tensor.scale = op.inp_tensors[0].scale
pad_out_tensor.q_dtype = op.inp_tensors[0].q_dtype
if hasattr(op.inp_tensors[0], "zero_point"):
pad_out_tensor.zero_point = op.inp_tensors[0].zero_point
net.add_tensor(new_tensor_id, pad_out_tensor)
pad_opr = PadOpr()
pad_opr.inp_tensors = [op.inp_tensors[0], pad_in_tensor]
index = op.inp_tensors[0].user_opr.index(op)
op.inp_tensors[0].user_opr[index] = pad_opr
pad_opr.out_tensors = [pad_out_tensor]
pad_out_tensor.owner_opr = pad_opr
op.inp_tensors = [pad_out_tensor] + op.inp_tensors[1:]
pad_out_tensor.user_opr.append(op)
index = net._opr_ids.index(id(op))
insert_intended[index] = (id(pad_opr), pad_opr)
for index, generated_pair in list(insert_intended.items())[::-1]:
net._opr_ids.insert(index, generated_pair[0])
net.all_oprs.insert(index, generated_pair[1])
@_register_tranformation_rule(TransformerRule.DECONV_SHAPE_AS_INPUT)
def _deconv_shape_as_input(net: IRGraph):
for op in net.all_oprs:
if not isinstance(op, Deconv2dOpr):
continue
result_shape = op.out_tensors[0].shape
np_data = np.array(
[result_shape[0], result_shape[2], result_shape[3], result_shape[1],],
dtype=np.int32,
)
new_tensor_id = max(net._tensor_ids) + 1
shape_symvar = IRTensor(
name=op.inp_tensors[0].name + "_deconv_out_shape",
shape=[4],
dtype=np.int32,
owner_opr=op,
np_data=np_data,
q_type=np.int32,
axis=None,
)
shape_tensor = net.get_tensor(new_tensor_id, shape_symvar)
if len(op.inp_tensors) == 2:
op.inp_tensors = [
shape_tensor,
op.inp_tensors[1],
op.inp_tensors[0],
]
else:
op.inp_tensors = [
shape_tensor,
op.inp_tensors[1],
op.inp_tensors[0],
op.inp_tensors[2],
]
@_register_tranformation_rule(TransformerRule.RESIZE_PARAMS_AS_INPUT)
def _resize_params_as_input(net):
for op in net.all_oprs:
if not isinstance(op, ResizeOpr):
continue
if len(op.inp_tensors) == 2:
continue
out_size_tensor = IRTensor(
name=op.inp_tensors[0].name + "_out_size",
shape=(2,),
dtype=np.int32,
np_data=np.array(op.out_size, dtype=np.int32),
q_type=np.int32,
axis=None,
)
op.add_inp_tensors(out_size_tensor)
@_register_tranformation_rule(TransformerRule.CONV_ADD_ZERO_BIAS)
def _add_bias_for_conv(net: IRGraph):
for op in net.all_oprs:
if not isinstance(op, Conv2dOpr):
continue
if len(op.inp_tensors) == 3:
continue
weight_shape = op.inp_tensors[1].shape
bias_shape = (
weight_shape[0]
if len(weight_shape) == 4
else weight_shape[0] * weight_shape[1]
)
bias_shape = (1, bias_shape, 1, 1)
bias = np.zeros(bias_shape, dtype=np.float32)
bias_tensor = IRTensor(
name=op.inp_tensors[0].name + "_bias",
shape=bias_shape,
dtype=np.float32,
np_data=bias,
axis=AxisOrder.NCHW,
)
if op.inp_tensors[0].scale and op.inp_tensors[1].scale:
bias_tensor.set_qparams(
op.inp_tensors[0].scale * op.inp_tensors[1].scale, 0
)
bias_tensor.q_dtype = "int32"
op.inp_tensors.append(bias_tensor)
@_register_tranformation_rule(TransformerRule.DECONV_ADD_ZERO_BIAS)
def _add_bias_for_deconv(net: IRGraph):
for op in net.all_oprs:
if not isinstance(op, Deconv2dOpr):
continue
if len(op.inp_tensors) == 3:
continue
weight_shape = op.inp_tensors[1].shape
bias_shape = (
weight_shape[1]
if len(weight_shape) == 4
else weight_shape[0] * weight_shape[2]
)
bias_shape = (1, bias_shape, 1, 1)
bias = np.zeros(bias_shape, dtype=np.float32)
bias_tensor = IRTensor(
name=op.inp_tensors[0].name + "_bias",
shape=bias_shape,
dtype=np.float32,
np_data=bias,
axis=AxisOrder.NCHW,
)
if op.inp_tensors[0].scale and op.inp_tensors[1].scale:
bias_tensor.set_qparams(
op.inp_tensors[0].scale * op.inp_tensors[1].scale, 0
)
bias_tensor.q_dtype = "int32"
op.inp_tensors.append(bias_tensor)
@_register_tranformation_rule(TransformerRule.RESHAPE_BIAS_TO_1DIM)
def _reshape_bias_to_1dim(net: IRGraph):
for op in net.all_oprs:
if not isinstance(op, (Deconv2dOpr, Conv2dOpr)):
continue
if len(op.inp_tensors) == 2:
continue
bias = op.inp_tensors[2]
if bias.ndim == 4:
bias.shape = (bias.shape[1],)
bias.np_data = bias.np_data.reshape(-1)
@_register_tranformation_rule(TransformerRule.DEPTHWISE_CONV_RESHAPE_WEIGHT)
def _depthwise_conv_reshape_weight(net: IRGraph):
# general group conv is not supported for TFLite
for op in net.all_oprs:
if not isinstance(op, Conv2dOpr):
continue
if op.groups == 1:
continue
weight = op.inp_tensors[1] # G, oc/G, ic/G, kh, kw
ic, cm = weight.shape[1] * op.groups, weight.shape[2]
h, w = weight.shape[3:5]
weight.shape = (ic, cm, h, w) # oc, ic/G, kh, kw
weight.np_data = weight.np_data.reshape(ic, cm, h, w)
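# Illustrative shape walk-through (assumed depthwise case, groups == input channels):
# a grouped weight of shape (G=4, oc/G=2, ic/G=1, kh=3, kw=3) gives ic = 2 * 4 = 8 and
# cm = 1, i.e. an (8, 1, 3, 3) tensor ready for the depthwise-conv path (the converter
# presumably transposes it into TFLite's own weight layout later).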
@_register_tranformation_rule(TransformerRule.FUSE_ACTIVATION)
def _fuse_activation(net):
delete_intended = []
for op_id, op in zip(net._opr_ids, net.all_oprs):
if isinstance(op, (ReluOpr, TanHOpr)):
prev_ops = net.find_inp_oprs(op)
if len(prev_ops) == 0:
continue
prev_op = prev_ops[0]
if not isinstance(prev_op, OpBase):
continue
if prev_op.activation != "IDENTITY" or prev_op.name == "Deconv2d":
continue
activation = op.name.upper()
prev_op.activation = activation
prev_op.out_tensors = op.out_tensors
for t in prev_op.out_tensors:
t.owner_opr = prev_op
delete_intended.append(net._opr_ids.index(op_id))
for delete_idx in delete_intended[::-1]:
net.delete_ops(delete_idx)
@_register_tranformation_rule(TransformerRule.SLICE_PARAMS_AS_INPUTS_AND_MAKE_SQUEEZE)
def _make_slice_as_inputs(net: IRGraph):
for op in net.all_oprs:
if not isinstance(op, GetSubTensorOpr):
continue
ndim = op.inp_tensors[0].ndim
def make_input(axis, param, init_value):
# make inputs: begin, end and step.
ret = [init_value] * ndim # pylint:disable=cell-var-from-loop
for k, v in zip(axis, param):
ret[k] = v
ret = IRTensor(
name=op.name + "_fake_input", # pylint:disable=cell-var-from-loop
shape=[len(ret)],
dtype=np.int32,
np_data=np.array(ret, dtype=np.int32),
owner_opr=op, # pylint:disable=cell-var-from-loop
q_type=np.int32,
)
return ret
begins_tensor = make_input(op.axis, op.begin_params, 0)
ends_tensor = make_input(op.axis, op.end_params, np.iinfo(np.int32).max)
steps_tensor = make_input(op.axis, op.step_params, 1)
op.inp_tensors = [op.inp_tensors[0], begins_tensor, ends_tensor, steps_tensor]
        # TFLite slice does not support squeeze axis, so insert a squeeze opr here.
# infer actual output shape of tflite slice
desired_out_shape = op.out_tensors[0].shape
actual_out_shape = [1] * ndim
idx = 0
for i in range(ndim):
if i in op.squeeze_axis:
continue
actual_out_shape[i] = desired_out_shape[idx]
idx += 1
slice_out_tensor = IRTensor(
name=op.name + "fake_output",
shape=actual_out_shape,
dtype=op.out_tensors[0].dtype,
q_type=op.out_tensors[0].q_dtype,
owner_opr=op,
)
old_out = op.out_tensors
op.out_tensors = [slice_out_tensor]
squeeze = SqueezeOpr(op.squeeze_axis)
squeeze.inp_tensors = [slice_out_tensor]
squeeze.out_tensors = old_out
idx = net._opr_ids.index(id(op)) + 1
net.add_op(squeeze, idx)
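# Example of the lowering above (illustrative shapes): taking x[2] from a (4, 8, 6)
# tensor means begin=2, end=3 on axis 0 with squeeze_axis={0}; the TFLite-style slice
# keeps the sliced axis and outputs (1, 8, 6), and the Squeeze opr appended right
# after restores the expected (8, 6) result.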
# caffe transformer rules
class PatternNode:
def __init__(self, type, is_output=False, const_value=None):
self.op = None
self.type = type
self.inp_oprs = []
self.inp_const = []
self.inp_tensors = []
self.is_output = is_output
self.const_value = const_value
def check_const_value(self, op):
inp_tensors = [v.np_data for v in op.inp_tensors]
for const in self.const_value:
idx = const[0]
if idx == -1:
find = False
for index, v in enumerate(inp_tensors):
if np.array_equal(const[1], v):
find = True
del inp_tensors[index]
break
if not find:
return False
elif not np.array_equal(const[1], inp_tensors[idx]):
return False
return True
get_type = lambda op: type(op).__name__
def match(node, opr):
node_queue = [node]
opr_queue = [opr]
matched_opr = set()
matched_node = set()
while len(node_queue) != 0:
cur_node = node_queue.pop(0)
cur_opr = opr_queue.pop(0)
        if (cur_node.type != get_type(cur_opr) and cur_node.type != "*") or cur_opr.skip:
            return False
        if cur_node.op is None:
            cur_node.op = cur_opr
            if cur_node.const_value is not None:
if not cur_node.check_const_value(cur_opr):
return False
elif cur_node.op != cur_opr:
return False
matched_opr.add(cur_opr)
matched_node.add(cur_node)
for i, var in enumerate(cur_opr.inp_tensors):
if var.np_data is not None:
cur_node.inp_const.append([i, var.np_data])
else:
cur_node.inp_tensors.append([i, var])
if len(cur_node.inp_oprs) == 0:
continue
if len(cur_node.inp_oprs) != len(cur_opr.inp_oprs):
return False
for i, j in zip(cur_node.inp_oprs, cur_opr.inp_oprs):
node_queue.append(i)
opr_queue.append(j)
for n in matched_node:
if n.is_output:
continue
for op in n.op.out_oprs:
if op not in matched_opr:
return False
return True
def get_softmax_axis(ndim: int) -> int:
if ndim in (0, 1, 3):
return 0
return 1
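# This convention appears to target channel-major layouts: 2-d (N, C) and 4-d NCHW
# activations get softmax over axis 1 (the channel axis), while 1-d and 3-d inputs
# fall back to axis 0.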
@_register_tranformation_rule(TransformerRule.FUSE_SOFTMAX)
def _fuse_softmax(net: IRGraph):
matches = OrderedDict() # type: OrderedDict
for op in net.all_oprs:
if not isinstance(op, TrueDivOpr):
continue
try:
prev_op = net.find_inp_oprs(op)[1]
cur_index = net._opr_ids.index(id(op))
if (
not isinstance(prev_op, ReduceOpr)
or prev_op.mode != "SUM"
or prev_op.axis != get_softmax_axis(prev_op.inp_tensors[0].ndim)
or net._opr_ids.index(id(prev_op)) != cur_index - 1
):
continue
prev_op = net.find_inp_oprs(op)[0]
if (
not isinstance(prev_op, ExpOpr)
or net._opr_ids.index(id(prev_op)) != cur_index - 2
):
continue
prev_op = net.find_inp_oprs(prev_op)[0]
if (
not isinstance(prev_op, SubOpr)
or net._opr_ids.index(id(prev_op)) != cur_index - 3
):
continue
prev_op = net.find_inp_oprs(prev_op)[1]
if (
not isinstance(prev_op, ReduceOpr)
or prev_op.mode != "MAX"
or prev_op.axis != get_softmax_axis(prev_op.inp_tensors[0].ndim)
or net._opr_ids.index(id(prev_op)) != cur_index - 4
):
continue
except IndexError: # doesn't match
continue
softmax_opr = SoftmaxOpr(axis=get_softmax_axis(prev_op.inp_tensors[0].ndim))
softmax_opr.beta = 1
softmax_opr.inp_tensors = prev_op.inp_tensors[:1]
for i in softmax_opr.inp_tensors:
i.user_opr.append(softmax_opr)
softmax_opr.out_tensors = op.out_tensors
softmax_out_oprs = net.find_out_oprs(op)
matches[id(prev_op)] = (id(prev_op), softmax_opr, softmax_out_oprs)
for original_id, generated_pair in list(matches.items())[::-1]:
index = net._opr_ids.index(original_id)
for out_op in generated_pair[2]:
generated_pair[1].out_tensors[0].user_opr.append(out_op)
del net._opr_ids[index : index + 5]
del net.all_oprs[index : index + 5]
net._opr_ids.insert(index, generated_pair[0])
net.all_oprs.insert(index, generated_pair[1])
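# The pattern matched above is the numerically stable softmax expansion,
#     softmax(x) = exp(x - max(x)) / sum(exp(x - max(x))),
# i.e. the operator chain ReduceOpr(MAX) -> SubOpr -> ExpOpr -> ReduceOpr(SUM) ->
# TrueDivOpr; all five oprs are deleted in one block (index : index + 5) and replaced
# by a single SoftmaxOpr.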
@_register_tranformation_rule(TransformerRule.FUSE_FOR_LEAKY_RELU)
def _fuse_leaky_relu(net: IRGraph):
"""
Elemwise(ADD) + Elemwise(MUL) + Elemwise(MAX) + Elemwise(MIN) -> LeakyRelu
"""
for opr in net.all_oprs:
if (
opr.name == "Add"
and len(net.find_inp_oprs(opr)) == 2
and net.find_inp_oprs(opr)[0].name == "Max"
and net.find_inp_oprs(opr)[1].name == "Mul"
):
max_op = net.find_inp_oprs(opr)[0]
mul_op = net.find_inp_oprs(opr)[1]
if not mul_op.inp_tensors[1].shape == (1,):
continue
if not max_op.inp_tensors[1].shape == (1,):
continue
if (
len(net.find_inp_oprs(mul_op)) != 1
or net.find_inp_oprs(mul_op)[0].name != "Min"
or net.find_inp_oprs(mul_op)[0].inp_tensors[1].shape != (1,)
):
continue
min_op = net.find_inp_oprs(mul_op)[0]
if not min_op.inp_tensors[1].shape == (1,):
continue
if max_op.inp_tensors[0] != min_op.inp_tensors[0]:
continue
leaky_relu = LeakyReluOpr(
negative_slope=float(mul_op.inp_tensors[1].np_data)
)
leaky_relu.inp_tensors = [max_op.inp_tensors[0]]
max_op.inp_tensors[0].user_opr.remove(max_op)
max_op.inp_tensors[0].user_opr.remove(min_op)
max_op.inp_tensors[0].user_opr.append(leaky_relu)
leaky_relu.out_tensors = opr.out_tensors
opr.out_tensors[0].owner_opr = leaky_relu
index = net.all_oprs.index(max_op)
del net.all_oprs[index : index + 4]
del net._opr_ids[index : index + 4]
net.add_op(leaky_relu, index)
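# Identity behind this fusion: leaky_relu(x) == max(x, 0) + negative_slope * min(x, 0),
# so a subgraph of the form Add(Max(x, c), Mul(Min(x, c), slope)) with scalar constants
# (typically 0) collapses into one LeakyReluOpr; the four elemwise oprs are removed in
# a single block.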
@_register_tranformation_rule(TransformerRule.FUSE_FOR_CONV_BIAS)
def _fuse_for_conv_bias(net: IRGraph):
"""
ConvolutionForward + Elemwise(ADD) -> ConvForwardBias
"""
for opr in net.all_oprs:
if (
opr.name == "Conv2d"
and len(net.find_out_oprs(opr)) == 1
and net.find_out_oprs(opr)[0].name == "Add"
):
bias_op = net.find_out_oprs(opr)[0]
if not (
(
bias_op.inp_tensors[1].np_data is not None
and len(bias_op.inp_tensors[1].np_data.reshape(-1))
== opr.inp_tensors[1].shape[0]
)
or (
(
bias_op.inp_tensors[0].np_data is not None
and len(bias_op.inp_tensors[0].np_data.reshape(-1))
== opr.inp_tensors[1].shape[0]
)
)
):
continue
bias_idx = 0 if bias_op.inp_tensors[0].np_data is not None else 1
if len(opr.inp_tensors) == 2:
opr.inp_tensors.append(bias_op.inp_tensors[bias_idx])
else:
bias_shape = opr.inp_tensors[2].np_data.shape
add_tensor = bias_op.inp_tensors[bias_idx].np_data
if add_tensor.shape != bias_shape:
add_tensor = add_tensor.reshape(bias_shape)
opr.inp_tensors[2].np_data += add_tensor
if bias_op in opr.out_tensors[0].user_opr:
opr.out_tensors[0].user_opr.remove(bias_op)
bias_out_op = net.find_out_oprs(bias_op)
if len(bias_out_op) > 0:
for op in bias_out_op:
op.inp_tensors[0] = opr.out_tensors[0]
opr.out_tensors[0].user_opr.append(op)
else:
# last op of the graph
assert bias_op.out_tensors[0] in net.graph_outputs
index = net.graph_outputs.index(bias_op.out_tensors[0])
net.graph_outputs[index] = opr.out_tensors[0]
opr.activation = bias_op.activation
index = net.all_oprs.index(bias_op)
del net.all_oprs[index]
del net._opr_ids[index]
@_register_tranformation_rule(TransformerRule.FUSE_FOR_DECONV_BIAS)
def _fuse_for_deconv_bias(net: IRGraph):
for opr in net.all_oprs:
if (
opr.name == "Deconv2d"
and len(net.find_out_oprs(opr)) == 1
and net.find_out_oprs(opr)[0].name == "Add"
):
bias_op = net.find_out_oprs(opr)[0]
if not (
(
bias_op.inp_tensors[1].np_data is not None
and len(bias_op.inp_tensors[1].np_data.reshape(-1))
== opr.inp_tensors[1].shape[1]
)
or (
(
bias_op.inp_tensors[0].np_data is not None
and len(bias_op.inp_tensors[0].np_data.reshape(-1))
== opr.inp_tensors[1].shape[1]
)
)
):
continue
bias_idx = 0 if bias_op.inp_tensors[0].np_data is not None else 1
if len(opr.inp_tensors) == 3: # shape, weight, input, bias
opr.inp_tensors.append(bias_op.inp_tensors[bias_idx])
else:
bias_shape = opr.inp_tensors[3].np_data.shape
add_tensor = bias_op.inp_tensors[bias_idx].np_data
if add_tensor.shape != bias_shape:
add_tensor = add_tensor.reshape(bias_shape)
opr.inp_tensors[3].np_data += add_tensor
if bias_op in opr.out_tensors[0].user_opr:
opr.out_tensors[0].user_opr.remove(bias_op)
bias_out_op = net.find_out_oprs(bias_op)
if len(bias_out_op) > 0:
for op in bias_out_op:
op.inp_tensors[0] = opr.out_tensors[0]
opr.out_tensors[0].user_opr.append(op)
else:
# last op of the graph
assert bias_op.out_tensors[0] in net.graph_outputs
index = net.graph_outputs.index(bias_op.out_tensors[0])
net.graph_outputs[index] = opr.out_tensors[0]
opr.activation = bias_op.activation
index = net.all_oprs.index(bias_op)
del net.all_oprs[index]
del net._opr_ids[index]
@_register_tranformation_rule(TransformerRule.EXPAND_MUL_ADD3)
def _expand_mul_add3(net: IRGraph):
for op in net.all_oprs:
if not isinstance(op, FuseMulAdd3Opr):
continue
last_op = net.find_inp_oprs(op)
assert len(last_op) == 1
mul_out_tensor = IRTensor(
name=op.inp_tensors[0].name + "_mul_out",
shape=op.inp_tensors[0].shape,
dtype=op.inp_tensors[0].dtype,
)
new_tensor_id = max(net._tensor_ids) + 1
net.add_tensor(new_tensor_id, mul_out_tensor)
mul_op = MulOpr()
mul_out_tensor.owner_opr = mul_op
mul_op.inp_tensors = op.inp_tensors[:2]
for o in mul_op.inp_tensors:
index = o.user_opr.index(op)
o.user_opr[index] = mul_op
mul_op.out_tensors = [mul_out_tensor]
add_op = AddOpr()
add_op.inp_tensors = [mul_out_tensor, op.inp_tensors[2]]
mul_out_tensor.user_opr.append(add_op)
add_op.out_tensors = op.out_tensors
index = net._opr_ids.index(id(op))
net.delete_ops(index)
net.add_op(mul_op, index)
net.add_op(add_op, index + 1)
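# FuseMulAdd3 computes out = inp0 * inp1 + inp2 in a single opr; presumably because the
# target formats lack such a fused elemwise op, it is rewritten here as an explicit Mul
# followed by an Add through a freshly registered intermediate tensor.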
@_register_tranformation_rule(TransformerRule.REPLACE_FLATTEN_TO_RESHAPE)
def _replace_flatten_to_reshape(net: IRGraph):
for opr in net.all_oprs:
if isinstance(opr, FlattenOpr):
out_shape = tuple(list(opr.inp_tensors[0].shape[: opr.start_axis]) + [-1])
reshape_op = ReshapeOpr(out_shape=out_shape)
reshape_op.inp_tensors = opr.inp_tensors
for t in reshape_op.inp_tensors:
idx = t.user_opr.index(opr)
t.user_opr[idx] = reshape_op
reshape_op.out_tensors = opr.out_tensors
for t in reshape_op.out_tensors:
t.owner_opr = reshape_op
net.replace_op(opr, reshape_op)
@_register_tranformation_rule(TransformerRule.REMOVE_RESHAPE_REALTED_OP)
def _remove_reshape_tensors(net: IRGraph):
for opr in net.all_oprs:
if isinstance(opr, ReshapeOpr) and len(opr.inp_tensors) > 1:
opr.inp_tensors = opr.inp_tensors[:1]
@_register_tranformation_rule(TransformerRule.REMOVE_DROPOUT)
def _remove_dropout(net: IRGraph):
for opr in net.all_oprs:
for idx, inp in enumerate(opr.inp_tensors):
owner_opr = inp.owner_opr
if isinstance(owner_opr, DropoutOpr) and owner_opr.drop_prob == 0:
opr.inp_tensors[idx] = owner_opr.inp_tensors[0]
for idx, out in enumerate(net.graph_outputs):
owner_opr = out.owner_opr
if isinstance(owner_opr, DropoutOpr) and owner_opr.drop_prob == 0:
net.graph_outputs[idx] = owner_opr.inp_tensors[0]
@_register_tranformation_rule(TransformerRule.REMOVE_RELU)
def _remove_relu(net: IRGraph):
for opr in net.all_oprs:
for idx, inp in enumerate(opr.inp_tensors):
owner_opr = inp.owner_opr
if isinstance(owner_opr, ReluOpr):
opr.inp_tensors[idx] = owner_opr.inp_tensors[0]
for idx, out in enumerate(net.graph_outputs):
owner_opr = out.owner_opr
if isinstance(owner_opr, ReluOpr):
net.graph_outputs[idx] = owner_opr.inp_tensors[0]
visited_tensor = set() # type: set
def _dfs_recursive(op_set, tensor):
owner_opr = tensor.owner_opr
op_set.add(owner_opr)
if tensor in visited_tensor:
return
visited_tensor.add(tensor)
if isinstance(owner_opr, IRGraph) or owner_opr is None:
return
for tt in owner_opr.inp_tensors:
_dfs_recursive(op_set, tt)
@_register_tranformation_rule(TransformerRule.REMOVE_UNRELATED_IROP)
def _remove_unrelated_op(net: IRGraph):
match_sets = set() # type: Set[OpBase]
for out_tensor in net.graph_outputs:
_dfs_recursive(match_sets, out_tensor)
remove_idx = []
for opr in net.all_oprs:
if opr not in match_sets:
index = net._opr_ids.index(id(opr))
remove_idx.append(index)
for i in remove_idx[::-1]:
net.delete_ops(i)
@_register_tranformation_rule(TransformerRule.ADD_FAKE_HSIGMOID_OUT)
def _add_fake_hsigmoid_tensor(net: IRGraph):
for opr in net.all_oprs:
if isinstance(opr, (HardSwishOpr, HardSigmoidOpr)):
add_3_out_tensor = IRTensor(
opr.out_tensors[0].name + "_fake_add3_out",
opr.inp_tensors[0].shape,
opr.inp_tensors[0].dtype,
q_type=opr.inp_tensors[0].q_dtype,
scale=opr.inp_tensors[0].scale,
zero_point=opr.inp_tensors[0].zero_point,
)
opr.add_inp_tensors(add_3_out_tensor)
relu6_out_tensor = IRTensor(
opr.out_tensors[0].name + "_relu6_out",
opr.inp_tensors[0].shape,
opr.inp_tensors[0].dtype,
q_type=opr.inp_tensors[0].q_dtype,
scale=opr.inp_tensors[0].scale,
zero_point=opr.inp_tensors[0].zero_point,
)
opr.add_inp_tensors(relu6_out_tensor)
if isinstance(opr, HardSwishOpr):
div6_out_tensor = IRTensor(
opr.out_tensors[0].name + "_div_out",
opr.inp_tensors[0].shape,
opr.inp_tensors[0].dtype,
q_type=opr.inp_tensors[0].q_dtype,
scale=opr.inp_tensors[0].scale,
zero_point=opr.inp_tensors[0].zero_point,
)
opr.add_inp_tensors(div6_out_tensor)
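# The fake tensors added above hold the intermediates of the usual decompositions
#     hsigmoid(x) = relu6(x + 3) / 6         (add3 -> relu6 outputs)
#     hswish(x)   = x * relu6(x + 3) / 6     (add3 -> relu6 -> div6 outputs)
# presumably so a backend without native HardSigmoid/HardSwish can expand the opr
# later without allocating new tensors.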
def fold_conv_bn(
conv_weight, conv_bias, conv_groups, gamma, beta, bn_mean, bn_var, eps
):
conv_bias = conv_bias.reshape(1, -1, 1, 1)
gamma = gamma.reshape(1, -1, 1, 1)
beta = beta.reshape(1, -1, 1, 1)
bn_mean = bn_mean.reshape(1, -1, 1, 1)
bn_var = bn_var.reshape(1, -1, 1, 1)
# bn_istd = 1 / bn_std
bn_istd = 1.0 / sqrt(bn_var + eps) # type: ignore[attr-defined]
# w_fold = gamma / bn_std * W
scale_factor = gamma * bn_istd
if conv_groups == 1:
w_fold = conv_weight * scale_factor.reshape(-1, 1, 1, 1)
else:
w_fold = conv_weight * scale_factor.reshape(conv_groups, -1, 1, 1, 1)
# b_fold = gamma * (b - bn_mean) / bn_std + beta
b_fold = beta + gamma * (conv_bias - bn_mean) * bn_istd
return w_fold, b_fold
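# Rough usage sketch (assumed shapes, not taken from the converter itself): for a
# non-grouped conv with 16 output channels, conv_weight is (16, C, kh, kw) and the BN
# statistics are length-16 vectors; the folded weight keeps the conv_weight shape and
# b_fold comes back broadcast as (1, 16, 1, 1).
#   w_fold, b_fold = fold_conv_bn(w, b, 1, gamma, beta, running_mean, running_var, 1e-5)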
@_register_tranformation_rule(TransformerRule.FUSE_CONV_BN)
def _fuse_conv_bn(net: IRGraph):
for opr in net.all_oprs:
if (
opr.name == "BatchNormalization"
and len(net.find_inp_oprs(opr)) == 1
and net.find_inp_oprs(opr)[0].name == "Conv2d"
and len(net.find_out_oprs(net.find_inp_oprs(opr)[0])) == 1
and net.find_out_oprs(net.find_inp_oprs(opr)[0])[0] == opr
):
gamma = (
Tensor(opr.weight) # type: ignore[attr-defined]
if opr.weight is not None # type: ignore[attr-defined]
else | Tensor(opr.inp_tensors[1].np_data) | megengine.Tensor |
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
from collections import OrderedDict
from enum import Enum
from functools import cmp_to_key
from typing import Set # pylint: disable=unused-import
from typing import Callable, Dict, Sequence
import numpy as np
from megengine import Tensor
from megengine.functional import sqrt
from ..converter_ir.ir_graph import IRGraph
from .ir_op import (
AddOpr,
Conv2dOpr,
ConvRelu2dOpr,
Deconv2dOpr,
DropoutOpr,
ExpOpr,
FlattenOpr,
FuseMulAdd3Opr,
GetSubTensorOpr,
HardSigmoidOpr,
HardSwishOpr,
IdentityOpr,
LeakyReluOpr,
MulOpr,
OpBase,
PadOpr,
ReduceOpr,
ReluOpr,
ReshapeOpr,
ResizeOpr,
SoftmaxOpr,
SqueezeOpr,
SubOpr,
TanHOpr,
TransposeOpr,
TrueDivOpr,
_PoolOpr,
)
from .ir_tensor import AxisOrder, IRTensor
class TransformerRule(Enum):
# general rules
NOPE = 1
# for TFLite
REDUCE_AXIS_AS_INPUT = 100
REMOVE_RESHAPE_INPUT = 101
# FUSE_FOR_RELU6 pass should happen before FUSE_ACTIVATION
FUSE_FOR_RELU6 = 102 ##
EXPAND_CONVRELU = 102.1
CONV_ADD_ZERO_BIAS = 103
FUSE_FOR_CONV_BIAS = 103.1
FUSE_CONV_BN = 104
DECONV_ADD_ZERO_BIAS = 105
    # DEPTHWISE_CONV_RESHAPE_WEIGHT requires RESHAPE_BIAS_TO_1DIM
DEPTHWISE_CONV_RESHAPE_WEIGHT = 106
FUSE_SOFTMAX = 107
# RESHAPE_BIAS_TO_1DIM should happen before DECONV_SHAPE_AS_INPUT
RESHAPE_BIAS_TO_1DIM = 108
DECONV_SHAPE_AS_INPUT = 109
FUSE_ASTYPE = 110 ##
PADDING_FOR_CONV_AND_POOLING = 111
TRANSPOSE_PATTERN_AS_INPUT = 112
# FUSE_FOR_LEAKY_RELU should happen before EXPAND_MUL_ADD3
FUSE_FOR_LEAKY_RELU = 113
EXPAND_MUL_ADD3 = 114
EXPAND_ADD_SIGMOID = 115 ##
FUSE_FOR_DECONV_BIAS = 117
FUSE_FOR_FULLY_CONNECTED = 118 ##
# for TFLite Converter
SLICE_PARAMS_AS_INPUTS_AND_MAKE_SQUEEZE = 119
RESIZE_PARAMS_AS_INPUT = 120
REPLACE_FLATTEN_TO_RESHAPE = 120.1
# remove reshape
REMOVE_RESHAPE_REALTED_OP = 121
REMOVE_DROPOUT = 122
FUSE_ACTIVATION = 123
REMOVE_IDENTITY = 124
REMOVE_RELU = 125
REMOVE_UNRELATED_IROP = 130
ADD_FAKE_HSIGMOID_OUT = 131
RENAME_CAFFE_LAYER_TENSOR = 132
def cmp_rules(a, b):
if a.value < b.value:
return -1
if a.value > b.value:
return 1
return 0
class IRTransform:
def __init__(self, transformer_options):
if not isinstance(transformer_options, Sequence):
transformer_options = [
transformer_options,
]
# bias of depthwise_conv must be 1 dim
if TransformerRule.DEPTHWISE_CONV_RESHAPE_WEIGHT in transformer_options:
if TransformerRule.RESHAPE_BIAS_TO_1DIM not in transformer_options:
transformer_options.append(TransformerRule.RESHAPE_BIAS_TO_1DIM)
self.trans_options = sorted(transformer_options, key=cmp_to_key(cmp_rules))
def transform(self, ir_graph):
for option in self.trans_options:
TRANSFORMMAP[option](ir_graph)
return ir_graph
TRANSFORMMAP: Dict[Enum, Callable] = {}
def _register_tranformation_rule(transformer_option):
def callback(impl):
TRANSFORMMAP[transformer_option] = impl
return callback
def cal_pad_mode(tm_opr):
out_shape = tm_opr.out_tensors[0].shape
inp_shape = tm_opr.inp_tensors[0].shape
if out_shape[2:] == inp_shape[2:]:
return "SAME"
else:
return "VALID"
@_register_tranformation_rule(TransformerRule.REMOVE_RESHAPE_INPUT)
def _remove_reshape_input(net):
for op in net.all_oprs:
if not isinstance(op, ReshapeOpr):
continue
if len(op.inp_tensors) == 2:
del op.inp_tensors[1]
@_register_tranformation_rule(TransformerRule.TRANSPOSE_PATTERN_AS_INPUT)
def _transpose_pattern_as_input(net):
for op in net.all_oprs:
if not isinstance(op, TransposeOpr):
continue
perm_tensor = IRTensor(
name=op.inp_tensors[0].name + "_perm",
shape=np.array(op.pattern).shape,
dtype=np.int32,
np_data=np.array(op.pattern, dtype=np.int32),
owner_opr=op,
q_type=np.int32,
axis=None,
)
op.add_inp_tensors(perm_tensor)
@_register_tranformation_rule(TransformerRule.REDUCE_AXIS_AS_INPUT)
def _reduce_axis_as_input(net):
for op in net.all_oprs:
if not isinstance(op, ReduceOpr):
continue
axis_tensor = IRTensor(
name=op.inp_tensors[0].name + "_axis",
shape=[1],
dtype=np.int32,
np_data=np.array(op.axis, dtype=np.int32),
owner_opr=op,
q_type=np.int32,
axis=None,
)
op.add_inp_tensors(axis_tensor)
@_register_tranformation_rule(TransformerRule.PADDING_FOR_CONV_AND_POOLING)
def _make_padding(net: IRGraph):
def have_padding(opr):
if isinstance(opr, Conv2dOpr):
if cal_pad_mode(opr) == "SAME":
return False
if hasattr(opr, "padding") and (opr.padding[0] > 0 or opr.padding[1] > 0):
return True
return False
insert_intended = OrderedDict() # type: OrderedDict
for op in net.all_oprs:
if not isinstance(op, (Conv2dOpr, _PoolOpr)):
continue
if have_padding(op):
assert op.inp_tensors[0].ndim == 4, "ERROR: unsupported padding mode"
np_data = np.array(
[
0,
0,
op.padding[0],
op.padding[0],
op.padding[1],
op.padding[1],
0,
0,
],
dtype=np.int32,
)
new_tensor_id = max(net._tensor_ids) + 1
pad_in_tensor = IRTensor(
name=op.inp_tensors[0].name + "_paddings",
shape=[4, 2],
dtype=np.int32,
owner_opr=None,
np_data=np_data,
q_type=np.int32,
axis=None,
)
net.add_tensor(new_tensor_id, pad_in_tensor)
shape = list(op.inp_tensors[0].shape)
new_tensor_id = max(net._tensor_ids) + 1
pad_out_tensor = IRTensor(
name=op.inp_tensors[0].name + "_pad_out",
shape=[
shape[0],
shape[1],
shape[2] + op.padding[0] * 2,
shape[3] + op.padding[1] * 2,
],
dtype=op.inp_tensors[0].dtype,
)
if (
hasattr(op.inp_tensors[0], "scale")
and op.inp_tensors[0].scale is not None
):
pad_out_tensor.scale = op.inp_tensors[0].scale
pad_out_tensor.q_dtype = op.inp_tensors[0].q_dtype
if hasattr(op.inp_tensors[0], "zero_point"):
pad_out_tensor.zero_point = op.inp_tensors[0].zero_point
net.add_tensor(new_tensor_id, pad_out_tensor)
pad_opr = PadOpr()
pad_opr.inp_tensors = [op.inp_tensors[0], pad_in_tensor]
index = op.inp_tensors[0].user_opr.index(op)
op.inp_tensors[0].user_opr[index] = pad_opr
pad_opr.out_tensors = [pad_out_tensor]
pad_out_tensor.owner_opr = pad_opr
op.inp_tensors = [pad_out_tensor] + op.inp_tensors[1:]
pad_out_tensor.user_opr.append(op)
index = net._opr_ids.index(id(op))
insert_intended[index] = (id(pad_opr), pad_opr)
for index, generated_pair in list(insert_intended.items())[::-1]:
net._opr_ids.insert(index, generated_pair[0])
net.all_oprs.insert(index, generated_pair[1])
@_register_tranformation_rule(TransformerRule.DECONV_SHAPE_AS_INPUT)
def _deconv_shape_as_input(net: IRGraph):
for op in net.all_oprs:
if not isinstance(op, Deconv2dOpr):
continue
result_shape = op.out_tensors[0].shape
np_data = np.array(
[result_shape[0], result_shape[2], result_shape[3], result_shape[1],],
dtype=np.int32,
)
new_tensor_id = max(net._tensor_ids) + 1
shape_symvar = IRTensor(
name=op.inp_tensors[0].name + "_deconv_out_shape",
shape=[4],
dtype=np.int32,
owner_opr=op,
np_data=np_data,
q_type=np.int32,
axis=None,
)
shape_tensor = net.get_tensor(new_tensor_id, shape_symvar)
if len(op.inp_tensors) == 2:
op.inp_tensors = [
shape_tensor,
op.inp_tensors[1],
op.inp_tensors[0],
]
else:
op.inp_tensors = [
shape_tensor,
op.inp_tensors[1],
op.inp_tensors[0],
op.inp_tensors[2],
]
@_register_tranformation_rule(TransformerRule.RESIZE_PARAMS_AS_INPUT)
def _resize_params_as_input(net):
for op in net.all_oprs:
if not isinstance(op, ResizeOpr):
continue
if len(op.inp_tensors) == 2:
continue
out_size_tensor = IRTensor(
name=op.inp_tensors[0].name + "_out_size",
shape=(2,),
dtype=np.int32,
np_data=np.array(op.out_size, dtype=np.int32),
q_type=np.int32,
axis=None,
)
op.add_inp_tensors(out_size_tensor)
@_register_tranformation_rule(TransformerRule.CONV_ADD_ZERO_BIAS)
def _add_bias_for_conv(net: IRGraph):
for op in net.all_oprs:
if not isinstance(op, Conv2dOpr):
continue
if len(op.inp_tensors) == 3:
continue
weight_shape = op.inp_tensors[1].shape
bias_shape = (
weight_shape[0]
if len(weight_shape) == 4
else weight_shape[0] * weight_shape[1]
)
bias_shape = (1, bias_shape, 1, 1)
bias = np.zeros(bias_shape, dtype=np.float32)
bias_tensor = IRTensor(
name=op.inp_tensors[0].name + "_bias",
shape=bias_shape,
dtype=np.float32,
np_data=bias,
axis=AxisOrder.NCHW,
)
if op.inp_tensors[0].scale and op.inp_tensors[1].scale:
bias_tensor.set_qparams(
op.inp_tensors[0].scale * op.inp_tensors[1].scale, 0
)
bias_tensor.q_dtype = "int32"
op.inp_tensors.append(bias_tensor)
@_register_tranformation_rule(TransformerRule.DECONV_ADD_ZERO_BIAS)
def _add_bias_for_deconv(net: IRGraph):
for op in net.all_oprs:
if not isinstance(op, Deconv2dOpr):
continue
if len(op.inp_tensors) == 3:
continue
weight_shape = op.inp_tensors[1].shape
bias_shape = (
weight_shape[1]
if len(weight_shape) == 4
else weight_shape[0] * weight_shape[2]
)
bias_shape = (1, bias_shape, 1, 1)
bias = np.zeros(bias_shape, dtype=np.float32)
bias_tensor = IRTensor(
name=op.inp_tensors[0].name + "_bias",
shape=bias_shape,
dtype=np.float32,
np_data=bias,
axis=AxisOrder.NCHW,
)
if op.inp_tensors[0].scale and op.inp_tensors[1].scale:
bias_tensor.set_qparams(
op.inp_tensors[0].scale * op.inp_tensors[1].scale, 0
)
bias_tensor.q_dtype = "int32"
op.inp_tensors.append(bias_tensor)
@_register_tranformation_rule(TransformerRule.RESHAPE_BIAS_TO_1DIM)
def _reshape_bias_to_1dim(net: IRGraph):
for op in net.all_oprs:
if not isinstance(op, (Deconv2dOpr, Conv2dOpr)):
continue
if len(op.inp_tensors) == 2:
continue
bias = op.inp_tensors[2]
if bias.ndim == 4:
bias.shape = (bias.shape[1],)
bias.np_data = bias.np_data.reshape(-1)
@_register_tranformation_rule(TransformerRule.DEPTHWISE_CONV_RESHAPE_WEIGHT)
def _depthwise_conv_reshape_weight(net: IRGraph):
# general group conv is not supported for TFLite
for op in net.all_oprs:
if not isinstance(op, Conv2dOpr):
continue
if op.groups == 1:
continue
weight = op.inp_tensors[1] # G, oc/G, ic/G, kh, kw
ic, cm = weight.shape[1] * op.groups, weight.shape[2]
h, w = weight.shape[3:5]
weight.shape = (ic, cm, h, w) # oc, ic/G, kh, kw
weight.np_data = weight.np_data.reshape(ic, cm, h, w)
@_register_tranformation_rule(TransformerRule.FUSE_ACTIVATION)
def _fuse_activation(net):
delete_intended = []
for op_id, op in zip(net._opr_ids, net.all_oprs):
if isinstance(op, (ReluOpr, TanHOpr)):
prev_ops = net.find_inp_oprs(op)
if len(prev_ops) == 0:
continue
prev_op = prev_ops[0]
if not isinstance(prev_op, OpBase):
continue
if prev_op.activation != "IDENTITY" or prev_op.name == "Deconv2d":
continue
activation = op.name.upper()
prev_op.activation = activation
prev_op.out_tensors = op.out_tensors
for t in prev_op.out_tensors:
t.owner_opr = prev_op
delete_intended.append(net._opr_ids.index(op_id))
for delete_idx in delete_intended[::-1]:
net.delete_ops(delete_idx)
@_register_tranformation_rule(TransformerRule.SLICE_PARAMS_AS_INPUTS_AND_MAKE_SQUEEZE)
def _make_slice_as_inputs(net: IRGraph):
for op in net.all_oprs:
if not isinstance(op, GetSubTensorOpr):
continue
ndim = op.inp_tensors[0].ndim
def make_input(axis, param, init_value):
# make inputs: begin, end and step.
ret = [init_value] * ndim # pylint:disable=cell-var-from-loop
for k, v in zip(axis, param):
ret[k] = v
ret = IRTensor(
name=op.name + "_fake_input", # pylint:disable=cell-var-from-loop
shape=[len(ret)],
dtype=np.int32,
np_data=np.array(ret, dtype=np.int32),
owner_opr=op, # pylint:disable=cell-var-from-loop
q_type=np.int32,
)
return ret
begins_tensor = make_input(op.axis, op.begin_params, 0)
ends_tensor = make_input(op.axis, op.end_params, np.iinfo(np.int32).max)
steps_tensor = make_input(op.axis, op.step_params, 1)
op.inp_tensors = [op.inp_tensors[0], begins_tensor, ends_tensor, steps_tensor]
        # TFLite slice does not support squeeze axis, so insert a squeeze opr here.
# infer actual output shape of tflite slice
desired_out_shape = op.out_tensors[0].shape
actual_out_shape = [1] * ndim
idx = 0
for i in range(ndim):
if i in op.squeeze_axis:
continue
actual_out_shape[i] = desired_out_shape[idx]
idx += 1
slice_out_tensor = IRTensor(
name=op.name + "fake_output",
shape=actual_out_shape,
dtype=op.out_tensors[0].dtype,
q_type=op.out_tensors[0].q_dtype,
owner_opr=op,
)
old_out = op.out_tensors
op.out_tensors = [slice_out_tensor]
squeeze = SqueezeOpr(op.squeeze_axis)
squeeze.inp_tensors = [slice_out_tensor]
squeeze.out_tensors = old_out
idx = net._opr_ids.index(id(op)) + 1
net.add_op(squeeze, idx)
# caffe transformer rules
class PatternNode:
def __init__(self, type, is_output=False, const_value=None):
self.op = None
self.type = type
self.inp_oprs = []
self.inp_const = []
self.inp_tensors = []
self.is_output = is_output
self.const_value = const_value
def check_const_value(self, op):
inp_tensors = [v.np_data for v in op.inp_tensors]
for const in self.const_value:
idx = const[0]
if idx == -1:
find = False
for index, v in enumerate(inp_tensors):
if np.array_equal(const[1], v):
find = True
del inp_tensors[index]
break
if not find:
return False
elif not np.array_equal(const[1], inp_tensors[idx]):
return False
return True
get_type = lambda op: type(op).__name__
def match(node, opr):
node_queue = [node]
opr_queue = [opr]
matched_opr = set()
matched_node = set()
while len(node_queue) != 0:
cur_node = node_queue.pop(0)
cur_opr = opr_queue.pop(0)
        if (cur_node.type != get_type(cur_opr) and cur_node.type != "*") or cur_opr.skip:
            return False
        if cur_node.op is None:
            cur_node.op = cur_opr
            if cur_node.const_value is not None:
if not cur_node.check_const_value(cur_opr):
return False
elif cur_node.op != cur_opr:
return False
matched_opr.add(cur_opr)
matched_node.add(cur_node)
for i, var in enumerate(cur_opr.inp_tensors):
if var.np_data is not None:
cur_node.inp_const.append([i, var.np_data])
else:
cur_node.inp_tensors.append([i, var])
if len(cur_node.inp_oprs) == 0:
continue
if len(cur_node.inp_oprs) != len(cur_opr.inp_oprs):
return False
for i, j in zip(cur_node.inp_oprs, cur_opr.inp_oprs):
node_queue.append(i)
opr_queue.append(j)
for n in matched_node:
if n.is_output:
continue
for op in n.op.out_oprs:
if op not in matched_opr:
return False
return True
def get_softmax_axis(ndim: int) -> int:
if ndim in (0, 1, 3):
return 0
return 1
@_register_tranformation_rule(TransformerRule.FUSE_SOFTMAX)
def _fuse_softmax(net: IRGraph):
matches = OrderedDict() # type: OrderedDict
for op in net.all_oprs:
if not isinstance(op, TrueDivOpr):
continue
try:
prev_op = net.find_inp_oprs(op)[1]
cur_index = net._opr_ids.index(id(op))
if (
not isinstance(prev_op, ReduceOpr)
or prev_op.mode != "SUM"
or prev_op.axis != get_softmax_axis(prev_op.inp_tensors[0].ndim)
or net._opr_ids.index(id(prev_op)) != cur_index - 1
):
continue
prev_op = net.find_inp_oprs(op)[0]
if (
not isinstance(prev_op, ExpOpr)
or net._opr_ids.index(id(prev_op)) != cur_index - 2
):
continue
prev_op = net.find_inp_oprs(prev_op)[0]
if (
not isinstance(prev_op, SubOpr)
or net._opr_ids.index(id(prev_op)) != cur_index - 3
):
continue
prev_op = net.find_inp_oprs(prev_op)[1]
if (
not isinstance(prev_op, ReduceOpr)
or prev_op.mode != "MAX"
or prev_op.axis != get_softmax_axis(prev_op.inp_tensors[0].ndim)
or net._opr_ids.index(id(prev_op)) != cur_index - 4
):
continue
except IndexError: # doesn't match
continue
softmax_opr = SoftmaxOpr(axis=get_softmax_axis(prev_op.inp_tensors[0].ndim))
softmax_opr.beta = 1
softmax_opr.inp_tensors = prev_op.inp_tensors[:1]
for i in softmax_opr.inp_tensors:
i.user_opr.append(softmax_opr)
softmax_opr.out_tensors = op.out_tensors
softmax_out_oprs = net.find_out_oprs(op)
matches[id(prev_op)] = (id(prev_op), softmax_opr, softmax_out_oprs)
for original_id, generated_pair in list(matches.items())[::-1]:
index = net._opr_ids.index(original_id)
for out_op in generated_pair[2]:
generated_pair[1].out_tensors[0].user_opr.append(out_op)
del net._opr_ids[index : index + 5]
del net.all_oprs[index : index + 5]
net._opr_ids.insert(index, generated_pair[0])
net.all_oprs.insert(index, generated_pair[1])
@_register_tranformation_rule(TransformerRule.FUSE_FOR_LEAKY_RELU)
def _fuse_leaky_relu(net: IRGraph):
"""
Elemwise(ADD) + Elemwise(MUL) + Elemwise(MAX) + Elemwise(MIN) -> LeakyRelu
"""
for opr in net.all_oprs:
if (
opr.name == "Add"
and len(net.find_inp_oprs(opr)) == 2
and net.find_inp_oprs(opr)[0].name == "Max"
and net.find_inp_oprs(opr)[1].name == "Mul"
):
max_op = net.find_inp_oprs(opr)[0]
mul_op = net.find_inp_oprs(opr)[1]
if not mul_op.inp_tensors[1].shape == (1,):
continue
if not max_op.inp_tensors[1].shape == (1,):
continue
if (
len(net.find_inp_oprs(mul_op)) != 1
or net.find_inp_oprs(mul_op)[0].name != "Min"
or net.find_inp_oprs(mul_op)[0].inp_tensors[1].shape != (1,)
):
continue
min_op = net.find_inp_oprs(mul_op)[0]
if not min_op.inp_tensors[1].shape == (1,):
continue
if max_op.inp_tensors[0] != min_op.inp_tensors[0]:
continue
leaky_relu = LeakyReluOpr(
negative_slope=float(mul_op.inp_tensors[1].np_data)
)
leaky_relu.inp_tensors = [max_op.inp_tensors[0]]
max_op.inp_tensors[0].user_opr.remove(max_op)
max_op.inp_tensors[0].user_opr.remove(min_op)
max_op.inp_tensors[0].user_opr.append(leaky_relu)
leaky_relu.out_tensors = opr.out_tensors
opr.out_tensors[0].owner_opr = leaky_relu
index = net.all_oprs.index(max_op)
del net.all_oprs[index : index + 4]
del net._opr_ids[index : index + 4]
net.add_op(leaky_relu, index)
@_register_tranformation_rule(TransformerRule.FUSE_FOR_CONV_BIAS)
def _fuse_for_conv_bias(net: IRGraph):
"""
ConvolutionForward + Elemwise(ADD) -> ConvForwardBias
"""
for opr in net.all_oprs:
if (
opr.name == "Conv2d"
and len(net.find_out_oprs(opr)) == 1
and net.find_out_oprs(opr)[0].name == "Add"
):
bias_op = net.find_out_oprs(opr)[0]
if not (
(
bias_op.inp_tensors[1].np_data is not None
and len(bias_op.inp_tensors[1].np_data.reshape(-1))
== opr.inp_tensors[1].shape[0]
)
or (
(
bias_op.inp_tensors[0].np_data is not None
and len(bias_op.inp_tensors[0].np_data.reshape(-1))
== opr.inp_tensors[1].shape[0]
)
)
):
continue
bias_idx = 0 if bias_op.inp_tensors[0].np_data is not None else 1
if len(opr.inp_tensors) == 2:
opr.inp_tensors.append(bias_op.inp_tensors[bias_idx])
else:
bias_shape = opr.inp_tensors[2].np_data.shape
add_tensor = bias_op.inp_tensors[bias_idx].np_data
if add_tensor.shape != bias_shape:
add_tensor = add_tensor.reshape(bias_shape)
opr.inp_tensors[2].np_data += add_tensor
if bias_op in opr.out_tensors[0].user_opr:
opr.out_tensors[0].user_opr.remove(bias_op)
bias_out_op = net.find_out_oprs(bias_op)
if len(bias_out_op) > 0:
for op in bias_out_op:
op.inp_tensors[0] = opr.out_tensors[0]
opr.out_tensors[0].user_opr.append(op)
else:
# last op of the graph
assert bias_op.out_tensors[0] in net.graph_outputs
index = net.graph_outputs.index(bias_op.out_tensors[0])
net.graph_outputs[index] = opr.out_tensors[0]
opr.activation = bias_op.activation
index = net.all_oprs.index(bias_op)
del net.all_oprs[index]
del net._opr_ids[index]
@_register_tranformation_rule(TransformerRule.FUSE_FOR_DECONV_BIAS)
def _fuse_for_deconv_bias(net: IRGraph):
for opr in net.all_oprs:
if (
opr.name == "Deconv2d"
and len(net.find_out_oprs(opr)) == 1
and net.find_out_oprs(opr)[0].name == "Add"
):
bias_op = net.find_out_oprs(opr)[0]
if not (
(
bias_op.inp_tensors[1].np_data is not None
and len(bias_op.inp_tensors[1].np_data.reshape(-1))
== opr.inp_tensors[1].shape[1]
)
or (
(
bias_op.inp_tensors[0].np_data is not None
and len(bias_op.inp_tensors[0].np_data.reshape(-1))
== opr.inp_tensors[1].shape[1]
)
)
):
continue
bias_idx = 0 if bias_op.inp_tensors[0].np_data is not None else 1
if len(opr.inp_tensors) == 3: # shape, weight, input, bias
opr.inp_tensors.append(bias_op.inp_tensors[bias_idx])
else:
bias_shape = opr.inp_tensors[3].np_data.shape
add_tensor = bias_op.inp_tensors[bias_idx].np_data
if add_tensor.shape != bias_shape:
add_tensor = add_tensor.reshape(bias_shape)
opr.inp_tensors[3].np_data += add_tensor
if bias_op in opr.out_tensors[0].user_opr:
opr.out_tensors[0].user_opr.remove(bias_op)
bias_out_op = net.find_out_oprs(bias_op)
if len(bias_out_op) > 0:
for op in bias_out_op:
op.inp_tensors[0] = opr.out_tensors[0]
opr.out_tensors[0].user_opr.append(op)
else:
# last op of the graph
assert bias_op.out_tensors[0] in net.graph_outputs
index = net.graph_outputs.index(bias_op.out_tensors[0])
net.graph_outputs[index] = opr.out_tensors[0]
opr.activation = bias_op.activation
index = net.all_oprs.index(bias_op)
del net.all_oprs[index]
del net._opr_ids[index]
@_register_tranformation_rule(TransformerRule.EXPAND_MUL_ADD3)
def _expand_mul_add3(net: IRGraph):
for op in net.all_oprs:
if not isinstance(op, FuseMulAdd3Opr):
continue
last_op = net.find_inp_oprs(op)
assert len(last_op) == 1
mul_out_tensor = IRTensor(
name=op.inp_tensors[0].name + "_mul_out",
shape=op.inp_tensors[0].shape,
dtype=op.inp_tensors[0].dtype,
)
new_tensor_id = max(net._tensor_ids) + 1
net.add_tensor(new_tensor_id, mul_out_tensor)
mul_op = MulOpr()
mul_out_tensor.owner_opr = mul_op
mul_op.inp_tensors = op.inp_tensors[:2]
for o in mul_op.inp_tensors:
index = o.user_opr.index(op)
o.user_opr[index] = mul_op
mul_op.out_tensors = [mul_out_tensor]
add_op = AddOpr()
add_op.inp_tensors = [mul_out_tensor, op.inp_tensors[2]]
mul_out_tensor.user_opr.append(add_op)
add_op.out_tensors = op.out_tensors
index = net._opr_ids.index(id(op))
net.delete_ops(index)
net.add_op(mul_op, index)
net.add_op(add_op, index + 1)
@_register_tranformation_rule(TransformerRule.REPLACE_FLATTEN_TO_RESHAPE)
def _replace_flatten_to_reshape(net: IRGraph):
for opr in net.all_oprs:
if isinstance(opr, FlattenOpr):
out_shape = tuple(list(opr.inp_tensors[0].shape[: opr.start_axis]) + [-1])
reshape_op = ReshapeOpr(out_shape=out_shape)
reshape_op.inp_tensors = opr.inp_tensors
for t in reshape_op.inp_tensors:
idx = t.user_opr.index(opr)
t.user_opr[idx] = reshape_op
reshape_op.out_tensors = opr.out_tensors
for t in reshape_op.out_tensors:
t.owner_opr = reshape_op
net.replace_op(opr, reshape_op)
@_register_tranformation_rule(TransformerRule.REMOVE_RESHAPE_REALTED_OP)
def _remove_reshape_tensors(net: IRGraph):
for opr in net.all_oprs:
if isinstance(opr, ReshapeOpr) and len(opr.inp_tensors) > 1:
opr.inp_tensors = opr.inp_tensors[:1]
@_register_tranformation_rule(TransformerRule.REMOVE_DROPOUT)
def _remove_dropout(net: IRGraph):
for opr in net.all_oprs:
for idx, inp in enumerate(opr.inp_tensors):
owner_opr = inp.owner_opr
if isinstance(owner_opr, DropoutOpr) and owner_opr.drop_prob == 0:
opr.inp_tensors[idx] = owner_opr.inp_tensors[0]
for idx, out in enumerate(net.graph_outputs):
owner_opr = out.owner_opr
if isinstance(owner_opr, DropoutOpr) and owner_opr.drop_prob == 0:
net.graph_outputs[idx] = owner_opr.inp_tensors[0]
@_register_tranformation_rule(TransformerRule.REMOVE_RELU)
def _remove_relu(net: IRGraph):
for opr in net.all_oprs:
for idx, inp in enumerate(opr.inp_tensors):
owner_opr = inp.owner_opr
if isinstance(owner_opr, ReluOpr):
opr.inp_tensors[idx] = owner_opr.inp_tensors[0]
for idx, out in enumerate(net.graph_outputs):
owner_opr = out.owner_opr
if isinstance(owner_opr, ReluOpr):
net.graph_outputs[idx] = owner_opr.inp_tensors[0]
visited_tensor = set() # type: set
def _dfs_recursive(op_set, tensor):
owner_opr = tensor.owner_opr
op_set.add(owner_opr)
if tensor in visited_tensor:
return
visited_tensor.add(tensor)
if isinstance(owner_opr, IRGraph) or owner_opr is None:
return
for tt in owner_opr.inp_tensors:
_dfs_recursive(op_set, tt)
@_register_tranformation_rule(TransformerRule.REMOVE_UNRELATED_IROP)
def _remove_unrelated_op(net: IRGraph):
match_sets = set() # type: Set[OpBase]
for out_tensor in net.graph_outputs:
_dfs_recursive(match_sets, out_tensor)
remove_idx = []
for opr in net.all_oprs:
if opr not in match_sets:
index = net._opr_ids.index(id(opr))
remove_idx.append(index)
for i in remove_idx[::-1]:
net.delete_ops(i)
@_register_tranformation_rule(TransformerRule.ADD_FAKE_HSIGMOID_OUT)
def _add_fake_hsigmoid_tensor(net: IRGraph):
for opr in net.all_oprs:
if isinstance(opr, (HardSwishOpr, HardSigmoidOpr)):
add_3_out_tensor = IRTensor(
opr.out_tensors[0].name + "_fake_add3_out",
opr.inp_tensors[0].shape,
opr.inp_tensors[0].dtype,
q_type=opr.inp_tensors[0].q_dtype,
scale=opr.inp_tensors[0].scale,
zero_point=opr.inp_tensors[0].zero_point,
)
opr.add_inp_tensors(add_3_out_tensor)
relu6_out_tensor = IRTensor(
opr.out_tensors[0].name + "_relu6_out",
opr.inp_tensors[0].shape,
opr.inp_tensors[0].dtype,
q_type=opr.inp_tensors[0].q_dtype,
scale=opr.inp_tensors[0].scale,
zero_point=opr.inp_tensors[0].zero_point,
)
opr.add_inp_tensors(relu6_out_tensor)
if isinstance(opr, HardSwishOpr):
div6_out_tensor = IRTensor(
opr.out_tensors[0].name + "_div_out",
opr.inp_tensors[0].shape,
opr.inp_tensors[0].dtype,
q_type=opr.inp_tensors[0].q_dtype,
scale=opr.inp_tensors[0].scale,
zero_point=opr.inp_tensors[0].zero_point,
)
opr.add_inp_tensors(div6_out_tensor)
def fold_conv_bn(
conv_weight, conv_bias, conv_groups, gamma, beta, bn_mean, bn_var, eps
):
conv_bias = conv_bias.reshape(1, -1, 1, 1)
gamma = gamma.reshape(1, -1, 1, 1)
beta = beta.reshape(1, -1, 1, 1)
bn_mean = bn_mean.reshape(1, -1, 1, 1)
bn_var = bn_var.reshape(1, -1, 1, 1)
# bn_istd = 1 / bn_std
bn_istd = 1.0 / sqrt(bn_var + eps) # type: ignore[attr-defined]
# w_fold = gamma / bn_std * W
scale_factor = gamma * bn_istd
if conv_groups == 1:
w_fold = conv_weight * scale_factor.reshape(-1, 1, 1, 1)
else:
w_fold = conv_weight * scale_factor.reshape(conv_groups, -1, 1, 1, 1)
# b_fold = gamma * (b - bn_mean) / bn_std + beta
b_fold = beta + gamma * (conv_bias - bn_mean) * bn_istd
return w_fold, b_fold
@_register_tranformation_rule(TransformerRule.FUSE_CONV_BN)
def _fuse_conv_bn(net: IRGraph):
for opr in net.all_oprs:
if (
opr.name == "BatchNormalization"
and len(net.find_inp_oprs(opr)) == 1
and net.find_inp_oprs(opr)[0].name == "Conv2d"
and len(net.find_out_oprs(net.find_inp_oprs(opr)[0])) == 1
and net.find_out_oprs(net.find_inp_oprs(opr)[0])[0] == opr
):
gamma = (
Tensor(opr.weight) # type: ignore[attr-defined]
if opr.weight is not None # type: ignore[attr-defined]
else Tensor(opr.inp_tensors[1].np_data)
)
beta = (
| Tensor(opr.bias) | megengine.Tensor |
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
from collections import OrderedDict
from enum import Enum
from functools import cmp_to_key
from typing import Set # pylint: disable=unused-import
from typing import Callable, Dict, Sequence
import numpy as np
from megengine import Tensor
from megengine.functional import sqrt
from ..converter_ir.ir_graph import IRGraph
from .ir_op import (
AddOpr,
Conv2dOpr,
ConvRelu2dOpr,
Deconv2dOpr,
DropoutOpr,
ExpOpr,
FlattenOpr,
FuseMulAdd3Opr,
GetSubTensorOpr,
HardSigmoidOpr,
HardSwishOpr,
IdentityOpr,
LeakyReluOpr,
MulOpr,
OpBase,
PadOpr,
ReduceOpr,
ReluOpr,
ReshapeOpr,
ResizeOpr,
SoftmaxOpr,
SqueezeOpr,
SubOpr,
TanHOpr,
TransposeOpr,
TrueDivOpr,
_PoolOpr,
)
from .ir_tensor import AxisOrder, IRTensor
class TransformerRule(Enum):
# general rules
NOPE = 1
# for TFLite
REDUCE_AXIS_AS_INPUT = 100
REMOVE_RESHAPE_INPUT = 101
# FUSE_FOR_RELU6 pass should happen before FUSE_ACTIVATION
FUSE_FOR_RELU6 = 102 ##
EXPAND_CONVRELU = 102.1
CONV_ADD_ZERO_BIAS = 103
FUSE_FOR_CONV_BIAS = 103.1
FUSE_CONV_BN = 104
DECONV_ADD_ZERO_BIAS = 105
    # DEPTHWISE_CONV_RESHAPE_WEIGHT requires RESHAPE_BIAS_TO_1DIM
DEPTHWISE_CONV_RESHAPE_WEIGHT = 106
FUSE_SOFTMAX = 107
# RESHAPE_BIAS_TO_1DIM should happen before DECONV_SHAPE_AS_INPUT
RESHAPE_BIAS_TO_1DIM = 108
DECONV_SHAPE_AS_INPUT = 109
FUSE_ASTYPE = 110 ##
PADDING_FOR_CONV_AND_POOLING = 111
TRANSPOSE_PATTERN_AS_INPUT = 112
# FUSE_FOR_LEAKY_RELU should happen before EXPAND_MUL_ADD3
FUSE_FOR_LEAKY_RELU = 113
EXPAND_MUL_ADD3 = 114
EXPAND_ADD_SIGMOID = 115 ##
FUSE_FOR_DECONV_BIAS = 117
FUSE_FOR_FULLY_CONNECTED = 118 ##
# for TFLite Converter
SLICE_PARAMS_AS_INPUTS_AND_MAKE_SQUEEZE = 119
RESIZE_PARAMS_AS_INPUT = 120
REPLACE_FLATTEN_TO_RESHAPE = 120.1
# remove reshape
REMOVE_RESHAPE_REALTED_OP = 121
REMOVE_DROPOUT = 122
FUSE_ACTIVATION = 123
REMOVE_IDENTITY = 124
REMOVE_RELU = 125
REMOVE_UNRELATED_IROP = 130
ADD_FAKE_HSIGMOID_OUT = 131
RENAME_CAFFE_LAYER_TENSOR = 132
def cmp_rules(a, b):
if a.value < b.value:
return -1
if a.value > b.value:
return 1
return 0
class IRTransform:
def __init__(self, transformer_options):
if not isinstance(transformer_options, Sequence):
transformer_options = [
transformer_options,
]
# bias of depthwise_conv must be 1 dim
if TransformerRule.DEPTHWISE_CONV_RESHAPE_WEIGHT in transformer_options:
if TransformerRule.RESHAPE_BIAS_TO_1DIM not in transformer_options:
transformer_options.append(TransformerRule.RESHAPE_BIAS_TO_1DIM)
self.trans_options = sorted(transformer_options, key=cmp_to_key(cmp_rules))
def transform(self, ir_graph):
for option in self.trans_options:
TRANSFORMMAP[option](ir_graph)
return ir_graph
TRANSFORMMAP: Dict[Enum, Callable] = {}
def _register_tranformation_rule(transformer_option):
def callback(impl):
TRANSFORMMAP[transformer_option] = impl
return callback
def cal_pad_mode(tm_opr):
out_shape = tm_opr.out_tensors[0].shape
inp_shape = tm_opr.inp_tensors[0].shape
if out_shape[2:] == inp_shape[2:]:
return "SAME"
else:
return "VALID"
@_register_tranformation_rule(TransformerRule.REMOVE_RESHAPE_INPUT)
def _remove_reshape_input(net):
for op in net.all_oprs:
if not isinstance(op, ReshapeOpr):
continue
if len(op.inp_tensors) == 2:
del op.inp_tensors[1]
@_register_tranformation_rule(TransformerRule.TRANSPOSE_PATTERN_AS_INPUT)
def _transpose_pattern_as_input(net):
for op in net.all_oprs:
if not isinstance(op, TransposeOpr):
continue
perm_tensor = IRTensor(
name=op.inp_tensors[0].name + "_perm",
shape=np.array(op.pattern).shape,
dtype=np.int32,
np_data=np.array(op.pattern, dtype=np.int32),
owner_opr=op,
q_type=np.int32,
axis=None,
)
op.add_inp_tensors(perm_tensor)
@_register_tranformation_rule(TransformerRule.REDUCE_AXIS_AS_INPUT)
def _reduce_axis_as_input(net):
for op in net.all_oprs:
if not isinstance(op, ReduceOpr):
continue
axis_tensor = IRTensor(
name=op.inp_tensors[0].name + "_axis",
shape=[1],
dtype=np.int32,
np_data=np.array(op.axis, dtype=np.int32),
owner_opr=op,
q_type=np.int32,
axis=None,
)
op.add_inp_tensors(axis_tensor)
@_register_tranformation_rule(TransformerRule.PADDING_FOR_CONV_AND_POOLING)
def _make_padding(net: IRGraph):
def have_padding(opr):
if isinstance(opr, Conv2dOpr):
if cal_pad_mode(opr) == "SAME":
return False
if hasattr(opr, "padding") and (opr.padding[0] > 0 or opr.padding[1] > 0):
return True
return False
insert_intended = OrderedDict() # type: OrderedDict
for op in net.all_oprs:
if not isinstance(op, (Conv2dOpr, _PoolOpr)):
continue
if have_padding(op):
assert op.inp_tensors[0].ndim == 4, "ERROR: unsupported padding mode"
np_data = np.array(
[
0,
0,
op.padding[0],
op.padding[0],
op.padding[1],
op.padding[1],
0,
0,
],
dtype=np.int32,
)
new_tensor_id = max(net._tensor_ids) + 1
pad_in_tensor = IRTensor(
name=op.inp_tensors[0].name + "_paddings",
shape=[4, 2],
dtype=np.int32,
owner_opr=None,
np_data=np_data,
q_type=np.int32,
axis=None,
)
net.add_tensor(new_tensor_id, pad_in_tensor)
shape = list(op.inp_tensors[0].shape)
new_tensor_id = max(net._tensor_ids) + 1
pad_out_tensor = IRTensor(
name=op.inp_tensors[0].name + "_pad_out",
shape=[
shape[0],
shape[1],
shape[2] + op.padding[0] * 2,
shape[3] + op.padding[1] * 2,
],
dtype=op.inp_tensors[0].dtype,
)
if (
hasattr(op.inp_tensors[0], "scale")
and op.inp_tensors[0].scale is not None
):
pad_out_tensor.scale = op.inp_tensors[0].scale
pad_out_tensor.q_dtype = op.inp_tensors[0].q_dtype
if hasattr(op.inp_tensors[0], "zero_point"):
pad_out_tensor.zero_point = op.inp_tensors[0].zero_point
net.add_tensor(new_tensor_id, pad_out_tensor)
pad_opr = PadOpr()
pad_opr.inp_tensors = [op.inp_tensors[0], pad_in_tensor]
index = op.inp_tensors[0].user_opr.index(op)
op.inp_tensors[0].user_opr[index] = pad_opr
pad_opr.out_tensors = [pad_out_tensor]
pad_out_tensor.owner_opr = pad_opr
op.inp_tensors = [pad_out_tensor] + op.inp_tensors[1:]
pad_out_tensor.user_opr.append(op)
index = net._opr_ids.index(id(op))
insert_intended[index] = (id(pad_opr), pad_opr)
for index, generated_pair in list(insert_intended.items())[::-1]:
net._opr_ids.insert(index, generated_pair[0])
net.all_oprs.insert(index, generated_pair[1])
@_register_tranformation_rule(TransformerRule.DECONV_SHAPE_AS_INPUT)
def _deconv_shape_as_input(net: IRGraph):
for op in net.all_oprs:
if not isinstance(op, Deconv2dOpr):
continue
result_shape = op.out_tensors[0].shape
np_data = np.array(
[result_shape[0], result_shape[2], result_shape[3], result_shape[1],],
dtype=np.int32,
)
new_tensor_id = max(net._tensor_ids) + 1
shape_symvar = IRTensor(
name=op.inp_tensors[0].name + "_deconv_out_shape",
shape=[4],
dtype=np.int32,
owner_opr=op,
np_data=np_data,
q_type=np.int32,
axis=None,
)
shape_tensor = net.get_tensor(new_tensor_id, shape_symvar)
if len(op.inp_tensors) == 2:
op.inp_tensors = [
shape_tensor,
op.inp_tensors[1],
op.inp_tensors[0],
]
else:
op.inp_tensors = [
shape_tensor,
op.inp_tensors[1],
op.inp_tensors[0],
op.inp_tensors[2],
]
@_register_tranformation_rule(TransformerRule.RESIZE_PARAMS_AS_INPUT)
def _resize_params_as_input(net):
for op in net.all_oprs:
if not isinstance(op, ResizeOpr):
continue
if len(op.inp_tensors) == 2:
continue
out_size_tensor = IRTensor(
name=op.inp_tensors[0].name + "_out_size",
shape=(2,),
dtype=np.int32,
np_data=np.array(op.out_size, dtype=np.int32),
q_type=np.int32,
axis=None,
)
op.add_inp_tensors(out_size_tensor)
@_register_tranformation_rule(TransformerRule.CONV_ADD_ZERO_BIAS)
def _add_bias_for_conv(net: IRGraph):
for op in net.all_oprs:
if not isinstance(op, Conv2dOpr):
continue
if len(op.inp_tensors) == 3:
continue
weight_shape = op.inp_tensors[1].shape
bias_shape = (
weight_shape[0]
if len(weight_shape) == 4
else weight_shape[0] * weight_shape[1]
)
bias_shape = (1, bias_shape, 1, 1)
bias = np.zeros(bias_shape, dtype=np.float32)
bias_tensor = IRTensor(
name=op.inp_tensors[0].name + "_bias",
shape=bias_shape,
dtype=np.float32,
np_data=bias,
axis=AxisOrder.NCHW,
)
if op.inp_tensors[0].scale and op.inp_tensors[1].scale:
bias_tensor.set_qparams(
op.inp_tensors[0].scale * op.inp_tensors[1].scale, 0
)
bias_tensor.q_dtype = "int32"
op.inp_tensors.append(bias_tensor)
@_register_tranformation_rule(TransformerRule.DECONV_ADD_ZERO_BIAS)
def _add_bias_for_deconv(net: IRGraph):
for op in net.all_oprs:
if not isinstance(op, Deconv2dOpr):
continue
if len(op.inp_tensors) == 3:
continue
weight_shape = op.inp_tensors[1].shape
bias_shape = (
weight_shape[1]
if len(weight_shape) == 4
else weight_shape[0] * weight_shape[2]
)
bias_shape = (1, bias_shape, 1, 1)
bias = np.zeros(bias_shape, dtype=np.float32)
bias_tensor = IRTensor(
name=op.inp_tensors[0].name + "_bias",
shape=bias_shape,
dtype=np.float32,
np_data=bias,
axis=AxisOrder.NCHW,
)
if op.inp_tensors[0].scale and op.inp_tensors[1].scale:
bias_tensor.set_qparams(
op.inp_tensors[0].scale * op.inp_tensors[1].scale, 0
)
bias_tensor.q_dtype = "int32"
op.inp_tensors.append(bias_tensor)
@_register_tranformation_rule(TransformerRule.RESHAPE_BIAS_TO_1DIM)
def _reshape_bias_to_1dim(net: IRGraph):
for op in net.all_oprs:
if not isinstance(op, (Deconv2dOpr, Conv2dOpr)):
continue
if len(op.inp_tensors) == 2:
continue
bias = op.inp_tensors[2]
if bias.ndim == 4:
bias.shape = (bias.shape[1],)
bias.np_data = bias.np_data.reshape(-1)
@_register_tranformation_rule(TransformerRule.DEPTHWISE_CONV_RESHAPE_WEIGHT)
def _depthwise_conv_reshape_weight(net: IRGraph):
# general group conv is not supported for TFLite
for op in net.all_oprs:
if not isinstance(op, Conv2dOpr):
continue
if op.groups == 1:
continue
weight = op.inp_tensors[1] # G, oc/G, ic/G, kh, kw
ic, cm = weight.shape[1] * op.groups, weight.shape[2]
h, w = weight.shape[3:5]
weight.shape = (ic, cm, h, w) # oc, ic/G, kh, kw
weight.np_data = weight.np_data.reshape(ic, cm, h, w)
@_register_tranformation_rule(TransformerRule.FUSE_ACTIVATION)
def _fuse_activation(net):
delete_intended = []
for op_id, op in zip(net._opr_ids, net.all_oprs):
if isinstance(op, (ReluOpr, TanHOpr)):
prev_ops = net.find_inp_oprs(op)
if len(prev_ops) == 0:
continue
prev_op = prev_ops[0]
if not isinstance(prev_op, OpBase):
continue
if prev_op.activation != "IDENTITY" or prev_op.name == "Deconv2d":
continue
activation = op.name.upper()
prev_op.activation = activation
prev_op.out_tensors = op.out_tensors
for t in prev_op.out_tensors:
t.owner_opr = prev_op
delete_intended.append(net._opr_ids.index(op_id))
for delete_idx in delete_intended[::-1]:
net.delete_ops(delete_idx)
@_register_tranformation_rule(TransformerRule.SLICE_PARAMS_AS_INPUTS_AND_MAKE_SQUEEZE)
def _make_slice_as_inputs(net: IRGraph):
for op in net.all_oprs:
if not isinstance(op, GetSubTensorOpr):
continue
ndim = op.inp_tensors[0].ndim
def make_input(axis, param, init_value):
# make inputs: begin, end and step.
ret = [init_value] * ndim # pylint:disable=cell-var-from-loop
for k, v in zip(axis, param):
ret[k] = v
ret = IRTensor(
name=op.name + "_fake_input", # pylint:disable=cell-var-from-loop
shape=[len(ret)],
dtype=np.int32,
np_data=np.array(ret, dtype=np.int32),
owner_opr=op, # pylint:disable=cell-var-from-loop
q_type=np.int32,
)
return ret
begins_tensor = make_input(op.axis, op.begin_params, 0)
ends_tensor = make_input(op.axis, op.end_params, np.iinfo(np.int32).max)
steps_tensor = make_input(op.axis, op.step_params, 1)
op.inp_tensors = [op.inp_tensors[0], begins_tensor, ends_tensor, steps_tensor]
        # TFLite slice does not support squeeze axis, so insert a squeeze opr here.
# infer actual output shape of tflite slice
desired_out_shape = op.out_tensors[0].shape
actual_out_shape = [1] * ndim
idx = 0
for i in range(ndim):
if i in op.squeeze_axis:
continue
actual_out_shape[i] = desired_out_shape[idx]
idx += 1
slice_out_tensor = IRTensor(
name=op.name + "fake_output",
shape=actual_out_shape,
dtype=op.out_tensors[0].dtype,
q_type=op.out_tensors[0].q_dtype,
owner_opr=op,
)
old_out = op.out_tensors
op.out_tensors = [slice_out_tensor]
squeeze = SqueezeOpr(op.squeeze_axis)
squeeze.inp_tensors = [slice_out_tensor]
squeeze.out_tensors = old_out
idx = net._opr_ids.index(id(op)) + 1
net.add_op(squeeze, idx)
# caffe transformer rules
class PatternNode:
def __init__(self, type, is_output=False, const_value=None):
self.op = None
self.type = type
self.inp_oprs = []
self.inp_const = []
self.inp_tensors = []
self.is_output = is_output
self.const_value = const_value
def check_const_value(self, op):
inp_tensors = [v.np_data for v in op.inp_tensors]
for const in self.const_value:
idx = const[0]
if idx == -1:
find = False
for index, v in enumerate(inp_tensors):
if np.array_equal(const[1], v):
find = True
del inp_tensors[index]
break
if not find:
return False
elif not np.array_equal(const[1], inp_tensors[idx]):
return False
return True
get_type = lambda op: type(op).__name__
def match(node, opr):
node_queue = [node]
opr_queue = [opr]
matched_opr = set()
matched_node = set()
while len(node_queue) != 0:
cur_node = node_queue.pop(0)
cur_opr = opr_queue.pop(0)
        if (cur_node.type != get_type(cur_opr) and cur_node.type != "*") or cur_opr.skip:
            return False
        if cur_node.op is None:
            cur_node.op = cur_opr
            if cur_node.const_value is not None:
if not cur_node.check_const_value(cur_opr):
return False
elif cur_node.op != cur_opr:
return False
matched_opr.add(cur_opr)
matched_node.add(cur_node)
for i, var in enumerate(cur_opr.inp_tensors):
if var.np_data is not None:
cur_node.inp_const.append([i, var.np_data])
else:
cur_node.inp_tensors.append([i, var])
if len(cur_node.inp_oprs) == 0:
continue
if len(cur_node.inp_oprs) != len(cur_opr.inp_oprs):
return False
for i, j in zip(cur_node.inp_oprs, cur_opr.inp_oprs):
node_queue.append(i)
opr_queue.append(j)
for n in matched_node:
if n.is_output:
continue
for op in n.op.out_oprs:
if op not in matched_opr:
return False
return True
def get_softmax_axis(ndim: int) -> int:
if ndim in (0, 1, 3):
return 0
return 1
@_register_tranformation_rule(TransformerRule.FUSE_SOFTMAX)
def _fuse_softmax(net: IRGraph):
matches = OrderedDict() # type: OrderedDict
for op in net.all_oprs:
if not isinstance(op, TrueDivOpr):
continue
try:
prev_op = net.find_inp_oprs(op)[1]
cur_index = net._opr_ids.index(id(op))
if (
not isinstance(prev_op, ReduceOpr)
or prev_op.mode != "SUM"
or prev_op.axis != get_softmax_axis(prev_op.inp_tensors[0].ndim)
or net._opr_ids.index(id(prev_op)) != cur_index - 1
):
continue
prev_op = net.find_inp_oprs(op)[0]
if (
not isinstance(prev_op, ExpOpr)
or net._opr_ids.index(id(prev_op)) != cur_index - 2
):
continue
prev_op = net.find_inp_oprs(prev_op)[0]
if (
not isinstance(prev_op, SubOpr)
or net._opr_ids.index(id(prev_op)) != cur_index - 3
):
continue
prev_op = net.find_inp_oprs(prev_op)[1]
if (
not isinstance(prev_op, ReduceOpr)
or prev_op.mode != "MAX"
or prev_op.axis != get_softmax_axis(prev_op.inp_tensors[0].ndim)
or net._opr_ids.index(id(prev_op)) != cur_index - 4
):
continue
except IndexError: # doesn't match
continue
softmax_opr = SoftmaxOpr(axis=get_softmax_axis(prev_op.inp_tensors[0].ndim))
softmax_opr.beta = 1
softmax_opr.inp_tensors = prev_op.inp_tensors[:1]
for i in softmax_opr.inp_tensors:
i.user_opr.append(softmax_opr)
softmax_opr.out_tensors = op.out_tensors
softmax_out_oprs = net.find_out_oprs(op)
matches[id(prev_op)] = (id(prev_op), softmax_opr, softmax_out_oprs)
for original_id, generated_pair in list(matches.items())[::-1]:
index = net._opr_ids.index(original_id)
for out_op in generated_pair[2]:
generated_pair[1].out_tensors[0].user_opr.append(out_op)
del net._opr_ids[index : index + 5]
del net.all_oprs[index : index + 5]
net._opr_ids.insert(index, generated_pair[0])
net.all_oprs.insert(index, generated_pair[1])
@_register_tranformation_rule(TransformerRule.FUSE_FOR_LEAKY_RELU)
def _fuse_leaky_relu(net: IRGraph):
"""
Elemwise(ADD) + Elemwise(MUL) + Elemwise(MAX) + Elemwise(MIN) -> LeakyRelu
"""
for opr in net.all_oprs:
if (
opr.name == "Add"
and len(net.find_inp_oprs(opr)) == 2
and net.find_inp_oprs(opr)[0].name == "Max"
and net.find_inp_oprs(opr)[1].name == "Mul"
):
max_op = net.find_inp_oprs(opr)[0]
mul_op = net.find_inp_oprs(opr)[1]
if not mul_op.inp_tensors[1].shape == (1,):
continue
if not max_op.inp_tensors[1].shape == (1,):
continue
if (
len(net.find_inp_oprs(mul_op)) != 1
or net.find_inp_oprs(mul_op)[0].name != "Min"
or net.find_inp_oprs(mul_op)[0].inp_tensors[1].shape != (1,)
):
continue
min_op = net.find_inp_oprs(mul_op)[0]
if not min_op.inp_tensors[1].shape == (1,):
continue
if max_op.inp_tensors[0] != min_op.inp_tensors[0]:
continue
leaky_relu = LeakyReluOpr(
negative_slope=float(mul_op.inp_tensors[1].np_data)
)
leaky_relu.inp_tensors = [max_op.inp_tensors[0]]
max_op.inp_tensors[0].user_opr.remove(max_op)
max_op.inp_tensors[0].user_opr.remove(min_op)
max_op.inp_tensors[0].user_opr.append(leaky_relu)
leaky_relu.out_tensors = opr.out_tensors
opr.out_tensors[0].owner_opr = leaky_relu
index = net.all_oprs.index(max_op)
del net.all_oprs[index : index + 4]
del net._opr_ids[index : index + 4]
net.add_op(leaky_relu, index)
@_register_tranformation_rule(TransformerRule.FUSE_FOR_CONV_BIAS)
def _fuse_for_conv_bias(net: IRGraph):
"""
ConvolutionForward + Elemwise(ADD) -> ConvForwardBias
"""
for opr in net.all_oprs:
if (
opr.name == "Conv2d"
and len(net.find_out_oprs(opr)) == 1
and net.find_out_oprs(opr)[0].name == "Add"
):
bias_op = net.find_out_oprs(opr)[0]
if not (
(
bias_op.inp_tensors[1].np_data is not None
and len(bias_op.inp_tensors[1].np_data.reshape(-1))
== opr.inp_tensors[1].shape[0]
)
or (
(
bias_op.inp_tensors[0].np_data is not None
and len(bias_op.inp_tensors[0].np_data.reshape(-1))
== opr.inp_tensors[1].shape[0]
)
)
):
continue
bias_idx = 0 if bias_op.inp_tensors[0].np_data is not None else 1
if len(opr.inp_tensors) == 2:
opr.inp_tensors.append(bias_op.inp_tensors[bias_idx])
else:
bias_shape = opr.inp_tensors[2].np_data.shape
add_tensor = bias_op.inp_tensors[bias_idx].np_data
if add_tensor.shape != bias_shape:
add_tensor = add_tensor.reshape(bias_shape)
opr.inp_tensors[2].np_data += add_tensor
if bias_op in opr.out_tensors[0].user_opr:
opr.out_tensors[0].user_opr.remove(bias_op)
bias_out_op = net.find_out_oprs(bias_op)
if len(bias_out_op) > 0:
for op in bias_out_op:
op.inp_tensors[0] = opr.out_tensors[0]
opr.out_tensors[0].user_opr.append(op)
else:
# last op of the graph
assert bias_op.out_tensors[0] in net.graph_outputs
index = net.graph_outputs.index(bias_op.out_tensors[0])
net.graph_outputs[index] = opr.out_tensors[0]
opr.activation = bias_op.activation
index = net.all_oprs.index(bias_op)
del net.all_oprs[index]
del net._opr_ids[index]
@_register_tranformation_rule(TransformerRule.FUSE_FOR_DECONV_BIAS)
def _fuse_for_deconv_bias(net: IRGraph):
for opr in net.all_oprs:
if (
opr.name == "Deconv2d"
and len(net.find_out_oprs(opr)) == 1
and net.find_out_oprs(opr)[0].name == "Add"
):
bias_op = net.find_out_oprs(opr)[0]
if not (
(
bias_op.inp_tensors[1].np_data is not None
and len(bias_op.inp_tensors[1].np_data.reshape(-1))
== opr.inp_tensors[1].shape[1]
)
or (
(
bias_op.inp_tensors[0].np_data is not None
and len(bias_op.inp_tensors[0].np_data.reshape(-1))
== opr.inp_tensors[1].shape[1]
)
)
):
continue
bias_idx = 0 if bias_op.inp_tensors[0].np_data is not None else 1
if len(opr.inp_tensors) == 3: # shape, weight, input, bias
opr.inp_tensors.append(bias_op.inp_tensors[bias_idx])
else:
bias_shape = opr.inp_tensors[3].np_data.shape
add_tensor = bias_op.inp_tensors[bias_idx].np_data
if add_tensor.shape != bias_shape:
add_tensor = add_tensor.reshape(bias_shape)
opr.inp_tensors[3].np_data += add_tensor
if bias_op in opr.out_tensors[0].user_opr:
opr.out_tensors[0].user_opr.remove(bias_op)
bias_out_op = net.find_out_oprs(bias_op)
if len(bias_out_op) > 0:
for op in bias_out_op:
op.inp_tensors[0] = opr.out_tensors[0]
opr.out_tensors[0].user_opr.append(op)
else:
# last op of the graph
assert bias_op.out_tensors[0] in net.graph_outputs
index = net.graph_outputs.index(bias_op.out_tensors[0])
net.graph_outputs[index] = opr.out_tensors[0]
opr.activation = bias_op.activation
index = net.all_oprs.index(bias_op)
del net.all_oprs[index]
del net._opr_ids[index]
@_register_tranformation_rule(TransformerRule.EXPAND_MUL_ADD3)
def _expand_mul_add3(net: IRGraph):
for op in net.all_oprs:
if not isinstance(op, FuseMulAdd3Opr):
continue
last_op = net.find_inp_oprs(op)
assert len(last_op) == 1
mul_out_tensor = IRTensor(
name=op.inp_tensors[0].name + "_mul_out",
shape=op.inp_tensors[0].shape,
dtype=op.inp_tensors[0].dtype,
)
new_tensor_id = max(net._tensor_ids) + 1
net.add_tensor(new_tensor_id, mul_out_tensor)
mul_op = MulOpr()
mul_out_tensor.owner_opr = mul_op
mul_op.inp_tensors = op.inp_tensors[:2]
for o in mul_op.inp_tensors:
index = o.user_opr.index(op)
o.user_opr[index] = mul_op
mul_op.out_tensors = [mul_out_tensor]
add_op = AddOpr()
add_op.inp_tensors = [mul_out_tensor, op.inp_tensors[2]]
mul_out_tensor.user_opr.append(add_op)
add_op.out_tensors = op.out_tensors
index = net._opr_ids.index(id(op))
net.delete_ops(index)
net.add_op(mul_op, index)
net.add_op(add_op, index + 1)
@_register_tranformation_rule(TransformerRule.REPLACE_FLATTEN_TO_RESHAPE)
def _replace_flatten_to_reshape(net: IRGraph):
for opr in net.all_oprs:
if isinstance(opr, FlattenOpr):
out_shape = tuple(list(opr.inp_tensors[0].shape[: opr.start_axis]) + [-1])
reshape_op = ReshapeOpr(out_shape=out_shape)
reshape_op.inp_tensors = opr.inp_tensors
for t in reshape_op.inp_tensors:
idx = t.user_opr.index(opr)
t.user_opr[idx] = reshape_op
reshape_op.out_tensors = opr.out_tensors
for t in reshape_op.out_tensors:
t.owner_opr = reshape_op
net.replace_op(opr, reshape_op)
@_register_tranformation_rule(TransformerRule.REMOVE_RESHAPE_REALTED_OP)
def _remove_reshape_tensors(net: IRGraph):
for opr in net.all_oprs:
if isinstance(opr, ReshapeOpr) and len(opr.inp_tensors) > 1:
opr.inp_tensors = opr.inp_tensors[:1]
@_register_tranformation_rule(TransformerRule.REMOVE_DROPOUT)
def _remove_dropout(net: IRGraph):
for opr in net.all_oprs:
for idx, inp in enumerate(opr.inp_tensors):
owner_opr = inp.owner_opr
if isinstance(owner_opr, DropoutOpr) and owner_opr.drop_prob == 0:
opr.inp_tensors[idx] = owner_opr.inp_tensors[0]
for idx, out in enumerate(net.graph_outputs):
owner_opr = out.owner_opr
if isinstance(owner_opr, DropoutOpr) and owner_opr.drop_prob == 0:
net.graph_outputs[idx] = owner_opr.inp_tensors[0]
@_register_tranformation_rule(TransformerRule.REMOVE_RELU)
def _remove_relu(net: IRGraph):
for opr in net.all_oprs:
for idx, inp in enumerate(opr.inp_tensors):
owner_opr = inp.owner_opr
if isinstance(owner_opr, ReluOpr):
opr.inp_tensors[idx] = owner_opr.inp_tensors[0]
for idx, out in enumerate(net.graph_outputs):
owner_opr = out.owner_opr
if isinstance(owner_opr, ReluOpr):
net.graph_outputs[idx] = owner_opr.inp_tensors[0]
visited_tensor = set() # type: set
def _dfs_recursive(op_set, tensor):
owner_opr = tensor.owner_opr
op_set.add(owner_opr)
if tensor in visited_tensor:
return
visited_tensor.add(tensor)
if isinstance(owner_opr, IRGraph) or owner_opr is None:
return
for tt in owner_opr.inp_tensors:
_dfs_recursive(op_set, tt)
@_register_tranformation_rule(TransformerRule.REMOVE_UNRELATED_IROP)
def _remove_unrelated_op(net: IRGraph):
match_sets = set() # type: Set[OpBase]
for out_tensor in net.graph_outputs:
_dfs_recursive(match_sets, out_tensor)
remove_idx = []
for opr in net.all_oprs:
if opr not in match_sets:
index = net._opr_ids.index(id(opr))
remove_idx.append(index)
for i in remove_idx[::-1]:
net.delete_ops(i)
@_register_tranformation_rule(TransformerRule.ADD_FAKE_HSIGMOID_OUT)
def _add_fake_hsigmoid_tensor(net: IRGraph):
for opr in net.all_oprs:
if isinstance(opr, (HardSwishOpr, HardSigmoidOpr)):
add_3_out_tensor = IRTensor(
opr.out_tensors[0].name + "_fake_add3_out",
opr.inp_tensors[0].shape,
opr.inp_tensors[0].dtype,
q_type=opr.inp_tensors[0].q_dtype,
scale=opr.inp_tensors[0].scale,
zero_point=opr.inp_tensors[0].zero_point,
)
opr.add_inp_tensors(add_3_out_tensor)
relu6_out_tensor = IRTensor(
opr.out_tensors[0].name + "_relu6_out",
opr.inp_tensors[0].shape,
opr.inp_tensors[0].dtype,
q_type=opr.inp_tensors[0].q_dtype,
scale=opr.inp_tensors[0].scale,
zero_point=opr.inp_tensors[0].zero_point,
)
opr.add_inp_tensors(relu6_out_tensor)
if isinstance(opr, HardSwishOpr):
div6_out_tensor = IRTensor(
opr.out_tensors[0].name + "_div_out",
opr.inp_tensors[0].shape,
opr.inp_tensors[0].dtype,
q_type=opr.inp_tensors[0].q_dtype,
scale=opr.inp_tensors[0].scale,
zero_point=opr.inp_tensors[0].zero_point,
)
opr.add_inp_tensors(div6_out_tensor)
def fold_conv_bn(
conv_weight, conv_bias, conv_groups, gamma, beta, bn_mean, bn_var, eps
):
conv_bias = conv_bias.reshape(1, -1, 1, 1)
gamma = gamma.reshape(1, -1, 1, 1)
beta = beta.reshape(1, -1, 1, 1)
bn_mean = bn_mean.reshape(1, -1, 1, 1)
bn_var = bn_var.reshape(1, -1, 1, 1)
# bn_istd = 1 / bn_std
bn_istd = 1.0 / sqrt(bn_var + eps) # type: ignore[attr-defined]
# w_fold = gamma / bn_std * W
scale_factor = gamma * bn_istd
if conv_groups == 1:
w_fold = conv_weight * scale_factor.reshape(-1, 1, 1, 1)
else:
w_fold = conv_weight * scale_factor.reshape(conv_groups, -1, 1, 1, 1)
# b_fold = gamma * (b - bn_mean) / bn_std + beta
b_fold = beta + gamma * (conv_bias - bn_mean) * bn_istd
return w_fold, b_fold
@_register_tranformation_rule(TransformerRule.FUSE_CONV_BN)
def _fuse_conv_bn(net: IRGraph):
for opr in net.all_oprs:
if (
opr.name == "BatchNormalization"
and len(net.find_inp_oprs(opr)) == 1
and net.find_inp_oprs(opr)[0].name == "Conv2d"
and len(net.find_out_oprs(net.find_inp_oprs(opr)[0])) == 1
and net.find_out_oprs(net.find_inp_oprs(opr)[0])[0] == opr
):
gamma = (
Tensor(opr.weight) # type: ignore[attr-defined]
if opr.weight is not None # type: ignore[attr-defined]
else Tensor(opr.inp_tensors[1].np_data)
)
beta = (
Tensor(opr.bias) # type: ignore[attr-defined]
if opr.bias is not None # type: ignore[attr-defined]
else | Tensor(opr.inp_tensors[2].np_data) | megengine.Tensor |
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
from collections import OrderedDict
from enum import Enum
from functools import cmp_to_key
from typing import Set # pylint: disable=unused-import
from typing import Callable, Dict, Sequence
import numpy as np
from megengine import Tensor
from megengine.functional import sqrt
from ..converter_ir.ir_graph import IRGraph
from .ir_op import (
AddOpr,
Conv2dOpr,
ConvRelu2dOpr,
Deconv2dOpr,
DropoutOpr,
ExpOpr,
FlattenOpr,
FuseMulAdd3Opr,
GetSubTensorOpr,
HardSigmoidOpr,
HardSwishOpr,
IdentityOpr,
LeakyReluOpr,
MulOpr,
OpBase,
PadOpr,
ReduceOpr,
ReluOpr,
ReshapeOpr,
ResizeOpr,
SoftmaxOpr,
SqueezeOpr,
SubOpr,
TanHOpr,
TransposeOpr,
TrueDivOpr,
_PoolOpr,
)
from .ir_tensor import AxisOrder, IRTensor
class TransformerRule(Enum):
# general rules
NOPE = 1
# for TFLite
REDUCE_AXIS_AS_INPUT = 100
REMOVE_RESHAPE_INPUT = 101
# FUSE_FOR_RELU6 pass should happen before FUSE_ACTIVATION
FUSE_FOR_RELU6 = 102 ##
EXPAND_CONVRELU = 102.1
CONV_ADD_ZERO_BIAS = 103
FUSE_FOR_CONV_BIAS = 103.1
FUSE_CONV_BN = 104
DECONV_ADD_ZERO_BIAS = 105
# DEPTHWISE_CONV_RESHAPE_WEIGHT requires RESHAPE_BIAS_TO_1DIM
DEPTHWISE_CONV_RESHAPE_WEIGHT = 106
FUSE_SOFTMAX = 107
# RESHAPE_BIAS_TO_1DIM should happen before DECONV_SHAPE_AS_INPUT
RESHAPE_BIAS_TO_1DIM = 108
DECONV_SHAPE_AS_INPUT = 109
FUSE_ASTYPE = 110 ##
PADDING_FOR_CONV_AND_POOLING = 111
TRANSPOSE_PATTERN_AS_INPUT = 112
# FUSE_FOR_LEAKY_RELU should happen before EXPAND_MUL_ADD3
FUSE_FOR_LEAKY_RELU = 113
EXPAND_MUL_ADD3 = 114
EXPAND_ADD_SIGMOID = 115 ##
FUSE_FOR_DECONV_BIAS = 117
FUSE_FOR_FULLY_CONNECTED = 118 ##
# for TFLite Converter
SLICE_PARAMS_AS_INPUTS_AND_MAKE_SQUEEZE = 119
RESIZE_PARAMS_AS_INPUT = 120
REPLACE_FLATTEN_TO_RESHAPE = 120.1
# remove reshape
REMOVE_RESHAPE_REALTED_OP = 121
REMOVE_DROPOUT = 122
FUSE_ACTIVATION = 123
REMOVE_IDENTITY = 124
REMOVE_RELU = 125
REMOVE_UNRELATED_IROP = 130
ADD_FAKE_HSIGMOID_OUT = 131
RENAME_CAFFE_LAYER_TENSOR = 132
def cmp_rules(a, b):
if a.value < b.value:
return -1
if a.value > b.value:
return 1
return 0
class IRTransform:
def __init__(self, transformer_options):
if not isinstance(transformer_options, Sequence):
transformer_options = [
transformer_options,
]
# bias of depthwise_conv must be 1 dim
if TransformerRule.DEPTHWISE_CONV_RESHAPE_WEIGHT in transformer_options:
if TransformerRule.RESHAPE_BIAS_TO_1DIM not in transformer_options:
transformer_options.append(TransformerRule.RESHAPE_BIAS_TO_1DIM)
self.trans_options = sorted(transformer_options, key=cmp_to_key(cmp_rules))
def transform(self, ir_graph):
for option in self.trans_options:
TRANSFORMMAP[option](ir_graph)
return ir_graph
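# A minimal usage sketch (variable names are illustrative only):
#     transform = IRTransform([TransformerRule.FUSE_CONV_BN, TransformerRule.FUSE_ACTIVATION])
#     ir_graph = transform.transform(ir_graph)
# Passes always run in ascending order of their TransformerRule value.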
TRANSFORMMAP: Dict[Enum, Callable] = {}
def _register_tranformation_rule(transformer_option):
def callback(impl):
TRANSFORMMAP[transformer_option] = impl
return callback
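# cal_pad_mode: when the output's spatial dims equal the input's, the opr is
# treated as TFLite "SAME" padding, otherwise "VALID"; in the latter case an
# explicit Pad opr may be inserted by PADDING_FOR_CONV_AND_POOLING below.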
def cal_pad_mode(tm_opr):
out_shape = tm_opr.out_tensors[0].shape
inp_shape = tm_opr.inp_tensors[0].shape
if out_shape[2:] == inp_shape[2:]:
return "SAME"
else:
return "VALID"
@_register_tranformation_rule(TransformerRule.REMOVE_RESHAPE_INPUT)
def _remove_reshape_input(net):
for op in net.all_oprs:
if not isinstance(op, ReshapeOpr):
continue
if len(op.inp_tensors) == 2:
del op.inp_tensors[1]
@_register_tranformation_rule(TransformerRule.TRANSPOSE_PATTERN_AS_INPUT)
def _transpose_pattern_as_input(net):
for op in net.all_oprs:
if not isinstance(op, TransposeOpr):
continue
perm_tensor = IRTensor(
name=op.inp_tensors[0].name + "_perm",
shape=np.array(op.pattern).shape,
dtype=np.int32,
np_data=np.array(op.pattern, dtype=np.int32),
owner_opr=op,
q_type=np.int32,
axis=None,
)
op.add_inp_tensors(perm_tensor)
@_register_tranformation_rule(TransformerRule.REDUCE_AXIS_AS_INPUT)
def _reduce_axis_as_input(net):
for op in net.all_oprs:
if not isinstance(op, ReduceOpr):
continue
axis_tensor = IRTensor(
name=op.inp_tensors[0].name + "_axis",
shape=[1],
dtype=np.int32,
np_data=np.array(op.axis, dtype=np.int32),
owner_opr=op,
q_type=np.int32,
axis=None,
)
op.add_inp_tensors(axis_tensor)
@_register_tranformation_rule(TransformerRule.PADDING_FOR_CONV_AND_POOLING)
def _make_padding(net: IRGraph):
def have_padding(opr):
if isinstance(opr, Conv2dOpr):
if cal_pad_mode(opr) == "SAME":
return False
if hasattr(opr, "padding") and (opr.padding[0] > 0 or opr.padding[1] > 0):
return True
return False
insert_intended = OrderedDict() # type: OrderedDict
for op in net.all_oprs:
if not isinstance(op, (Conv2dOpr, _PoolOpr)):
continue
if have_padding(op):
assert op.inp_tensors[0].ndim == 4, "ERROR: unsupported padding mode"
np_data = np.array(
[
0,
0,
op.padding[0],
op.padding[0],
op.padding[1],
op.padding[1],
0,
0,
],
dtype=np.int32,
)
new_tensor_id = max(net._tensor_ids) + 1
pad_in_tensor = IRTensor(
name=op.inp_tensors[0].name + "_paddings",
shape=[4, 2],
dtype=np.int32,
owner_opr=None,
np_data=np_data,
q_type=np.int32,
axis=None,
)
net.add_tensor(new_tensor_id, pad_in_tensor)
shape = list(op.inp_tensors[0].shape)
new_tensor_id = max(net._tensor_ids) + 1
pad_out_tensor = IRTensor(
name=op.inp_tensors[0].name + "_pad_out",
shape=[
shape[0],
shape[1],
shape[2] + op.padding[0] * 2,
shape[3] + op.padding[1] * 2,
],
dtype=op.inp_tensors[0].dtype,
)
if (
hasattr(op.inp_tensors[0], "scale")
and op.inp_tensors[0].scale is not None
):
pad_out_tensor.scale = op.inp_tensors[0].scale
pad_out_tensor.q_dtype = op.inp_tensors[0].q_dtype
if hasattr(op.inp_tensors[0], "zero_point"):
pad_out_tensor.zero_point = op.inp_tensors[0].zero_point
net.add_tensor(new_tensor_id, pad_out_tensor)
pad_opr = PadOpr()
pad_opr.inp_tensors = [op.inp_tensors[0], pad_in_tensor]
index = op.inp_tensors[0].user_opr.index(op)
op.inp_tensors[0].user_opr[index] = pad_opr
pad_opr.out_tensors = [pad_out_tensor]
pad_out_tensor.owner_opr = pad_opr
op.inp_tensors = [pad_out_tensor] + op.inp_tensors[1:]
pad_out_tensor.user_opr.append(op)
index = net._opr_ids.index(id(op))
insert_intended[index] = (id(pad_opr), pad_opr)
for index, generated_pair in list(insert_intended.items())[::-1]:
net._opr_ids.insert(index, generated_pair[0])
net.all_oprs.insert(index, generated_pair[1])
@_register_tranformation_rule(TransformerRule.DECONV_SHAPE_AS_INPUT)
def _deconv_shape_as_input(net: IRGraph):
for op in net.all_oprs:
if not isinstance(op, Deconv2dOpr):
continue
result_shape = op.out_tensors[0].shape
np_data = np.array(
[result_shape[0], result_shape[2], result_shape[3], result_shape[1],],
dtype=np.int32,
)
new_tensor_id = max(net._tensor_ids) + 1
shape_symvar = IRTensor(
name=op.inp_tensors[0].name + "_deconv_out_shape",
shape=[4],
dtype=np.int32,
owner_opr=op,
np_data=np_data,
q_type=np.int32,
axis=None,
)
shape_tensor = net.get_tensor(new_tensor_id, shape_symvar)
if len(op.inp_tensors) == 2:
op.inp_tensors = [
shape_tensor,
op.inp_tensors[1],
op.inp_tensors[0],
]
else:
op.inp_tensors = [
shape_tensor,
op.inp_tensors[1],
op.inp_tensors[0],
op.inp_tensors[2],
]
@_register_tranformation_rule(TransformerRule.RESIZE_PARAMS_AS_INPUT)
def _resize_params_as_input(net):
for op in net.all_oprs:
if not isinstance(op, ResizeOpr):
continue
if len(op.inp_tensors) == 2:
continue
out_size_tensor = IRTensor(
name=op.inp_tensors[0].name + "_out_size",
shape=(2,),
dtype=np.int32,
np_data=np.array(op.out_size, dtype=np.int32),
q_type=np.int32,
axis=None,
)
op.add_inp_tensors(out_size_tensor)
@_register_tranformation_rule(TransformerRule.CONV_ADD_ZERO_BIAS)
def _add_bias_for_conv(net: IRGraph):
for op in net.all_oprs:
if not isinstance(op, Conv2dOpr):
continue
if len(op.inp_tensors) == 3:
continue
weight_shape = op.inp_tensors[1].shape
bias_shape = (
weight_shape[0]
if len(weight_shape) == 4
else weight_shape[0] * weight_shape[1]
)
bias_shape = (1, bias_shape, 1, 1)
bias = np.zeros(bias_shape, dtype=np.float32)
bias_tensor = IRTensor(
name=op.inp_tensors[0].name + "_bias",
shape=bias_shape,
dtype=np.float32,
np_data=bias,
axis=AxisOrder.NCHW,
)
if op.inp_tensors[0].scale and op.inp_tensors[1].scale:
bias_tensor.set_qparams(
op.inp_tensors[0].scale * op.inp_tensors[1].scale, 0
)
bias_tensor.q_dtype = "int32"
op.inp_tensors.append(bias_tensor)
@_register_tranformation_rule(TransformerRule.DECONV_ADD_ZERO_BIAS)
def _add_bias_for_deconv(net: IRGraph):
for op in net.all_oprs:
if not isinstance(op, Deconv2dOpr):
continue
if len(op.inp_tensors) == 3:
continue
weight_shape = op.inp_tensors[1].shape
bias_shape = (
weight_shape[1]
if len(weight_shape) == 4
else weight_shape[0] * weight_shape[2]
)
bias_shape = (1, bias_shape, 1, 1)
bias = np.zeros(bias_shape, dtype=np.float32)
bias_tensor = IRTensor(
name=op.inp_tensors[0].name + "_bias",
shape=bias_shape,
dtype=np.float32,
np_data=bias,
axis=AxisOrder.NCHW,
)
if op.inp_tensors[0].scale and op.inp_tensors[1].scale:
bias_tensor.set_qparams(
op.inp_tensors[0].scale * op.inp_tensors[1].scale, 0
)
bias_tensor.q_dtype = "int32"
op.inp_tensors.append(bias_tensor)
@_register_tranformation_rule(TransformerRule.RESHAPE_BIAS_TO_1DIM)
def _reshape_bias_to_1dim(net: IRGraph):
for op in net.all_oprs:
if not isinstance(op, (Deconv2dOpr, Conv2dOpr)):
continue
if len(op.inp_tensors) == 2:
continue
bias = op.inp_tensors[2]
if bias.ndim == 4:
bias.shape = (bias.shape[1],)
bias.np_data = bias.np_data.reshape(-1)
@_register_tranformation_rule(TransformerRule.DEPTHWISE_CONV_RESHAPE_WEIGHT)
def _depthwise_conv_reshape_weight(net: IRGraph):
# general group conv is not supported for TFLite
for op in net.all_oprs:
if not isinstance(op, Conv2dOpr):
continue
if op.groups == 1:
continue
weight = op.inp_tensors[1] # G, oc/G, ic/G, kh, kw
ic, cm = weight.shape[1] * op.groups, weight.shape[2]
h, w = weight.shape[3:5]
weight.shape = (ic, cm, h, w) # oc, ic/G, kh, kw
weight.np_data = weight.np_data.reshape(ic, cm, h, w)
@_register_tranformation_rule(TransformerRule.FUSE_ACTIVATION)
def _fuse_activation(net):
delete_intended = []
for op_id, op in zip(net._opr_ids, net.all_oprs):
if isinstance(op, (ReluOpr, TanHOpr)):
prev_ops = net.find_inp_oprs(op)
if len(prev_ops) == 0:
continue
prev_op = prev_ops[0]
if not isinstance(prev_op, OpBase):
continue
if prev_op.activation != "IDENTITY" or prev_op.name == "Deconv2d":
continue
activation = op.name.upper()
prev_op.activation = activation
prev_op.out_tensors = op.out_tensors
for t in prev_op.out_tensors:
t.owner_opr = prev_op
delete_intended.append(net._opr_ids.index(op_id))
for delete_idx in delete_intended[::-1]:
net.delete_ops(delete_idx)
@_register_tranformation_rule(TransformerRule.SLICE_PARAMS_AS_INPUTS_AND_MAKE_SQUEEZE)
def _make_slice_as_inputs(net: IRGraph):
for op in net.all_oprs:
if not isinstance(op, GetSubTensorOpr):
continue
ndim = op.inp_tensors[0].ndim
def make_input(axis, param, init_value):
# make inputs: begin, end and step.
ret = [init_value] * ndim # pylint:disable=cell-var-from-loop
for k, v in zip(axis, param):
ret[k] = v
ret = IRTensor(
name=op.name + "_fake_input", # pylint:disable=cell-var-from-loop
shape=[len(ret)],
dtype=np.int32,
np_data=np.array(ret, dtype=np.int32),
owner_opr=op, # pylint:disable=cell-var-from-loop
q_type=np.int32,
)
return ret
begins_tensor = make_input(op.axis, op.begin_params, 0)
ends_tensor = make_input(op.axis, op.end_params, np.iinfo(np.int32).max)
steps_tensor = make_input(op.axis, op.step_params, 1)
op.inp_tensors = [op.inp_tensors[0], begins_tensor, ends_tensor, steps_tensor]
# TFLite slice does not support squeeze axis, so insert a squeeze opr here.
# infer actual output shape of tflite slice
desired_out_shape = op.out_tensors[0].shape
actual_out_shape = [1] * ndim
idx = 0
for i in range(ndim):
if i in op.squeeze_axis:
continue
actual_out_shape[i] = desired_out_shape[idx]
idx += 1
slice_out_tensor = IRTensor(
name=op.name + "fake_output",
shape=actual_out_shape,
dtype=op.out_tensors[0].dtype,
q_type=op.out_tensors[0].q_dtype,
owner_opr=op,
)
old_out = op.out_tensors
op.out_tensors = [slice_out_tensor]
squeeze = SqueezeOpr(op.squeeze_axis)
squeeze.inp_tensors = [slice_out_tensor]
squeeze.out_tensors = old_out
idx = net._opr_ids.index(id(op)) + 1
net.add_op(squeeze, idx)
# Caffe transformer rules
class PatternNode:
def __init__(self, type, is_output=False, const_value=None):
self.op = None
self.type = type
self.inp_oprs = []
self.inp_const = []
self.inp_tensors = []
self.is_output = is_output
self.const_value = const_value
def check_const_value(self, op):
inp_tensors = [v.np_data for v in op.inp_tensors]
for const in self.const_value:
idx = const[0]
if idx == -1:
find = False
for index, v in enumerate(inp_tensors):
if np.array_equal(const[1], v):
find = True
del inp_tensors[index]
break
if not find:
return False
elif not np.array_equal(const[1], inp_tensors[idx]):
return False
return True
get_type = lambda op: type(op).__name__
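# match() walks the PatternNode graph and the IR graph in lock-step BFS: a node
# of type "*" matches any opr, const_value entries must equal the opr's constant
# inputs, and every consumer of an internal (non-output) matched opr must itself
# be part of the match, otherwise the whole pattern is rejected.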
def match(node, opr):
node_queue = [node]
opr_queue = [opr]
matched_opr = set()
matched_node = set()
while len(node_queue) != 0:
cur_node = node_queue.pop(0)
cur_opr = opr_queue.pop(0)
if (cur_node.type != get_type(cur_opr) and cur_node.type != "*") or cur_opr.skip:
return False
if cur_node.op == None:
cur_node.op = cur_opr
if cur_node.const_value != None:
if not cur_node.check_const_value(cur_opr):
return False
elif cur_node.op != cur_opr:
return False
matched_opr.add(cur_opr)
matched_node.add(cur_node)
for i, var in enumerate(cur_opr.inp_tensors):
if var.np_data is not None:
cur_node.inp_const.append([i, var.np_data])
else:
cur_node.inp_tensors.append([i, var])
if len(cur_node.inp_oprs) == 0:
continue
if len(cur_node.inp_oprs) != len(cur_opr.inp_oprs):
return False
for i, j in zip(cur_node.inp_oprs, cur_opr.inp_oprs):
node_queue.append(i)
opr_queue.append(j)
for n in matched_node:
if n.is_output:
continue
for op in n.op.out_oprs:
if op not in matched_opr:
return False
return True
def get_softmax_axis(ndim: int) -> int:
if ndim in (0, 1, 3):
return 0
return 1
@_register_tranformation_rule(TransformerRule.FUSE_SOFTMAX)
def _fuse_softmax(net: IRGraph):
matches = OrderedDict() # type: OrderedDict
for op in net.all_oprs:
if not isinstance(op, TrueDivOpr):
continue
try:
prev_op = net.find_inp_oprs(op)[1]
cur_index = net._opr_ids.index(id(op))
if (
not isinstance(prev_op, ReduceOpr)
or prev_op.mode != "SUM"
or prev_op.axis != get_softmax_axis(prev_op.inp_tensors[0].ndim)
or net._opr_ids.index(id(prev_op)) != cur_index - 1
):
continue
prev_op = net.find_inp_oprs(op)[0]
if (
not isinstance(prev_op, ExpOpr)
or net._opr_ids.index(id(prev_op)) != cur_index - 2
):
continue
prev_op = net.find_inp_oprs(prev_op)[0]
if (
not isinstance(prev_op, SubOpr)
or net._opr_ids.index(id(prev_op)) != cur_index - 3
):
continue
prev_op = net.find_inp_oprs(prev_op)[1]
if (
not isinstance(prev_op, ReduceOpr)
or prev_op.mode != "MAX"
or prev_op.axis != get_softmax_axis(prev_op.inp_tensors[0].ndim)
or net._opr_ids.index(id(prev_op)) != cur_index - 4
):
continue
except IndexError: # doesn't match
continue
softmax_opr = SoftmaxOpr(axis=get_softmax_axis(prev_op.inp_tensors[0].ndim))
softmax_opr.beta = 1
softmax_opr.inp_tensors = prev_op.inp_tensors[:1]
for i in softmax_opr.inp_tensors:
i.user_opr.append(softmax_opr)
softmax_opr.out_tensors = op.out_tensors
softmax_out_oprs = net.find_out_oprs(op)
matches[id(prev_op)] = (id(prev_op), softmax_opr, softmax_out_oprs)
for original_id, generated_pair in list(matches.items())[::-1]:
index = net._opr_ids.index(original_id)
for out_op in generated_pair[2]:
generated_pair[1].out_tensors[0].user_opr.append(out_op)
del net._opr_ids[index : index + 5]
del net.all_oprs[index : index + 5]
net._opr_ids.insert(index, generated_pair[0])
net.all_oprs.insert(index, generated_pair[1])
@_register_tranformation_rule(TransformerRule.FUSE_FOR_LEAKY_RELU)
def _fuse_leaky_relu(net: IRGraph):
"""
Elemwise(ADD) + Elemwise(MUL) + Elemwise(MAX) + Elemwise(MIN) -> LeakyRelu
"""
for opr in net.all_oprs:
if (
opr.name == "Add"
and len(net.find_inp_oprs(opr)) == 2
and net.find_inp_oprs(opr)[0].name == "Max"
and net.find_inp_oprs(opr)[1].name == "Mul"
):
max_op = net.find_inp_oprs(opr)[0]
mul_op = net.find_inp_oprs(opr)[1]
if not mul_op.inp_tensors[1].shape == (1,):
continue
if not max_op.inp_tensors[1].shape == (1,):
continue
if (
len(net.find_inp_oprs(mul_op)) != 1
or net.find_inp_oprs(mul_op)[0].name != "Min"
or net.find_inp_oprs(mul_op)[0].inp_tensors[1].shape != (1,)
):
continue
min_op = net.find_inp_oprs(mul_op)[0]
if not min_op.inp_tensors[1].shape == (1,):
continue
if max_op.inp_tensors[0] != min_op.inp_tensors[0]:
continue
leaky_relu = LeakyReluOpr(
negative_slope=float(mul_op.inp_tensors[1].np_data)
)
leaky_relu.inp_tensors = [max_op.inp_tensors[0]]
max_op.inp_tensors[0].user_opr.remove(max_op)
max_op.inp_tensors[0].user_opr.remove(min_op)
max_op.inp_tensors[0].user_opr.append(leaky_relu)
leaky_relu.out_tensors = opr.out_tensors
opr.out_tensors[0].owner_opr = leaky_relu
index = net.all_oprs.index(max_op)
del net.all_oprs[index : index + 4]
del net._opr_ids[index : index + 4]
net.add_op(leaky_relu, index)
@_register_tranformation_rule(TransformerRule.FUSE_FOR_CONV_BIAS)
def _fuse_for_conv_bias(net: IRGraph):
"""
ConvolutionForward + Elemwise(ADD) -> ConvForwardBias
"""
for opr in net.all_oprs:
if (
opr.name == "Conv2d"
and len(net.find_out_oprs(opr)) == 1
and net.find_out_oprs(opr)[0].name == "Add"
):
bias_op = net.find_out_oprs(opr)[0]
if not (
(
bias_op.inp_tensors[1].np_data is not None
and len(bias_op.inp_tensors[1].np_data.reshape(-1))
== opr.inp_tensors[1].shape[0]
)
or (
(
bias_op.inp_tensors[0].np_data is not None
and len(bias_op.inp_tensors[0].np_data.reshape(-1))
== opr.inp_tensors[1].shape[0]
)
)
):
continue
bias_idx = 0 if bias_op.inp_tensors[0].np_data is not None else 1
if len(opr.inp_tensors) == 2:
opr.inp_tensors.append(bias_op.inp_tensors[bias_idx])
else:
bias_shape = opr.inp_tensors[2].np_data.shape
add_tensor = bias_op.inp_tensors[bias_idx].np_data
if add_tensor.shape != bias_shape:
add_tensor = add_tensor.reshape(bias_shape)
opr.inp_tensors[2].np_data += add_tensor
if bias_op in opr.out_tensors[0].user_opr:
opr.out_tensors[0].user_opr.remove(bias_op)
bias_out_op = net.find_out_oprs(bias_op)
if len(bias_out_op) > 0:
for op in bias_out_op:
op.inp_tensors[0] = opr.out_tensors[0]
opr.out_tensors[0].user_opr.append(op)
else:
# last op of the graph
assert bias_op.out_tensors[0] in net.graph_outputs
index = net.graph_outputs.index(bias_op.out_tensors[0])
net.graph_outputs[index] = opr.out_tensors[0]
opr.activation = bias_op.activation
index = net.all_oprs.index(bias_op)
del net.all_oprs[index]
del net._opr_ids[index]
@_register_tranformation_rule(TransformerRule.FUSE_FOR_DECONV_BIAS)
def _fuse_for_deconv_bias(net: IRGraph):
for opr in net.all_oprs:
if (
opr.name == "Deconv2d"
and len(net.find_out_oprs(opr)) == 1
and net.find_out_oprs(opr)[0].name == "Add"
):
bias_op = net.find_out_oprs(opr)[0]
if not (
(
bias_op.inp_tensors[1].np_data is not None
and len(bias_op.inp_tensors[1].np_data.reshape(-1))
== opr.inp_tensors[1].shape[1]
)
or (
(
bias_op.inp_tensors[0].np_data is not None
and len(bias_op.inp_tensors[0].np_data.reshape(-1))
== opr.inp_tensors[1].shape[1]
)
)
):
continue
bias_idx = 0 if bias_op.inp_tensors[0].np_data is not None else 1
if len(opr.inp_tensors) == 3: # shape, weight, input, bias
opr.inp_tensors.append(bias_op.inp_tensors[bias_idx])
else:
bias_shape = opr.inp_tensors[3].np_data.shape
add_tensor = bias_op.inp_tensors[bias_idx].np_data
if add_tensor.shape != bias_shape:
add_tensor = add_tensor.reshape(bias_shape)
opr.inp_tensors[3].np_data += add_tensor
if bias_op in opr.out_tensors[0].user_opr:
opr.out_tensors[0].user_opr.remove(bias_op)
bias_out_op = net.find_out_oprs(bias_op)
if len(bias_out_op) > 0:
for op in bias_out_op:
op.inp_tensors[0] = opr.out_tensors[0]
opr.out_tensors[0].user_opr.append(op)
else:
# last op of the graph
assert bias_op.out_tensors[0] in net.graph_outputs
index = net.graph_outputs.index(bias_op.out_tensors[0])
net.graph_outputs[index] = opr.out_tensors[0]
opr.activation = bias_op.activation
index = net.all_oprs.index(bias_op)
del net.all_oprs[index]
del net._opr_ids[index]
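# EXPAND_MUL_ADD3 splits a fused a * b + c opr into a standalone Mul followed by
# an Add, wiring a fresh intermediate tensor between them so backends without a
# fused multiply-add elemwise can consume the graph.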
@_register_tranformation_rule(TransformerRule.EXPAND_MUL_ADD3)
def _expand_mul_add3(net: IRGraph):
for op in net.all_oprs:
if not isinstance(op, FuseMulAdd3Opr):
continue
last_op = net.find_inp_oprs(op)
assert len(last_op) == 1
mul_out_tensor = IRTensor(
name=op.inp_tensors[0].name + "_mul_out",
shape=op.inp_tensors[0].shape,
dtype=op.inp_tensors[0].dtype,
)
new_tensor_id = max(net._tensor_ids) + 1
net.add_tensor(new_tensor_id, mul_out_tensor)
mul_op = MulOpr()
mul_out_tensor.owner_opr = mul_op
mul_op.inp_tensors = op.inp_tensors[:2]
for o in mul_op.inp_tensors:
index = o.user_opr.index(op)
o.user_opr[index] = mul_op
mul_op.out_tensors = [mul_out_tensor]
add_op = AddOpr()
add_op.inp_tensors = [mul_out_tensor, op.inp_tensors[2]]
mul_out_tensor.user_opr.append(add_op)
add_op.out_tensors = op.out_tensors
index = net._opr_ids.index(id(op))
net.delete_ops(index)
net.add_op(mul_op, index)
net.add_op(add_op, index + 1)
@_register_tranformation_rule(TransformerRule.REPLACE_FLATTEN_TO_RESHAPE)
def _replace_flatten_to_reshape(net: IRGraph):
for opr in net.all_oprs:
if isinstance(opr, FlattenOpr):
out_shape = tuple(list(opr.inp_tensors[0].shape[: opr.start_axis]) + [-1])
reshape_op = ReshapeOpr(out_shape=out_shape)
reshape_op.inp_tensors = opr.inp_tensors
for t in reshape_op.inp_tensors:
idx = t.user_opr.index(opr)
t.user_opr[idx] = reshape_op
reshape_op.out_tensors = opr.out_tensors
for t in reshape_op.out_tensors:
t.owner_opr = reshape_op
net.replace_op(opr, reshape_op)
@_register_tranformation_rule(TransformerRule.REMOVE_RESHAPE_REALTED_OP)
def _remove_reshape_tensors(net: IRGraph):
for opr in net.all_oprs:
if isinstance(opr, ReshapeOpr) and len(opr.inp_tensors) > 1:
opr.inp_tensors = opr.inp_tensors[:1]
@_register_tranformation_rule(TransformerRule.REMOVE_DROPOUT)
def _remove_dropout(net: IRGraph):
for opr in net.all_oprs:
for idx, inp in enumerate(opr.inp_tensors):
owner_opr = inp.owner_opr
if isinstance(owner_opr, DropoutOpr) and owner_opr.drop_prob == 0:
opr.inp_tensors[idx] = owner_opr.inp_tensors[0]
for idx, out in enumerate(net.graph_outputs):
owner_opr = out.owner_opr
if isinstance(owner_opr, DropoutOpr) and owner_opr.drop_prob == 0:
net.graph_outputs[idx] = owner_opr.inp_tensors[0]
@_register_tranformation_rule(TransformerRule.REMOVE_RELU)
def _remove_relu(net: IRGraph):
for opr in net.all_oprs:
for idx, inp in enumerate(opr.inp_tensors):
owner_opr = inp.owner_opr
if isinstance(owner_opr, ReluOpr):
opr.inp_tensors[idx] = owner_opr.inp_tensors[0]
for idx, out in enumerate(net.graph_outputs):
owner_opr = out.owner_opr
if isinstance(owner_opr, ReluOpr):
net.graph_outputs[idx] = owner_opr.inp_tensors[0]
visited_tensor = set() # type: set
def _dfs_recursive(op_set, tensor):
owner_opr = tensor.owner_opr
op_set.add(owner_opr)
if tensor in visited_tensor:
return
visited_tensor.add(tensor)
if isinstance(owner_opr, IRGraph) or owner_opr is None:
return
for tt in owner_opr.inp_tensors:
_dfs_recursive(op_set, tt)
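# REMOVE_UNRELATED_IROP: _dfs_recursive collects every opr reachable backwards
# from the graph outputs; any opr outside that set is dead code and is deleted.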
@_register_tranformation_rule(TransformerRule.REMOVE_UNRELATED_IROP)
def _remove_unrelated_op(net: IRGraph):
match_sets = set() # type: Set[OpBase]
for out_tensor in net.graph_outputs:
_dfs_recursive(match_sets, out_tensor)
remove_idx = []
for opr in net.all_oprs:
if opr not in match_sets:
index = net._opr_ids.index(id(opr))
remove_idx.append(index)
for i in remove_idx[::-1]:
net.delete_ops(i)
@_register_tranformation_rule(TransformerRule.ADD_FAKE_HSIGMOID_OUT)
def _add_fake_hsigmoid_tensor(net: IRGraph):
for opr in net.all_oprs:
if isinstance(opr, (HardSwishOpr, HardSigmoidOpr)):
add_3_out_tensor = IRTensor(
opr.out_tensors[0].name + "_fake_add3_out",
opr.inp_tensors[0].shape,
opr.inp_tensors[0].dtype,
q_type=opr.inp_tensors[0].q_dtype,
scale=opr.inp_tensors[0].scale,
zero_point=opr.inp_tensors[0].zero_point,
)
opr.add_inp_tensors(add_3_out_tensor)
relu6_out_tensor = IRTensor(
opr.out_tensors[0].name + "_relu6_out",
opr.inp_tensors[0].shape,
opr.inp_tensors[0].dtype,
q_type=opr.inp_tensors[0].q_dtype,
scale=opr.inp_tensors[0].scale,
zero_point=opr.inp_tensors[0].zero_point,
)
opr.add_inp_tensors(relu6_out_tensor)
if isinstance(opr, HardSwishOpr):
div6_out_tensor = IRTensor(
opr.out_tensors[0].name + "_div_out",
opr.inp_tensors[0].shape,
opr.inp_tensors[0].dtype,
q_type=opr.inp_tensors[0].q_dtype,
scale=opr.inp_tensors[0].scale,
zero_point=opr.inp_tensors[0].zero_point,
)
opr.add_inp_tensors(div6_out_tensor)
def fold_conv_bn(
conv_weight, conv_bias, conv_groups, gamma, beta, bn_mean, bn_var, eps
):
conv_bias = conv_bias.reshape(1, -1, 1, 1)
gamma = gamma.reshape(1, -1, 1, 1)
beta = beta.reshape(1, -1, 1, 1)
bn_mean = bn_mean.reshape(1, -1, 1, 1)
bn_var = bn_var.reshape(1, -1, 1, 1)
# bn_istd = 1 / bn_std
bn_istd = 1.0 / sqrt(bn_var + eps) # type: ignore[attr-defined]
# w_fold = gamma / bn_std * W
scale_factor = gamma * bn_istd
if conv_groups == 1:
w_fold = conv_weight * scale_factor.reshape(-1, 1, 1, 1)
else:
w_fold = conv_weight * scale_factor.reshape(conv_groups, -1, 1, 1, 1)
# b_fold = gamma * (b - bn_mean) / bn_std + beta
b_fold = beta + gamma * (conv_bias - bn_mean) * bn_istd
return w_fold, b_fold
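# FUSE_CONV_BN folds BatchNormalization statistics into the preceding Conv2d via
# fold_conv_bn, but only when the conv is the BN's sole input producer and the BN
# is the conv's sole consumer.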
@_register_tranformation_rule(TransformerRule.FUSE_CONV_BN)
def _fuse_conv_bn(net: IRGraph):
for opr in net.all_oprs:
if (
opr.name == "BatchNormalization"
and len(net.find_inp_oprs(opr)) == 1
and net.find_inp_oprs(opr)[0].name == "Conv2d"
and len(net.find_out_oprs(net.find_inp_oprs(opr)[0])) == 1
and net.find_out_oprs(net.find_inp_oprs(opr)[0])[0] == opr
):
gamma = (
Tensor(opr.weight) # type: ignore[attr-defined]
if opr.weight is not None # type: ignore[attr-defined]
else Tensor(opr.inp_tensors[1].np_data)
)
beta = (
Tensor(opr.bias) # type: ignore[attr-defined]
if opr.bias is not None # type: ignore[attr-defined]
else Tensor(opr.inp_tensors[2].np_data)
)
bn_mean = (
| Tensor(opr.mean) | megengine.Tensor |
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
from collections import OrderedDict
from enum import Enum
from functools import cmp_to_key
from typing import Set # pylint: disable=unused-import
from typing import Callable, Dict, Sequence
import numpy as np
from megengine import Tensor
from megengine.functional import sqrt
from ..converter_ir.ir_graph import IRGraph
from .ir_op import (
AddOpr,
Conv2dOpr,
ConvRelu2dOpr,
Deconv2dOpr,
DropoutOpr,
ExpOpr,
FlattenOpr,
FuseMulAdd3Opr,
GetSubTensorOpr,
HardSigmoidOpr,
HardSwishOpr,
IdentityOpr,
LeakyReluOpr,
MulOpr,
OpBase,
PadOpr,
ReduceOpr,
ReluOpr,
ReshapeOpr,
ResizeOpr,
SoftmaxOpr,
SqueezeOpr,
SubOpr,
TanHOpr,
TransposeOpr,
TrueDivOpr,
_PoolOpr,
)
from .ir_tensor import AxisOrder, IRTensor
class TransformerRule(Enum):
# general rules
NOPE = 1
# for TFLite
REDUCE_AXIS_AS_INPUT = 100
REMOVE_RESHAPE_INPUT = 101
# FUSE_FOR_RELU6 pass should happen before FUSE_ACTIVATION
FUSE_FOR_RELU6 = 102 ##
EXPAND_CONVRELU = 102.1
CONV_ADD_ZERO_BIAS = 103
FUSE_FOR_CONV_BIAS = 103.1
FUSE_CONV_BN = 104
DECONV_ADD_ZERO_BIAS = 105
# DEPTHWISE_CONV_RESHAPE_WEIGHT requires RESHAPE_BIAS_TO_1DIM
DEPTHWISE_CONV_RESHAPE_WEIGHT = 106
FUSE_SOFTMAX = 107
# RESHAPE_BIAS_TO_1DIM should happen before DECONV_SHAPE_AS_INPUT
RESHAPE_BIAS_TO_1DIM = 108
DECONV_SHAPE_AS_INPUT = 109
FUSE_ASTYPE = 110 ##
PADDING_FOR_CONV_AND_POOLING = 111
TRANSPOSE_PATTERN_AS_INPUT = 112
# FUSE_FOR_LEAKY_RELU should happen before EXPAND_MUL_ADD3
FUSE_FOR_LEAKY_RELU = 113
EXPAND_MUL_ADD3 = 114
EXPAND_ADD_SIGMOID = 115 ##
FUSE_FOR_DECONV_BIAS = 117
FUSE_FOR_FULLY_CONNECTED = 118 ##
# for TFLite Converter
SLICE_PARAMS_AS_INPUTS_AND_MAKE_SQUEEZE = 119
RESIZE_PARAMS_AS_INPUT = 120
REPLACE_FLATTEN_TO_RESHAPE = 120.1
# remove reshape
REMOVE_RESHAPE_REALTED_OP = 121
REMOVE_DROPOUT = 122
FUSE_ACTIVATION = 123
REMOVE_IDENTITY = 124
REMOVE_RELU = 125
REMOVE_UNRELATED_IROP = 130
ADD_FAKE_HSIGMOID_OUT = 131
RENAME_CAFFE_LAYER_TENSOR = 132
def cmp_rules(a, b):
if a.value < b.value:
return -1
if a.value > b.value:
return 1
return 0
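# Rules are ordered by their numeric Enum value: cmp_rules feeds cmp_to_key so
# that IRTransform always runs the requested passes in a fixed sequence.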
class IRTransform:
def __init__(self, transformer_options):
if not isinstance(transformer_options, Sequence):
transformer_options = [
transformer_options,
]
# bias of depthwise_conv must be 1 dim
if TransformerRule.DEPTHWISE_CONV_RESHAPE_WEIGHT in transformer_options:
if TransformerRule.RESHAPE_BIAS_TO_1DIM not in transformer_options:
transformer_options.append(TransformerRule.RESHAPE_BIAS_TO_1DIM)
self.trans_options = sorted(transformer_options, key=cmp_to_key(cmp_rules))
def transform(self, ir_graph):
for option in self.trans_options:
TRANSFORMMAP[option](ir_graph)
return ir_graph
TRANSFORMMAP: Dict[Enum, Callable] = {}
def _register_tranformation_rule(transformer_option):
def callback(impl):
TRANSFORMMAP[transformer_option] = impl
return callback
def cal_pad_mode(tm_opr):
out_shape = tm_opr.out_tensors[0].shape
inp_shape = tm_opr.inp_tensors[0].shape
if out_shape[2:] == inp_shape[2:]:
return "SAME"
else:
return "VALID"
@_register_tranformation_rule(TransformerRule.REMOVE_RESHAPE_INPUT)
def _remove_reshape_input(net):
for op in net.all_oprs:
if not isinstance(op, ReshapeOpr):
continue
if len(op.inp_tensors) == 2:
del op.inp_tensors[1]
@_register_tranformation_rule(TransformerRule.TRANSPOSE_PATTERN_AS_INPUT)
def _transpose_pattern_as_input(net):
for op in net.all_oprs:
if not isinstance(op, TransposeOpr):
continue
perm_tensor = IRTensor(
name=op.inp_tensors[0].name + "_perm",
shape=np.array(op.pattern).shape,
dtype=np.int32,
np_data=np.array(op.pattern, dtype=np.int32),
owner_opr=op,
q_type=np.int32,
axis=None,
)
op.add_inp_tensors(perm_tensor)
@_register_tranformation_rule(TransformerRule.REDUCE_AXIS_AS_INPUT)
def _reduce_axis_as_input(net):
for op in net.all_oprs:
if not isinstance(op, ReduceOpr):
continue
axis_tensor = IRTensor(
name=op.inp_tensors[0].name + "_axis",
shape=[1],
dtype=np.int32,
np_data=np.array(op.axis, dtype=np.int32),
owner_opr=op,
q_type=np.int32,
axis=None,
)
op.add_inp_tensors(axis_tensor)
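# PADDING_FOR_CONV_AND_POOLING materialises explicit Pad oprs in front of padded
# convs/poolings; the [4, 2] paddings tensor is laid out per dimension as
# [[0, 0], [pad_h, pad_h], [pad_w, pad_w], [0, 0]], which appears to follow the
# NHWC layout expected by TFLite.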
@_register_tranformation_rule(TransformerRule.PADDING_FOR_CONV_AND_POOLING)
def _make_padding(net: IRGraph):
def have_padding(opr):
if isinstance(opr, Conv2dOpr):
if cal_pad_mode(opr) == "SAME":
return False
if hasattr(opr, "padding") and (opr.padding[0] > 0 or opr.padding[1] > 0):
return True
return False
insert_intended = OrderedDict() # type: OrderedDict
for op in net.all_oprs:
if not isinstance(op, (Conv2dOpr, _PoolOpr)):
continue
if have_padding(op):
assert op.inp_tensors[0].ndim == 4, "ERROR: unsupported padding mode"
np_data = np.array(
[
0,
0,
op.padding[0],
op.padding[0],
op.padding[1],
op.padding[1],
0,
0,
],
dtype=np.int32,
)
new_tensor_id = max(net._tensor_ids) + 1
pad_in_tensor = IRTensor(
name=op.inp_tensors[0].name + "_paddings",
shape=[4, 2],
dtype=np.int32,
owner_opr=None,
np_data=np_data,
q_type=np.int32,
axis=None,
)
net.add_tensor(new_tensor_id, pad_in_tensor)
shape = list(op.inp_tensors[0].shape)
new_tensor_id = max(net._tensor_ids) + 1
pad_out_tensor = IRTensor(
name=op.inp_tensors[0].name + "_pad_out",
shape=[
shape[0],
shape[1],
shape[2] + op.padding[0] * 2,
shape[3] + op.padding[1] * 2,
],
dtype=op.inp_tensors[0].dtype,
)
if (
hasattr(op.inp_tensors[0], "scale")
and op.inp_tensors[0].scale is not None
):
pad_out_tensor.scale = op.inp_tensors[0].scale
pad_out_tensor.q_dtype = op.inp_tensors[0].q_dtype
if hasattr(op.inp_tensors[0], "zero_point"):
pad_out_tensor.zero_point = op.inp_tensors[0].zero_point
net.add_tensor(new_tensor_id, pad_out_tensor)
pad_opr = PadOpr()
pad_opr.inp_tensors = [op.inp_tensors[0], pad_in_tensor]
index = op.inp_tensors[0].user_opr.index(op)
op.inp_tensors[0].user_opr[index] = pad_opr
pad_opr.out_tensors = [pad_out_tensor]
pad_out_tensor.owner_opr = pad_opr
op.inp_tensors = [pad_out_tensor] + op.inp_tensors[1:]
pad_out_tensor.user_opr.append(op)
index = net._opr_ids.index(id(op))
insert_intended[index] = (id(pad_opr), pad_opr)
for index, generated_pair in list(insert_intended.items())[::-1]:
net._opr_ids.insert(index, generated_pair[0])
net.all_oprs.insert(index, generated_pair[1])
@_register_tranformation_rule(TransformerRule.DECONV_SHAPE_AS_INPUT)
def _deconv_shape_as_input(net: IRGraph):
for op in net.all_oprs:
if not isinstance(op, Deconv2dOpr):
continue
result_shape = op.out_tensors[0].shape
np_data = np.array(
[result_shape[0], result_shape[2], result_shape[3], result_shape[1],],
dtype=np.int32,
)
new_tensor_id = max(net._tensor_ids) + 1
shape_symvar = IRTensor(
name=op.inp_tensors[0].name + "_deconv_out_shape",
shape=[4],
dtype=np.int32,
owner_opr=op,
np_data=np_data,
q_type=np.int32,
axis=None,
)
shape_tensor = net.get_tensor(new_tensor_id, shape_symvar)
if len(op.inp_tensors) == 2:
op.inp_tensors = [
shape_tensor,
op.inp_tensors[1],
op.inp_tensors[0],
]
else:
op.inp_tensors = [
shape_tensor,
op.inp_tensors[1],
op.inp_tensors[0],
op.inp_tensors[2],
]
@_register_tranformation_rule(TransformerRule.RESIZE_PARAMS_AS_INPUT)
def _resize_params_as_input(net):
for op in net.all_oprs:
if not isinstance(op, ResizeOpr):
continue
if len(op.inp_tensors) == 2:
continue
out_size_tensor = IRTensor(
name=op.inp_tensors[0].name + "_out_size",
shape=(2,),
dtype=np.int32,
np_data=np.array(op.out_size, dtype=np.int32),
q_type=np.int32,
axis=None,
)
op.add_inp_tensors(out_size_tensor)
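# CONV_ADD_ZERO_BIAS gives bias-less convs an all-zero bias tensor; for quantized
# inputs the bias scale is set to input_scale * weight_scale with an int32 q_dtype,
# the usual convention for quantized convolution bias.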
@_register_tranformation_rule(TransformerRule.CONV_ADD_ZERO_BIAS)
def _add_bias_for_conv(net: IRGraph):
for op in net.all_oprs:
if not isinstance(op, Conv2dOpr):
continue
if len(op.inp_tensors) == 3:
continue
weight_shape = op.inp_tensors[1].shape
bias_shape = (
weight_shape[0]
if len(weight_shape) == 4
else weight_shape[0] * weight_shape[1]
)
bias_shape = (1, bias_shape, 1, 1)
bias = np.zeros(bias_shape, dtype=np.float32)
bias_tensor = IRTensor(
name=op.inp_tensors[0].name + "_bias",
shape=bias_shape,
dtype=np.float32,
np_data=bias,
axis=AxisOrder.NCHW,
)
if op.inp_tensors[0].scale and op.inp_tensors[1].scale:
bias_tensor.set_qparams(
op.inp_tensors[0].scale * op.inp_tensors[1].scale, 0
)
bias_tensor.q_dtype = "int32"
op.inp_tensors.append(bias_tensor)
@_register_tranformation_rule(TransformerRule.DECONV_ADD_ZERO_BIAS)
def _add_bias_for_deconv(net: IRGraph):
for op in net.all_oprs:
if not isinstance(op, Deconv2dOpr):
continue
if len(op.inp_tensors) == 3:
continue
weight_shape = op.inp_tensors[1].shape
bias_shape = (
weight_shape[1]
if len(weight_shape) == 4
else weight_shape[0] * weight_shape[2]
)
bias_shape = (1, bias_shape, 1, 1)
bias = np.zeros(bias_shape, dtype=np.float32)
bias_tensor = IRTensor(
name=op.inp_tensors[0].name + "_bias",
shape=bias_shape,
dtype=np.float32,
np_data=bias,
axis=AxisOrder.NCHW,
)
if op.inp_tensors[0].scale and op.inp_tensors[1].scale:
bias_tensor.set_qparams(
op.inp_tensors[0].scale * op.inp_tensors[1].scale, 0
)
bias_tensor.q_dtype = "int32"
op.inp_tensors.append(bias_tensor)
@_register_tranformation_rule(TransformerRule.RESHAPE_BIAS_TO_1DIM)
def _reshape_bias_to_1dim(net: IRGraph):
for op in net.all_oprs:
if not isinstance(op, (Deconv2dOpr, Conv2dOpr)):
continue
if len(op.inp_tensors) == 2:
continue
bias = op.inp_tensors[2]
if bias.ndim == 4:
bias.shape = (bias.shape[1],)
bias.np_data = bias.np_data.reshape(-1)
@_register_tranformation_rule(TransformerRule.DEPTHWISE_CONV_RESHAPE_WEIGHT)
def _depthwise_conv_reshape_weight(net: IRGraph):
# general group conv is not supported for TFLite
for op in net.all_oprs:
if not isinstance(op, Conv2dOpr):
continue
if op.groups == 1:
continue
weight = op.inp_tensors[1] # G, oc/G, ic/G, kh, kw
ic, cm = weight.shape[1] * op.groups, weight.shape[2]
h, w = weight.shape[3:5]
weight.shape = (ic, cm, h, w) # oc, ic/G, kh, kw
weight.np_data = weight.np_data.reshape(ic, cm, h, w)
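# FUSE_ACTIVATION folds a trailing Relu/TanH into the producing opr's activation
# field and deletes the standalone activation opr (skipped for Deconv2d and for
# oprs that already carry a non-IDENTITY activation).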
@_register_tranformation_rule(TransformerRule.FUSE_ACTIVATION)
def _fuse_activation(net):
delete_intended = []
for op_id, op in zip(net._opr_ids, net.all_oprs):
if isinstance(op, (ReluOpr, TanHOpr)):
prev_ops = net.find_inp_oprs(op)
if len(prev_ops) == 0:
continue
prev_op = prev_ops[0]
if not isinstance(prev_op, OpBase):
continue
if prev_op.activation != "IDENTITY" or prev_op.name == "Deconv2d":
continue
activation = op.name.upper()
prev_op.activation = activation
prev_op.out_tensors = op.out_tensors
for t in prev_op.out_tensors:
t.owner_opr = prev_op
delete_intended.append(net._opr_ids.index(op_id))
for delete_idx in delete_intended[::-1]:
net.delete_ops(delete_idx)
@_register_tranformation_rule(TransformerRule.SLICE_PARAMS_AS_INPUTS_AND_MAKE_SQUEEZE)
def _make_slice_as_inputs(net: IRGraph):
for op in net.all_oprs:
if not isinstance(op, GetSubTensorOpr):
continue
ndim = op.inp_tensors[0].ndim
def make_input(axis, param, init_value):
# make inputs: begin, end and step.
ret = [init_value] * ndim # pylint:disable=cell-var-from-loop
for k, v in zip(axis, param):
ret[k] = v
ret = IRTensor(
name=op.name + "_fake_input", # pylint:disable=cell-var-from-loop
shape=[len(ret)],
dtype=np.int32,
np_data=np.array(ret, dtype=np.int32),
owner_opr=op, # pylint:disable=cell-var-from-loop
q_type=np.int32,
)
return ret
begins_tensor = make_input(op.axis, op.begin_params, 0)
ends_tensor = make_input(op.axis, op.end_params, np.iinfo(np.int32).max)
steps_tensor = make_input(op.axis, op.step_params, 1)
op.inp_tensors = [op.inp_tensors[0], begins_tensor, ends_tensor, steps_tensor]
# TFLite slice does not support squeeze axis, so insert a squeeze opr here.
# infer actual output shape of tflite slice
desired_out_shape = op.out_tensors[0].shape
actual_out_shape = [1] * ndim
idx = 0
for i in range(ndim):
if i in op.squeeze_axis:
continue
actual_out_shape[i] = desired_out_shape[idx]
idx += 1
slice_out_tensor = IRTensor(
name=op.name + "fake_output",
shape=actual_out_shape,
dtype=op.out_tensors[0].dtype,
q_type=op.out_tensors[0].q_dtype,
owner_opr=op,
)
old_out = op.out_tensors
op.out_tensors = [slice_out_tensor]
squeeze = SqueezeOpr(op.squeeze_axis)
squeeze.inp_tensors = [slice_out_tensor]
squeeze.out_tensors = old_out
idx = net._opr_ids.index(id(op)) + 1
net.add_op(squeeze, idx)
# Caffe transformer rules
class PatternNode:
def __init__(self, type, is_output=False, const_value=None):
self.op = None
self.type = type
self.inp_oprs = []
self.inp_const = []
self.inp_tensors = []
self.is_output = is_output
self.const_value = const_value
def check_const_value(self, op):
inp_tensors = [v.np_data for v in op.inp_tensors]
for const in self.const_value:
idx = const[0]
if idx == -1:
find = False
for index, v in enumerate(inp_tensors):
if np.array_equal(const[1], v):
find = True
del inp_tensors[index]
break
if not find:
return False
elif not np.array_equal(const[1], inp_tensors[idx]):
return False
return True
get_type = lambda op: type(op).__name__
def match(node, opr):
node_queue = [node]
opr_queue = [opr]
matched_opr = set()
matched_node = set()
while len(node_queue) != 0:
cur_node = node_queue.pop(0)
cur_opr = opr_queue.pop(0)
if (cur_node.type != get_type(cur_opr) and cur_node.type != "*") or cur_opr.skip:
return False
if cur_node.op == None:
cur_node.op = cur_opr
if cur_node.const_value != None:
if not cur_node.check_const_value(cur_opr):
return False
elif cur_node.op != cur_opr:
return False
matched_opr.add(cur_opr)
matched_node.add(cur_node)
for i, var in enumerate(cur_opr.inp_tensors):
if var.np_data is not None:
cur_node.inp_const.append([i, var.np_data])
else:
cur_node.inp_tensors.append([i, var])
if len(cur_node.inp_oprs) == 0:
continue
if len(cur_node.inp_oprs) != len(cur_opr.inp_oprs):
return False
for i, j in zip(cur_node.inp_oprs, cur_opr.inp_oprs):
node_queue.append(i)
opr_queue.append(j)
for n in matched_node:
if n.is_output:
continue
for op in n.op.out_oprs:
if op not in matched_opr:
return False
return True
def get_softmax_axis(ndim: int) -> int:
if ndim in (0, 1, 3):
return 0
return 1
@_register_tranformation_rule(TransformerRule.FUSE_SOFTMAX)
def _fuse_softmax(net: IRGraph):
matches = OrderedDict() # type: OrderedDict
for op in net.all_oprs:
if not isinstance(op, TrueDivOpr):
continue
try:
prev_op = net.find_inp_oprs(op)[1]
cur_index = net._opr_ids.index(id(op))
if (
not isinstance(prev_op, ReduceOpr)
or prev_op.mode != "SUM"
or prev_op.axis != get_softmax_axis(prev_op.inp_tensors[0].ndim)
or net._opr_ids.index(id(prev_op)) != cur_index - 1
):
continue
prev_op = net.find_inp_oprs(op)[0]
if (
not isinstance(prev_op, ExpOpr)
or net._opr_ids.index(id(prev_op)) != cur_index - 2
):
continue
prev_op = net.find_inp_oprs(prev_op)[0]
if (
not isinstance(prev_op, SubOpr)
or net._opr_ids.index(id(prev_op)) != cur_index - 3
):
continue
prev_op = net.find_inp_oprs(prev_op)[1]
if (
not isinstance(prev_op, ReduceOpr)
or prev_op.mode != "MAX"
or prev_op.axis != get_softmax_axis(prev_op.inp_tensors[0].ndim)
or net._opr_ids.index(id(prev_op)) != cur_index - 4
):
continue
except IndexError: # doesn't match
continue
softmax_opr = SoftmaxOpr(axis=get_softmax_axis(prev_op.inp_tensors[0].ndim))
softmax_opr.beta = 1
softmax_opr.inp_tensors = prev_op.inp_tensors[:1]
for i in softmax_opr.inp_tensors:
i.user_opr.append(softmax_opr)
softmax_opr.out_tensors = op.out_tensors
softmax_out_oprs = net.find_out_oprs(op)
matches[id(prev_op)] = (id(prev_op), softmax_opr, softmax_out_oprs)
for original_id, generated_pair in list(matches.items())[::-1]:
index = net._opr_ids.index(original_id)
for out_op in generated_pair[2]:
generated_pair[1].out_tensors[0].user_opr.append(out_op)
del net._opr_ids[index : index + 5]
del net.all_oprs[index : index + 5]
net._opr_ids.insert(index, generated_pair[0])
net.all_oprs.insert(index, generated_pair[1])
@_register_tranformation_rule(TransformerRule.FUSE_FOR_LEAKY_RELU)
def _fuse_leaky_relu(net: IRGraph):
"""
Elemwise(ADD) + Elemwise(MUL) + Elemwise(MAX) + Elemwise(MIN) -> LeakyRelu
"""
for opr in net.all_oprs:
if (
opr.name == "Add"
and len(net.find_inp_oprs(opr)) == 2
and net.find_inp_oprs(opr)[0].name == "Max"
and net.find_inp_oprs(opr)[1].name == "Mul"
):
max_op = net.find_inp_oprs(opr)[0]
mul_op = net.find_inp_oprs(opr)[1]
if not mul_op.inp_tensors[1].shape == (1,):
continue
if not max_op.inp_tensors[1].shape == (1,):
continue
if (
len(net.find_inp_oprs(mul_op)) != 1
or net.find_inp_oprs(mul_op)[0].name != "Min"
or net.find_inp_oprs(mul_op)[0].inp_tensors[1].shape != (1,)
):
continue
min_op = net.find_inp_oprs(mul_op)[0]
if not min_op.inp_tensors[1].shape == (1,):
continue
if max_op.inp_tensors[0] != min_op.inp_tensors[0]:
continue
leaky_relu = LeakyReluOpr(
negative_slope=float(mul_op.inp_tensors[1].np_data)
)
leaky_relu.inp_tensors = [max_op.inp_tensors[0]]
max_op.inp_tensors[0].user_opr.remove(max_op)
max_op.inp_tensors[0].user_opr.remove(min_op)
max_op.inp_tensors[0].user_opr.append(leaky_relu)
leaky_relu.out_tensors = opr.out_tensors
opr.out_tensors[0].owner_opr = leaky_relu
index = net.all_oprs.index(max_op)
del net.all_oprs[index : index + 4]
del net._opr_ids[index : index + 4]
net.add_op(leaky_relu, index)
@_register_tranformation_rule(TransformerRule.FUSE_FOR_CONV_BIAS)
def _fuse_for_conv_bias(net: IRGraph):
"""
ConvolutionForward + Elemwise(ADD) -> ConvForwardBias
"""
for opr in net.all_oprs:
if (
opr.name == "Conv2d"
and len(net.find_out_oprs(opr)) == 1
and net.find_out_oprs(opr)[0].name == "Add"
):
bias_op = net.find_out_oprs(opr)[0]
if not (
(
bias_op.inp_tensors[1].np_data is not None
and len(bias_op.inp_tensors[1].np_data.reshape(-1))
== opr.inp_tensors[1].shape[0]
)
or (
(
bias_op.inp_tensors[0].np_data is not None
and len(bias_op.inp_tensors[0].np_data.reshape(-1))
== opr.inp_tensors[1].shape[0]
)
)
):
continue
bias_idx = 0 if bias_op.inp_tensors[0].np_data is not None else 1
if len(opr.inp_tensors) == 2:
opr.inp_tensors.append(bias_op.inp_tensors[bias_idx])
else:
bias_shape = opr.inp_tensors[2].np_data.shape
add_tensor = bias_op.inp_tensors[bias_idx].np_data
if add_tensor.shape != bias_shape:
add_tensor = add_tensor.reshape(bias_shape)
opr.inp_tensors[2].np_data += add_tensor
if bias_op in opr.out_tensors[0].user_opr:
opr.out_tensors[0].user_opr.remove(bias_op)
bias_out_op = net.find_out_oprs(bias_op)
if len(bias_out_op) > 0:
for op in bias_out_op:
op.inp_tensors[0] = opr.out_tensors[0]
opr.out_tensors[0].user_opr.append(op)
else:
# last op of the graph
assert bias_op.out_tensors[0] in net.graph_outputs
index = net.graph_outputs.index(bias_op.out_tensors[0])
net.graph_outputs[index] = opr.out_tensors[0]
opr.activation = bias_op.activation
index = net.all_oprs.index(bias_op)
del net.all_oprs[index]
del net._opr_ids[index]
@_register_tranformation_rule(TransformerRule.FUSE_FOR_DECONV_BIAS)
def _fuse_for_deconv_bias(net: IRGraph):
for opr in net.all_oprs:
if (
opr.name == "Deconv2d"
and len(net.find_out_oprs(opr)) == 1
and net.find_out_oprs(opr)[0].name == "Add"
):
bias_op = net.find_out_oprs(opr)[0]
if not (
(
bias_op.inp_tensors[1].np_data is not None
and len(bias_op.inp_tensors[1].np_data.reshape(-1))
== opr.inp_tensors[1].shape[1]
)
or (
(
bias_op.inp_tensors[0].np_data is not None
and len(bias_op.inp_tensors[0].np_data.reshape(-1))
== opr.inp_tensors[1].shape[1]
)
)
):
continue
bias_idx = 0 if bias_op.inp_tensors[0].np_data is not None else 1
if len(opr.inp_tensors) == 3: # shape, weight, input, bias
opr.inp_tensors.append(bias_op.inp_tensors[bias_idx])
else:
bias_shape = opr.inp_tensors[3].np_data.shape
add_tensor = bias_op.inp_tensors[bias_idx].np_data
if add_tensor.shape != bias_shape:
add_tensor = add_tensor.reshape(bias_shape)
opr.inp_tensors[3].np_data += add_tensor
if bias_op in opr.out_tensors[0].user_opr:
opr.out_tensors[0].user_opr.remove(bias_op)
bias_out_op = net.find_out_oprs(bias_op)
if len(bias_out_op) > 0:
for op in bias_out_op:
op.inp_tensors[0] = opr.out_tensors[0]
opr.out_tensors[0].user_opr.append(op)
else:
# last op of the graph
assert bias_op.out_tensors[0] in net.graph_outputs
index = net.graph_outputs.index(bias_op.out_tensors[0])
net.graph_outputs[index] = opr.out_tensors[0]
opr.activation = bias_op.activation
index = net.all_oprs.index(bias_op)
del net.all_oprs[index]
del net._opr_ids[index]
@_register_tranformation_rule(TransformerRule.EXPAND_MUL_ADD3)
def _expand_mul_add3(net: IRGraph):
for op in net.all_oprs:
if not isinstance(op, FuseMulAdd3Opr):
continue
last_op = net.find_inp_oprs(op)
assert len(last_op) == 1
mul_out_tensor = IRTensor(
name=op.inp_tensors[0].name + "_mul_out",
shape=op.inp_tensors[0].shape,
dtype=op.inp_tensors[0].dtype,
)
new_tensor_id = max(net._tensor_ids) + 1
net.add_tensor(new_tensor_id, mul_out_tensor)
mul_op = MulOpr()
mul_out_tensor.owner_opr = mul_op
mul_op.inp_tensors = op.inp_tensors[:2]
for o in mul_op.inp_tensors:
index = o.user_opr.index(op)
o.user_opr[index] = mul_op
mul_op.out_tensors = [mul_out_tensor]
add_op = AddOpr()
add_op.inp_tensors = [mul_out_tensor, op.inp_tensors[2]]
mul_out_tensor.user_opr.append(add_op)
add_op.out_tensors = op.out_tensors
index = net._opr_ids.index(id(op))
net.delete_ops(index)
net.add_op(mul_op, index)
net.add_op(add_op, index + 1)
@_register_tranformation_rule(TransformerRule.REPLACE_FLATTEN_TO_RESHAPE)
def _replace_flatten_to_reshape(net: IRGraph):
for opr in net.all_oprs:
if isinstance(opr, FlattenOpr):
out_shape = tuple(list(opr.inp_tensors[0].shape[: opr.start_axis]) + [-1])
reshape_op = ReshapeOpr(out_shape=out_shape)
reshape_op.inp_tensors = opr.inp_tensors
for t in reshape_op.inp_tensors:
idx = t.user_opr.index(opr)
t.user_opr[idx] = reshape_op
reshape_op.out_tensors = opr.out_tensors
for t in reshape_op.out_tensors:
t.owner_opr = reshape_op
net.replace_op(opr, reshape_op)
@_register_tranformation_rule(TransformerRule.REMOVE_RESHAPE_REALTED_OP)
def _remove_reshape_tensors(net: IRGraph):
for opr in net.all_oprs:
if isinstance(opr, ReshapeOpr) and len(opr.inp_tensors) > 1:
opr.inp_tensors = opr.inp_tensors[:1]
@_register_tranformation_rule(TransformerRule.REMOVE_DROPOUT)
def _remove_dropout(net: IRGraph):
for opr in net.all_oprs:
for idx, inp in enumerate(opr.inp_tensors):
owner_opr = inp.owner_opr
if isinstance(owner_opr, DropoutOpr) and owner_opr.drop_prob == 0:
opr.inp_tensors[idx] = owner_opr.inp_tensors[0]
for idx, out in enumerate(net.graph_outputs):
owner_opr = out.owner_opr
if isinstance(owner_opr, DropoutOpr) and owner_opr.drop_prob == 0:
net.graph_outputs[idx] = owner_opr.inp_tensors[0]
@_register_tranformation_rule(TransformerRule.REMOVE_RELU)
def _remove_relu(net: IRGraph):
for opr in net.all_oprs:
for idx, inp in enumerate(opr.inp_tensors):
owner_opr = inp.owner_opr
if isinstance(owner_opr, ReluOpr):
opr.inp_tensors[idx] = owner_opr.inp_tensors[0]
for idx, out in enumerate(net.graph_outputs):
owner_opr = out.owner_opr
if isinstance(owner_opr, ReluOpr):
net.graph_outputs[idx] = owner_opr.inp_tensors[0]
visited_tensor = set() # type: set
def _dfs_recursive(op_set, tensor):
owner_opr = tensor.owner_opr
op_set.add(owner_opr)
if tensor in visited_tensor:
return
visited_tensor.add(tensor)
if isinstance(owner_opr, IRGraph) or owner_opr is None:
return
for tt in owner_opr.inp_tensors:
_dfs_recursive(op_set, tt)
@_register_tranformation_rule(TransformerRule.REMOVE_UNRELATED_IROP)
def _remove_unrelated_op(net: IRGraph):
match_sets = set() # type: Set[OpBase]
for out_tensor in net.graph_outputs:
_dfs_recursive(match_sets, out_tensor)
remove_idx = []
for opr in net.all_oprs:
if opr not in match_sets:
index = net._opr_ids.index(id(opr))
remove_idx.append(index)
for i in remove_idx[::-1]:
net.delete_ops(i)
@_register_tranformation_rule(TransformerRule.ADD_FAKE_HSIGMOID_OUT)
def _add_fake_hsigmoid_tensor(net: IRGraph):
for opr in net.all_oprs:
if isinstance(opr, (HardSwishOpr, HardSigmoidOpr)):
add_3_out_tensor = IRTensor(
opr.out_tensors[0].name + "_fake_add3_out",
opr.inp_tensors[0].shape,
opr.inp_tensors[0].dtype,
q_type=opr.inp_tensors[0].q_dtype,
scale=opr.inp_tensors[0].scale,
zero_point=opr.inp_tensors[0].zero_point,
)
opr.add_inp_tensors(add_3_out_tensor)
relu6_out_tensor = IRTensor(
opr.out_tensors[0].name + "_relu6_out",
opr.inp_tensors[0].shape,
opr.inp_tensors[0].dtype,
q_type=opr.inp_tensors[0].q_dtype,
scale=opr.inp_tensors[0].scale,
zero_point=opr.inp_tensors[0].zero_point,
)
opr.add_inp_tensors(relu6_out_tensor)
if isinstance(opr, HardSwishOpr):
div6_out_tensor = IRTensor(
opr.out_tensors[0].name + "_div_out",
opr.inp_tensors[0].shape,
opr.inp_tensors[0].dtype,
q_type=opr.inp_tensors[0].q_dtype,
scale=opr.inp_tensors[0].scale,
zero_point=opr.inp_tensors[0].zero_point,
)
opr.add_inp_tensors(div6_out_tensor)
def fold_conv_bn(
conv_weight, conv_bias, conv_groups, gamma, beta, bn_mean, bn_var, eps
):
conv_bias = conv_bias.reshape(1, -1, 1, 1)
gamma = gamma.reshape(1, -1, 1, 1)
beta = beta.reshape(1, -1, 1, 1)
bn_mean = bn_mean.reshape(1, -1, 1, 1)
bn_var = bn_var.reshape(1, -1, 1, 1)
# bn_istd = 1 / bn_std
bn_istd = 1.0 / sqrt(bn_var + eps) # type: ignore[attr-defined]
# w_fold = gamma / bn_std * W
scale_factor = gamma * bn_istd
if conv_groups == 1:
w_fold = conv_weight * scale_factor.reshape(-1, 1, 1, 1)
else:
w_fold = conv_weight * scale_factor.reshape(conv_groups, -1, 1, 1, 1)
# b_fold = gamma * (b - bn_mean) / bn_std + beta
b_fold = beta + gamma * (conv_bias - bn_mean) * bn_istd
return w_fold, b_fold
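# Illustrative sanity check for fold_conv_bn (a sketch, not part of any
# transformation pass): on random data, Conv2d followed by BatchNorm should
# match a single Conv2d that uses the folded weight/bias. The shapes, eps and
# helper name below are assumptions made only for this demo.
def _fold_conv_bn_sanity_sketch():
    import megengine.functional as F

    oc, ic, kh, kw, eps = 4, 3, 3, 3, 1e-5
    x = Tensor(np.random.randn(1, ic, 8, 8).astype("float32"))
    w = Tensor(np.random.randn(oc, ic, kh, kw).astype("float32"))
    b = Tensor(np.zeros((1, oc, 1, 1), dtype="float32"))
    gamma = Tensor(np.random.rand(oc).astype("float32"))
    beta = Tensor(np.random.rand(oc).astype("float32"))
    mean = Tensor(np.random.rand(oc).astype("float32"))
    var = Tensor(np.random.rand(oc).astype("float32") + 1.0)
    # reference: conv -> batch norm, written out with the running statistics
    y_ref = F.conv2d(x, w, b, stride=1, padding=1)
    y_ref = (y_ref - mean.reshape(1, -1, 1, 1)) / sqrt(
        var.reshape(1, -1, 1, 1) + eps
    ) * gamma.reshape(1, -1, 1, 1) + beta.reshape(1, -1, 1, 1)
    # folded: a single conv using the weight/bias produced by fold_conv_bn
    w_fold, b_fold = fold_conv_bn(w, b, 1, gamma, beta, mean, var, eps)
    y_fold = F.conv2d(x, w_fold, b_fold, stride=1, padding=1)
    np.testing.assert_allclose(y_ref.numpy(), y_fold.numpy(), rtol=1e-4, atol=1e-5)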
@_register_tranformation_rule(TransformerRule.FUSE_CONV_BN)
def _fuse_conv_bn(net: IRGraph):
for opr in net.all_oprs:
if (
opr.name == "BatchNormalization"
and len(net.find_inp_oprs(opr)) == 1
and net.find_inp_oprs(opr)[0].name == "Conv2d"
and len(net.find_out_oprs(net.find_inp_oprs(opr)[0])) == 1
and net.find_out_oprs(net.find_inp_oprs(opr)[0])[0] == opr
):
gamma = (
Tensor(opr.weight) # type: ignore[attr-defined]
if opr.weight is not None # type: ignore[attr-defined]
else Tensor(opr.inp_tensors[1].np_data)
)
beta = (
Tensor(opr.bias) # type: ignore[attr-defined]
if opr.bias is not None # type: ignore[attr-defined]
else Tensor(opr.inp_tensors[2].np_data)
)
bn_mean = (
Tensor(opr.mean) # type: ignore[attr-defined]
if opr.mean is not None # type: ignore[attr-defined]
else | Tensor(opr.inp_tensors[3].np_data) | megengine.Tensor |
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
from collections import OrderedDict
from enum import Enum
from functools import cmp_to_key
from typing import Set # pylint: disable=unused-import
from typing import Callable, Dict, Sequence
import numpy as np
from megengine import Tensor
from megengine.functional import sqrt
from ..converter_ir.ir_graph import IRGraph
from .ir_op import (
AddOpr,
Conv2dOpr,
ConvRelu2dOpr,
Deconv2dOpr,
DropoutOpr,
ExpOpr,
FlattenOpr,
FuseMulAdd3Opr,
GetSubTensorOpr,
HardSigmoidOpr,
HardSwishOpr,
IdentityOpr,
LeakyReluOpr,
MulOpr,
OpBase,
PadOpr,
ReduceOpr,
ReluOpr,
ReshapeOpr,
ResizeOpr,
SoftmaxOpr,
SqueezeOpr,
SubOpr,
TanHOpr,
TransposeOpr,
TrueDivOpr,
_PoolOpr,
)
from .ir_tensor import AxisOrder, IRTensor
class TransformerRule(Enum):
# general rules
NOPE = 1
# for TFLite
REDUCE_AXIS_AS_INPUT = 100
REMOVE_RESHAPE_INPUT = 101
# FUSE_FOR_RELU6 pass should happen before FUSE_ACTIVATION
FUSE_FOR_RELU6 = 102 ##
EXPAND_CONVRELU = 102.1
CONV_ADD_ZERO_BIAS = 103
FUSE_FOR_CONV_BIAS = 103.1
FUSE_CONV_BN = 104
DECONV_ADD_ZERO_BIAS = 105
    # DEPTHWISE_CONV_RESHAPE_WEIGHT requires RESHAPE_BIAS_TO_1DIM
DEPTHWISE_CONV_RESHAPE_WEIGHT = 106
FUSE_SOFTMAX = 107
# RESHAPE_BIAS_TO_1DIM should happen before DECONV_SHAPE_AS_INPUT
RESHAPE_BIAS_TO_1DIM = 108
DECONV_SHAPE_AS_INPUT = 109
FUSE_ASTYPE = 110 ##
PADDING_FOR_CONV_AND_POOLING = 111
TRANSPOSE_PATTERN_AS_INPUT = 112
# FUSE_FOR_LEAKY_RELU should happen before EXPAND_MUL_ADD3
FUSE_FOR_LEAKY_RELU = 113
EXPAND_MUL_ADD3 = 114
EXPAND_ADD_SIGMOID = 115 ##
FUSE_FOR_DECONV_BIAS = 117
FUSE_FOR_FULLY_CONNECTED = 118 ##
# for TFLite Converter
SLICE_PARAMS_AS_INPUTS_AND_MAKE_SQUEEZE = 119
RESIZE_PARAMS_AS_INPUT = 120
REPLACE_FLATTEN_TO_RESHAPE = 120.1
# remove reshape
REMOVE_RESHAPE_REALTED_OP = 121
REMOVE_DROPOUT = 122
FUSE_ACTIVATION = 123
REMOVE_IDENTITY = 124
REMOVE_RELU = 125
REMOVE_UNRELATED_IROP = 130
ADD_FAKE_HSIGMOID_OUT = 131
RENAME_CAFFE_LAYER_TENSOR = 132
def cmp_rules(a, b):
if a.value < b.value:
return -1
if a.value > b.value:
return 1
return 0
class IRTransform:
def __init__(self, transformer_options):
if not isinstance(transformer_options, Sequence):
transformer_options = [
transformer_options,
]
# bias of depthwise_conv must be 1 dim
if TransformerRule.DEPTHWISE_CONV_RESHAPE_WEIGHT in transformer_options:
if TransformerRule.RESHAPE_BIAS_TO_1DIM not in transformer_options:
transformer_options.append(TransformerRule.RESHAPE_BIAS_TO_1DIM)
self.trans_options = sorted(transformer_options, key=cmp_to_key(cmp_rules))
def transform(self, ir_graph):
for option in self.trans_options:
TRANSFORMMAP[option](ir_graph)
return ir_graph
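# Minimal usage sketch for IRTransform (illustrative only; the rule list is an
# arbitrary example and `ir_graph` is assumed to come from one of the
# frontends). Rules are re-ordered by their enum value before being applied.
def _example_apply_transform(ir_graph):
    transformer = IRTransform(
        [
            TransformerRule.FUSE_CONV_BN,
            TransformerRule.FUSE_ACTIVATION,
            TransformerRule.REMOVE_UNRELATED_IROP,
        ]
    )
    return transformer.transform(ir_graph)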
TRANSFORMMAP: Dict[Enum, Callable] = {}
def _register_tranformation_rule(transformer_option):
def callback(impl):
TRANSFORMMAP[transformer_option] = impl
return callback
def cal_pad_mode(tm_opr):
out_shape = tm_opr.out_tensors[0].shape
inp_shape = tm_opr.inp_tensors[0].shape
if out_shape[2:] == inp_shape[2:]:
return "SAME"
else:
return "VALID"
@_register_tranformation_rule(TransformerRule.REMOVE_RESHAPE_INPUT)
def _remove_reshape_input(net):
for op in net.all_oprs:
if not isinstance(op, ReshapeOpr):
continue
if len(op.inp_tensors) == 2:
del op.inp_tensors[1]
@_register_tranformation_rule(TransformerRule.TRANSPOSE_PATTERN_AS_INPUT)
def _transpose_pattern_as_input(net):
for op in net.all_oprs:
if not isinstance(op, TransposeOpr):
continue
perm_tensor = IRTensor(
name=op.inp_tensors[0].name + "_perm",
shape=np.array(op.pattern).shape,
dtype=np.int32,
np_data=np.array(op.pattern, dtype=np.int32),
owner_opr=op,
q_type=np.int32,
axis=None,
)
op.add_inp_tensors(perm_tensor)
@_register_tranformation_rule(TransformerRule.REDUCE_AXIS_AS_INPUT)
def _reduce_axis_as_input(net):
for op in net.all_oprs:
if not isinstance(op, ReduceOpr):
continue
axis_tensor = IRTensor(
name=op.inp_tensors[0].name + "_axis",
shape=[1],
dtype=np.int32,
np_data=np.array(op.axis, dtype=np.int32),
owner_opr=op,
q_type=np.int32,
axis=None,
)
op.add_inp_tensors(axis_tensor)
@_register_tranformation_rule(TransformerRule.PADDING_FOR_CONV_AND_POOLING)
def _make_padding(net: IRGraph):
def have_padding(opr):
if isinstance(opr, Conv2dOpr):
if cal_pad_mode(opr) == "SAME":
return False
if hasattr(opr, "padding") and (opr.padding[0] > 0 or opr.padding[1] > 0):
return True
return False
insert_intended = OrderedDict() # type: OrderedDict
for op in net.all_oprs:
if not isinstance(op, (Conv2dOpr, _PoolOpr)):
continue
if have_padding(op):
assert op.inp_tensors[0].ndim == 4, "ERROR: unsupported padding mode"
np_data = np.array(
[
0,
0,
op.padding[0],
op.padding[0],
op.padding[1],
op.padding[1],
0,
0,
],
dtype=np.int32,
)
new_tensor_id = max(net._tensor_ids) + 1
pad_in_tensor = IRTensor(
name=op.inp_tensors[0].name + "_paddings",
shape=[4, 2],
dtype=np.int32,
owner_opr=None,
np_data=np_data,
q_type=np.int32,
axis=None,
)
net.add_tensor(new_tensor_id, pad_in_tensor)
shape = list(op.inp_tensors[0].shape)
new_tensor_id = max(net._tensor_ids) + 1
pad_out_tensor = IRTensor(
name=op.inp_tensors[0].name + "_pad_out",
shape=[
shape[0],
shape[1],
shape[2] + op.padding[0] * 2,
shape[3] + op.padding[1] * 2,
],
dtype=op.inp_tensors[0].dtype,
)
if (
hasattr(op.inp_tensors[0], "scale")
and op.inp_tensors[0].scale is not None
):
pad_out_tensor.scale = op.inp_tensors[0].scale
pad_out_tensor.q_dtype = op.inp_tensors[0].q_dtype
if hasattr(op.inp_tensors[0], "zero_point"):
pad_out_tensor.zero_point = op.inp_tensors[0].zero_point
net.add_tensor(new_tensor_id, pad_out_tensor)
pad_opr = PadOpr()
pad_opr.inp_tensors = [op.inp_tensors[0], pad_in_tensor]
index = op.inp_tensors[0].user_opr.index(op)
op.inp_tensors[0].user_opr[index] = pad_opr
pad_opr.out_tensors = [pad_out_tensor]
pad_out_tensor.owner_opr = pad_opr
op.inp_tensors = [pad_out_tensor] + op.inp_tensors[1:]
pad_out_tensor.user_opr.append(op)
index = net._opr_ids.index(id(op))
insert_intended[index] = (id(pad_opr), pad_opr)
for index, generated_pair in list(insert_intended.items())[::-1]:
net._opr_ids.insert(index, generated_pair[0])
net.all_oprs.insert(index, generated_pair[1])
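# Illustrative note on the paddings tensor built above: for a Conv2d with
# padding=(1, 2) its np_data is [0, 0, 1, 1, 2, 2, 0, 0], viewed with shape
# [4, 2] as (before, after) pairs per axis: [[0, 0], [1, 1], [2, 2], [0, 0]].
# The first and last axes stay unpadded, which presumably matches the NHWC
# layout consumed by the backend PAD operator.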
@_register_tranformation_rule(TransformerRule.DECONV_SHAPE_AS_INPUT)
def _deconv_shape_as_input(net: IRGraph):
for op in net.all_oprs:
if not isinstance(op, Deconv2dOpr):
continue
result_shape = op.out_tensors[0].shape
np_data = np.array(
[result_shape[0], result_shape[2], result_shape[3], result_shape[1],],
dtype=np.int32,
)
new_tensor_id = max(net._tensor_ids) + 1
shape_symvar = IRTensor(
name=op.inp_tensors[0].name + "_deconv_out_shape",
shape=[4],
dtype=np.int32,
owner_opr=op,
np_data=np_data,
q_type=np.int32,
axis=None,
)
shape_tensor = net.get_tensor(new_tensor_id, shape_symvar)
if len(op.inp_tensors) == 2:
op.inp_tensors = [
shape_tensor,
op.inp_tensors[1],
op.inp_tensors[0],
]
else:
op.inp_tensors = [
shape_tensor,
op.inp_tensors[1],
op.inp_tensors[0],
op.inp_tensors[2],
]
@_register_tranformation_rule(TransformerRule.RESIZE_PARAMS_AS_INPUT)
def _resize_params_as_input(net):
for op in net.all_oprs:
if not isinstance(op, ResizeOpr):
continue
if len(op.inp_tensors) == 2:
continue
out_size_tensor = IRTensor(
name=op.inp_tensors[0].name + "_out_size",
shape=(2,),
dtype=np.int32,
np_data=np.array(op.out_size, dtype=np.int32),
q_type=np.int32,
axis=None,
)
op.add_inp_tensors(out_size_tensor)
@_register_tranformation_rule(TransformerRule.CONV_ADD_ZERO_BIAS)
def _add_bias_for_conv(net: IRGraph):
for op in net.all_oprs:
if not isinstance(op, Conv2dOpr):
continue
if len(op.inp_tensors) == 3:
continue
weight_shape = op.inp_tensors[1].shape
bias_shape = (
weight_shape[0]
if len(weight_shape) == 4
else weight_shape[0] * weight_shape[1]
)
bias_shape = (1, bias_shape, 1, 1)
bias = np.zeros(bias_shape, dtype=np.float32)
bias_tensor = IRTensor(
name=op.inp_tensors[0].name + "_bias",
shape=bias_shape,
dtype=np.float32,
np_data=bias,
axis=AxisOrder.NCHW,
)
if op.inp_tensors[0].scale and op.inp_tensors[1].scale:
bias_tensor.set_qparams(
op.inp_tensors[0].scale * op.inp_tensors[1].scale, 0
)
bias_tensor.q_dtype = "int32"
op.inp_tensors.append(bias_tensor)
@_register_tranformation_rule(TransformerRule.DECONV_ADD_ZERO_BIAS)
def _add_bias_for_deconv(net: IRGraph):
for op in net.all_oprs:
if not isinstance(op, Deconv2dOpr):
continue
if len(op.inp_tensors) == 3:
continue
weight_shape = op.inp_tensors[1].shape
bias_shape = (
weight_shape[1]
if len(weight_shape) == 4
else weight_shape[0] * weight_shape[2]
)
bias_shape = (1, bias_shape, 1, 1)
bias = np.zeros(bias_shape, dtype=np.float32)
bias_tensor = IRTensor(
name=op.inp_tensors[0].name + "_bias",
shape=bias_shape,
dtype=np.float32,
np_data=bias,
axis=AxisOrder.NCHW,
)
if op.inp_tensors[0].scale and op.inp_tensors[1].scale:
bias_tensor.set_qparams(
op.inp_tensors[0].scale * op.inp_tensors[1].scale, 0
)
bias_tensor.q_dtype = "int32"
op.inp_tensors.append(bias_tensor)
@_register_tranformation_rule(TransformerRule.RESHAPE_BIAS_TO_1DIM)
def _reshape_bias_to_1dim(net: IRGraph):
for op in net.all_oprs:
if not isinstance(op, (Deconv2dOpr, Conv2dOpr)):
continue
if len(op.inp_tensors) == 2:
continue
bias = op.inp_tensors[2]
if bias.ndim == 4:
bias.shape = (bias.shape[1],)
bias.np_data = bias.np_data.reshape(-1)
@_register_tranformation_rule(TransformerRule.DEPTHWISE_CONV_RESHAPE_WEIGHT)
def _depthwise_conv_reshape_weight(net: IRGraph):
# general group conv is not supported for TFLite
for op in net.all_oprs:
if not isinstance(op, Conv2dOpr):
continue
if op.groups == 1:
continue
weight = op.inp_tensors[1] # G, oc/G, ic/G, kh, kw
ic, cm = weight.shape[1] * op.groups, weight.shape[2]
h, w = weight.shape[3:5]
weight.shape = (ic, cm, h, w) # oc, ic/G, kh, kw
weight.np_data = weight.np_data.reshape(ic, cm, h, w)
@_register_tranformation_rule(TransformerRule.FUSE_ACTIVATION)
def _fuse_activation(net):
delete_intended = []
for op_id, op in zip(net._opr_ids, net.all_oprs):
if isinstance(op, (ReluOpr, TanHOpr)):
prev_ops = net.find_inp_oprs(op)
if len(prev_ops) == 0:
continue
prev_op = prev_ops[0]
if not isinstance(prev_op, OpBase):
continue
if prev_op.activation != "IDENTITY" or prev_op.name == "Deconv2d":
continue
activation = op.name.upper()
prev_op.activation = activation
prev_op.out_tensors = op.out_tensors
for t in prev_op.out_tensors:
t.owner_opr = prev_op
delete_intended.append(net._opr_ids.index(op_id))
for delete_idx in delete_intended[::-1]:
net.delete_ops(delete_idx)
@_register_tranformation_rule(TransformerRule.SLICE_PARAMS_AS_INPUTS_AND_MAKE_SQUEEZE)
def _make_slice_as_inputs(net: IRGraph):
for op in net.all_oprs:
if not isinstance(op, GetSubTensorOpr):
continue
ndim = op.inp_tensors[0].ndim
def make_input(axis, param, init_value):
# make inputs: begin, end and step.
ret = [init_value] * ndim # pylint:disable=cell-var-from-loop
for k, v in zip(axis, param):
ret[k] = v
ret = IRTensor(
name=op.name + "_fake_input", # pylint:disable=cell-var-from-loop
shape=[len(ret)],
dtype=np.int32,
np_data=np.array(ret, dtype=np.int32),
owner_opr=op, # pylint:disable=cell-var-from-loop
q_type=np.int32,
)
return ret
begins_tensor = make_input(op.axis, op.begin_params, 0)
ends_tensor = make_input(op.axis, op.end_params, np.iinfo(np.int32).max)
steps_tensor = make_input(op.axis, op.step_params, 1)
op.inp_tensors = [op.inp_tensors[0], begins_tensor, ends_tensor, steps_tensor]
        # TFLite slice does not support squeeze axis, so insert a squeeze opr here.
# infer actual output shape of tflite slice
desired_out_shape = op.out_tensors[0].shape
actual_out_shape = [1] * ndim
idx = 0
for i in range(ndim):
if i in op.squeeze_axis:
continue
actual_out_shape[i] = desired_out_shape[idx]
idx += 1
slice_out_tensor = IRTensor(
            name=op.name + "_fake_output",
shape=actual_out_shape,
dtype=op.out_tensors[0].dtype,
q_type=op.out_tensors[0].q_dtype,
owner_opr=op,
)
old_out = op.out_tensors
op.out_tensors = [slice_out_tensor]
squeeze = SqueezeOpr(op.squeeze_axis)
squeeze.inp_tensors = [slice_out_tensor]
squeeze.out_tensors = old_out
idx = net._opr_ids.index(id(op)) + 1
net.add_op(squeeze, idx)
# caffe transformer rules
class PatternNode:
def __init__(self, type, is_output=False, const_value=None):
self.op = None
self.type = type
self.inp_oprs = []
self.inp_const = []
self.inp_tensors = []
self.is_output = is_output
self.const_value = const_value
def check_const_value(self, op):
inp_tensors = [v.np_data for v in op.inp_tensors]
for const in self.const_value:
idx = const[0]
if idx == -1:
find = False
for index, v in enumerate(inp_tensors):
if np.array_equal(const[1], v):
find = True
del inp_tensors[index]
break
if not find:
return False
elif not np.array_equal(const[1], inp_tensors[idx]):
return False
return True
get_type = lambda op: type(op).__name__
def match(node, opr):
node_queue = [node]
opr_queue = [opr]
matched_opr = set()
matched_node = set()
while len(node_queue) != 0:
cur_node = node_queue.pop(0)
cur_opr = opr_queue.pop(0)
        if (cur_node.type != get_type(cur_opr) and cur_node.type != "*") or cur_opr.skip:
            return False
        if cur_node.op is None:
            cur_node.op = cur_opr
            if cur_node.const_value is not None:
if not cur_node.check_const_value(cur_opr):
return False
elif cur_node.op != cur_opr:
return False
matched_opr.add(cur_opr)
matched_node.add(cur_node)
for i, var in enumerate(cur_opr.inp_tensors):
if var.np_data is not None:
cur_node.inp_const.append([i, var.np_data])
else:
cur_node.inp_tensors.append([i, var])
if len(cur_node.inp_oprs) == 0:
continue
if len(cur_node.inp_oprs) != len(cur_opr.inp_oprs):
return False
for i, j in zip(cur_node.inp_oprs, cur_opr.inp_oprs):
node_queue.append(i)
opr_queue.append(j)
for n in matched_node:
if n.is_output:
continue
for op in n.op.out_oprs:
if op not in matched_opr:
return False
return True
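# Illustrative sketch of how PatternNode/match can be used (not called by any
# rule here). It assumes the graph operators expose the attributes match()
# relies on (`skip`, `inp_oprs`, `out_oprs`) and that their class names are
# IR op names such as "MulOpr"/"AddOpr".
def _example_match_mul_fed_by_add(candidate_opr):
    # pattern: an AddOpr whose only consumer is the MulOpr we start from;
    # match() also requires the candidate Mul to have exactly one input opr.
    mul_node = PatternNode("MulOpr", is_output=True)
    add_node = PatternNode("AddOpr")
    mul_node.inp_oprs = [add_node]
    return match(mul_node, candidate_opr)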
def get_softmax_axis(ndim: int) -> int:
if ndim in (0, 1, 3):
return 0
return 1
@_register_tranformation_rule(TransformerRule.FUSE_SOFTMAX)
def _fuse_softmax(net: IRGraph):
matches = OrderedDict() # type: OrderedDict
for op in net.all_oprs:
if not isinstance(op, TrueDivOpr):
continue
try:
prev_op = net.find_inp_oprs(op)[1]
cur_index = net._opr_ids.index(id(op))
if (
not isinstance(prev_op, ReduceOpr)
or prev_op.mode != "SUM"
or prev_op.axis != get_softmax_axis(prev_op.inp_tensors[0].ndim)
or net._opr_ids.index(id(prev_op)) != cur_index - 1
):
continue
prev_op = net.find_inp_oprs(op)[0]
if (
not isinstance(prev_op, ExpOpr)
or net._opr_ids.index(id(prev_op)) != cur_index - 2
):
continue
prev_op = net.find_inp_oprs(prev_op)[0]
if (
not isinstance(prev_op, SubOpr)
or net._opr_ids.index(id(prev_op)) != cur_index - 3
):
continue
prev_op = net.find_inp_oprs(prev_op)[1]
if (
not isinstance(prev_op, ReduceOpr)
or prev_op.mode != "MAX"
or prev_op.axis != get_softmax_axis(prev_op.inp_tensors[0].ndim)
or net._opr_ids.index(id(prev_op)) != cur_index - 4
):
continue
except IndexError: # doesn't match
continue
softmax_opr = SoftmaxOpr(axis=get_softmax_axis(prev_op.inp_tensors[0].ndim))
softmax_opr.beta = 1
softmax_opr.inp_tensors = prev_op.inp_tensors[:1]
for i in softmax_opr.inp_tensors:
i.user_opr.append(softmax_opr)
softmax_opr.out_tensors = op.out_tensors
softmax_out_oprs = net.find_out_oprs(op)
matches[id(prev_op)] = (id(prev_op), softmax_opr, softmax_out_oprs)
for original_id, generated_pair in list(matches.items())[::-1]:
index = net._opr_ids.index(original_id)
for out_op in generated_pair[2]:
generated_pair[1].out_tensors[0].user_opr.append(out_op)
del net._opr_ids[index : index + 5]
del net.all_oprs[index : index + 5]
net._opr_ids.insert(index, generated_pair[0])
net.all_oprs.insert(index, generated_pair[1])
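# The five-op chain matched above is the numerically stable softmax expansion:
#     m = max(x, axis)            # ReduceOpr(MAX)
#     e = exp(x - m)              # SubOpr + ExpOpr
#     y = e / sum(e, axis)        # ReduceOpr(SUM) + TrueDivOpr
# which _fuse_softmax collapses into a single SoftmaxOpr(axis).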
@_register_tranformation_rule(TransformerRule.FUSE_FOR_LEAKY_RELU)
def _fuse_leaky_relu(net: IRGraph):
"""
Elemwise(ADD) + Elemwise(MUL) + Elemwise(MAX) + Elemwise(MIN) -> LeakyRelu
"""
for opr in net.all_oprs:
if (
opr.name == "Add"
and len(net.find_inp_oprs(opr)) == 2
and net.find_inp_oprs(opr)[0].name == "Max"
and net.find_inp_oprs(opr)[1].name == "Mul"
):
max_op = net.find_inp_oprs(opr)[0]
mul_op = net.find_inp_oprs(opr)[1]
if not mul_op.inp_tensors[1].shape == (1,):
continue
if not max_op.inp_tensors[1].shape == (1,):
continue
if (
len(net.find_inp_oprs(mul_op)) != 1
or net.find_inp_oprs(mul_op)[0].name != "Min"
or net.find_inp_oprs(mul_op)[0].inp_tensors[1].shape != (1,)
):
continue
min_op = net.find_inp_oprs(mul_op)[0]
if not min_op.inp_tensors[1].shape == (1,):
continue
if max_op.inp_tensors[0] != min_op.inp_tensors[0]:
continue
leaky_relu = LeakyReluOpr(
negative_slope=float(mul_op.inp_tensors[1].np_data)
)
leaky_relu.inp_tensors = [max_op.inp_tensors[0]]
max_op.inp_tensors[0].user_opr.remove(max_op)
max_op.inp_tensors[0].user_opr.remove(min_op)
max_op.inp_tensors[0].user_opr.append(leaky_relu)
leaky_relu.out_tensors = opr.out_tensors
opr.out_tensors[0].owner_opr = leaky_relu
index = net.all_oprs.index(max_op)
del net.all_oprs[index : index + 4]
del net._opr_ids[index : index + 4]
net.add_op(leaky_relu, index)
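# The pattern above is the elementwise expansion of LeakyReLU:
#     leaky_relu(x) = max(x, 0) + negative_slope * min(x, 0)
# so a Max branch and a (Min -> Mul) branch joined by one Add collapse into a
# single LeakyReluOpr whose slope is read from the Mul constant.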
@_register_tranformation_rule(TransformerRule.FUSE_FOR_CONV_BIAS)
def _fuse_for_conv_bias(net: IRGraph):
"""
ConvolutionForward + Elemwise(ADD) -> ConvForwardBias
"""
for opr in net.all_oprs:
if (
opr.name == "Conv2d"
and len(net.find_out_oprs(opr)) == 1
and net.find_out_oprs(opr)[0].name == "Add"
):
bias_op = net.find_out_oprs(opr)[0]
if not (
(
bias_op.inp_tensors[1].np_data is not None
and len(bias_op.inp_tensors[1].np_data.reshape(-1))
== opr.inp_tensors[1].shape[0]
)
or (
(
bias_op.inp_tensors[0].np_data is not None
and len(bias_op.inp_tensors[0].np_data.reshape(-1))
== opr.inp_tensors[1].shape[0]
)
)
):
continue
bias_idx = 0 if bias_op.inp_tensors[0].np_data is not None else 1
if len(opr.inp_tensors) == 2:
opr.inp_tensors.append(bias_op.inp_tensors[bias_idx])
else:
bias_shape = opr.inp_tensors[2].np_data.shape
add_tensor = bias_op.inp_tensors[bias_idx].np_data
if add_tensor.shape != bias_shape:
add_tensor = add_tensor.reshape(bias_shape)
opr.inp_tensors[2].np_data += add_tensor
if bias_op in opr.out_tensors[0].user_opr:
opr.out_tensors[0].user_opr.remove(bias_op)
bias_out_op = net.find_out_oprs(bias_op)
if len(bias_out_op) > 0:
for op in bias_out_op:
op.inp_tensors[0] = opr.out_tensors[0]
opr.out_tensors[0].user_opr.append(op)
else:
# last op of the graph
assert bias_op.out_tensors[0] in net.graph_outputs
index = net.graph_outputs.index(bias_op.out_tensors[0])
net.graph_outputs[index] = opr.out_tensors[0]
opr.activation = bias_op.activation
index = net.all_oprs.index(bias_op)
del net.all_oprs[index]
del net._opr_ids[index]
@_register_tranformation_rule(TransformerRule.FUSE_FOR_DECONV_BIAS)
def _fuse_for_deconv_bias(net: IRGraph):
for opr in net.all_oprs:
if (
opr.name == "Deconv2d"
and len(net.find_out_oprs(opr)) == 1
and net.find_out_oprs(opr)[0].name == "Add"
):
bias_op = net.find_out_oprs(opr)[0]
if not (
(
bias_op.inp_tensors[1].np_data is not None
and len(bias_op.inp_tensors[1].np_data.reshape(-1))
== opr.inp_tensors[1].shape[1]
)
or (
(
bias_op.inp_tensors[0].np_data is not None
and len(bias_op.inp_tensors[0].np_data.reshape(-1))
== opr.inp_tensors[1].shape[1]
)
)
):
continue
bias_idx = 0 if bias_op.inp_tensors[0].np_data is not None else 1
            if len(opr.inp_tensors) == 3:  # inp_tensors is [shape, weight, input]; bias gets appended below
opr.inp_tensors.append(bias_op.inp_tensors[bias_idx])
else:
bias_shape = opr.inp_tensors[3].np_data.shape
add_tensor = bias_op.inp_tensors[bias_idx].np_data
if add_tensor.shape != bias_shape:
add_tensor = add_tensor.reshape(bias_shape)
opr.inp_tensors[3].np_data += add_tensor
if bias_op in opr.out_tensors[0].user_opr:
opr.out_tensors[0].user_opr.remove(bias_op)
bias_out_op = net.find_out_oprs(bias_op)
if len(bias_out_op) > 0:
for op in bias_out_op:
op.inp_tensors[0] = opr.out_tensors[0]
opr.out_tensors[0].user_opr.append(op)
else:
# last op of the graph
assert bias_op.out_tensors[0] in net.graph_outputs
index = net.graph_outputs.index(bias_op.out_tensors[0])
net.graph_outputs[index] = opr.out_tensors[0]
opr.activation = bias_op.activation
index = net.all_oprs.index(bias_op)
del net.all_oprs[index]
del net._opr_ids[index]
@_register_tranformation_rule(TransformerRule.EXPAND_MUL_ADD3)
def _expand_mul_add3(net: IRGraph):
for op in net.all_oprs:
if not isinstance(op, FuseMulAdd3Opr):
continue
last_op = net.find_inp_oprs(op)
assert len(last_op) == 1
mul_out_tensor = IRTensor(
name=op.inp_tensors[0].name + "_mul_out",
shape=op.inp_tensors[0].shape,
dtype=op.inp_tensors[0].dtype,
)
new_tensor_id = max(net._tensor_ids) + 1
net.add_tensor(new_tensor_id, mul_out_tensor)
mul_op = MulOpr()
mul_out_tensor.owner_opr = mul_op
mul_op.inp_tensors = op.inp_tensors[:2]
for o in mul_op.inp_tensors:
index = o.user_opr.index(op)
o.user_opr[index] = mul_op
mul_op.out_tensors = [mul_out_tensor]
add_op = AddOpr()
add_op.inp_tensors = [mul_out_tensor, op.inp_tensors[2]]
mul_out_tensor.user_opr.append(add_op)
add_op.out_tensors = op.out_tensors
index = net._opr_ids.index(id(op))
net.delete_ops(index)
net.add_op(mul_op, index)
net.add_op(add_op, index + 1)
@_register_tranformation_rule(TransformerRule.REPLACE_FLATTEN_TO_RESHAPE)
def _replace_flatten_to_reshape(net: IRGraph):
for opr in net.all_oprs:
if isinstance(opr, FlattenOpr):
out_shape = tuple(list(opr.inp_tensors[0].shape[: opr.start_axis]) + [-1])
reshape_op = ReshapeOpr(out_shape=out_shape)
reshape_op.inp_tensors = opr.inp_tensors
for t in reshape_op.inp_tensors:
idx = t.user_opr.index(opr)
t.user_opr[idx] = reshape_op
reshape_op.out_tensors = opr.out_tensors
for t in reshape_op.out_tensors:
t.owner_opr = reshape_op
net.replace_op(opr, reshape_op)
@_register_tranformation_rule(TransformerRule.REMOVE_RESHAPE_REALTED_OP)
def _remove_reshape_tensors(net: IRGraph):
for opr in net.all_oprs:
if isinstance(opr, ReshapeOpr) and len(opr.inp_tensors) > 1:
opr.inp_tensors = opr.inp_tensors[:1]
@_register_tranformation_rule(TransformerRule.REMOVE_DROPOUT)
def _remove_dropout(net: IRGraph):
for opr in net.all_oprs:
for idx, inp in enumerate(opr.inp_tensors):
owner_opr = inp.owner_opr
if isinstance(owner_opr, DropoutOpr) and owner_opr.drop_prob == 0:
opr.inp_tensors[idx] = owner_opr.inp_tensors[0]
for idx, out in enumerate(net.graph_outputs):
owner_opr = out.owner_opr
if isinstance(owner_opr, DropoutOpr) and owner_opr.drop_prob == 0:
net.graph_outputs[idx] = owner_opr.inp_tensors[0]
@_register_tranformation_rule(TransformerRule.REMOVE_RELU)
def _remove_relu(net: IRGraph):
for opr in net.all_oprs:
for idx, inp in enumerate(opr.inp_tensors):
owner_opr = inp.owner_opr
if isinstance(owner_opr, ReluOpr):
opr.inp_tensors[idx] = owner_opr.inp_tensors[0]
for idx, out in enumerate(net.graph_outputs):
owner_opr = out.owner_opr
if isinstance(owner_opr, ReluOpr):
net.graph_outputs[idx] = owner_opr.inp_tensors[0]
visited_tensor = set() # type: set
def _dfs_recursive(op_set, tensor):
owner_opr = tensor.owner_opr
op_set.add(owner_opr)
if tensor in visited_tensor:
return
visited_tensor.add(tensor)
if isinstance(owner_opr, IRGraph) or owner_opr is None:
return
for tt in owner_opr.inp_tensors:
_dfs_recursive(op_set, tt)
@_register_tranformation_rule(TransformerRule.REMOVE_UNRELATED_IROP)
def _remove_unrelated_op(net: IRGraph):
match_sets = set() # type: Set[OpBase]
for out_tensor in net.graph_outputs:
_dfs_recursive(match_sets, out_tensor)
remove_idx = []
for opr in net.all_oprs:
if opr not in match_sets:
index = net._opr_ids.index(id(opr))
remove_idx.append(index)
for i in remove_idx[::-1]:
net.delete_ops(i)
@_register_tranformation_rule(TransformerRule.ADD_FAKE_HSIGMOID_OUT)
def _add_fake_hsigmoid_tensor(net: IRGraph):
for opr in net.all_oprs:
if isinstance(opr, (HardSwishOpr, HardSigmoidOpr)):
add_3_out_tensor = IRTensor(
opr.out_tensors[0].name + "_fake_add3_out",
opr.inp_tensors[0].shape,
opr.inp_tensors[0].dtype,
q_type=opr.inp_tensors[0].q_dtype,
scale=opr.inp_tensors[0].scale,
zero_point=opr.inp_tensors[0].zero_point,
)
opr.add_inp_tensors(add_3_out_tensor)
relu6_out_tensor = IRTensor(
opr.out_tensors[0].name + "_relu6_out",
opr.inp_tensors[0].shape,
opr.inp_tensors[0].dtype,
q_type=opr.inp_tensors[0].q_dtype,
scale=opr.inp_tensors[0].scale,
zero_point=opr.inp_tensors[0].zero_point,
)
opr.add_inp_tensors(relu6_out_tensor)
if isinstance(opr, HardSwishOpr):
div6_out_tensor = IRTensor(
opr.out_tensors[0].name + "_div_out",
opr.inp_tensors[0].shape,
opr.inp_tensors[0].dtype,
q_type=opr.inp_tensors[0].q_dtype,
scale=opr.inp_tensors[0].scale,
zero_point=opr.inp_tensors[0].zero_point,
)
opr.add_inp_tensors(div6_out_tensor)
def fold_conv_bn(
conv_weight, conv_bias, conv_groups, gamma, beta, bn_mean, bn_var, eps
):
conv_bias = conv_bias.reshape(1, -1, 1, 1)
gamma = gamma.reshape(1, -1, 1, 1)
beta = beta.reshape(1, -1, 1, 1)
bn_mean = bn_mean.reshape(1, -1, 1, 1)
bn_var = bn_var.reshape(1, -1, 1, 1)
# bn_istd = 1 / bn_std
bn_istd = 1.0 / sqrt(bn_var + eps) # type: ignore[attr-defined]
# w_fold = gamma / bn_std * W
scale_factor = gamma * bn_istd
if conv_groups == 1:
w_fold = conv_weight * scale_factor.reshape(-1, 1, 1, 1)
else:
w_fold = conv_weight * scale_factor.reshape(conv_groups, -1, 1, 1, 1)
# b_fold = gamma * (b - bn_mean) / bn_std + beta
b_fold = beta + gamma * (conv_bias - bn_mean) * bn_istd
return w_fold, b_fold
@_register_tranformation_rule(TransformerRule.FUSE_CONV_BN)
def _fuse_conv_bn(net: IRGraph):
for opr in net.all_oprs:
if (
opr.name == "BatchNormalization"
and len(net.find_inp_oprs(opr)) == 1
and net.find_inp_oprs(opr)[0].name == "Conv2d"
and len(net.find_out_oprs(net.find_inp_oprs(opr)[0])) == 1
and net.find_out_oprs(net.find_inp_oprs(opr)[0])[0] == opr
):
gamma = (
Tensor(opr.weight) # type: ignore[attr-defined]
if opr.weight is not None # type: ignore[attr-defined]
else Tensor(opr.inp_tensors[1].np_data)
)
beta = (
Tensor(opr.bias) # type: ignore[attr-defined]
if opr.bias is not None # type: ignore[attr-defined]
else Tensor(opr.inp_tensors[2].np_data)
)
bn_mean = (
Tensor(opr.mean) # type: ignore[attr-defined]
if opr.mean is not None # type: ignore[attr-defined]
else Tensor(opr.inp_tensors[3].np_data)
)
bn_var = (
| Tensor(opr.var) | megengine.Tensor |
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
from collections import OrderedDict
from enum import Enum
from functools import cmp_to_key
from typing import Set # pylint: disable=unused-import
from typing import Callable, Dict, Sequence
import numpy as np
from megengine import Tensor
from megengine.functional import sqrt
from ..converter_ir.ir_graph import IRGraph
from .ir_op import (
AddOpr,
Conv2dOpr,
ConvRelu2dOpr,
Deconv2dOpr,
DropoutOpr,
ExpOpr,
FlattenOpr,
FuseMulAdd3Opr,
GetSubTensorOpr,
HardSigmoidOpr,
HardSwishOpr,
IdentityOpr,
LeakyReluOpr,
MulOpr,
OpBase,
PadOpr,
ReduceOpr,
ReluOpr,
ReshapeOpr,
ResizeOpr,
SoftmaxOpr,
SqueezeOpr,
SubOpr,
TanHOpr,
TransposeOpr,
TrueDivOpr,
_PoolOpr,
)
from .ir_tensor import AxisOrder, IRTensor
class TransformerRule(Enum):
# general rules
NOPE = 1
# for TFLite
REDUCE_AXIS_AS_INPUT = 100
REMOVE_RESHAPE_INPUT = 101
# FUSE_FOR_RELU6 pass should happen before FUSE_ACTIVATION
FUSE_FOR_RELU6 = 102 ##
EXPAND_CONVRELU = 102.1
CONV_ADD_ZERO_BIAS = 103
FUSE_FOR_CONV_BIAS = 103.1
FUSE_CONV_BN = 104
DECONV_ADD_ZERO_BIAS = 105
    # DEPTHWISE_CONV_RESHAPE_WEIGHT requires RESHAPE_BIAS_TO_1DIM
DEPTHWISE_CONV_RESHAPE_WEIGHT = 106
FUSE_SOFTMAX = 107
# RESHAPE_BIAS_TO_1DIM should happen before DECONV_SHAPE_AS_INPUT
RESHAPE_BIAS_TO_1DIM = 108
DECONV_SHAPE_AS_INPUT = 109
FUSE_ASTYPE = 110 ##
PADDING_FOR_CONV_AND_POOLING = 111
TRANSPOSE_PATTERN_AS_INPUT = 112
# FUSE_FOR_LEAKY_RELU should happen before EXPAND_MUL_ADD3
FUSE_FOR_LEAKY_RELU = 113
EXPAND_MUL_ADD3 = 114
EXPAND_ADD_SIGMOID = 115 ##
FUSE_FOR_DECONV_BIAS = 117
FUSE_FOR_FULLY_CONNECTED = 118 ##
# for TFLite Converter
SLICE_PARAMS_AS_INPUTS_AND_MAKE_SQUEEZE = 119
RESIZE_PARAMS_AS_INPUT = 120
REPLACE_FLATTEN_TO_RESHAPE = 120.1
# remove reshape
REMOVE_RESHAPE_REALTED_OP = 121
REMOVE_DROPOUT = 122
FUSE_ACTIVATION = 123
REMOVE_IDENTITY = 124
REMOVE_RELU = 125
REMOVE_UNRELATED_IROP = 130
ADD_FAKE_HSIGMOID_OUT = 131
RENAME_CAFFE_LAYER_TENSOR = 132
def cmp_rules(a, b):
if a.value < b.value:
return -1
if a.value > b.value:
return 1
return 0
class IRTransform:
def __init__(self, transformer_options):
if not isinstance(transformer_options, Sequence):
transformer_options = [
transformer_options,
]
# bias of depthwise_conv must be 1 dim
if TransformerRule.DEPTHWISE_CONV_RESHAPE_WEIGHT in transformer_options:
if TransformerRule.RESHAPE_BIAS_TO_1DIM not in transformer_options:
transformer_options.append(TransformerRule.RESHAPE_BIAS_TO_1DIM)
self.trans_options = sorted(transformer_options, key=cmp_to_key(cmp_rules))
def transform(self, ir_graph):
for option in self.trans_options:
TRANSFORMMAP[option](ir_graph)
return ir_graph
TRANSFORMMAP: Dict[Enum, Callable] = {}
def _register_tranformation_rule(transformer_option):
def callback(impl):
TRANSFORMMAP[transformer_option] = impl
return callback
def cal_pad_mode(tm_opr):
out_shape = tm_opr.out_tensors[0].shape
inp_shape = tm_opr.inp_tensors[0].shape
if out_shape[2:] == inp_shape[2:]:
return "SAME"
else:
return "VALID"
@_register_tranformation_rule(TransformerRule.REMOVE_RESHAPE_INPUT)
def _remove_reshape_input(net):
for op in net.all_oprs:
if not isinstance(op, ReshapeOpr):
continue
if len(op.inp_tensors) == 2:
del op.inp_tensors[1]
@_register_tranformation_rule(TransformerRule.TRANSPOSE_PATTERN_AS_INPUT)
def _transpose_pattern_as_input(net):
for op in net.all_oprs:
if not isinstance(op, TransposeOpr):
continue
perm_tensor = IRTensor(
name=op.inp_tensors[0].name + "_perm",
shape=np.array(op.pattern).shape,
dtype=np.int32,
np_data=np.array(op.pattern, dtype=np.int32),
owner_opr=op,
q_type=np.int32,
axis=None,
)
op.add_inp_tensors(perm_tensor)
@_register_tranformation_rule(TransformerRule.REDUCE_AXIS_AS_INPUT)
def _reduce_axis_as_input(net):
for op in net.all_oprs:
if not isinstance(op, ReduceOpr):
continue
axis_tensor = IRTensor(
name=op.inp_tensors[0].name + "_axis",
shape=[1],
dtype=np.int32,
np_data=np.array(op.axis, dtype=np.int32),
owner_opr=op,
q_type=np.int32,
axis=None,
)
op.add_inp_tensors(axis_tensor)
@_register_tranformation_rule(TransformerRule.PADDING_FOR_CONV_AND_POOLING)
def _make_padding(net: IRGraph):
def have_padding(opr):
if isinstance(opr, Conv2dOpr):
if cal_pad_mode(opr) == "SAME":
return False
if hasattr(opr, "padding") and (opr.padding[0] > 0 or opr.padding[1] > 0):
return True
return False
insert_intended = OrderedDict() # type: OrderedDict
for op in net.all_oprs:
if not isinstance(op, (Conv2dOpr, _PoolOpr)):
continue
if have_padding(op):
assert op.inp_tensors[0].ndim == 4, "ERROR: unsupported padding mode"
np_data = np.array(
[
0,
0,
op.padding[0],
op.padding[0],
op.padding[1],
op.padding[1],
0,
0,
],
dtype=np.int32,
)
new_tensor_id = max(net._tensor_ids) + 1
pad_in_tensor = IRTensor(
name=op.inp_tensors[0].name + "_paddings",
shape=[4, 2],
dtype=np.int32,
owner_opr=None,
np_data=np_data,
q_type=np.int32,
axis=None,
)
net.add_tensor(new_tensor_id, pad_in_tensor)
shape = list(op.inp_tensors[0].shape)
new_tensor_id = max(net._tensor_ids) + 1
pad_out_tensor = IRTensor(
name=op.inp_tensors[0].name + "_pad_out",
shape=[
shape[0],
shape[1],
shape[2] + op.padding[0] * 2,
shape[3] + op.padding[1] * 2,
],
dtype=op.inp_tensors[0].dtype,
)
if (
hasattr(op.inp_tensors[0], "scale")
and op.inp_tensors[0].scale is not None
):
pad_out_tensor.scale = op.inp_tensors[0].scale
pad_out_tensor.q_dtype = op.inp_tensors[0].q_dtype
if hasattr(op.inp_tensors[0], "zero_point"):
pad_out_tensor.zero_point = op.inp_tensors[0].zero_point
net.add_tensor(new_tensor_id, pad_out_tensor)
pad_opr = PadOpr()
pad_opr.inp_tensors = [op.inp_tensors[0], pad_in_tensor]
index = op.inp_tensors[0].user_opr.index(op)
op.inp_tensors[0].user_opr[index] = pad_opr
pad_opr.out_tensors = [pad_out_tensor]
pad_out_tensor.owner_opr = pad_opr
op.inp_tensors = [pad_out_tensor] + op.inp_tensors[1:]
pad_out_tensor.user_opr.append(op)
index = net._opr_ids.index(id(op))
insert_intended[index] = (id(pad_opr), pad_opr)
for index, generated_pair in list(insert_intended.items())[::-1]:
net._opr_ids.insert(index, generated_pair[0])
net.all_oprs.insert(index, generated_pair[1])
@_register_tranformation_rule(TransformerRule.DECONV_SHAPE_AS_INPUT)
def _deconv_shape_as_input(net: IRGraph):
for op in net.all_oprs:
if not isinstance(op, Deconv2dOpr):
continue
result_shape = op.out_tensors[0].shape
np_data = np.array(
[result_shape[0], result_shape[2], result_shape[3], result_shape[1],],
dtype=np.int32,
)
new_tensor_id = max(net._tensor_ids) + 1
shape_symvar = IRTensor(
name=op.inp_tensors[0].name + "_deconv_out_shape",
shape=[4],
dtype=np.int32,
owner_opr=op,
np_data=np_data,
q_type=np.int32,
axis=None,
)
shape_tensor = net.get_tensor(new_tensor_id, shape_symvar)
if len(op.inp_tensors) == 2:
op.inp_tensors = [
shape_tensor,
op.inp_tensors[1],
op.inp_tensors[0],
]
else:
op.inp_tensors = [
shape_tensor,
op.inp_tensors[1],
op.inp_tensors[0],
op.inp_tensors[2],
]
@_register_tranformation_rule(TransformerRule.RESIZE_PARAMS_AS_INPUT)
def _resize_params_as_input(net):
for op in net.all_oprs:
if not isinstance(op, ResizeOpr):
continue
if len(op.inp_tensors) == 2:
continue
out_size_tensor = IRTensor(
name=op.inp_tensors[0].name + "_out_size",
shape=(2,),
dtype=np.int32,
np_data=np.array(op.out_size, dtype=np.int32),
q_type=np.int32,
axis=None,
)
op.add_inp_tensors(out_size_tensor)
@_register_tranformation_rule(TransformerRule.CONV_ADD_ZERO_BIAS)
def _add_bias_for_conv(net: IRGraph):
for op in net.all_oprs:
if not isinstance(op, Conv2dOpr):
continue
if len(op.inp_tensors) == 3:
continue
weight_shape = op.inp_tensors[1].shape
bias_shape = (
weight_shape[0]
if len(weight_shape) == 4
else weight_shape[0] * weight_shape[1]
)
bias_shape = (1, bias_shape, 1, 1)
bias = np.zeros(bias_shape, dtype=np.float32)
bias_tensor = IRTensor(
name=op.inp_tensors[0].name + "_bias",
shape=bias_shape,
dtype=np.float32,
np_data=bias,
axis=AxisOrder.NCHW,
)
if op.inp_tensors[0].scale and op.inp_tensors[1].scale:
bias_tensor.set_qparams(
op.inp_tensors[0].scale * op.inp_tensors[1].scale, 0
)
bias_tensor.q_dtype = "int32"
op.inp_tensors.append(bias_tensor)
@_register_tranformation_rule(TransformerRule.DECONV_ADD_ZERO_BIAS)
def _add_bias_for_deconv(net: IRGraph):
for op in net.all_oprs:
if not isinstance(op, Deconv2dOpr):
continue
if len(op.inp_tensors) == 3:
continue
weight_shape = op.inp_tensors[1].shape
bias_shape = (
weight_shape[1]
if len(weight_shape) == 4
else weight_shape[0] * weight_shape[2]
)
bias_shape = (1, bias_shape, 1, 1)
bias = np.zeros(bias_shape, dtype=np.float32)
bias_tensor = IRTensor(
name=op.inp_tensors[0].name + "_bias",
shape=bias_shape,
dtype=np.float32,
np_data=bias,
axis=AxisOrder.NCHW,
)
if op.inp_tensors[0].scale and op.inp_tensors[1].scale:
bias_tensor.set_qparams(
op.inp_tensors[0].scale * op.inp_tensors[1].scale, 0
)
bias_tensor.q_dtype = "int32"
op.inp_tensors.append(bias_tensor)
@_register_tranformation_rule(TransformerRule.RESHAPE_BIAS_TO_1DIM)
def _reshape_bias_to_1dim(net: IRGraph):
for op in net.all_oprs:
if not isinstance(op, (Deconv2dOpr, Conv2dOpr)):
continue
if len(op.inp_tensors) == 2:
continue
bias = op.inp_tensors[2]
if bias.ndim == 4:
bias.shape = (bias.shape[1],)
bias.np_data = bias.np_data.reshape(-1)
@_register_tranformation_rule(TransformerRule.DEPTHWISE_CONV_RESHAPE_WEIGHT)
def _depthwise_conv_reshape_weight(net: IRGraph):
# general group conv is not supported for TFLite
for op in net.all_oprs:
if not isinstance(op, Conv2dOpr):
continue
if op.groups == 1:
continue
weight = op.inp_tensors[1] # G, oc/G, ic/G, kh, kw
ic, cm = weight.shape[1] * op.groups, weight.shape[2]
h, w = weight.shape[3:5]
weight.shape = (ic, cm, h, w) # oc, ic/G, kh, kw
weight.np_data = weight.np_data.reshape(ic, cm, h, w)
@_register_tranformation_rule(TransformerRule.FUSE_ACTIVATION)
def _fuse_activation(net):
delete_intended = []
for op_id, op in zip(net._opr_ids, net.all_oprs):
if isinstance(op, (ReluOpr, TanHOpr)):
prev_ops = net.find_inp_oprs(op)
if len(prev_ops) == 0:
continue
prev_op = prev_ops[0]
if not isinstance(prev_op, OpBase):
continue
if prev_op.activation != "IDENTITY" or prev_op.name == "Deconv2d":
continue
activation = op.name.upper()
prev_op.activation = activation
prev_op.out_tensors = op.out_tensors
for t in prev_op.out_tensors:
t.owner_opr = prev_op
delete_intended.append(net._opr_ids.index(op_id))
for delete_idx in delete_intended[::-1]:
net.delete_ops(delete_idx)
@_register_tranformation_rule(TransformerRule.SLICE_PARAMS_AS_INPUTS_AND_MAKE_SQUEEZE)
def _make_slice_as_inputs(net: IRGraph):
for op in net.all_oprs:
if not isinstance(op, GetSubTensorOpr):
continue
ndim = op.inp_tensors[0].ndim
def make_input(axis, param, init_value):
# make inputs: begin, end and step.
ret = [init_value] * ndim # pylint:disable=cell-var-from-loop
for k, v in zip(axis, param):
ret[k] = v
ret = IRTensor(
name=op.name + "_fake_input", # pylint:disable=cell-var-from-loop
shape=[len(ret)],
dtype=np.int32,
np_data=np.array(ret, dtype=np.int32),
owner_opr=op, # pylint:disable=cell-var-from-loop
q_type=np.int32,
)
return ret
begins_tensor = make_input(op.axis, op.begin_params, 0)
ends_tensor = make_input(op.axis, op.end_params, np.iinfo(np.int32).max)
steps_tensor = make_input(op.axis, op.step_params, 1)
op.inp_tensors = [op.inp_tensors[0], begins_tensor, ends_tensor, steps_tensor]
        # TFLite slice does not support squeeze axis, so insert a squeeze opr here.
# infer actual output shape of tflite slice
desired_out_shape = op.out_tensors[0].shape
actual_out_shape = [1] * ndim
idx = 0
for i in range(ndim):
if i in op.squeeze_axis:
continue
actual_out_shape[i] = desired_out_shape[idx]
idx += 1
slice_out_tensor = IRTensor(
            name=op.name + "_fake_output",
shape=actual_out_shape,
dtype=op.out_tensors[0].dtype,
q_type=op.out_tensors[0].q_dtype,
owner_opr=op,
)
old_out = op.out_tensors
op.out_tensors = [slice_out_tensor]
squeeze = SqueezeOpr(op.squeeze_axis)
squeeze.inp_tensors = [slice_out_tensor]
squeeze.out_tensors = old_out
idx = net._opr_ids.index(id(op)) + 1
net.add_op(squeeze, idx)
# caffe transformer rules
class PatternNode:
def __init__(self, type, is_output=False, const_value=None):
self.op = None
self.type = type
self.inp_oprs = []
self.inp_const = []
self.inp_tensors = []
self.is_output = is_output
self.const_value = const_value
def check_const_value(self, op):
inp_tensors = [v.np_data for v in op.inp_tensors]
for const in self.const_value:
idx = const[0]
if idx == -1:
find = False
for index, v in enumerate(inp_tensors):
if np.array_equal(const[1], v):
find = True
del inp_tensors[index]
break
if not find:
return False
elif not np.array_equal(const[1], inp_tensors[idx]):
return False
return True
get_type = lambda op: type(op).__name__
def match(node, opr):
node_queue = [node]
opr_queue = [opr]
matched_opr = set()
matched_node = set()
while len(node_queue) != 0:
cur_node = node_queue.pop(0)
cur_opr = opr_queue.pop(0)
        if (cur_node.type != get_type(cur_opr) and cur_node.type != "*") or cur_opr.skip:
            return False
        if cur_node.op is None:
            cur_node.op = cur_opr
            if cur_node.const_value is not None:
if not cur_node.check_const_value(cur_opr):
return False
elif cur_node.op != cur_opr:
return False
matched_opr.add(cur_opr)
matched_node.add(cur_node)
for i, var in enumerate(cur_opr.inp_tensors):
if var.np_data is not None:
cur_node.inp_const.append([i, var.np_data])
else:
cur_node.inp_tensors.append([i, var])
if len(cur_node.inp_oprs) == 0:
continue
if len(cur_node.inp_oprs) != len(cur_opr.inp_oprs):
return False
for i, j in zip(cur_node.inp_oprs, cur_opr.inp_oprs):
node_queue.append(i)
opr_queue.append(j)
for n in matched_node:
if n.is_output:
continue
for op in n.op.out_oprs:
if op not in matched_opr:
return False
return True
def get_softmax_axis(ndim: int) -> int:
if ndim in (0, 1, 3):
return 0
return 1
@_register_tranformation_rule(TransformerRule.FUSE_SOFTMAX)
def _fuse_softmax(net: IRGraph):
matches = OrderedDict() # type: OrderedDict
for op in net.all_oprs:
if not isinstance(op, TrueDivOpr):
continue
try:
prev_op = net.find_inp_oprs(op)[1]
cur_index = net._opr_ids.index(id(op))
if (
not isinstance(prev_op, ReduceOpr)
or prev_op.mode != "SUM"
or prev_op.axis != get_softmax_axis(prev_op.inp_tensors[0].ndim)
or net._opr_ids.index(id(prev_op)) != cur_index - 1
):
continue
prev_op = net.find_inp_oprs(op)[0]
if (
not isinstance(prev_op, ExpOpr)
or net._opr_ids.index(id(prev_op)) != cur_index - 2
):
continue
prev_op = net.find_inp_oprs(prev_op)[0]
if (
not isinstance(prev_op, SubOpr)
or net._opr_ids.index(id(prev_op)) != cur_index - 3
):
continue
prev_op = net.find_inp_oprs(prev_op)[1]
if (
not isinstance(prev_op, ReduceOpr)
or prev_op.mode != "MAX"
or prev_op.axis != get_softmax_axis(prev_op.inp_tensors[0].ndim)
or net._opr_ids.index(id(prev_op)) != cur_index - 4
):
continue
except IndexError: # doesn't match
continue
softmax_opr = SoftmaxOpr(axis=get_softmax_axis(prev_op.inp_tensors[0].ndim))
softmax_opr.beta = 1
softmax_opr.inp_tensors = prev_op.inp_tensors[:1]
for i in softmax_opr.inp_tensors:
i.user_opr.append(softmax_opr)
softmax_opr.out_tensors = op.out_tensors
softmax_out_oprs = net.find_out_oprs(op)
matches[id(prev_op)] = (id(prev_op), softmax_opr, softmax_out_oprs)
for original_id, generated_pair in list(matches.items())[::-1]:
index = net._opr_ids.index(original_id)
for out_op in generated_pair[2]:
generated_pair[1].out_tensors[0].user_opr.append(out_op)
del net._opr_ids[index : index + 5]
del net.all_oprs[index : index + 5]
net._opr_ids.insert(index, generated_pair[0])
net.all_oprs.insert(index, generated_pair[1])
@_register_tranformation_rule(TransformerRule.FUSE_FOR_LEAKY_RELU)
def _fuse_leaky_relu(net: IRGraph):
"""
Elemwise(ADD) + Elemwise(MUL) + Elemwise(MAX) + Elemwise(MIN) -> LeakyRelu
"""
for opr in net.all_oprs:
if (
opr.name == "Add"
and len(net.find_inp_oprs(opr)) == 2
and net.find_inp_oprs(opr)[0].name == "Max"
and net.find_inp_oprs(opr)[1].name == "Mul"
):
max_op = net.find_inp_oprs(opr)[0]
mul_op = net.find_inp_oprs(opr)[1]
if not mul_op.inp_tensors[1].shape == (1,):
continue
if not max_op.inp_tensors[1].shape == (1,):
continue
if (
len(net.find_inp_oprs(mul_op)) != 1
or net.find_inp_oprs(mul_op)[0].name != "Min"
or net.find_inp_oprs(mul_op)[0].inp_tensors[1].shape != (1,)
):
continue
min_op = net.find_inp_oprs(mul_op)[0]
if not min_op.inp_tensors[1].shape == (1,):
continue
if max_op.inp_tensors[0] != min_op.inp_tensors[0]:
continue
leaky_relu = LeakyReluOpr(
negative_slope=float(mul_op.inp_tensors[1].np_data)
)
leaky_relu.inp_tensors = [max_op.inp_tensors[0]]
max_op.inp_tensors[0].user_opr.remove(max_op)
max_op.inp_tensors[0].user_opr.remove(min_op)
max_op.inp_tensors[0].user_opr.append(leaky_relu)
leaky_relu.out_tensors = opr.out_tensors
opr.out_tensors[0].owner_opr = leaky_relu
index = net.all_oprs.index(max_op)
del net.all_oprs[index : index + 4]
del net._opr_ids[index : index + 4]
net.add_op(leaky_relu, index)
@_register_tranformation_rule(TransformerRule.FUSE_FOR_CONV_BIAS)
def _fuse_for_conv_bias(net: IRGraph):
"""
ConvolutionForward + Elemwise(ADD) -> ConvForwardBias
"""
for opr in net.all_oprs:
if (
opr.name == "Conv2d"
and len(net.find_out_oprs(opr)) == 1
and net.find_out_oprs(opr)[0].name == "Add"
):
bias_op = net.find_out_oprs(opr)[0]
if not (
(
bias_op.inp_tensors[1].np_data is not None
and len(bias_op.inp_tensors[1].np_data.reshape(-1))
== opr.inp_tensors[1].shape[0]
)
or (
(
bias_op.inp_tensors[0].np_data is not None
and len(bias_op.inp_tensors[0].np_data.reshape(-1))
== opr.inp_tensors[1].shape[0]
)
)
):
continue
bias_idx = 0 if bias_op.inp_tensors[0].np_data is not None else 1
if len(opr.inp_tensors) == 2:
opr.inp_tensors.append(bias_op.inp_tensors[bias_idx])
else:
bias_shape = opr.inp_tensors[2].np_data.shape
add_tensor = bias_op.inp_tensors[bias_idx].np_data
if add_tensor.shape != bias_shape:
add_tensor = add_tensor.reshape(bias_shape)
opr.inp_tensors[2].np_data += add_tensor
if bias_op in opr.out_tensors[0].user_opr:
opr.out_tensors[0].user_opr.remove(bias_op)
bias_out_op = net.find_out_oprs(bias_op)
if len(bias_out_op) > 0:
for op in bias_out_op:
op.inp_tensors[0] = opr.out_tensors[0]
opr.out_tensors[0].user_opr.append(op)
else:
# last op of the graph
assert bias_op.out_tensors[0] in net.graph_outputs
index = net.graph_outputs.index(bias_op.out_tensors[0])
net.graph_outputs[index] = opr.out_tensors[0]
opr.activation = bias_op.activation
index = net.all_oprs.index(bias_op)
del net.all_oprs[index]
del net._opr_ids[index]
@_register_tranformation_rule(TransformerRule.FUSE_FOR_DECONV_BIAS)
def _fuse_for_deconv_bias(net: IRGraph):
for opr in net.all_oprs:
if (
opr.name == "Deconv2d"
and len(net.find_out_oprs(opr)) == 1
and net.find_out_oprs(opr)[0].name == "Add"
):
bias_op = net.find_out_oprs(opr)[0]
if not (
(
bias_op.inp_tensors[1].np_data is not None
and len(bias_op.inp_tensors[1].np_data.reshape(-1))
== opr.inp_tensors[1].shape[1]
)
or (
(
bias_op.inp_tensors[0].np_data is not None
and len(bias_op.inp_tensors[0].np_data.reshape(-1))
== opr.inp_tensors[1].shape[1]
)
)
):
continue
bias_idx = 0 if bias_op.inp_tensors[0].np_data is not None else 1
            if len(opr.inp_tensors) == 3:  # inp_tensors is [shape, weight, input]; bias gets appended below
opr.inp_tensors.append(bias_op.inp_tensors[bias_idx])
else:
bias_shape = opr.inp_tensors[3].np_data.shape
add_tensor = bias_op.inp_tensors[bias_idx].np_data
if add_tensor.shape != bias_shape:
add_tensor = add_tensor.reshape(bias_shape)
opr.inp_tensors[3].np_data += add_tensor
if bias_op in opr.out_tensors[0].user_opr:
opr.out_tensors[0].user_opr.remove(bias_op)
bias_out_op = net.find_out_oprs(bias_op)
if len(bias_out_op) > 0:
for op in bias_out_op:
op.inp_tensors[0] = opr.out_tensors[0]
opr.out_tensors[0].user_opr.append(op)
else:
# last op of the graph
assert bias_op.out_tensors[0] in net.graph_outputs
index = net.graph_outputs.index(bias_op.out_tensors[0])
net.graph_outputs[index] = opr.out_tensors[0]
opr.activation = bias_op.activation
index = net.all_oprs.index(bias_op)
del net.all_oprs[index]
del net._opr_ids[index]
@_register_tranformation_rule(TransformerRule.EXPAND_MUL_ADD3)
def _expand_mul_add3(net: IRGraph):
for op in net.all_oprs:
if not isinstance(op, FuseMulAdd3Opr):
continue
last_op = net.find_inp_oprs(op)
assert len(last_op) == 1
mul_out_tensor = IRTensor(
name=op.inp_tensors[0].name + "_mul_out",
shape=op.inp_tensors[0].shape,
dtype=op.inp_tensors[0].dtype,
)
new_tensor_id = max(net._tensor_ids) + 1
net.add_tensor(new_tensor_id, mul_out_tensor)
mul_op = MulOpr()
mul_out_tensor.owner_opr = mul_op
mul_op.inp_tensors = op.inp_tensors[:2]
for o in mul_op.inp_tensors:
index = o.user_opr.index(op)
o.user_opr[index] = mul_op
mul_op.out_tensors = [mul_out_tensor]
add_op = AddOpr()
add_op.inp_tensors = [mul_out_tensor, op.inp_tensors[2]]
mul_out_tensor.user_opr.append(add_op)
add_op.out_tensors = op.out_tensors
index = net._opr_ids.index(id(op))
net.delete_ops(index)
net.add_op(mul_op, index)
net.add_op(add_op, index + 1)
@_register_tranformation_rule(TransformerRule.REPLACE_FLATTEN_TO_RESHAPE)
def _replace_flatten_to_reshape(net: IRGraph):
for opr in net.all_oprs:
if isinstance(opr, FlattenOpr):
out_shape = tuple(list(opr.inp_tensors[0].shape[: opr.start_axis]) + [-1])
reshape_op = ReshapeOpr(out_shape=out_shape)
reshape_op.inp_tensors = opr.inp_tensors
for t in reshape_op.inp_tensors:
idx = t.user_opr.index(opr)
t.user_opr[idx] = reshape_op
reshape_op.out_tensors = opr.out_tensors
for t in reshape_op.out_tensors:
t.owner_opr = reshape_op
net.replace_op(opr, reshape_op)
@_register_tranformation_rule(TransformerRule.REMOVE_RESHAPE_REALTED_OP)
def _remove_reshape_tensors(net: IRGraph):
for opr in net.all_oprs:
if isinstance(opr, ReshapeOpr) and len(opr.inp_tensors) > 1:
opr.inp_tensors = opr.inp_tensors[:1]
@_register_tranformation_rule(TransformerRule.REMOVE_DROPOUT)
def _remove_dropout(net: IRGraph):
for opr in net.all_oprs:
for idx, inp in enumerate(opr.inp_tensors):
owner_opr = inp.owner_opr
if isinstance(owner_opr, DropoutOpr) and owner_opr.drop_prob == 0:
opr.inp_tensors[idx] = owner_opr.inp_tensors[0]
for idx, out in enumerate(net.graph_outputs):
owner_opr = out.owner_opr
if isinstance(owner_opr, DropoutOpr) and owner_opr.drop_prob == 0:
net.graph_outputs[idx] = owner_opr.inp_tensors[0]
@_register_tranformation_rule(TransformerRule.REMOVE_RELU)
def _remove_relu(net: IRGraph):
for opr in net.all_oprs:
for idx, inp in enumerate(opr.inp_tensors):
owner_opr = inp.owner_opr
if isinstance(owner_opr, ReluOpr):
opr.inp_tensors[idx] = owner_opr.inp_tensors[0]
for idx, out in enumerate(net.graph_outputs):
owner_opr = out.owner_opr
if isinstance(owner_opr, ReluOpr):
net.graph_outputs[idx] = owner_opr.inp_tensors[0]
visited_tensor = set() # type: set
def _dfs_recursive(op_set, tensor):
owner_opr = tensor.owner_opr
op_set.add(owner_opr)
if tensor in visited_tensor:
return
visited_tensor.add(tensor)
if isinstance(owner_opr, IRGraph) or owner_opr is None:
return
for tt in owner_opr.inp_tensors:
_dfs_recursive(op_set, tt)
@_register_tranformation_rule(TransformerRule.REMOVE_UNRELATED_IROP)
def _remove_unrelated_op(net: IRGraph):
match_sets = set() # type: Set[OpBase]
for out_tensor in net.graph_outputs:
_dfs_recursive(match_sets, out_tensor)
remove_idx = []
for opr in net.all_oprs:
if opr not in match_sets:
index = net._opr_ids.index(id(opr))
remove_idx.append(index)
for i in remove_idx[::-1]:
net.delete_ops(i)
@_register_tranformation_rule(TransformerRule.ADD_FAKE_HSIGMOID_OUT)
def _add_fake_hsigmoid_tensor(net: IRGraph):
for opr in net.all_oprs:
if isinstance(opr, (HardSwishOpr, HardSigmoidOpr)):
add_3_out_tensor = IRTensor(
opr.out_tensors[0].name + "_fake_add3_out",
opr.inp_tensors[0].shape,
opr.inp_tensors[0].dtype,
q_type=opr.inp_tensors[0].q_dtype,
scale=opr.inp_tensors[0].scale,
zero_point=opr.inp_tensors[0].zero_point,
)
opr.add_inp_tensors(add_3_out_tensor)
relu6_out_tensor = IRTensor(
opr.out_tensors[0].name + "_relu6_out",
opr.inp_tensors[0].shape,
opr.inp_tensors[0].dtype,
q_type=opr.inp_tensors[0].q_dtype,
scale=opr.inp_tensors[0].scale,
zero_point=opr.inp_tensors[0].zero_point,
)
opr.add_inp_tensors(relu6_out_tensor)
if isinstance(opr, HardSwishOpr):
div6_out_tensor = IRTensor(
opr.out_tensors[0].name + "_div_out",
opr.inp_tensors[0].shape,
opr.inp_tensors[0].dtype,
q_type=opr.inp_tensors[0].q_dtype,
scale=opr.inp_tensors[0].scale,
zero_point=opr.inp_tensors[0].zero_point,
)
opr.add_inp_tensors(div6_out_tensor)
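# A small numpy check (illustration only) of the decomposition that the fake tensors
# above stand for: with relu6(y) = clip(y, 0, 6),
#   hardsigmoid(x) = relu6(x + 3) / 6          (add3 -> relu6 stages)
#   hardswish(x)   = x * relu6(x + 3) / 6      (add3 -> relu6 -> div stages)
import numpy as np
_x = np.linspace(-5.0, 5.0, 11)
_relu6 = np.clip(_x + 3.0, 0.0, 6.0)
_hsigmoid = _relu6 / 6.0
_hswish = _x * _relu6 / 6.0
assert np.allclose(_hsigmoid, np.clip(_x / 6.0 + 0.5, 0.0, 1.0))
assert np.allclose(_hswish, np.where(_x <= -3, 0.0, np.where(_x >= 3, _x, _x * (_x + 3.0) / 6.0)))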
def fold_conv_bn(
conv_weight, conv_bias, conv_groups, gamma, beta, bn_mean, bn_var, eps
):
conv_bias = conv_bias.reshape(1, -1, 1, 1)
gamma = gamma.reshape(1, -1, 1, 1)
beta = beta.reshape(1, -1, 1, 1)
bn_mean = bn_mean.reshape(1, -1, 1, 1)
bn_var = bn_var.reshape(1, -1, 1, 1)
# bn_istd = 1 / bn_std
bn_istd = 1.0 / sqrt(bn_var + eps) # type: ignore[attr-defined]
# w_fold = gamma / bn_std * W
scale_factor = gamma * bn_istd
if conv_groups == 1:
w_fold = conv_weight * scale_factor.reshape(-1, 1, 1, 1)
else:
w_fold = conv_weight * scale_factor.reshape(conv_groups, -1, 1, 1, 1)
# b_fold = gamma * (b - bn_mean) / bn_std + beta
b_fold = beta + gamma * (conv_bias - bn_mean) * bn_istd
return w_fold, b_fold
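# A quick numpy sanity check (not part of the converter) of the folding algebra above,
# using a per-channel 1x1 "conv" so that convolution reduces to w * x + b:
#   gamma * ((w*x + b) - mean) / sqrt(var + eps) + beta  ==  w_fold * x + b_fold
import numpy as np
_rng = np.random.default_rng(0)
_c = 4                                             # output channels
_w, _b = _rng.normal(size=_c), _rng.normal(size=_c)
_gamma, _beta = _rng.normal(size=_c), _rng.normal(size=_c)
_mean, _var, _eps = _rng.normal(size=_c), _rng.random(size=_c) + 0.1, 1e-5
_x = _rng.normal(size=_c)
_istd = 1.0 / np.sqrt(_var + _eps)
_w_fold = _w * _gamma * _istd
_b_fold = _beta + _gamma * (_b - _mean) * _istd
_y_ref = _gamma * ((_w * _x + _b) - _mean) * _istd + _beta
assert np.allclose(_y_ref, _w_fold * _x + _b_fold)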
@_register_tranformation_rule(TransformerRule.FUSE_CONV_BN)
def _fuse_conv_bn(net: IRGraph):
for opr in net.all_oprs:
if (
opr.name == "BatchNormalization"
and len(net.find_inp_oprs(opr)) == 1
and net.find_inp_oprs(opr)[0].name == "Conv2d"
and len(net.find_out_oprs(net.find_inp_oprs(opr)[0])) == 1
and net.find_out_oprs(net.find_inp_oprs(opr)[0])[0] == opr
):
gamma = (
Tensor(opr.weight) # type: ignore[attr-defined]
if opr.weight is not None # type: ignore[attr-defined]
else Tensor(opr.inp_tensors[1].np_data)
)
beta = (
Tensor(opr.bias) # type: ignore[attr-defined]
if opr.bias is not None # type: ignore[attr-defined]
else Tensor(opr.inp_tensors[2].np_data)
)
bn_mean = (
Tensor(opr.mean) # type: ignore[attr-defined]
if opr.mean is not None # type: ignore[attr-defined]
else Tensor(opr.inp_tensors[3].np_data)
)
bn_var = (
Tensor(opr.var) # type: ignore[attr-defined]
if opr.var is not None # type: ignore[attr-defined]
else | Tensor(opr.inp_tensors[4].np_data) | megengine.Tensor |
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import pickle
from collections import defaultdict
from itertools import chain
import numpy as np
import megengine.functional as F
import megengine.module as M
import megengine.module.qat as qat
from megengine.module.identity import Identity
from megengine.traced_module import trace_module
from megengine.traced_module.expr import CallFunction, CallMethod, Expr, GetAttr, Input
from megengine.traced_module.node import ModuleNode, Node, TensorNode
class IdentityMod(M.Module):
def forward(self, x):
return x
class MyBlock(M.Module):
def __init__(self, in_channels=3, channels=3):
super(MyBlock, self).__init__()
self.conv1 = M.Conv2d(in_channels, channels, 3, 1, padding=1, bias=False)
self.bn1 = M.BatchNorm2d(channels)
self.nothing = IdentityMod()
def forward(self, x):
x = self.conv1(x)
x = self.bn1(x)
x = F.relu(x) + 1
x = self.nothing(x)
return x
class MyModule(M.Module):
def __init__(self):
super(MyModule, self).__init__()
self.block0 = MyBlock()
self.block1 = MyBlock()
self.nothing = IdentityMod()
def forward(self, x):
x = self.block0(x)
x = self.block1(x)
x = self.nothing(x)
return x
class MyBlock1(M.Module):
def forward(self, a):
y = F.concat([a, a])
return a, y
class MyModule1(M.Module):
def __init__(self):
super().__init__()
self.block0 = MyBlock1()
self.block1 = MyBlock1()
def forward(self, a):
a, y1 = self.block0(a)
a = a + 1
a, y2 = self.block1(a)
return a, y1 + y2
class NewModule(M.Module):
def __init__(self, traced_module):
super(NewModule, self).__init__()
self.module = traced_module
def forward(self, x):
x = x - 1
x = self.module(x)
x = x + 1
return x
def _check_expr_users(traced_module):
node_user = defaultdict(list)
for expr in traced_module.graph._exprs:
for node in expr.inputs:
node_user[node].append(expr)
for node in traced_module.graph.nodes():
node.users.sort(key=lambda m: m._id)
node_user[node].sort(key=lambda m: m._id)
assert node.users == node_user[node]
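# _check_expr_users above rebuilds the node -> consumer-expression mapping from scratch
# (every expr that lists a node among its inputs) and asserts that it matches the
# node.users bookkeeping kept by the graph, comparing both sides sorted by expr id.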
def _init_cls(cls):
module = cls()
x = | F.ones((1, 3, 3, 3)) | megengine.functional.ones |
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import pickle
from collections import defaultdict
from itertools import chain
import numpy as np
import megengine.functional as F
import megengine.module as M
import megengine.module.qat as qat
from megengine.module.identity import Identity
from megengine.traced_module import trace_module
from megengine.traced_module.expr import CallFunction, CallMethod, Expr, GetAttr, Input
from megengine.traced_module.node import ModuleNode, Node, TensorNode
class IdentityMod(M.Module):
def forward(self, x):
return x
class MyBlock(M.Module):
def __init__(self, in_channels=3, channels=3):
super(MyBlock, self).__init__()
self.conv1 = M.Conv2d(in_channels, channels, 3, 1, padding=1, bias=False)
self.bn1 = M.BatchNorm2d(channels)
self.nothing = IdentityMod()
def forward(self, x):
x = self.conv1(x)
x = self.bn1(x)
x = F.relu(x) + 1
x = self.nothing(x)
return x
class MyModule(M.Module):
def __init__(self):
super(MyModule, self).__init__()
self.block0 = MyBlock()
self.block1 = MyBlock()
self.nothing = IdentityMod()
def forward(self, x):
x = self.block0(x)
x = self.block1(x)
x = self.nothing(x)
return x
class MyBlock1(M.Module):
def forward(self, a):
y = F.concat([a, a])
return a, y
class MyModule1(M.Module):
def __init__(self):
super().__init__()
self.block0 = MyBlock1()
self.block1 = MyBlock1()
def forward(self, a):
a, y1 = self.block0(a)
a = a + 1
a, y2 = self.block1(a)
return a, y1 + y2
class NewModule(M.Module):
def __init__(self, traced_module):
super(NewModule, self).__init__()
self.module = traced_module
def forward(self, x):
x = x - 1
x = self.module(x)
x = x + 1
return x
def _check_expr_users(traced_module):
node_user = defaultdict(list)
for expr in traced_module.graph._exprs:
for node in expr.inputs:
node_user[node].append(expr)
for node in traced_module.graph.nodes():
node.users.sort(key=lambda m: m._id)
node_user[node].sort(key=lambda m: m._id)
assert node.users == node_user[node]
def _init_cls(cls):
module = cls()
x = F.ones((1, 3, 3, 3))
y = module(x)
traced_module = | trace_module(module, x) | megengine.traced_module.trace_module |
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import pickle
from collections import defaultdict
from itertools import chain
import numpy as np
import megengine.functional as F
import megengine.module as M
import megengine.module.qat as qat
from megengine.module.identity import Identity
from megengine.traced_module import trace_module
from megengine.traced_module.expr import CallFunction, CallMethod, Expr, GetAttr, Input
from megengine.traced_module.node import ModuleNode, Node, TensorNode
class IdentityMod(M.Module):
def forward(self, x):
return x
class MyBlock(M.Module):
def __init__(self, in_channels=3, channels=3):
super(MyBlock, self).__init__()
self.conv1 = M.Conv2d(in_channels, channels, 3, 1, padding=1, bias=False)
self.bn1 = M.BatchNorm2d(channels)
self.nothing = IdentityMod()
def forward(self, x):
x = self.conv1(x)
x = self.bn1(x)
x = F.relu(x) + 1
x = self.nothing(x)
return x
class MyModule(M.Module):
def __init__(self):
super(MyModule, self).__init__()
self.block0 = MyBlock()
self.block1 = MyBlock()
self.nothing = IdentityMod()
def forward(self, x):
x = self.block0(x)
x = self.block1(x)
x = self.nothing(x)
return x
class MyBlock1(M.Module):
def forward(self, a):
y = F.concat([a, a])
return a, y
class MyModule1(M.Module):
def __init__(self):
super().__init__()
self.block0 = MyBlock1()
self.block1 = MyBlock1()
def forward(self, a):
a, y1 = self.block0(a)
a = a + 1
a, y2 = self.block1(a)
return a, y1 + y2
class NewModule(M.Module):
def __init__(self, traced_module):
super(NewModule, self).__init__()
self.module = traced_module
def forward(self, x):
x = x - 1
x = self.module(x)
x = x + 1
return x
def _check_expr_users(traced_module):
node_user = defaultdict(list)
for expr in traced_module.graph._exprs:
for node in expr.inputs:
node_user[node].append(expr)
for node in traced_module.graph.nodes():
node.users.sort(key=lambda m: m._id)
node_user[node].sort(key=lambda m: m._id)
assert node.users == node_user[node]
def _init_cls(cls):
module = cls()
x = F.ones((1, 3, 3, 3))
y = module(x)
traced_module = trace_module(module, x)
return traced_module, x, y
def _init_block():
return _init_cls(MyBlock)
def _init_module():
return _init_cls(MyModule)
def test_search():
traced_module, *_ = _init_block()
graph = traced_module.graph
relu_expr = graph.get_function_by_type(F.relu).as_unique()
assert isinstance(relu_expr, CallFunction) and relu_expr.func == F.relu
conv_node = graph.get_module_by_type(M.Conv2d).as_unique()
assert isinstance(conv_node, ModuleNode) and conv_node.module_type == M.Conv2d
add_expr = graph.get_method_by_type("__add__").as_unique()
assert isinstance(add_expr, CallMethod) and add_expr.method == "__add__"
conv_node = graph.get_node_by_name("MyBlock_conv1").as_unique()
assert isinstance(conv_node, ModuleNode) and conv_node.module_type == M.Conv2d
def test_producer_and_users():
traced_module, *_ = _init_module()
def _check(exprs):
for expr in exprs:
for n in chain(expr.inputs, expr.outputs):
if not isinstance(n.expr, Input):
assert n.expr in exprs
for e in n.users:
assert e in exprs
assert n in e.inputs
for mod in traced_module.modules():
if not hasattr(mod, "argdef_graph_map"):
continue
for g in mod.argdef_graph_map.values():
_check(g._exprs)
def test_insert():
traced_module, x, expect = _init_block()
graph = traced_module.graph
relu_out = graph.get_function_by_type(F.relu).as_unique().outputs[0]
with graph.insert_exprs():
neg_out = F.neg(relu_out)
graph.replace_node({relu_out: neg_out})
graph.compile()
np.testing.assert_allclose(expect - 1, 1 - traced_module(x), atol=1e-6)
def test_insert_module():
class Neg(M.Module):
def __init__(self, name):
super().__init__(name)
self.identity = M.Identity()
self.identity_list = [M.Identity(), M.Identity()]
self.identity_dict = {"0": M.Identity(), "1": M.Identity()}
self.param = F.zeros((1,))
def forward(self, x):
x = self.identity(x)
for m in self.identity_dict:
x = self.identity_dict[m](x)
for m in self.identity_list:
x = m(x)
return F.neg(x) + self.param
traced_module, x, expect = _init_block()
graph = traced_module.graph
relu_out = graph.get_function_by_type(F.relu).as_unique().outputs[0]
self = graph.inputs[0]
setattr(traced_module, "neg", Neg(name="neg"))
setattr(traced_module, "neg2", Neg(name="neg"))
setattr(traced_module, "param", F.zeros((1,)))
with graph.insert_exprs():
neg_out = self.neg(relu_out)
neg_out = self.neg2(relu_out)
neg_out = neg_out + self.param
graph.replace_node({relu_out: neg_out})
graph.compile()
np.testing.assert_allclose(expect - 1, 1 - traced_module(x), atol=1e-6)
assert traced_module.neg.graph is not None
assert traced_module.neg2.graph is not None
assert traced_module.neg2.param is not None
assert len(traced_module.neg.graph._exprs) == 13
for n in traced_module.graph.nodes():
if isinstance(n, TensorNode):
assert n.value is None
def test_insert_qat_module():
class concat(qat.Concat):
pass
traced_module, x, expect = _init_block()
graph = traced_module.graph
self = graph.inputs[0]
out = graph.outputs[0]
setattr(traced_module, "cat_0", qat.Concat())
setattr(traced_module, "cat_1", concat())
with graph.insert_exprs():
x_0 = self.cat_0([out, out])
x_1 = self.cat_1([out, x_0])
graph.replace_node({out: x_1})
graph.compile()
x = | F.copy(x) | megengine.functional.copy |
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import pickle
from collections import defaultdict
from itertools import chain
import numpy as np
import megengine.functional as F
import megengine.module as M
import megengine.module.qat as qat
from megengine.module.identity import Identity
from megengine.traced_module import trace_module
from megengine.traced_module.expr import CallFunction, CallMethod, Expr, GetAttr, Input
from megengine.traced_module.node import ModuleNode, Node, TensorNode
class IdentityMod(M.Module):
def forward(self, x):
return x
class MyBlock(M.Module):
def __init__(self, in_channels=3, channels=3):
super(MyBlock, self).__init__()
self.conv1 = M.Conv2d(in_channels, channels, 3, 1, padding=1, bias=False)
self.bn1 = M.BatchNorm2d(channels)
self.nothing = IdentityMod()
def forward(self, x):
x = self.conv1(x)
x = self.bn1(x)
x = F.relu(x) + 1
x = self.nothing(x)
return x
class MyModule(M.Module):
def __init__(self):
super(MyModule, self).__init__()
self.block0 = MyBlock()
self.block1 = MyBlock()
self.nothing = IdentityMod()
def forward(self, x):
x = self.block0(x)
x = self.block1(x)
x = self.nothing(x)
return x
class MyBlock1(M.Module):
def forward(self, a):
y = F.concat([a, a])
return a, y
class MyModule1(M.Module):
def __init__(self):
super().__init__()
self.block0 = MyBlock1()
self.block1 = MyBlock1()
def forward(self, a):
a, y1 = self.block0(a)
a = a + 1
a, y2 = self.block1(a)
return a, y1 + y2
class NewModule(M.Module):
def __init__(self, traced_module):
super(NewModule, self).__init__()
self.module = traced_module
def forward(self, x):
x = x - 1
x = self.module(x)
x = x + 1
return x
def _check_expr_users(traced_module):
node_user = defaultdict(list)
for expr in traced_module.graph._exprs:
for node in expr.inputs:
node_user[node].append(expr)
for node in traced_module.graph.nodes():
node.users.sort(key=lambda m: m._id)
node_user[node].sort(key=lambda m: m._id)
assert node.users == node_user[node]
def _init_cls(cls):
module = cls()
x = F.ones((1, 3, 3, 3))
y = module(x)
traced_module = trace_module(module, x)
return traced_module, x, y
def _init_block():
return _init_cls(MyBlock)
def _init_module():
return _init_cls(MyModule)
def test_search():
traced_module, *_ = _init_block()
graph = traced_module.graph
relu_expr = graph.get_function_by_type(F.relu).as_unique()
assert isinstance(relu_expr, CallFunction) and relu_expr.func == F.relu
conv_node = graph.get_module_by_type(M.Conv2d).as_unique()
assert isinstance(conv_node, ModuleNode) and conv_node.module_type == M.Conv2d
add_expr = graph.get_method_by_type("__add__").as_unique()
assert isinstance(add_expr, CallMethod) and add_expr.method == "__add__"
conv_node = graph.get_node_by_name("MyBlock_conv1").as_unique()
assert isinstance(conv_node, ModuleNode) and conv_node.module_type == M.Conv2d
def test_producer_and_users():
traced_module, *_ = _init_module()
def _check(exprs):
for expr in exprs:
for n in chain(expr.inputs, expr.outputs):
if not isinstance(n.expr, Input):
assert n.expr in exprs
for e in n.users:
assert e in exprs
assert n in e.inputs
for mod in traced_module.modules():
if not hasattr(mod, "argdef_graph_map"):
continue
for g in mod.argdef_graph_map.values():
_check(g._exprs)
def test_insert():
traced_module, x, expect = _init_block()
graph = traced_module.graph
relu_out = graph.get_function_by_type(F.relu).as_unique().outputs[0]
with graph.insert_exprs():
neg_out = F.neg(relu_out)
graph.replace_node({relu_out: neg_out})
graph.compile()
np.testing.assert_allclose(expect - 1, 1 - traced_module(x), atol=1e-6)
def test_insert_module():
class Neg(M.Module):
def __init__(self, name):
super().__init__(name)
self.identity = M.Identity()
self.identity_list = [M.Identity(), M.Identity()]
self.identity_dict = {"0": M.Identity(), "1": M.Identity()}
self.param = F.zeros((1,))
def forward(self, x):
x = self.identity(x)
for m in self.identity_dict:
x = self.identity_dict[m](x)
for m in self.identity_list:
x = m(x)
return F.neg(x) + self.param
traced_module, x, expect = _init_block()
graph = traced_module.graph
relu_out = graph.get_function_by_type(F.relu).as_unique().outputs[0]
self = graph.inputs[0]
setattr(traced_module, "neg", Neg(name="neg"))
setattr(traced_module, "neg2", Neg(name="neg"))
setattr(traced_module, "param", F.zeros((1,)))
with graph.insert_exprs():
neg_out = self.neg(relu_out)
neg_out = self.neg2(relu_out)
neg_out = neg_out + self.param
graph.replace_node({relu_out: neg_out})
graph.compile()
np.testing.assert_allclose(expect - 1, 1 - traced_module(x), atol=1e-6)
assert traced_module.neg.graph is not None
assert traced_module.neg2.graph is not None
assert traced_module.neg2.param is not None
assert len(traced_module.neg.graph._exprs) == 13
for n in traced_module.graph.nodes():
if isinstance(n, TensorNode):
assert n.value is None
def test_insert_qat_module():
class concat(qat.Concat):
pass
traced_module, x, expect = _init_block()
graph = traced_module.graph
self = graph.inputs[0]
out = graph.outputs[0]
setattr(traced_module, "cat_0", qat.Concat())
setattr(traced_module, "cat_1", concat())
with graph.insert_exprs():
x_0 = self.cat_0([out, out])
x_1 = self.cat_1([out, x_0])
graph.replace_node({out: x_1})
graph.compile()
x = F.copy(x)
np.testing.assert_allclose(
F.concat([expect, expect, expect]), traced_module(x), atol=1e-6
)
assert not hasattr(traced_module.cat_0, "graph")
assert traced_module.cat_1.graph is not None
def test_add_input_and_output():
traced_module, x, y = _init_module()
data_node = traced_module.graph.add_input_node(shape=(1, 3, 224, 224), name="data")
traced_module.graph.add_output_node(data_node)
assert data_node.name == "data"
assert traced_module.graph.inputs[-1] == data_node
assert len(traced_module.graph.inputs) == 3
assert len(traced_module.graph.outputs) == 2
y1, y2 = traced_module(x, x)
np.testing.assert_equal(y1.numpy(), y.numpy())
np.testing.assert_equal(y2.numpy(), x.numpy())
y1, y2 = traced_module(x, y)
np.testing.assert_equal(y2.numpy(), y.numpy())
traced_module.graph.reset_outputs(
({"orig_out": traced_module.graph.outputs[0]}, traced_module.graph.outputs[1])
)
out = traced_module(x, x)
assert isinstance(out, tuple)
assert isinstance(out[0], dict)
np.testing.assert_equal(out[0]["orig_out"].numpy(), y.numpy())
np.testing.assert_equal(out[1].numpy(), x.numpy())
def test_delete():
traced_module, x, expect = _init_block()
graph = traced_module.graph
relu_expr = graph.get_function_by_type(F.relu).as_unique()
node = relu_expr.outputs
repl_node = relu_expr.inputs
graph.replace_node({node[0]: repl_node[0]})
graph.compile()
np.testing.assert_allclose(expect - 1, F.relu(traced_module(x) - 1), atol=1e-6)
# clear graph
graph.replace_node({graph.outputs[0]: graph.inputs[1]})
graph.compile()
np.testing.assert_equal(len(list(graph._exprs)), 0)
np.testing.assert_equal(traced_module(x).numpy(), x.numpy())
def test_flatten():
traced_module, x, expect = _init_module()
traced_module = traced_module.flatten()
assert len(traced_module.graph._exprs) == 12
np.testing.assert_equal(expect.numpy(), traced_module(x).numpy())
traced_module = traced_module.flatten()
assert len(traced_module.graph._exprs) == 12
np.testing.assert_equal(expect.numpy(), traced_module(x).numpy())
traced_module, x, expect = _init_cls(MyModule1)
traced_module = traced_module.flatten()
_check_expr_users(traced_module)
def test_id_and_name():
def _check_id(traced_module):
_total_ids = traced_module.graph._total_ids
node_ids = [n._id for n in traced_module.graph.nodes().as_list()]
assert len(set(node_ids)) == len(node_ids)
assert max(node_ids) + 1 == _total_ids[0]
expr_ids = [n._id for n in traced_module.graph.exprs().as_list()]
assert len(set(expr_ids)) == len(expr_ids)
assert max(expr_ids) + 1 == _total_ids[1]
def _check_name(flatened_module):
node_names = [n._name for n in flatened_module.graph.nodes().as_list()]
assert len(set(node_names)) == len(node_names)
traced_module, x, expect = _init_module()
_check_id(traced_module)
flattened_module = traced_module.flatten()
_check_id(flattened_module)
_check_name(flattened_module)
# pickle check
obj = pickle.dumps(traced_module)
traced_module = pickle.loads(obj)
| Node._set_next_id(159) | megengine.traced_module.node.Node._set_next_id |
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import pickle
from collections import defaultdict
from itertools import chain
import numpy as np
import megengine.functional as F
import megengine.module as M
import megengine.module.qat as qat
from megengine.module.identity import Identity
from megengine.traced_module import trace_module
from megengine.traced_module.expr import CallFunction, CallMethod, Expr, GetAttr, Input
from megengine.traced_module.node import ModuleNode, Node, TensorNode
class IdentityMod(M.Module):
def forward(self, x):
return x
class MyBlock(M.Module):
def __init__(self, in_channels=3, channels=3):
super(MyBlock, self).__init__()
self.conv1 = M.Conv2d(in_channels, channels, 3, 1, padding=1, bias=False)
self.bn1 = M.BatchNorm2d(channels)
self.nothing = IdentityMod()
def forward(self, x):
x = self.conv1(x)
x = self.bn1(x)
x = F.relu(x) + 1
x = self.nothing(x)
return x
class MyModule(M.Module):
def __init__(self):
super(MyModule, self).__init__()
self.block0 = MyBlock()
self.block1 = MyBlock()
self.nothing = IdentityMod()
def forward(self, x):
x = self.block0(x)
x = self.block1(x)
x = self.nothing(x)
return x
class MyBlock1(M.Module):
def forward(self, a):
y = F.concat([a, a])
return a, y
class MyModule1(M.Module):
def __init__(self):
super().__init__()
self.block0 = MyBlock1()
self.block1 = MyBlock1()
def forward(self, a):
a, y1 = self.block0(a)
a = a + 1
a, y2 = self.block1(a)
return a, y1 + y2
class NewModule(M.Module):
def __init__(self, traced_module):
super(NewModule, self).__init__()
self.module = traced_module
def forward(self, x):
x = x - 1
x = self.module(x)
x = x + 1
return x
def _check_expr_users(traced_module):
node_user = defaultdict(list)
for expr in traced_module.graph._exprs:
for node in expr.inputs:
node_user[node].append(expr)
for node in traced_module.graph.nodes():
node.users.sort(key=lambda m: m._id)
node_user[node].sort(key=lambda m: m._id)
assert node.users == node_user[node]
def _init_cls(cls):
module = cls()
x = F.ones((1, 3, 3, 3))
y = module(x)
traced_module = trace_module(module, x)
return traced_module, x, y
def _init_block():
return _init_cls(MyBlock)
def _init_module():
return _init_cls(MyModule)
def test_search():
traced_module, *_ = _init_block()
graph = traced_module.graph
relu_expr = graph.get_function_by_type(F.relu).as_unique()
assert isinstance(relu_expr, CallFunction) and relu_expr.func == F.relu
conv_node = graph.get_module_by_type(M.Conv2d).as_unique()
assert isinstance(conv_node, ModuleNode) and conv_node.module_type == M.Conv2d
add_expr = graph.get_method_by_type("__add__").as_unique()
assert isinstance(add_expr, CallMethod) and add_expr.method == "__add__"
conv_node = graph.get_node_by_name("MyBlock_conv1").as_unique()
assert isinstance(conv_node, ModuleNode) and conv_node.module_type == M.Conv2d
def test_producer_and_users():
traced_module, *_ = _init_module()
def _check(exprs):
for expr in exprs:
for n in chain(expr.inputs, expr.outputs):
if not isinstance(n.expr, Input):
assert n.expr in exprs
for e in n.users:
assert e in exprs
assert n in e.inputs
for mod in traced_module.modules():
if not hasattr(mod, "argdef_graph_map"):
continue
for g in mod.argdef_graph_map.values():
_check(g._exprs)
def test_insert():
traced_module, x, expect = _init_block()
graph = traced_module.graph
relu_out = graph.get_function_by_type(F.relu).as_unique().outputs[0]
with graph.insert_exprs():
neg_out = F.neg(relu_out)
graph.replace_node({relu_out: neg_out})
graph.compile()
np.testing.assert_allclose(expect - 1, 1 - traced_module(x), atol=1e-6)
def test_insert_module():
class Neg(M.Module):
def __init__(self, name):
super().__init__(name)
self.identity = M.Identity()
self.identity_list = [M.Identity(), M.Identity()]
self.identity_dict = {"0": M.Identity(), "1": M.Identity()}
self.param = F.zeros((1,))
def forward(self, x):
x = self.identity(x)
for m in self.identity_dict:
x = self.identity_dict[m](x)
for m in self.identity_list:
x = m(x)
return F.neg(x) + self.param
traced_module, x, expect = _init_block()
graph = traced_module.graph
relu_out = graph.get_function_by_type(F.relu).as_unique().outputs[0]
self = graph.inputs[0]
setattr(traced_module, "neg", Neg(name="neg"))
setattr(traced_module, "neg2", Neg(name="neg"))
setattr(traced_module, "param", F.zeros((1,)))
with graph.insert_exprs():
neg_out = self.neg(relu_out)
neg_out = self.neg2(relu_out)
neg_out = neg_out + self.param
graph.replace_node({relu_out: neg_out})
graph.compile()
np.testing.assert_allclose(expect - 1, 1 - traced_module(x), atol=1e-6)
assert traced_module.neg.graph is not None
assert traced_module.neg2.graph is not None
assert traced_module.neg2.param is not None
assert len(traced_module.neg.graph._exprs) == 13
for n in traced_module.graph.nodes():
if isinstance(n, TensorNode):
assert n.value is None
def test_insert_qat_module():
class concat(qat.Concat):
pass
traced_module, x, expect = _init_block()
graph = traced_module.graph
self = graph.inputs[0]
out = graph.outputs[0]
setattr(traced_module, "cat_0", qat.Concat())
setattr(traced_module, "cat_1", concat())
with graph.insert_exprs():
x_0 = self.cat_0([out, out])
x_1 = self.cat_1([out, x_0])
graph.replace_node({out: x_1})
graph.compile()
x = F.copy(x)
np.testing.assert_allclose(
F.concat([expect, expect, expect]), traced_module(x), atol=1e-6
)
assert not hasattr(traced_module.cat_0, "graph")
assert traced_module.cat_1.graph is not None
def test_add_input_and_output():
traced_module, x, y = _init_module()
data_node = traced_module.graph.add_input_node(shape=(1, 3, 224, 224), name="data")
traced_module.graph.add_output_node(data_node)
assert data_node.name == "data"
assert traced_module.graph.inputs[-1] == data_node
assert len(traced_module.graph.inputs) == 3
assert len(traced_module.graph.outputs) == 2
y1, y2 = traced_module(x, x)
np.testing.assert_equal(y1.numpy(), y.numpy())
np.testing.assert_equal(y2.numpy(), x.numpy())
y1, y2 = traced_module(x, y)
np.testing.assert_equal(y2.numpy(), y.numpy())
traced_module.graph.reset_outputs(
({"orig_out": traced_module.graph.outputs[0]}, traced_module.graph.outputs[1])
)
out = traced_module(x, x)
assert isinstance(out, tuple)
assert isinstance(out[0], dict)
np.testing.assert_equal(out[0]["orig_out"].numpy(), y.numpy())
np.testing.assert_equal(out[1].numpy(), x.numpy())
def test_delete():
traced_module, x, expect = _init_block()
graph = traced_module.graph
relu_expr = graph.get_function_by_type(F.relu).as_unique()
node = relu_expr.outputs
repl_node = relu_expr.inputs
graph.replace_node({node[0]: repl_node[0]})
graph.compile()
np.testing.assert_allclose(expect - 1, F.relu(traced_module(x) - 1), atol=1e-6)
# clear graph
graph.replace_node({graph.outputs[0]: graph.inputs[1]})
graph.compile()
np.testing.assert_equal(len(list(graph._exprs)), 0)
np.testing.assert_equal(traced_module(x).numpy(), x.numpy())
def test_flatten():
traced_module, x, expect = _init_module()
traced_module = traced_module.flatten()
assert len(traced_module.graph._exprs) == 12
np.testing.assert_equal(expect.numpy(), traced_module(x).numpy())
traced_module = traced_module.flatten()
assert len(traced_module.graph._exprs) == 12
np.testing.assert_equal(expect.numpy(), traced_module(x).numpy())
traced_module, x, expect = _init_cls(MyModule1)
traced_module = traced_module.flatten()
_check_expr_users(traced_module)
def test_id_and_name():
def _check_id(traced_module):
_total_ids = traced_module.graph._total_ids
node_ids = [n._id for n in traced_module.graph.nodes().as_list()]
assert len(set(node_ids)) == len(node_ids)
assert max(node_ids) + 1 == _total_ids[0]
expr_ids = [n._id for n in traced_module.graph.exprs().as_list()]
assert len(set(expr_ids)) == len(expr_ids)
assert max(expr_ids) + 1 == _total_ids[1]
def _check_name(flatened_module):
node_names = [n._name for n in flatened_module.graph.nodes().as_list()]
assert len(set(node_names)) == len(node_names)
traced_module, x, expect = _init_module()
_check_id(traced_module)
flattened_module = traced_module.flatten()
_check_id(flattened_module)
_check_name(flattened_module)
# pickle check
obj = pickle.dumps(traced_module)
traced_module = pickle.loads(obj)
Node._set_next_id(159)
| Expr._set_next_id(1024) | megengine.traced_module.expr.Expr._set_next_id |
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import pickle
from collections import defaultdict
from itertools import chain
import numpy as np
import megengine.functional as F
import megengine.module as M
import megengine.module.qat as qat
from megengine.module.identity import Identity
from megengine.traced_module import trace_module
from megengine.traced_module.expr import CallFunction, CallMethod, Expr, GetAttr, Input
from megengine.traced_module.node import ModuleNode, Node, TensorNode
class IdentityMod(M.Module):
def forward(self, x):
return x
class MyBlock(M.Module):
def __init__(self, in_channels=3, channels=3):
super(MyBlock, self).__init__()
self.conv1 = M.Conv2d(in_channels, channels, 3, 1, padding=1, bias=False)
self.bn1 = M.BatchNorm2d(channels)
self.nothing = IdentityMod()
def forward(self, x):
x = self.conv1(x)
x = self.bn1(x)
x = F.relu(x) + 1
x = self.nothing(x)
return x
class MyModule(M.Module):
def __init__(self):
super(MyModule, self).__init__()
self.block0 = MyBlock()
self.block1 = MyBlock()
self.nothing = IdentityMod()
def forward(self, x):
x = self.block0(x)
x = self.block1(x)
x = self.nothing(x)
return x
class MyBlock1(M.Module):
def forward(self, a):
y = F.concat([a, a])
return a, y
class MyModule1(M.Module):
def __init__(self):
super().__init__()
self.block0 = MyBlock1()
self.block1 = MyBlock1()
def forward(self, a):
a, y1 = self.block0(a)
a = a + 1
a, y2 = self.block1(a)
return a, y1 + y2
class NewModule(M.Module):
def __init__(self, traced_module):
super(NewModule, self).__init__()
self.module = traced_module
def forward(self, x):
x = x - 1
x = self.module(x)
x = x + 1
return x
def _check_expr_users(traced_module):
node_user = defaultdict(list)
for expr in traced_module.graph._exprs:
for node in expr.inputs:
node_user[node].append(expr)
for node in traced_module.graph.nodes():
node.users.sort(key=lambda m: m._id)
node_user[node].sort(key=lambda m: m._id)
assert node.users == node_user[node]
def _init_cls(cls):
module = cls()
x = F.ones((1, 3, 3, 3))
y = module(x)
traced_module = trace_module(module, x)
return traced_module, x, y
def _init_block():
return _init_cls(MyBlock)
def _init_module():
return _init_cls(MyModule)
def test_search():
traced_module, *_ = _init_block()
graph = traced_module.graph
relu_expr = graph.get_function_by_type(F.relu).as_unique()
assert isinstance(relu_expr, CallFunction) and relu_expr.func == F.relu
conv_node = graph.get_module_by_type(M.Conv2d).as_unique()
assert isinstance(conv_node, ModuleNode) and conv_node.module_type == M.Conv2d
add_expr = graph.get_method_by_type("__add__").as_unique()
assert isinstance(add_expr, CallMethod) and add_expr.method == "__add__"
conv_node = graph.get_node_by_name("MyBlock_conv1").as_unique()
assert isinstance(conv_node, ModuleNode) and conv_node.module_type == M.Conv2d
def test_producer_and_users():
traced_module, *_ = _init_module()
def _check(exprs):
for expr in exprs:
for n in chain(expr.inputs, expr.outputs):
if not isinstance(n.expr, Input):
assert n.expr in exprs
for e in n.users:
assert e in exprs
assert n in e.inputs
for mod in traced_module.modules():
if not hasattr(mod, "argdef_graph_map"):
continue
for g in mod.argdef_graph_map.values():
_check(g._exprs)
def test_insert():
traced_module, x, expect = _init_block()
graph = traced_module.graph
relu_out = graph.get_function_by_type(F.relu).as_unique().outputs[0]
with graph.insert_exprs():
neg_out = F.neg(relu_out)
graph.replace_node({relu_out: neg_out})
graph.compile()
np.testing.assert_allclose(expect - 1, 1 - traced_module(x), atol=1e-6)
def test_insert_module():
class Neg(M.Module):
def __init__(self, name):
super().__init__(name)
self.identity = M.Identity()
self.identity_list = [M.Identity(), M.Identity()]
self.identity_dict = {"0": M.Identity(), "1": M.Identity()}
self.param = F.zeros((1,))
def forward(self, x):
x = self.identity(x)
for m in self.identity_dict:
x = self.identity_dict[m](x)
for m in self.identity_list:
x = m(x)
return F.neg(x) + self.param
traced_module, x, expect = _init_block()
graph = traced_module.graph
relu_out = graph.get_function_by_type(F.relu).as_unique().outputs[0]
self = graph.inputs[0]
setattr(traced_module, "neg", Neg(name="neg"))
setattr(traced_module, "neg2", Neg(name="neg"))
setattr(traced_module, "param", F.zeros((1,)))
with graph.insert_exprs():
neg_out = self.neg(relu_out)
neg_out = self.neg2(relu_out)
neg_out = neg_out + self.param
graph.replace_node({relu_out: neg_out})
graph.compile()
np.testing.assert_allclose(expect - 1, 1 - traced_module(x), atol=1e-6)
assert traced_module.neg.graph is not None
assert traced_module.neg2.graph is not None
assert traced_module.neg2.param is not None
assert len(traced_module.neg.graph._exprs) == 13
for n in traced_module.graph.nodes():
if isinstance(n, TensorNode):
assert n.value is None
def test_insert_qat_module():
class concat(qat.Concat):
pass
traced_module, x, expect = _init_block()
graph = traced_module.graph
self = graph.inputs[0]
out = graph.outputs[0]
setattr(traced_module, "cat_0", qat.Concat())
setattr(traced_module, "cat_1", concat())
with graph.insert_exprs():
x_0 = self.cat_0([out, out])
x_1 = self.cat_1([out, x_0])
graph.replace_node({out: x_1})
graph.compile()
x = F.copy(x)
np.testing.assert_allclose(
F.concat([expect, expect, expect]), traced_module(x), atol=1e-6
)
assert not hasattr(traced_module.cat_0, "graph")
assert traced_module.cat_1.graph is not None
def test_add_input_and_output():
traced_module, x, y = _init_module()
data_node = traced_module.graph.add_input_node(shape=(1, 3, 224, 224), name="data")
traced_module.graph.add_output_node(data_node)
assert data_node.name == "data"
assert traced_module.graph.inputs[-1] == data_node
assert len(traced_module.graph.inputs) == 3
assert len(traced_module.graph.outputs) == 2
y1, y2 = traced_module(x, x)
np.testing.assert_equal(y1.numpy(), y.numpy())
np.testing.assert_equal(y2.numpy(), x.numpy())
y1, y2 = traced_module(x, y)
np.testing.assert_equal(y2.numpy(), y.numpy())
traced_module.graph.reset_outputs(
({"orig_out": traced_module.graph.outputs[0]}, traced_module.graph.outputs[1])
)
out = traced_module(x, x)
assert isinstance(out, tuple)
assert isinstance(out[0], dict)
np.testing.assert_equal(out[0]["orig_out"].numpy(), y.numpy())
np.testing.assert_equal(out[1].numpy(), x.numpy())
def test_delete():
traced_module, x, expect = _init_block()
graph = traced_module.graph
relu_expr = graph.get_function_by_type(F.relu).as_unique()
node = relu_expr.outputs
repl_node = relu_expr.inputs
graph.replace_node({node[0]: repl_node[0]})
graph.compile()
np.testing.assert_allclose(expect - 1, F.relu(traced_module(x) - 1), atol=1e-6)
# clear graph
graph.replace_node({graph.outputs[0]: graph.inputs[1]})
graph.compile()
np.testing.assert_equal(len(list(graph._exprs)), 0)
np.testing.assert_equal(traced_module(x).numpy(), x.numpy())
def test_flatten():
traced_module, x, expect = _init_module()
traced_module = traced_module.flatten()
assert len(traced_module.graph._exprs) == 12
np.testing.assert_equal(expect.numpy(), traced_module(x).numpy())
traced_module = traced_module.flatten()
assert len(traced_module.graph._exprs) == 12
np.testing.assert_equal(expect.numpy(), traced_module(x).numpy())
traced_module, x, expect = _init_cls(MyModule1)
traced_module = traced_module.flatten()
_check_expr_users(traced_module)
def test_id_and_name():
def _check_id(traced_module):
_total_ids = traced_module.graph._total_ids
node_ids = [n._id for n in traced_module.graph.nodes().as_list()]
assert len(set(node_ids)) == len(node_ids)
assert max(node_ids) + 1 == _total_ids[0]
expr_ids = [n._id for n in traced_module.graph.exprs().as_list()]
assert len(set(expr_ids)) == len(expr_ids)
assert max(expr_ids) + 1 == _total_ids[1]
def _check_name(flatened_module):
node_names = [n._name for n in flatened_module.graph.nodes().as_list()]
assert len(set(node_names)) == len(node_names)
traced_module, x, expect = _init_module()
_check_id(traced_module)
flattened_module = traced_module.flatten()
_check_id(flattened_module)
_check_name(flattened_module)
# pickle check
obj = pickle.dumps(traced_module)
traced_module = pickle.loads(obj)
Node._set_next_id(159)
Expr._set_next_id(1024)
graph = traced_module.graph
for expr in graph.get_function_by_type(F.relu).as_list():
relu_out = expr.outputs[0]
cur_graph = expr.top_graph
with cur_graph.insert_exprs():
neg_out = F.neg(relu_out)
cur_graph.replace_node({relu_out: neg_out})
cur_graph.compile()
_check_id(traced_module)
flattened_module = traced_module.flatten()
_check_id(flattened_module)
_check_name(flattened_module)
# check trace TracedModule
obj = pickle.dumps(traced_module)
traced_module = pickle.loads(obj)
module = NewModule(traced_module)
traced_module = | trace_module(module, x) | megengine.traced_module.trace_module |
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import pickle
from collections import defaultdict
from itertools import chain
import numpy as np
import megengine.functional as F
import megengine.module as M
import megengine.module.qat as qat
from megengine.module.identity import Identity
from megengine.traced_module import trace_module
from megengine.traced_module.expr import CallFunction, CallMethod, Expr, GetAttr, Input
from megengine.traced_module.node import ModuleNode, Node, TensorNode
class IdentityMod(M.Module):
def forward(self, x):
return x
class MyBlock(M.Module):
def __init__(self, in_channels=3, channels=3):
super(MyBlock, self).__init__()
self.conv1 = M.Conv2d(in_channels, channels, 3, 1, padding=1, bias=False)
self.bn1 = M.BatchNorm2d(channels)
self.nothing = IdentityMod()
def forward(self, x):
x = self.conv1(x)
x = self.bn1(x)
x = F.relu(x) + 1
x = self.nothing(x)
return x
class MyModule(M.Module):
def __init__(self):
super(MyModule, self).__init__()
self.block0 = MyBlock()
self.block1 = MyBlock()
self.nothing = IdentityMod()
def forward(self, x):
x = self.block0(x)
x = self.block1(x)
x = self.nothing(x)
return x
class MyBlock1(M.Module):
def forward(self, a):
y = F.concat([a, a])
return a, y
class MyModule1(M.Module):
def __init__(self):
super().__init__()
self.block0 = MyBlock1()
self.block1 = MyBlock1()
def forward(self, a):
a, y1 = self.block0(a)
a = a + 1
a, y2 = self.block1(a)
return a, y1 + y2
class NewModule(M.Module):
def __init__(self, traced_module):
super(NewModule, self).__init__()
self.module = traced_module
def forward(self, x):
x = x - 1
x = self.module(x)
x = x + 1
return x
def _check_expr_users(traced_module):
node_user = defaultdict(list)
for expr in traced_module.graph._exprs:
for node in expr.inputs:
node_user[node].append(expr)
for node in traced_module.graph.nodes():
node.users.sort(key=lambda m: m._id)
node_user[node].sort(key=lambda m: m._id)
assert node.users == node_user[node]
def _init_cls(cls):
module = cls()
x = F.ones((1, 3, 3, 3))
y = module(x)
traced_module = trace_module(module, x)
return traced_module, x, y
def _init_block():
return _init_cls(MyBlock)
def _init_module():
return _init_cls(MyModule)
def test_search():
traced_module, *_ = _init_block()
graph = traced_module.graph
relu_expr = graph.get_function_by_type(F.relu).as_unique()
assert isinstance(relu_expr, CallFunction) and relu_expr.func == F.relu
conv_node = graph.get_module_by_type(M.Conv2d).as_unique()
assert isinstance(conv_node, ModuleNode) and conv_node.module_type == M.Conv2d
add_expr = graph.get_method_by_type("__add__").as_unique()
assert isinstance(add_expr, CallMethod) and add_expr.method == "__add__"
conv_node = graph.get_node_by_name("MyBlock_conv1").as_unique()
assert isinstance(conv_node, ModuleNode) and conv_node.module_type == M.Conv2d
def test_producer_and_users():
traced_module, *_ = _init_module()
def _check(exprs):
for expr in exprs:
for n in chain(expr.inputs, expr.outputs):
if not isinstance(n.expr, Input):
assert n.expr in exprs
for e in n.users:
assert e in exprs
assert n in e.inputs
for mod in traced_module.modules():
if not hasattr(mod, "argdef_graph_map"):
continue
for g in mod.argdef_graph_map.values():
_check(g._exprs)
def test_insert():
traced_module, x, expect = _init_block()
graph = traced_module.graph
relu_out = graph.get_function_by_type(F.relu).as_unique().outputs[0]
with graph.insert_exprs():
neg_out = F.neg(relu_out)
graph.replace_node({relu_out: neg_out})
graph.compile()
np.testing.assert_allclose(expect - 1, 1 - traced_module(x), atol=1e-6)
def test_insert_module():
class Neg(M.Module):
def __init__(self, name):
super().__init__(name)
self.identity = M.Identity()
self.identity_list = [M.Identity(), M.Identity()]
self.identity_dict = {"0": M.Identity(), "1": M.Identity()}
self.param = F.zeros((1,))
def forward(self, x):
x = self.identity(x)
for m in self.identity_dict:
x = self.identity_dict[m](x)
for m in self.identity_list:
x = m(x)
return F.neg(x) + self.param
traced_module, x, expect = _init_block()
graph = traced_module.graph
relu_out = graph.get_function_by_type(F.relu).as_unique().outputs[0]
self = graph.inputs[0]
setattr(traced_module, "neg", Neg(name="neg"))
setattr(traced_module, "neg2", Neg(name="neg"))
setattr(traced_module, "param", F.zeros((1,)))
with graph.insert_exprs():
neg_out = self.neg(relu_out)
neg_out = self.neg2(relu_out)
neg_out = neg_out + self.param
graph.replace_node({relu_out: neg_out})
graph.compile()
np.testing.assert_allclose(expect - 1, 1 - traced_module(x), atol=1e-6)
assert traced_module.neg.graph is not None
assert traced_module.neg2.graph is not None
assert traced_module.neg2.param is not None
assert len(traced_module.neg.graph._exprs) == 13
for n in traced_module.graph.nodes():
if isinstance(n, TensorNode):
assert n.value is None
def test_insert_qat_module():
class concat(qat.Concat):
pass
traced_module, x, expect = _init_block()
graph = traced_module.graph
self = graph.inputs[0]
out = graph.outputs[0]
setattr(traced_module, "cat_0", qat.Concat())
setattr(traced_module, "cat_1", concat())
with graph.insert_exprs():
x_0 = self.cat_0([out, out])
x_1 = self.cat_1([out, x_0])
graph.replace_node({out: x_1})
graph.compile()
x = F.copy(x)
np.testing.assert_allclose(
F.concat([expect, expect, expect]), traced_module(x), atol=1e-6
)
assert not hasattr(traced_module.cat_0, "graph")
assert traced_module.cat_1.graph is not None
def test_add_input_and_output():
traced_module, x, y = _init_module()
data_node = traced_module.graph.add_input_node(shape=(1, 3, 224, 224), name="data")
traced_module.graph.add_output_node(data_node)
assert data_node.name == "data"
assert traced_module.graph.inputs[-1] == data_node
assert len(traced_module.graph.inputs) == 3
assert len(traced_module.graph.outputs) == 2
y1, y2 = traced_module(x, x)
np.testing.assert_equal(y1.numpy(), y.numpy())
np.testing.assert_equal(y2.numpy(), x.numpy())
y1, y2 = traced_module(x, y)
np.testing.assert_equal(y2.numpy(), y.numpy())
traced_module.graph.reset_outputs(
({"orig_out": traced_module.graph.outputs[0]}, traced_module.graph.outputs[1])
)
out = traced_module(x, x)
assert isinstance(out, tuple)
assert isinstance(out[0], dict)
np.testing.assert_equal(out[0]["orig_out"].numpy(), y.numpy())
np.testing.assert_equal(out[1].numpy(), x.numpy())
def test_delete():
traced_module, x, expect = _init_block()
graph = traced_module.graph
relu_expr = graph.get_function_by_type(F.relu).as_unique()
node = relu_expr.outputs
repl_node = relu_expr.inputs
graph.replace_node({node[0]: repl_node[0]})
graph.compile()
np.testing.assert_allclose(expect - 1, F.relu(traced_module(x) - 1), atol=1e-6)
# clear graph
graph.replace_node({graph.outputs[0]: graph.inputs[1]})
graph.compile()
np.testing.assert_equal(len(list(graph._exprs)), 0)
np.testing.assert_equal(traced_module(x).numpy(), x.numpy())
def test_flatten():
traced_module, x, expect = _init_module()
traced_module = traced_module.flatten()
assert len(traced_module.graph._exprs) == 12
np.testing.assert_equal(expect.numpy(), traced_module(x).numpy())
traced_module = traced_module.flatten()
assert len(traced_module.graph._exprs) == 12
np.testing.assert_equal(expect.numpy(), traced_module(x).numpy())
traced_module, x, expect = _init_cls(MyModule1)
traced_module = traced_module.flatten()
_check_expr_users(traced_module)
def test_id_and_name():
def _check_id(traced_module):
_total_ids = traced_module.graph._total_ids
node_ids = [n._id for n in traced_module.graph.nodes().as_list()]
assert len(set(node_ids)) == len(node_ids)
assert max(node_ids) + 1 == _total_ids[0]
expr_ids = [n._id for n in traced_module.graph.exprs().as_list()]
assert len(set(expr_ids)) == len(expr_ids)
assert max(expr_ids) + 1 == _total_ids[1]
def _check_name(flatened_module):
node_names = [n._name for n in flatened_module.graph.nodes().as_list()]
assert len(set(node_names)) == len(node_names)
traced_module, x, expect = _init_module()
_check_id(traced_module)
flattened_module = traced_module.flatten()
_check_id(flattened_module)
_check_name(flattened_module)
# pickle check
obj = pickle.dumps(traced_module)
traced_module = pickle.loads(obj)
Node._set_next_id(159)
Expr._set_next_id(1024)
graph = traced_module.graph
for expr in graph.get_function_by_type(F.relu).as_list():
relu_out = expr.outputs[0]
cur_graph = expr.top_graph
with cur_graph.insert_exprs():
neg_out = F.neg(relu_out)
cur_graph.replace_node({relu_out: neg_out})
cur_graph.compile()
_check_id(traced_module)
flattened_module = traced_module.flatten()
_check_id(flattened_module)
_check_name(flattened_module)
# check trace TracedModule
obj = pickle.dumps(traced_module)
traced_module = pickle.loads(obj)
module = NewModule(traced_module)
traced_module = trace_module(module, x)
_check_id(traced_module)
flattened_module = traced_module.flatten()
_check_id(flattened_module)
_check_name(flattened_module)
def test_set_node_name():
traced_module, x, expect = _init_module()
graph = traced_module.graph
output_node = graph.outputs[0]
def rename(name):
output_node.name = name
np.testing.assert_raises(AssertionError, rename, "block1_out")
rename("output")
np.testing.assert_equal(str(graph.outputs[0]), "output")
def test_set_graph_name():
traced_module, x, expect = _init_module()
graph = traced_module.graph
output_node = graph.outputs[0]
node_name = output_node.name
graph.name = "Top"
node = graph.get_node_by_name("{}_{}".format("Top", node_name)).as_unique()
assert node is output_node
def test_extra_block():
class PostProcess(M.Module):
def forward(self, x):
return x * 2
class Net(M.Module):
def __init__(self, traced_module):
super().__init__()
self.post_process = PostProcess()
self.traced_module = traced_module
def forward(self, x):
x = self.traced_module(x)
x = self.post_process(x)
return x
traced_module, x, expect = _init_block()
module = Net(traced_module)
np.testing.assert_allclose(2 * expect, module(x), atol=1e-6)
traced_module = | trace_module(module, x) | megengine.traced_module.trace_module |
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import pickle
from collections import defaultdict
from itertools import chain
import numpy as np
import megengine.functional as F
import megengine.module as M
import megengine.module.qat as qat
from megengine.module.identity import Identity
from megengine.traced_module import trace_module
from megengine.traced_module.expr import CallFunction, CallMethod, Expr, GetAttr, Input
from megengine.traced_module.node import ModuleNode, Node, TensorNode
class IdentityMod(M.Module):
def forward(self, x):
return x
class MyBlock(M.Module):
def __init__(self, in_channels=3, channels=3):
super(MyBlock, self).__init__()
self.conv1 = | M.Conv2d(in_channels, channels, 3, 1, padding=1, bias=False) | megengine.module.Conv2d |
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import pickle
from collections import defaultdict
from itertools import chain
import numpy as np
import megengine.functional as F
import megengine.module as M
import megengine.module.qat as qat
from megengine.module.identity import Identity
from megengine.traced_module import trace_module
from megengine.traced_module.expr import CallFunction, CallMethod, Expr, GetAttr, Input
from megengine.traced_module.node import ModuleNode, Node, TensorNode
class IdentityMod(M.Module):
def forward(self, x):
return x
class MyBlock(M.Module):
def __init__(self, in_channels=3, channels=3):
super(MyBlock, self).__init__()
self.conv1 = M.Conv2d(in_channels, channels, 3, 1, padding=1, bias=False)
self.bn1 = | M.BatchNorm2d(channels) | megengine.module.BatchNorm2d |
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import pickle
from collections import defaultdict
from itertools import chain
import numpy as np
import megengine.functional as F
import megengine.module as M
import megengine.module.qat as qat
from megengine.module.identity import Identity
from megengine.traced_module import trace_module
from megengine.traced_module.expr import CallFunction, CallMethod, Expr, GetAttr, Input
from megengine.traced_module.node import ModuleNode, Node, TensorNode
class IdentityMod(M.Module):
def forward(self, x):
return x
class MyBlock(M.Module):
def __init__(self, in_channels=3, channels=3):
super(MyBlock, self).__init__()
self.conv1 = M.Conv2d(in_channels, channels, 3, 1, padding=1, bias=False)
self.bn1 = M.BatchNorm2d(channels)
self.nothing = IdentityMod()
def forward(self, x):
x = self.conv1(x)
x = self.bn1(x)
x = F.relu(x) + 1
x = self.nothing(x)
return x
class MyModule(M.Module):
def __init__(self):
super(MyModule, self).__init__()
self.block0 = MyBlock()
self.block1 = MyBlock()
self.nothing = IdentityMod()
def forward(self, x):
x = self.block0(x)
x = self.block1(x)
x = self.nothing(x)
return x
class MyBlock1(M.Module):
def forward(self, a):
y = | F.concat([a, a]) | megengine.functional.concat |
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import pickle
from collections import defaultdict
from itertools import chain
import numpy as np
import megengine.functional as F
import megengine.module as M
import megengine.module.qat as qat
from megengine.module.identity import Identity
from megengine.traced_module import trace_module
from megengine.traced_module.expr import CallFunction, CallMethod, Expr, GetAttr, Input
from megengine.traced_module.node import ModuleNode, Node, TensorNode
class IdentityMod(M.Module):
def forward(self, x):
return x
class MyBlock(M.Module):
def __init__(self, in_channels=3, channels=3):
super(MyBlock, self).__init__()
self.conv1 = M.Conv2d(in_channels, channels, 3, 1, padding=1, bias=False)
self.bn1 = M.BatchNorm2d(channels)
self.nothing = IdentityMod()
def forward(self, x):
x = self.conv1(x)
x = self.bn1(x)
x = F.relu(x) + 1
x = self.nothing(x)
return x
class MyModule(M.Module):
def __init__(self):
super(MyModule, self).__init__()
self.block0 = MyBlock()
self.block1 = MyBlock()
self.nothing = IdentityMod()
def forward(self, x):
x = self.block0(x)
x = self.block1(x)
x = self.nothing(x)
return x
class MyBlock1(M.Module):
def forward(self, a):
y = F.concat([a, a])
return a, y
class MyModule1(M.Module):
def __init__(self):
super().__init__()
self.block0 = MyBlock1()
self.block1 = MyBlock1()
def forward(self, a):
a, y1 = self.block0(a)
a = a + 1
a, y2 = self.block1(a)
return a, y1 + y2
class NewModule(M.Module):
def __init__(self, traced_module):
super(NewModule, self).__init__()
self.module = traced_module
def forward(self, x):
x = x - 1
x = self.module(x)
x = x + 1
return x
def _check_expr_users(traced_module):
node_user = defaultdict(list)
for expr in traced_module.graph._exprs:
for node in expr.inputs:
node_user[node].append(expr)
for node in traced_module.graph.nodes():
node.users.sort(key=lambda m: m._id)
node_user[node].sort(key=lambda m: m._id)
assert node.users == node_user[node]
def _init_cls(cls):
module = cls()
x = F.ones((1, 3, 3, 3))
y = module(x)
traced_module = trace_module(module, x)
return traced_module, x, y
def _init_block():
return _init_cls(MyBlock)
def _init_module():
return _init_cls(MyModule)
def test_search():
traced_module, *_ = _init_block()
graph = traced_module.graph
relu_expr = graph.get_function_by_type(F.relu).as_unique()
assert isinstance(relu_expr, CallFunction) and relu_expr.func == F.relu
conv_node = graph.get_module_by_type(M.Conv2d).as_unique()
assert isinstance(conv_node, ModuleNode) and conv_node.module_type == M.Conv2d
add_expr = graph.get_method_by_type("__add__").as_unique()
assert isinstance(add_expr, CallMethod) and add_expr.method == "__add__"
conv_node = graph.get_node_by_name("MyBlock_conv1").as_unique()
assert isinstance(conv_node, ModuleNode) and conv_node.module_type == M.Conv2d
def test_producer_and_users():
traced_module, *_ = _init_module()
def _check(exprs):
for expr in exprs:
for n in chain(expr.inputs, expr.outputs):
if not isinstance(n.expr, Input):
assert n.expr in exprs
for e in n.users:
assert e in exprs
assert n in e.inputs
for mod in traced_module.modules():
if not hasattr(mod, "argdef_graph_map"):
continue
for g in mod.argdef_graph_map.values():
_check(g._exprs)
def test_insert():
traced_module, x, expect = _init_block()
graph = traced_module.graph
relu_out = graph.get_function_by_type(F.relu).as_unique().outputs[0]
with graph.insert_exprs():
neg_out = | F.neg(relu_out) | megengine.functional.neg |
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import pickle
from collections import defaultdict
from itertools import chain
import numpy as np
import megengine.functional as F
import megengine.module as M
import megengine.module.qat as qat
from megengine.module.identity import Identity
from megengine.traced_module import trace_module
from megengine.traced_module.expr import CallFunction, CallMethod, Expr, GetAttr, Input
from megengine.traced_module.node import ModuleNode, Node, TensorNode
class IdentityMod(M.Module):
def forward(self, x):
return x
class MyBlock(M.Module):
def __init__(self, in_channels=3, channels=3):
super(MyBlock, self).__init__()
self.conv1 = M.Conv2d(in_channels, channels, 3, 1, padding=1, bias=False)
self.bn1 = M.BatchNorm2d(channels)
self.nothing = IdentityMod()
def forward(self, x):
x = self.conv1(x)
x = self.bn1(x)
x = F.relu(x) + 1
x = self.nothing(x)
return x
class MyModule(M.Module):
def __init__(self):
super(MyModule, self).__init__()
self.block0 = MyBlock()
self.block1 = MyBlock()
self.nothing = IdentityMod()
def forward(self, x):
x = self.block0(x)
x = self.block1(x)
x = self.nothing(x)
return x
class MyBlock1(M.Module):
def forward(self, a):
y = F.concat([a, a])
return a, y
class MyModule1(M.Module):
def __init__(self):
super().__init__()
self.block0 = MyBlock1()
self.block1 = MyBlock1()
def forward(self, a):
a, y1 = self.block0(a)
a = a + 1
a, y2 = self.block1(a)
return a, y1 + y2
class NewModule(M.Module):
def __init__(self, traced_module):
super(NewModule, self).__init__()
self.module = traced_module
def forward(self, x):
x = x - 1
x = self.module(x)
x = x + 1
return x
def _check_expr_users(traced_module):
node_user = defaultdict(list)
for expr in traced_module.graph._exprs:
for node in expr.inputs:
node_user[node].append(expr)
for node in traced_module.graph.nodes():
node.users.sort(key=lambda m: m._id)
node_user[node].sort(key=lambda m: m._id)
assert node.users == node_user[node]
def _init_cls(cls):
module = cls()
x = F.ones((1, 3, 3, 3))
y = module(x)
traced_module = trace_module(module, x)
return traced_module, x, y
def _init_block():
return _init_cls(MyBlock)
def _init_module():
return _init_cls(MyModule)
def test_search():
traced_module, *_ = _init_block()
graph = traced_module.graph
relu_expr = graph.get_function_by_type(F.relu).as_unique()
assert isinstance(relu_expr, CallFunction) and relu_expr.func == F.relu
conv_node = graph.get_module_by_type(M.Conv2d).as_unique()
assert isinstance(conv_node, ModuleNode) and conv_node.module_type == M.Conv2d
add_expr = graph.get_method_by_type("__add__").as_unique()
assert isinstance(add_expr, CallMethod) and add_expr.method == "__add__"
conv_node = graph.get_node_by_name("MyBlock_conv1").as_unique()
assert isinstance(conv_node, ModuleNode) and conv_node.module_type == M.Conv2d
def test_producer_and_users():
traced_module, *_ = _init_module()
def _check(exprs):
for expr in exprs:
for n in chain(expr.inputs, expr.outputs):
if not isinstance(n.expr, Input):
assert n.expr in exprs
for e in n.users:
assert e in exprs
assert n in e.inputs
for mod in traced_module.modules():
if not hasattr(mod, "argdef_graph_map"):
continue
for g in mod.argdef_graph_map.values():
_check(g._exprs)
def test_insert():
traced_module, x, expect = _init_block()
graph = traced_module.graph
relu_out = graph.get_function_by_type(F.relu).as_unique().outputs[0]
with graph.insert_exprs():
neg_out = F.neg(relu_out)
graph.replace_node({relu_out: neg_out})
graph.compile()
np.testing.assert_allclose(expect - 1, 1 - traced_module(x), atol=1e-6)
def test_insert_module():
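    # Attach new Neg submodules to the traced module and splice calls to them into the graph.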
class Neg(M.Module):
def __init__(self, name):
super().__init__(name)
self.identity = M.Identity()
self.identity_list = [M.Identity(), M.Identity()]
self.identity_dict = {"0": M.Identity(), "1": M.Identity()}
self.param = F.zeros((1,))
def forward(self, x):
x = self.identity(x)
for m in self.identity_dict:
x = self.identity_dict[m](x)
for m in self.identity_list:
x = m(x)
return F.neg(x) + self.param
traced_module, x, expect = _init_block()
graph = traced_module.graph
relu_out = graph.get_function_by_type(F.relu).as_unique().outputs[0]
self = graph.inputs[0]
setattr(traced_module, "neg", Neg(name="neg"))
setattr(traced_module, "neg2", Neg(name="neg"))
setattr(traced_module, "param", | F.zeros((1,)) | megengine.functional.zeros |
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import pickle
from collections import defaultdict
from itertools import chain
import numpy as np
import megengine.functional as F
import megengine.module as M
import megengine.module.qat as qat
from megengine.module.identity import Identity
from megengine.traced_module import trace_module
from megengine.traced_module.expr import CallFunction, CallMethod, Expr, GetAttr, Input
from megengine.traced_module.node import ModuleNode, Node, TensorNode
class IdentityMod(M.Module):
def forward(self, x):
return x
class MyBlock(M.Module):
def __init__(self, in_channels=3, channels=3):
super(MyBlock, self).__init__()
self.conv1 = M.Conv2d(in_channels, channels, 3, 1, padding=1, bias=False)
self.bn1 = M.BatchNorm2d(channels)
self.nothing = IdentityMod()
def forward(self, x):
x = self.conv1(x)
x = self.bn1(x)
x = F.relu(x) + 1
x = self.nothing(x)
return x
class MyModule(M.Module):
def __init__(self):
super(MyModule, self).__init__()
self.block0 = MyBlock()
self.block1 = MyBlock()
self.nothing = IdentityMod()
def forward(self, x):
x = self.block0(x)
x = self.block1(x)
x = self.nothing(x)
return x
class MyBlock1(M.Module):
def forward(self, a):
y = F.concat([a, a])
return a, y
class MyModule1(M.Module):
def __init__(self):
super().__init__()
self.block0 = MyBlock1()
self.block1 = MyBlock1()
def forward(self, a):
a, y1 = self.block0(a)
a = a + 1
a, y2 = self.block1(a)
return a, y1 + y2
class NewModule(M.Module):
def __init__(self, traced_module):
super(NewModule, self).__init__()
self.module = traced_module
def forward(self, x):
x = x - 1
x = self.module(x)
x = x + 1
return x
def _check_expr_users(traced_module):
node_user = defaultdict(list)
for expr in traced_module.graph._exprs:
for node in expr.inputs:
node_user[node].append(expr)
for node in traced_module.graph.nodes():
node.users.sort(key=lambda m: m._id)
node_user[node].sort(key=lambda m: m._id)
assert node.users == node_user[node]
def _init_cls(cls):
module = cls()
x = F.ones((1, 3, 3, 3))
y = module(x)
traced_module = trace_module(module, x)
return traced_module, x, y
def _init_block():
return _init_cls(MyBlock)
def _init_module():
return _init_cls(MyModule)
def test_search():
traced_module, *_ = _init_block()
graph = traced_module.graph
relu_expr = graph.get_function_by_type(F.relu).as_unique()
assert isinstance(relu_expr, CallFunction) and relu_expr.func == F.relu
conv_node = graph.get_module_by_type(M.Conv2d).as_unique()
assert isinstance(conv_node, ModuleNode) and conv_node.module_type == M.Conv2d
add_expr = graph.get_method_by_type("__add__").as_unique()
assert isinstance(add_expr, CallMethod) and add_expr.method == "__add__"
conv_node = graph.get_node_by_name("MyBlock_conv1").as_unique()
assert isinstance(conv_node, ModuleNode) and conv_node.module_type == M.Conv2d
def test_producer_and_users():
traced_module, *_ = _init_module()
def _check(exprs):
for expr in exprs:
for n in chain(expr.inputs, expr.outputs):
if not isinstance(n.expr, Input):
assert n.expr in exprs
for e in n.users:
assert e in exprs
assert n in e.inputs
for mod in traced_module.modules():
if not hasattr(mod, "argdef_graph_map"):
continue
for g in mod.argdef_graph_map.values():
_check(g._exprs)
def test_insert():
traced_module, x, expect = _init_block()
graph = traced_module.graph
relu_out = graph.get_function_by_type(F.relu).as_unique().outputs[0]
with graph.insert_exprs():
neg_out = F.neg(relu_out)
graph.replace_node({relu_out: neg_out})
graph.compile()
np.testing.assert_allclose(expect - 1, 1 - traced_module(x), atol=1e-6)
def test_insert_module():
class Neg(M.Module):
def __init__(self, name):
super().__init__(name)
self.identity = M.Identity()
self.identity_list = [M.Identity(), M.Identity()]
self.identity_dict = {"0": M.Identity(), "1": M.Identity()}
self.param = F.zeros((1,))
def forward(self, x):
x = self.identity(x)
for m in self.identity_dict:
x = self.identity_dict[m](x)
for m in self.identity_list:
x = m(x)
return F.neg(x) + self.param
traced_module, x, expect = _init_block()
graph = traced_module.graph
relu_out = graph.get_function_by_type(F.relu).as_unique().outputs[0]
self = graph.inputs[0]
setattr(traced_module, "neg", Neg(name="neg"))
setattr(traced_module, "neg2", Neg(name="neg"))
setattr(traced_module, "param", F.zeros((1,)))
with graph.insert_exprs():
neg_out = self.neg(relu_out)
neg_out = self.neg2(relu_out)
neg_out = neg_out + self.param
graph.replace_node({relu_out: neg_out})
graph.compile()
np.testing.assert_allclose(expect - 1, 1 - traced_module(x), atol=1e-6)
assert traced_module.neg.graph is not None
assert traced_module.neg2.graph is not None
assert traced_module.neg2.param is not None
assert len(traced_module.neg.graph._exprs) == 13
for n in traced_module.graph.nodes():
if isinstance(n, TensorNode):
assert n.value is None
def test_insert_qat_module():
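    # Splice QAT Concat modules (a stock one and a subclass) in after the graph output.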
class concat(qat.Concat):
pass
traced_module, x, expect = _init_block()
graph = traced_module.graph
self = graph.inputs[0]
out = graph.outputs[0]
setattr(traced_module, "cat_0", | qat.Concat() | megengine.module.qat.Concat |
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import pickle
from collections import defaultdict
from itertools import chain
import numpy as np
import megengine.functional as F
import megengine.module as M
import megengine.module.qat as qat
from megengine.module.identity import Identity
from megengine.traced_module import trace_module
from megengine.traced_module.expr import CallFunction, CallMethod, Expr, GetAttr, Input
from megengine.traced_module.node import ModuleNode, Node, TensorNode
class IdentityMod(M.Module):
def forward(self, x):
return x
class MyBlock(M.Module):
def __init__(self, in_channels=3, channels=3):
super(MyBlock, self).__init__()
self.conv1 = M.Conv2d(in_channels, channels, 3, 1, padding=1, bias=False)
self.bn1 = M.BatchNorm2d(channels)
self.nothing = IdentityMod()
def forward(self, x):
x = self.conv1(x)
x = self.bn1(x)
x = F.relu(x) + 1
x = self.nothing(x)
return x
class MyModule(M.Module):
def __init__(self):
super(MyModule, self).__init__()
self.block0 = MyBlock()
self.block1 = MyBlock()
self.nothing = IdentityMod()
def forward(self, x):
x = self.block0(x)
x = self.block1(x)
x = self.nothing(x)
return x
class MyBlock1(M.Module):
def forward(self, a):
y = F.concat([a, a])
return a, y
class MyModule1(M.Module):
def __init__(self):
super().__init__()
self.block0 = MyBlock1()
self.block1 = MyBlock1()
def forward(self, a):
a, y1 = self.block0(a)
a = a + 1
a, y2 = self.block1(a)
return a, y1 + y2
class NewModule(M.Module):
def __init__(self, traced_module):
super(NewModule, self).__init__()
self.module = traced_module
def forward(self, x):
x = x - 1
x = self.module(x)
x = x + 1
return x
def _check_expr_users(traced_module):
node_user = defaultdict(list)
for expr in traced_module.graph._exprs:
for node in expr.inputs:
node_user[node].append(expr)
for node in traced_module.graph.nodes():
node.users.sort(key=lambda m: m._id)
node_user[node].sort(key=lambda m: m._id)
assert node.users == node_user[node]
def _init_cls(cls):
module = cls()
x = F.ones((1, 3, 3, 3))
y = module(x)
traced_module = trace_module(module, x)
return traced_module, x, y
def _init_block():
return _init_cls(MyBlock)
def _init_module():
return _init_cls(MyModule)
def test_search():
traced_module, *_ = _init_block()
graph = traced_module.graph
relu_expr = graph.get_function_by_type(F.relu).as_unique()
assert isinstance(relu_expr, CallFunction) and relu_expr.func == F.relu
conv_node = graph.get_module_by_type(M.Conv2d).as_unique()
assert isinstance(conv_node, ModuleNode) and conv_node.module_type == M.Conv2d
add_expr = graph.get_method_by_type("__add__").as_unique()
assert isinstance(add_expr, CallMethod) and add_expr.method == "__add__"
conv_node = graph.get_node_by_name("MyBlock_conv1").as_unique()
assert isinstance(conv_node, ModuleNode) and conv_node.module_type == M.Conv2d
def test_producer_and_users():
traced_module, *_ = _init_module()
def _check(exprs):
for expr in exprs:
for n in chain(expr.inputs, expr.outputs):
if not isinstance(n.expr, Input):
assert n.expr in exprs
for e in n.users:
assert e in exprs
assert n in e.inputs
for mod in traced_module.modules():
if not hasattr(mod, "argdef_graph_map"):
continue
for g in mod.argdef_graph_map.values():
_check(g._exprs)
def test_insert():
traced_module, x, expect = _init_block()
graph = traced_module.graph
relu_out = graph.get_function_by_type(F.relu).as_unique().outputs[0]
with graph.insert_exprs():
neg_out = F.neg(relu_out)
graph.replace_node({relu_out: neg_out})
graph.compile()
np.testing.assert_allclose(expect - 1, 1 - traced_module(x), atol=1e-6)
def test_insert_module():
class Neg(M.Module):
def __init__(self, name):
super().__init__(name)
self.identity = M.Identity()
self.identity_list = [M.Identity(), M.Identity()]
self.identity_dict = {"0": M.Identity(), "1": M.Identity()}
self.param = F.zeros((1,))
def forward(self, x):
x = self.identity(x)
for m in self.identity_dict:
x = self.identity_dict[m](x)
for m in self.identity_list:
x = m(x)
return F.neg(x) + self.param
traced_module, x, expect = _init_block()
graph = traced_module.graph
relu_out = graph.get_function_by_type(F.relu).as_unique().outputs[0]
self = graph.inputs[0]
setattr(traced_module, "neg", Neg(name="neg"))
setattr(traced_module, "neg2", Neg(name="neg"))
setattr(traced_module, "param", F.zeros((1,)))
with graph.insert_exprs():
neg_out = self.neg(relu_out)
neg_out = self.neg2(relu_out)
neg_out = neg_out + self.param
graph.replace_node({relu_out: neg_out})
graph.compile()
np.testing.assert_allclose(expect - 1, 1 - traced_module(x), atol=1e-6)
assert traced_module.neg.graph is not None
assert traced_module.neg2.graph is not None
assert traced_module.neg2.param is not None
assert len(traced_module.neg.graph._exprs) == 13
for n in traced_module.graph.nodes():
if isinstance(n, TensorNode):
assert n.value is None
def test_insert_qat_module():
class concat(qat.Concat):
pass
traced_module, x, expect = _init_block()
graph = traced_module.graph
self = graph.inputs[0]
out = graph.outputs[0]
setattr(traced_module, "cat_0", qat.Concat())
setattr(traced_module, "cat_1", concat())
with graph.insert_exprs():
x_0 = self.cat_0([out, out])
x_1 = self.cat_1([out, x_0])
graph.replace_node({out: x_1})
graph.compile()
x = F.copy(x)
np.testing.assert_allclose(
| F.concat([expect, expect, expect]) | megengine.functional.concat |
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import pickle
from collections import defaultdict
from itertools import chain
import numpy as np
import megengine.functional as F
import megengine.module as M
import megengine.module.qat as qat
from megengine.module.identity import Identity
from megengine.traced_module import trace_module
from megengine.traced_module.expr import CallFunction, CallMethod, Expr, GetAttr, Input
from megengine.traced_module.node import ModuleNode, Node, TensorNode
class IdentityMod(M.Module):
def forward(self, x):
return x
class MyBlock(M.Module):
def __init__(self, in_channels=3, channels=3):
super(MyBlock, self).__init__()
self.conv1 = M.Conv2d(in_channels, channels, 3, 1, padding=1, bias=False)
self.bn1 = M.BatchNorm2d(channels)
self.nothing = IdentityMod()
def forward(self, x):
x = self.conv1(x)
x = self.bn1(x)
x = | F.relu(x) | megengine.functional.relu |
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import pickle
from collections import defaultdict
from itertools import chain
import numpy as np
import megengine.functional as F
import megengine.module as M
import megengine.module.qat as qat
from megengine.module.identity import Identity
from megengine.traced_module import trace_module
from megengine.traced_module.expr import CallFunction, CallMethod, Expr, GetAttr, Input
from megengine.traced_module.node import ModuleNode, Node, TensorNode
class IdentityMod(M.Module):
def forward(self, x):
return x
class MyBlock(M.Module):
def __init__(self, in_channels=3, channels=3):
super(MyBlock, self).__init__()
self.conv1 = M.Conv2d(in_channels, channels, 3, 1, padding=1, bias=False)
self.bn1 = M.BatchNorm2d(channels)
self.nothing = IdentityMod()
def forward(self, x):
x = self.conv1(x)
x = self.bn1(x)
x = F.relu(x) + 1
x = self.nothing(x)
return x
class MyModule(M.Module):
def __init__(self):
super(MyModule, self).__init__()
self.block0 = MyBlock()
self.block1 = MyBlock()
self.nothing = IdentityMod()
def forward(self, x):
x = self.block0(x)
x = self.block1(x)
x = self.nothing(x)
return x
class MyBlock1(M.Module):
def forward(self, a):
y = F.concat([a, a])
return a, y
class MyModule1(M.Module):
def __init__(self):
super().__init__()
self.block0 = MyBlock1()
self.block1 = MyBlock1()
def forward(self, a):
a, y1 = self.block0(a)
a = a + 1
a, y2 = self.block1(a)
return a, y1 + y2
class NewModule(M.Module):
def __init__(self, traced_module):
super(NewModule, self).__init__()
self.module = traced_module
def forward(self, x):
x = x - 1
x = self.module(x)
x = x + 1
return x
def _check_expr_users(traced_module):
node_user = defaultdict(list)
for expr in traced_module.graph._exprs:
for node in expr.inputs:
node_user[node].append(expr)
for node in traced_module.graph.nodes():
node.users.sort(key=lambda m: m._id)
node_user[node].sort(key=lambda m: m._id)
assert node.users == node_user[node]
def _init_cls(cls):
module = cls()
x = F.ones((1, 3, 3, 3))
y = module(x)
traced_module = trace_module(module, x)
return traced_module, x, y
def _init_block():
return _init_cls(MyBlock)
def _init_module():
return _init_cls(MyModule)
def test_search():
traced_module, *_ = _init_block()
graph = traced_module.graph
relu_expr = graph.get_function_by_type(F.relu).as_unique()
assert isinstance(relu_expr, CallFunction) and relu_expr.func == F.relu
conv_node = graph.get_module_by_type(M.Conv2d).as_unique()
assert isinstance(conv_node, ModuleNode) and conv_node.module_type == M.Conv2d
add_expr = graph.get_method_by_type("__add__").as_unique()
assert isinstance(add_expr, CallMethod) and add_expr.method == "__add__"
conv_node = graph.get_node_by_name("MyBlock_conv1").as_unique()
assert isinstance(conv_node, ModuleNode) and conv_node.module_type == M.Conv2d
def test_producer_and_users():
traced_module, *_ = _init_module()
def _check(exprs):
for expr in exprs:
for n in chain(expr.inputs, expr.outputs):
if not isinstance(n.expr, Input):
assert n.expr in exprs
for e in n.users:
assert e in exprs
assert n in e.inputs
for mod in traced_module.modules():
if not hasattr(mod, "argdef_graph_map"):
continue
for g in mod.argdef_graph_map.values():
_check(g._exprs)
def test_insert():
traced_module, x, expect = _init_block()
graph = traced_module.graph
relu_out = graph.get_function_by_type(F.relu).as_unique().outputs[0]
with graph.insert_exprs():
neg_out = F.neg(relu_out)
graph.replace_node({relu_out: neg_out})
graph.compile()
np.testing.assert_allclose(expect - 1, 1 - traced_module(x), atol=1e-6)
def test_insert_module():
class Neg(M.Module):
def __init__(self, name):
super().__init__(name)
self.identity = | M.Identity() | megengine.module.Identity |
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import pickle
from collections import defaultdict
from itertools import chain
import numpy as np
import megengine.functional as F
import megengine.module as M
import megengine.module.qat as qat
from megengine.module.identity import Identity
from megengine.traced_module import trace_module
from megengine.traced_module.expr import CallFunction, CallMethod, Expr, GetAttr, Input
from megengine.traced_module.node import ModuleNode, Node, TensorNode
class IdentityMod(M.Module):
def forward(self, x):
return x
class MyBlock(M.Module):
def __init__(self, in_channels=3, channels=3):
super(MyBlock, self).__init__()
self.conv1 = M.Conv2d(in_channels, channels, 3, 1, padding=1, bias=False)
self.bn1 = M.BatchNorm2d(channels)
self.nothing = IdentityMod()
def forward(self, x):
x = self.conv1(x)
x = self.bn1(x)
x = F.relu(x) + 1
x = self.nothing(x)
return x
class MyModule(M.Module):
def __init__(self):
super(MyModule, self).__init__()
self.block0 = MyBlock()
self.block1 = MyBlock()
self.nothing = IdentityMod()
def forward(self, x):
x = self.block0(x)
x = self.block1(x)
x = self.nothing(x)
return x
class MyBlock1(M.Module):
def forward(self, a):
y = F.concat([a, a])
return a, y
class MyModule1(M.Module):
def __init__(self):
super().__init__()
self.block0 = MyBlock1()
self.block1 = MyBlock1()
def forward(self, a):
a, y1 = self.block0(a)
a = a + 1
a, y2 = self.block1(a)
return a, y1 + y2
class NewModule(M.Module):
def __init__(self, traced_module):
super(NewModule, self).__init__()
self.module = traced_module
def forward(self, x):
x = x - 1
x = self.module(x)
x = x + 1
return x
def _check_expr_users(traced_module):
node_user = defaultdict(list)
for expr in traced_module.graph._exprs:
for node in expr.inputs:
node_user[node].append(expr)
for node in traced_module.graph.nodes():
node.users.sort(key=lambda m: m._id)
node_user[node].sort(key=lambda m: m._id)
assert node.users == node_user[node]
def _init_cls(cls):
module = cls()
x = F.ones((1, 3, 3, 3))
y = module(x)
traced_module = trace_module(module, x)
return traced_module, x, y
def _init_block():
return _init_cls(MyBlock)
def _init_module():
return _init_cls(MyModule)
def test_search():
traced_module, *_ = _init_block()
graph = traced_module.graph
relu_expr = graph.get_function_by_type(F.relu).as_unique()
assert isinstance(relu_expr, CallFunction) and relu_expr.func == F.relu
conv_node = graph.get_module_by_type(M.Conv2d).as_unique()
assert isinstance(conv_node, ModuleNode) and conv_node.module_type == M.Conv2d
add_expr = graph.get_method_by_type("__add__").as_unique()
assert isinstance(add_expr, CallMethod) and add_expr.method == "__add__"
conv_node = graph.get_node_by_name("MyBlock_conv1").as_unique()
assert isinstance(conv_node, ModuleNode) and conv_node.module_type == M.Conv2d
def test_producer_and_users():
traced_module, *_ = _init_module()
def _check(exprs):
for expr in exprs:
for n in chain(expr.inputs, expr.outputs):
if not isinstance(n.expr, Input):
assert n.expr in exprs
for e in n.users:
assert e in exprs
assert n in e.inputs
for mod in traced_module.modules():
if not hasattr(mod, "argdef_graph_map"):
continue
for g in mod.argdef_graph_map.values():
_check(g._exprs)
def test_insert():
traced_module, x, expect = _init_block()
graph = traced_module.graph
relu_out = graph.get_function_by_type(F.relu).as_unique().outputs[0]
with graph.insert_exprs():
neg_out = F.neg(relu_out)
graph.replace_node({relu_out: neg_out})
graph.compile()
np.testing.assert_allclose(expect - 1, 1 - traced_module(x), atol=1e-6)
def test_insert_module():
class Neg(M.Module):
def __init__(self, name):
super().__init__(name)
self.identity = M.Identity()
self.identity_list = [M.Identity(), M.Identity()]
self.identity_dict = {"0": M.Identity(), "1": M.Identity()}
self.param = | F.zeros((1,)) | megengine.functional.zeros |
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import pickle
from collections import defaultdict
from itertools import chain
import numpy as np
import megengine.functional as F
import megengine.module as M
import megengine.module.qat as qat
from megengine.module.identity import Identity
from megengine.traced_module import trace_module
from megengine.traced_module.expr import CallFunction, CallMethod, Expr, GetAttr, Input
from megengine.traced_module.node import ModuleNode, Node, TensorNode
class IdentityMod(M.Module):
def forward(self, x):
return x
class MyBlock(M.Module):
def __init__(self, in_channels=3, channels=3):
super(MyBlock, self).__init__()
self.conv1 = M.Conv2d(in_channels, channels, 3, 1, padding=1, bias=False)
self.bn1 = M.BatchNorm2d(channels)
self.nothing = IdentityMod()
def forward(self, x):
x = self.conv1(x)
x = self.bn1(x)
x = F.relu(x) + 1
x = self.nothing(x)
return x
class MyModule(M.Module):
def __init__(self):
super(MyModule, self).__init__()
self.block0 = MyBlock()
self.block1 = MyBlock()
self.nothing = IdentityMod()
def forward(self, x):
x = self.block0(x)
x = self.block1(x)
x = self.nothing(x)
return x
class MyBlock1(M.Module):
def forward(self, a):
y = F.concat([a, a])
return a, y
class MyModule1(M.Module):
def __init__(self):
super().__init__()
self.block0 = MyBlock1()
self.block1 = MyBlock1()
def forward(self, a):
a, y1 = self.block0(a)
a = a + 1
a, y2 = self.block1(a)
return a, y1 + y2
class NewModule(M.Module):
def __init__(self, traced_module):
super(NewModule, self).__init__()
self.module = traced_module
def forward(self, x):
x = x - 1
x = self.module(x)
x = x + 1
return x
def _check_expr_users(traced_module):
node_user = defaultdict(list)
for expr in traced_module.graph._exprs:
for node in expr.inputs:
node_user[node].append(expr)
for node in traced_module.graph.nodes():
node.users.sort(key=lambda m: m._id)
node_user[node].sort(key=lambda m: m._id)
assert node.users == node_user[node]
def _init_cls(cls):
module = cls()
x = F.ones((1, 3, 3, 3))
y = module(x)
traced_module = trace_module(module, x)
return traced_module, x, y
def _init_block():
return _init_cls(MyBlock)
def _init_module():
return _init_cls(MyModule)
def test_search():
traced_module, *_ = _init_block()
graph = traced_module.graph
relu_expr = graph.get_function_by_type(F.relu).as_unique()
assert isinstance(relu_expr, CallFunction) and relu_expr.func == F.relu
conv_node = graph.get_module_by_type(M.Conv2d).as_unique()
assert isinstance(conv_node, ModuleNode) and conv_node.module_type == M.Conv2d
add_expr = graph.get_method_by_type("__add__").as_unique()
assert isinstance(add_expr, CallMethod) and add_expr.method == "__add__"
conv_node = graph.get_node_by_name("MyBlock_conv1").as_unique()
assert isinstance(conv_node, ModuleNode) and conv_node.module_type == M.Conv2d
def test_producer_and_users():
traced_module, *_ = _init_module()
def _check(exprs):
for expr in exprs:
for n in chain(expr.inputs, expr.outputs):
if not isinstance(n.expr, Input):
assert n.expr in exprs
for e in n.users:
assert e in exprs
assert n in e.inputs
for mod in traced_module.modules():
if not hasattr(mod, "argdef_graph_map"):
continue
for g in mod.argdef_graph_map.values():
_check(g._exprs)
def test_insert():
traced_module, x, expect = _init_block()
graph = traced_module.graph
relu_out = graph.get_function_by_type(F.relu).as_unique().outputs[0]
with graph.insert_exprs():
neg_out = F.neg(relu_out)
graph.replace_node({relu_out: neg_out})
graph.compile()
np.testing.assert_allclose(expect - 1, 1 - traced_module(x), atol=1e-6)
def test_insert_module():
class Neg(M.Module):
def __init__(self, name):
super().__init__(name)
self.identity = M.Identity()
self.identity_list = [M.Identity(), M.Identity()]
self.identity_dict = {"0": M.Identity(), "1": M.Identity()}
self.param = F.zeros((1,))
def forward(self, x):
x = self.identity(x)
for m in self.identity_dict:
x = self.identity_dict[m](x)
for m in self.identity_list:
x = m(x)
return F.neg(x) + self.param
traced_module, x, expect = _init_block()
graph = traced_module.graph
relu_out = graph.get_function_by_type(F.relu).as_unique().outputs[0]
self = graph.inputs[0]
setattr(traced_module, "neg", Neg(name="neg"))
setattr(traced_module, "neg2", Neg(name="neg"))
setattr(traced_module, "param", F.zeros((1,)))
with graph.insert_exprs():
neg_out = self.neg(relu_out)
neg_out = self.neg2(relu_out)
neg_out = neg_out + self.param
graph.replace_node({relu_out: neg_out})
graph.compile()
np.testing.assert_allclose(expect - 1, 1 - traced_module(x), atol=1e-6)
assert traced_module.neg.graph is not None
assert traced_module.neg2.graph is not None
assert traced_module.neg2.param is not None
assert len(traced_module.neg.graph._exprs) == 13
for n in traced_module.graph.nodes():
if isinstance(n, TensorNode):
assert n.value is None
def test_insert_qat_module():
class concat(qat.Concat):
pass
traced_module, x, expect = _init_block()
graph = traced_module.graph
self = graph.inputs[0]
out = graph.outputs[0]
setattr(traced_module, "cat_0", qat.Concat())
setattr(traced_module, "cat_1", concat())
with graph.insert_exprs():
x_0 = self.cat_0([out, out])
x_1 = self.cat_1([out, x_0])
graph.replace_node({out: x_1})
graph.compile()
x = F.copy(x)
np.testing.assert_allclose(
F.concat([expect, expect, expect]), traced_module(x), atol=1e-6
)
assert not hasattr(traced_module.cat_0, "graph")
assert traced_module.cat_1.graph is not None
def test_add_input_and_output():
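    # Add a fresh input node and output node to the traced graph, then exercise the module and reset_outputs.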
traced_module, x, y = _init_module()
data_node = traced_module.graph.add_input_node(shape=(1, 3, 224, 224), name="data")
traced_module.graph.add_output_node(data_node)
assert data_node.name == "data"
assert traced_module.graph.inputs[-1] == data_node
assert len(traced_module.graph.inputs) == 3
assert len(traced_module.graph.outputs) == 2
y1, y2 = traced_module(x, x)
np.testing.assert_equal(y1.numpy(), y.numpy())
np.testing.assert_equal(y2.numpy(), x.numpy())
y1, y2 = traced_module(x, y)
np.testing.assert_equal(y2.numpy(), y.numpy())
traced_module.graph.reset_outputs(
({"orig_out": traced_module.graph.outputs[0]}, traced_module.graph.outputs[1])
)
out = traced_module(x, x)
assert isinstance(out, tuple)
assert isinstance(out[0], dict)
np.testing.assert_equal(out[0]["orig_out"].numpy(), y.numpy())
np.testing.assert_equal(out[1].numpy(), x.numpy())
def test_delete():
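    # Drop the relu expr by rerouting its output to its input, then clear the whole graph.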
traced_module, x, expect = _init_block()
graph = traced_module.graph
relu_expr = graph.get_function_by_type(F.relu).as_unique()
node = relu_expr.outputs
repl_node = relu_expr.inputs
graph.replace_node({node[0]: repl_node[0]})
graph.compile()
np.testing.assert_allclose(expect - 1, F.relu(traced_module(x) - 1), atol=1e-6)
# clear graph
graph.replace_node({graph.outputs[0]: graph.inputs[1]})
graph.compile()
np.testing.assert_equal(len(list(graph._exprs)), 0)
np.testing.assert_equal(traced_module(x).numpy(), x.numpy())
def test_flatten():
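    # Flattening should inline the sub-module graphs into 12 exprs and be idempotent.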
traced_module, x, expect = _init_module()
traced_module = traced_module.flatten()
assert len(traced_module.graph._exprs) == 12
np.testing.assert_equal(expect.numpy(), traced_module(x).numpy())
traced_module = traced_module.flatten()
assert len(traced_module.graph._exprs) == 12
np.testing.assert_equal(expect.numpy(), traced_module(x).numpy())
traced_module, x, expect = _init_cls(MyModule1)
traced_module = traced_module.flatten()
_check_expr_users(traced_module)
def test_id_and_name():
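    # Node/expr ids and flattened node names must stay unique and consistent with _total_ids, even after pickling.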
def _check_id(traced_module):
_total_ids = traced_module.graph._total_ids
node_ids = [n._id for n in traced_module.graph.nodes().as_list()]
assert len(set(node_ids)) == len(node_ids)
assert max(node_ids) + 1 == _total_ids[0]
expr_ids = [n._id for n in traced_module.graph.exprs().as_list()]
assert len(set(expr_ids)) == len(expr_ids)
assert max(expr_ids) + 1 == _total_ids[1]
def _check_name(flatened_module):
node_names = [n._name for n in flatened_module.graph.nodes().as_list()]
assert len(set(node_names)) == len(node_names)
traced_module, x, expect = _init_module()
_check_id(traced_module)
flattened_module = traced_module.flatten()
_check_id(flattened_module)
_check_name(flattened_module)
# pickle check
obj = pickle.dumps(traced_module)
traced_module = pickle.loads(obj)
Node._set_next_id(159)
Expr._set_next_id(1024)
graph = traced_module.graph
for expr in graph.get_function_by_type(F.relu).as_list():
relu_out = expr.outputs[0]
cur_graph = expr.top_graph
with cur_graph.insert_exprs():
neg_out = | F.neg(relu_out) | megengine.functional.neg |
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import pickle
from collections import defaultdict
from itertools import chain
import numpy as np
import megengine.functional as F
import megengine.module as M
import megengine.module.qat as qat
from megengine.module.identity import Identity
from megengine.traced_module import trace_module
from megengine.traced_module.expr import CallFunction, CallMethod, Expr, GetAttr, Input
from megengine.traced_module.node import ModuleNode, Node, TensorNode
class IdentityMod(M.Module):
def forward(self, x):
return x
class MyBlock(M.Module):
def __init__(self, in_channels=3, channels=3):
super(MyBlock, self).__init__()
self.conv1 = M.Conv2d(in_channels, channels, 3, 1, padding=1, bias=False)
self.bn1 = M.BatchNorm2d(channels)
self.nothing = IdentityMod()
def forward(self, x):
x = self.conv1(x)
x = self.bn1(x)
x = F.relu(x) + 1
x = self.nothing(x)
return x
class MyModule(M.Module):
def __init__(self):
super(MyModule, self).__init__()
self.block0 = MyBlock()
self.block1 = MyBlock()
self.nothing = IdentityMod()
def forward(self, x):
x = self.block0(x)
x = self.block1(x)
x = self.nothing(x)
return x
class MyBlock1(M.Module):
def forward(self, a):
y = F.concat([a, a])
return a, y
class MyModule1(M.Module):
def __init__(self):
super().__init__()
self.block0 = MyBlock1()
self.block1 = MyBlock1()
def forward(self, a):
a, y1 = self.block0(a)
a = a + 1
a, y2 = self.block1(a)
return a, y1 + y2
class NewModule(M.Module):
def __init__(self, traced_module):
super(NewModule, self).__init__()
self.module = traced_module
def forward(self, x):
x = x - 1
x = self.module(x)
x = x + 1
return x
def _check_expr_users(traced_module):
node_user = defaultdict(list)
for expr in traced_module.graph._exprs:
for node in expr.inputs:
node_user[node].append(expr)
for node in traced_module.graph.nodes():
node.users.sort(key=lambda m: m._id)
node_user[node].sort(key=lambda m: m._id)
assert node.users == node_user[node]
def _init_cls(cls):
module = cls()
x = F.ones((1, 3, 3, 3))
y = module(x)
traced_module = trace_module(module, x)
return traced_module, x, y
def _init_block():
return _init_cls(MyBlock)
def _init_module():
return _init_cls(MyModule)
def test_search():
traced_module, *_ = _init_block()
graph = traced_module.graph
relu_expr = graph.get_function_by_type(F.relu).as_unique()
assert isinstance(relu_expr, CallFunction) and relu_expr.func == F.relu
conv_node = graph.get_module_by_type(M.Conv2d).as_unique()
assert isinstance(conv_node, ModuleNode) and conv_node.module_type == M.Conv2d
add_expr = graph.get_method_by_type("__add__").as_unique()
assert isinstance(add_expr, CallMethod) and add_expr.method == "__add__"
conv_node = graph.get_node_by_name("MyBlock_conv1").as_unique()
assert isinstance(conv_node, ModuleNode) and conv_node.module_type == M.Conv2d
def test_producer_and_users():
traced_module, *_ = _init_module()
def _check(exprs):
for expr in exprs:
for n in chain(expr.inputs, expr.outputs):
if not isinstance(n.expr, Input):
assert n.expr in exprs
for e in n.users:
assert e in exprs
assert n in e.inputs
for mod in traced_module.modules():
if not hasattr(mod, "argdef_graph_map"):
continue
for g in mod.argdef_graph_map.values():
_check(g._exprs)
def test_insert():
traced_module, x, expect = _init_block()
graph = traced_module.graph
relu_out = graph.get_function_by_type(F.relu).as_unique().outputs[0]
with graph.insert_exprs():
neg_out = F.neg(relu_out)
graph.replace_node({relu_out: neg_out})
graph.compile()
np.testing.assert_allclose(expect - 1, 1 - traced_module(x), atol=1e-6)
def test_insert_module():
class Neg(M.Module):
def __init__(self, name):
super().__init__(name)
self.identity = M.Identity()
self.identity_list = [ | M.Identity() | megengine.module.Identity |
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import pickle
from collections import defaultdict
from itertools import chain
import numpy as np
import megengine.functional as F
import megengine.module as M
import megengine.module.qat as qat
from megengine.module.identity import Identity
from megengine.traced_module import trace_module
from megengine.traced_module.expr import CallFunction, CallMethod, Expr, GetAttr, Input
from megengine.traced_module.node import ModuleNode, Node, TensorNode
class IdentityMod(M.Module):
def forward(self, x):
return x
class MyBlock(M.Module):
def __init__(self, in_channels=3, channels=3):
super(MyBlock, self).__init__()
self.conv1 = M.Conv2d(in_channels, channels, 3, 1, padding=1, bias=False)
self.bn1 = M.BatchNorm2d(channels)
self.nothing = IdentityMod()
def forward(self, x):
x = self.conv1(x)
x = self.bn1(x)
x = F.relu(x) + 1
x = self.nothing(x)
return x
class MyModule(M.Module):
def __init__(self):
super(MyModule, self).__init__()
self.block0 = MyBlock()
self.block1 = MyBlock()
self.nothing = IdentityMod()
def forward(self, x):
x = self.block0(x)
x = self.block1(x)
x = self.nothing(x)
return x
class MyBlock1(M.Module):
def forward(self, a):
y = F.concat([a, a])
return a, y
class MyModule1(M.Module):
def __init__(self):
super().__init__()
self.block0 = MyBlock1()
self.block1 = MyBlock1()
def forward(self, a):
a, y1 = self.block0(a)
a = a + 1
a, y2 = self.block1(a)
return a, y1 + y2
class NewModule(M.Module):
def __init__(self, traced_module):
super(NewModule, self).__init__()
self.module = traced_module
def forward(self, x):
x = x - 1
x = self.module(x)
x = x + 1
return x
def _check_expr_users(traced_module):
node_user = defaultdict(list)
for expr in traced_module.graph._exprs:
for node in expr.inputs:
node_user[node].append(expr)
for node in traced_module.graph.nodes():
node.users.sort(key=lambda m: m._id)
node_user[node].sort(key=lambda m: m._id)
assert node.users == node_user[node]
def _init_cls(cls):
module = cls()
x = F.ones((1, 3, 3, 3))
y = module(x)
traced_module = trace_module(module, x)
return traced_module, x, y
def _init_block():
return _init_cls(MyBlock)
def _init_module():
return _init_cls(MyModule)
def test_search():
traced_module, *_ = _init_block()
graph = traced_module.graph
relu_expr = graph.get_function_by_type(F.relu).as_unique()
assert isinstance(relu_expr, CallFunction) and relu_expr.func == F.relu
conv_node = graph.get_module_by_type(M.Conv2d).as_unique()
assert isinstance(conv_node, ModuleNode) and conv_node.module_type == M.Conv2d
add_expr = graph.get_method_by_type("__add__").as_unique()
assert isinstance(add_expr, CallMethod) and add_expr.method == "__add__"
conv_node = graph.get_node_by_name("MyBlock_conv1").as_unique()
assert isinstance(conv_node, ModuleNode) and conv_node.module_type == M.Conv2d
def test_producer_and_users():
traced_module, *_ = _init_module()
def _check(exprs):
for expr in exprs:
for n in chain(expr.inputs, expr.outputs):
if not isinstance(n.expr, Input):
assert n.expr in exprs
for e in n.users:
assert e in exprs
assert n in e.inputs
for mod in traced_module.modules():
if not hasattr(mod, "argdef_graph_map"):
continue
for g in mod.argdef_graph_map.values():
_check(g._exprs)
def test_insert():
traced_module, x, expect = _init_block()
graph = traced_module.graph
relu_out = graph.get_function_by_type(F.relu).as_unique().outputs[0]
with graph.insert_exprs():
neg_out = F.neg(relu_out)
graph.replace_node({relu_out: neg_out})
graph.compile()
np.testing.assert_allclose(expect - 1, 1 - traced_module(x), atol=1e-6)
def test_insert_module():
class Neg(M.Module):
def __init__(self, name):
super().__init__(name)
self.identity = M.Identity()
self.identity_list = [M.Identity(), | M.Identity() | megengine.module.Identity |
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import pickle
from collections import defaultdict
from itertools import chain
import numpy as np
import megengine.functional as F
import megengine.module as M
import megengine.module.qat as qat
from megengine.module.identity import Identity
from megengine.traced_module import trace_module
from megengine.traced_module.expr import CallFunction, CallMethod, Expr, GetAttr, Input
from megengine.traced_module.node import ModuleNode, Node, TensorNode
class IdentityMod(M.Module):
def forward(self, x):
return x
class MyBlock(M.Module):
def __init__(self, in_channels=3, channels=3):
super(MyBlock, self).__init__()
self.conv1 = M.Conv2d(in_channels, channels, 3, 1, padding=1, bias=False)
self.bn1 = M.BatchNorm2d(channels)
self.nothing = IdentityMod()
def forward(self, x):
x = self.conv1(x)
x = self.bn1(x)
x = F.relu(x) + 1
x = self.nothing(x)
return x
class MyModule(M.Module):
def __init__(self):
super(MyModule, self).__init__()
self.block0 = MyBlock()
self.block1 = MyBlock()
self.nothing = IdentityMod()
def forward(self, x):
x = self.block0(x)
x = self.block1(x)
x = self.nothing(x)
return x
class MyBlock1(M.Module):
def forward(self, a):
y = F.concat([a, a])
return a, y
class MyModule1(M.Module):
def __init__(self):
super().__init__()
self.block0 = MyBlock1()
self.block1 = MyBlock1()
def forward(self, a):
a, y1 = self.block0(a)
a = a + 1
a, y2 = self.block1(a)
return a, y1 + y2
class NewModule(M.Module):
def __init__(self, traced_module):
super(NewModule, self).__init__()
self.module = traced_module
def forward(self, x):
x = x - 1
x = self.module(x)
x = x + 1
return x
def _check_expr_users(traced_module):
node_user = defaultdict(list)
for expr in traced_module.graph._exprs:
for node in expr.inputs:
node_user[node].append(expr)
for node in traced_module.graph.nodes():
node.users.sort(key=lambda m: m._id)
node_user[node].sort(key=lambda m: m._id)
assert node.users == node_user[node]
def _init_cls(cls):
module = cls()
x = F.ones((1, 3, 3, 3))
y = module(x)
traced_module = trace_module(module, x)
return traced_module, x, y
def _init_block():
return _init_cls(MyBlock)
def _init_module():
return _init_cls(MyModule)
def test_search():
traced_module, *_ = _init_block()
graph = traced_module.graph
relu_expr = graph.get_function_by_type(F.relu).as_unique()
assert isinstance(relu_expr, CallFunction) and relu_expr.func == F.relu
conv_node = graph.get_module_by_type(M.Conv2d).as_unique()
assert isinstance(conv_node, ModuleNode) and conv_node.module_type == M.Conv2d
add_expr = graph.get_method_by_type("__add__").as_unique()
assert isinstance(add_expr, CallMethod) and add_expr.method == "__add__"
conv_node = graph.get_node_by_name("MyBlock_conv1").as_unique()
assert isinstance(conv_node, ModuleNode) and conv_node.module_type == M.Conv2d
def test_producer_and_users():
traced_module, *_ = _init_module()
def _check(exprs):
for expr in exprs:
for n in chain(expr.inputs, expr.outputs):
if not isinstance(n.expr, Input):
assert n.expr in exprs
for e in n.users:
assert e in exprs
assert n in e.inputs
for mod in traced_module.modules():
if not hasattr(mod, "argdef_graph_map"):
continue
for g in mod.argdef_graph_map.values():
_check(g._exprs)
def test_insert():
traced_module, x, expect = _init_block()
graph = traced_module.graph
relu_out = graph.get_function_by_type(F.relu).as_unique().outputs[0]
with graph.insert_exprs():
neg_out = F.neg(relu_out)
graph.replace_node({relu_out: neg_out})
graph.compile()
np.testing.assert_allclose(expect - 1, 1 - traced_module(x), atol=1e-6)
def test_insert_module():
class Neg(M.Module):
def __init__(self, name):
super().__init__(name)
self.identity = M.Identity()
self.identity_list = [M.Identity(), M.Identity()]
self.identity_dict = {"0": | M.Identity() | megengine.module.Identity |
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import pickle
from collections import defaultdict
from itertools import chain
import numpy as np
import megengine.functional as F
import megengine.module as M
import megengine.module.qat as qat
from megengine.module.identity import Identity
from megengine.traced_module import trace_module
from megengine.traced_module.expr import CallFunction, CallMethod, Expr, GetAttr, Input
from megengine.traced_module.node import ModuleNode, Node, TensorNode
class IdentityMod(M.Module):
def forward(self, x):
return x
class MyBlock(M.Module):
def __init__(self, in_channels=3, channels=3):
super(MyBlock, self).__init__()
self.conv1 = M.Conv2d(in_channels, channels, 3, 1, padding=1, bias=False)
self.bn1 = M.BatchNorm2d(channels)
self.nothing = IdentityMod()
def forward(self, x):
x = self.conv1(x)
x = self.bn1(x)
x = F.relu(x) + 1
x = self.nothing(x)
return x
class MyModule(M.Module):
def __init__(self):
super(MyModule, self).__init__()
self.block0 = MyBlock()
self.block1 = MyBlock()
self.nothing = IdentityMod()
def forward(self, x):
x = self.block0(x)
x = self.block1(x)
x = self.nothing(x)
return x
class MyBlock1(M.Module):
def forward(self, a):
y = F.concat([a, a])
return a, y
class MyModule1(M.Module):
def __init__(self):
super().__init__()
self.block0 = MyBlock1()
self.block1 = MyBlock1()
def forward(self, a):
a, y1 = self.block0(a)
a = a + 1
a, y2 = self.block1(a)
return a, y1 + y2
class NewModule(M.Module):
def __init__(self, traced_module):
super(NewModule, self).__init__()
self.module = traced_module
def forward(self, x):
x = x - 1
x = self.module(x)
x = x + 1
return x
def _check_expr_users(traced_module):
node_user = defaultdict(list)
for expr in traced_module.graph._exprs:
for node in expr.inputs:
node_user[node].append(expr)
for node in traced_module.graph.nodes():
node.users.sort(key=lambda m: m._id)
node_user[node].sort(key=lambda m: m._id)
assert node.users == node_user[node]
def _init_cls(cls):
module = cls()
x = F.ones((1, 3, 3, 3))
y = module(x)
traced_module = trace_module(module, x)
return traced_module, x, y
def _init_block():
return _init_cls(MyBlock)
def _init_module():
return _init_cls(MyModule)
def test_search():
traced_module, *_ = _init_block()
graph = traced_module.graph
relu_expr = graph.get_function_by_type(F.relu).as_unique()
assert isinstance(relu_expr, CallFunction) and relu_expr.func == F.relu
conv_node = graph.get_module_by_type(M.Conv2d).as_unique()
assert isinstance(conv_node, ModuleNode) and conv_node.module_type == M.Conv2d
add_expr = graph.get_method_by_type("__add__").as_unique()
assert isinstance(add_expr, CallMethod) and add_expr.method == "__add__"
conv_node = graph.get_node_by_name("MyBlock_conv1").as_unique()
assert isinstance(conv_node, ModuleNode) and conv_node.module_type == M.Conv2d
def test_producer_and_users():
traced_module, *_ = _init_module()
def _check(exprs):
for expr in exprs:
for n in chain(expr.inputs, expr.outputs):
if not isinstance(n.expr, Input):
assert n.expr in exprs
for e in n.users:
assert e in exprs
assert n in e.inputs
for mod in traced_module.modules():
if not hasattr(mod, "argdef_graph_map"):
continue
for g in mod.argdef_graph_map.values():
_check(g._exprs)
def test_insert():
traced_module, x, expect = _init_block()
graph = traced_module.graph
relu_out = graph.get_function_by_type(F.relu).as_unique().outputs[0]
with graph.insert_exprs():
neg_out = F.neg(relu_out)
graph.replace_node({relu_out: neg_out})
graph.compile()
np.testing.assert_allclose(expect - 1, 1 - traced_module(x), atol=1e-6)
def test_insert_module():
class Neg(M.Module):
def __init__(self, name):
super().__init__(name)
self.identity = M.Identity()
self.identity_list = [M.Identity(), M.Identity()]
self.identity_dict = {"0": M.Identity(), "1": | M.Identity() | megengine.module.Identity |
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import pickle
from collections import defaultdict
from itertools import chain
import numpy as np
import megengine.functional as F
import megengine.module as M
import megengine.module.qat as qat
from megengine.module.identity import Identity
from megengine.traced_module import trace_module
from megengine.traced_module.expr import CallFunction, CallMethod, Expr, GetAttr, Input
from megengine.traced_module.node import ModuleNode, Node, TensorNode
class IdentityMod(M.Module):
def forward(self, x):
return x
class MyBlock(M.Module):
def __init__(self, in_channels=3, channels=3):
super(MyBlock, self).__init__()
self.conv1 = M.Conv2d(in_channels, channels, 3, 1, padding=1, bias=False)
self.bn1 = M.BatchNorm2d(channels)
self.nothing = IdentityMod()
def forward(self, x):
x = self.conv1(x)
x = self.bn1(x)
x = F.relu(x) + 1
x = self.nothing(x)
return x
class MyModule(M.Module):
def __init__(self):
super(MyModule, self).__init__()
self.block0 = MyBlock()
self.block1 = MyBlock()
self.nothing = IdentityMod()
def forward(self, x):
x = self.block0(x)
x = self.block1(x)
x = self.nothing(x)
return x
class MyBlock1(M.Module):
def forward(self, a):
y = F.concat([a, a])
return a, y
class MyModule1(M.Module):
def __init__(self):
super().__init__()
self.block0 = MyBlock1()
self.block1 = MyBlock1()
def forward(self, a):
a, y1 = self.block0(a)
a = a + 1
a, y2 = self.block1(a)
return a, y1 + y2
class NewModule(M.Module):
def __init__(self, traced_module):
super(NewModule, self).__init__()
self.module = traced_module
def forward(self, x):
x = x - 1
x = self.module(x)
x = x + 1
return x
def _check_expr_users(traced_module):
node_user = defaultdict(list)
for expr in traced_module.graph._exprs:
for node in expr.inputs:
node_user[node].append(expr)
for node in traced_module.graph.nodes():
node.users.sort(key=lambda m: m._id)
node_user[node].sort(key=lambda m: m._id)
assert node.users == node_user[node]
def _init_cls(cls):
module = cls()
x = F.ones((1, 3, 3, 3))
y = module(x)
traced_module = trace_module(module, x)
return traced_module, x, y
def _init_block():
return _init_cls(MyBlock)
def _init_module():
return _init_cls(MyModule)
def test_search():
traced_module, *_ = _init_block()
graph = traced_module.graph
relu_expr = graph.get_function_by_type(F.relu).as_unique()
assert isinstance(relu_expr, CallFunction) and relu_expr.func == F.relu
conv_node = graph.get_module_by_type(M.Conv2d).as_unique()
assert isinstance(conv_node, ModuleNode) and conv_node.module_type == M.Conv2d
add_expr = graph.get_method_by_type("__add__").as_unique()
assert isinstance(add_expr, CallMethod) and add_expr.method == "__add__"
conv_node = graph.get_node_by_name("MyBlock_conv1").as_unique()
assert isinstance(conv_node, ModuleNode) and conv_node.module_type == M.Conv2d
def test_producer_and_users():
traced_module, *_ = _init_module()
def _check(exprs):
for expr in exprs:
for n in chain(expr.inputs, expr.outputs):
if not isinstance(n.expr, Input):
assert n.expr in exprs
for e in n.users:
assert e in exprs
assert n in e.inputs
for mod in traced_module.modules():
if not hasattr(mod, "argdef_graph_map"):
continue
for g in mod.argdef_graph_map.values():
_check(g._exprs)
def test_insert():
traced_module, x, expect = _init_block()
graph = traced_module.graph
relu_out = graph.get_function_by_type(F.relu).as_unique().outputs[0]
with graph.insert_exprs():
neg_out = F.neg(relu_out)
graph.replace_node({relu_out: neg_out})
graph.compile()
np.testing.assert_allclose(expect - 1, 1 - traced_module(x), atol=1e-6)
def test_insert_module():
class Neg(M.Module):
def __init__(self, name):
super().__init__(name)
self.identity = M.Identity()
self.identity_list = [M.Identity(), M.Identity()]
self.identity_dict = {"0": M.Identity(), "1": M.Identity()}
self.param = F.zeros((1,))
def forward(self, x):
x = self.identity(x)
for m in self.identity_dict:
x = self.identity_dict[m](x)
for m in self.identity_list:
x = m(x)
return | F.neg(x) | megengine.functional.neg |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import gc
import platform
import weakref
import numpy as np
import pytest
import megengine as mge
import megengine.distributed as dist
import megengine.functional as F
from megengine.core._imperative_rt import TensorAttr, imperative
from megengine.core._imperative_rt.imperative import sync
from megengine.core.autodiff.grad import Grad
from megengine.core.ops.builtin import Elemwise
from megengine.core.tensor.raw_tensor import as_raw_tensor
from megengine.core.tensor.tensor import Tensor, apply
from megengine.core.tensor.tensor_wrapper import TensorWrapper
from megengine.functional.distributed import remote_recv, remote_send
def _elwise(mode):
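    # Wrap a raw Elemwise op of the given mode in a plain Python callable.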
op = | Elemwise(mode) | megengine.core.ops.builtin.Elemwise |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import gc
import platform
import weakref
import numpy as np
import pytest
import megengine as mge
import megengine.distributed as dist
import megengine.functional as F
from megengine.core._imperative_rt import TensorAttr, imperative
from megengine.core._imperative_rt.imperative import sync
from megengine.core.autodiff.grad import Grad
from megengine.core.ops.builtin import Elemwise
from megengine.core.tensor.raw_tensor import as_raw_tensor
from megengine.core.tensor.tensor import Tensor, apply
from megengine.core.tensor.tensor_wrapper import TensorWrapper
from megengine.functional.distributed import remote_recv, remote_send
def _elwise(mode):
op = Elemwise(mode)
def f(*args):
(result,) = apply(op, *args)
return result
return f
add = _elwise(Elemwise.Mode.ADD)
mul = _elwise(Elemwise.Mode.MUL)
cos = _elwise(Elemwise.Mode.COS)
relu = _elwise(Elemwise.Mode.RELU)
def as_tensor(x):
return Tensor(as_raw_tensor(x, device=mge.device.get_default_device()))
def save_to(self, name="grad"):
def callback(tensor, grad):
setattr(self, name, grad)
return callback
@pytest.mark.isolated_distributed
@pytest.mark.skipif(
platform.system() == "Windows", reason="windows disable MGB_ENABLE_OPR_MM"
)
def test_dist_grad():
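    # Two workers exchange a tensor via remote_send/remote_recv; the gradient must flow back across the link.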
world_size = 2
x_np = np.random.rand(10).astype("float32")
port = dist.get_free_ports(1)[0]
server = | dist.Server(port) | megengine.distributed.Server |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import gc
import platform
import weakref
import numpy as np
import pytest
import megengine as mge
import megengine.distributed as dist
import megengine.functional as F
from megengine.core._imperative_rt import TensorAttr, imperative
from megengine.core._imperative_rt.imperative import sync
from megengine.core.autodiff.grad import Grad
from megengine.core.ops.builtin import Elemwise
from megengine.core.tensor.raw_tensor import as_raw_tensor
from megengine.core.tensor.tensor import Tensor, apply
from megengine.core.tensor.tensor_wrapper import TensorWrapper
from megengine.functional.distributed import remote_recv, remote_send
def _elwise(mode):
op = Elemwise(mode)
def f(*args):
(result,) = apply(op, *args)
return result
return f
add = _elwise(Elemwise.Mode.ADD)
mul = _elwise(Elemwise.Mode.MUL)
cos = _elwise(Elemwise.Mode.COS)
relu = _elwise(Elemwise.Mode.RELU)
def as_tensor(x):
return Tensor(as_raw_tensor(x, device=mge.device.get_default_device()))
def save_to(self, name="grad"):
def callback(tensor, grad):
setattr(self, name, grad)
return callback
@pytest.mark.isolated_distributed
@pytest.mark.skipif(
platform.system() == "Windows", reason="windows disable MGB_ENABLE_OPR_MM"
)
def test_dist_grad():
world_size = 2
x_np = np.random.rand(10).astype("float32")
port = dist.get_free_ports(1)[0]
server = dist.Server(port)
def worker0():
dist.init_process_group("localhost", port, world_size, 0, 0)
mge.device.set_default_device("gpu0")
grad = Grad()
x = as_tensor(x_np)
grad.wrt(x, callback=save_to(x))
# need a placeholder to trace operator
send_x = remote_send(x, 1)
recv_x = remote_recv(1, x_np.shape, x_np.dtype, "gpu0")
y = recv_x * recv_x
grad([y], [as_tensor(np.ones_like(x_np))])
np.testing.assert_almost_equal(x.grad.numpy(), x.numpy() * 2)
def worker1():
dist.init_process_group("localhost", port, world_size, 1, 1)
mge.device.set_default_device("gpu1")
grad = Grad()
recv_x = remote_recv(0, x_np.shape, x_np.dtype, "gpu1")
send_x = remote_send(recv_x, 0)
grad([], [])
# sync because grad has a send operator
sync()
send_x.device._cn._sync_all()
import multiprocessing as mp
p0 = mp.Process(target=worker0)
p1 = mp.Process(target=worker1)
p0.start()
p1.start()
p0.join(10)
p1.join(10)
assert p0.exitcode == 0 and p1.exitcode == 0
def test_grad():
x_np = np.random.rand(10).astype("float32")
x = as_tensor(x_np)
grad = Grad().wrt(x, callback=save_to(x))
y = cos(x)
grad(y, as_tensor(np.ones_like(x_np)))
np.testing.assert_almost_equal(x.grad.numpy(), -np.sin(x_np))
def test_grad_2():
x_np = np.random.rand(10).astype("float32")
x = as_tensor(x_np)
grad = Grad().wrt(x, callback=save_to(x))
y = mul(x, x)
y = mul(y, y)
grad(y, as_tensor(np.ones_like(x_np)))
np.testing.assert_almost_equal(x.grad.numpy(), 4 * x_np ** 3, decimal=6)
@pytest.mark.skip(reason="high order gradient was not implemented yet")
def test_2nd_grad():
x_np = np.random.rand(10).astype("float32")
x = as_tensor(x_np)
ones = as_tensor(np.ones_like(x_np))
grad = Grad().wrt(x, callback=save_to(x))
grad2 = Grad().wrt(x, callback=save_to(x))
y = cos(x)
grad(y, ones)
np.testing.assert_almost_equal(x.grad.numpy(), -np.sin(x_np), decimal=5)
grad2(x.grad, ones)
np.testing.assert_almost_equal(x.grad.numpy(), -np.cos(x_np))
def test_grad_with_tensor_wrapper():
x_np = np.random.rand(10).astype("float32")
x = | TensorWrapper(x_np) | megengine.core.tensor.tensor_wrapper.TensorWrapper |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import gc
import platform
import weakref
import numpy as np
import pytest
import megengine as mge
import megengine.distributed as dist
import megengine.functional as F
from megengine.core._imperative_rt import TensorAttr, imperative
from megengine.core._imperative_rt.imperative import sync
from megengine.core.autodiff.grad import Grad
from megengine.core.ops.builtin import Elemwise
from megengine.core.tensor.raw_tensor import as_raw_tensor
from megengine.core.tensor.tensor import Tensor, apply
from megengine.core.tensor.tensor_wrapper import TensorWrapper
from megengine.functional.distributed import remote_recv, remote_send
def _elwise(mode):
op = Elemwise(mode)
def f(*args):
(result,) = apply(op, *args)
return result
return f
add = _elwise(Elemwise.Mode.ADD)
mul = _elwise(Elemwise.Mode.MUL)
cos = _elwise(Elemwise.Mode.COS)
relu = _elwise(Elemwise.Mode.RELU)
def as_tensor(x):
return Tensor(as_raw_tensor(x, device=mge.device.get_default_device()))
def save_to(self, name="grad"):
def callback(tensor, grad):
setattr(self, name, grad)
return callback
@pytest.mark.isolated_distributed
@pytest.mark.skipif(
platform.system() == "Windows", reason="windows disable MGB_ENABLE_OPR_MM"
)
def test_dist_grad():
world_size = 2
x_np = np.random.rand(10).astype("float32")
port = dist.get_free_ports(1)[0]
server = dist.Server(port)
def worker0():
dist.init_process_group("localhost", port, world_size, 0, 0)
mge.device.set_default_device("gpu0")
grad = Grad()
x = as_tensor(x_np)
grad.wrt(x, callback=save_to(x))
# need a placeholder to trace operator
send_x = remote_send(x, 1)
recv_x = remote_recv(1, x_np.shape, x_np.dtype, "gpu0")
y = recv_x * recv_x
grad([y], [as_tensor(np.ones_like(x_np))])
np.testing.assert_almost_equal(x.grad.numpy(), x.numpy() * 2)
def worker1():
dist.init_process_group("localhost", port, world_size, 1, 1)
mge.device.set_default_device("gpu1")
grad = Grad()
recv_x = remote_recv(0, x_np.shape, x_np.dtype, "gpu1")
send_x = remote_send(recv_x, 0)
grad([], [])
# sync because grad has a send operator
sync()
send_x.device._cn._sync_all()
import multiprocessing as mp
p0 = mp.Process(target=worker0)
p1 = mp.Process(target=worker1)
p0.start()
p1.start()
p0.join(10)
p1.join(10)
assert p0.exitcode == 0 and p1.exitcode == 0
def test_grad():
x_np = np.random.rand(10).astype("float32")
x = as_tensor(x_np)
grad = Grad().wrt(x, callback=save_to(x))
y = cos(x)
grad(y, as_tensor(np.ones_like(x_np)))
np.testing.assert_almost_equal(x.grad.numpy(), -np.sin(x_np))
def test_grad_2():
x_np = np.random.rand(10).astype("float32")
x = as_tensor(x_np)
grad = Grad().wrt(x, callback=save_to(x))
y = mul(x, x)
y = mul(y, y)
grad(y, as_tensor(np.ones_like(x_np)))
np.testing.assert_almost_equal(x.grad.numpy(), 4 * x_np ** 3, decimal=6)
@pytest.mark.skip(reason="high order gradient was not implemented yet")
def test_2nd_grad():
x_np = np.random.rand(10).astype("float32")
x = as_tensor(x_np)
ones = as_tensor(np.ones_like(x_np))
grad = Grad().wrt(x, callback=save_to(x))
grad2 = Grad().wrt(x, callback=save_to(x))
y = cos(x)
grad(y, ones)
np.testing.assert_almost_equal(x.grad.numpy(), -np.sin(x_np), decimal=5)
grad2(x.grad, ones)
np.testing.assert_almost_equal(x.grad.numpy(), -np.cos(x_np))
def test_grad_with_tensor_wrapper():
x_np = np.random.rand(10).astype("float32")
x = TensorWrapper(x_np)
grad = Grad().wrt(x, callback=save_to(x))
y = mul(x, x)
y = mul(y, y)
grad(y, TensorWrapper(np.ones_like(x_np)))
np.testing.assert_almost_equal(x.grad.numpy(), 4 * x_np ** 3, decimal=6)
def test_release():
def check(f):
n = 0
d = None
gc.disable()
try:
for i in range(3):
f()
m = len(gc.get_objects())
d = m - n
n = m
assert d == 0
finally:
gc.enable()
x = | TensorWrapper([0.0]) | megengine.core.tensor.tensor_wrapper.TensorWrapper |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import gc
import platform
import weakref
import numpy as np
import pytest
import megengine as mge
import megengine.distributed as dist
import megengine.functional as F
from megengine.core._imperative_rt import TensorAttr, imperative
from megengine.core._imperative_rt.imperative import sync
from megengine.core.autodiff.grad import Grad
from megengine.core.ops.builtin import Elemwise
from megengine.core.tensor.raw_tensor import as_raw_tensor
from megengine.core.tensor.tensor import Tensor, apply
from megengine.core.tensor.tensor_wrapper import TensorWrapper
from megengine.functional.distributed import remote_recv, remote_send
def _elwise(mode):
op = Elemwise(mode)
def f(*args):
(result,) = apply(op, *args)
return result
return f
add = _elwise(Elemwise.Mode.ADD)
mul = _elwise(Elemwise.Mode.MUL)
cos = _elwise(Elemwise.Mode.COS)
relu = _elwise(Elemwise.Mode.RELU)
def as_tensor(x):
return Tensor(as_raw_tensor(x, device=mge.device.get_default_device()))
def save_to(self, name="grad"):
def callback(tensor, grad):
setattr(self, name, grad)
return callback
@pytest.mark.isolated_distributed
@pytest.mark.skipif(
platform.system() == "Windows", reason="windows disable MGB_ENABLE_OPR_MM"
)
def test_dist_grad():
world_size = 2
x_np = np.random.rand(10).astype("float32")
port = dist.get_free_ports(1)[0]
server = dist.Server(port)
def worker0():
dist.init_process_group("localhost", port, world_size, 0, 0)
mge.device.set_default_device("gpu0")
grad = Grad()
x = as_tensor(x_np)
grad.wrt(x, callback=save_to(x))
# need a placeholder to trace operator
send_x = remote_send(x, 1)
recv_x = remote_recv(1, x_np.shape, x_np.dtype, "gpu0")
y = recv_x * recv_x
grad([y], [as_tensor(np.ones_like(x_np))])
np.testing.assert_almost_equal(x.grad.numpy(), x.numpy() * 2)
def worker1():
dist.init_process_group("localhost", port, world_size, 1, 1)
mge.device.set_default_device("gpu1")
grad = Grad()
recv_x = remote_recv(0, x_np.shape, x_np.dtype, "gpu1")
send_x = remote_send(recv_x, 0)
grad([], [])
# sync because grad has a send operator
sync()
send_x.device._cn._sync_all()
import multiprocessing as mp
p0 = mp.Process(target=worker0)
p1 = mp.Process(target=worker1)
p0.start()
p1.start()
p0.join(10)
p1.join(10)
assert p0.exitcode == 0 and p1.exitcode == 0
def test_grad():
x_np = np.random.rand(10).astype("float32")
x = as_tensor(x_np)
grad = Grad().wrt(x, callback=save_to(x))
y = cos(x)
grad(y, as_tensor(np.ones_like(x_np)))
np.testing.assert_almost_equal(x.grad.numpy(), -np.sin(x_np))
def test_grad_2():
x_np = np.random.rand(10).astype("float32")
x = as_tensor(x_np)
grad = Grad().wrt(x, callback=save_to(x))
y = mul(x, x)
y = mul(y, y)
grad(y, as_tensor(np.ones_like(x_np)))
np.testing.assert_almost_equal(x.grad.numpy(), 4 * x_np ** 3, decimal=6)
@pytest.mark.skip(reason="high order gradient was not implemented yet")
def test_2nd_grad():
x_np = np.random.rand(10).astype("float32")
x = as_tensor(x_np)
ones = as_tensor(np.ones_like(x_np))
grad = Grad().wrt(x, callback=save_to(x))
grad2 = Grad().wrt(x, callback=save_to(x))
y = cos(x)
grad(y, ones)
np.testing.assert_almost_equal(x.grad.numpy(), -np.sin(x_np), decimal=5)
grad2(x.grad, ones)
np.testing.assert_almost_equal(x.grad.numpy(), -np.cos(x_np))
def test_grad_with_tensor_wrapper():
x_np = np.random.rand(10).astype("float32")
x = TensorWrapper(x_np)
grad = Grad().wrt(x, callback=save_to(x))
y = mul(x, x)
y = mul(y, y)
grad(y, TensorWrapper(np.ones_like(x_np)))
np.testing.assert_almost_equal(x.grad.numpy(), 4 * x_np ** 3, decimal=6)
def test_release():
def check(f):
n = 0
d = None
gc.disable()
try:
for i in range(3):
f()
m = len(gc.get_objects())
d = m - n
n = m
assert d == 0
finally:
gc.enable()
x = TensorWrapper([0.0])
dy = TensorWrapper(np.ones_like(x.numpy()))
@check
def _():
g = Grad().wrt(x)
y = x * x
g(y, dy)
@check
def _():
with Grad().wrt(x) as g:
pass
@check
def _():
with Grad().wrt(x) as g:
y = x * x
def test_grad_inplace():
x_np = np.random.rand(10).astype("float32")
x = | TensorWrapper(x_np) | megengine.core.tensor.tensor_wrapper.TensorWrapper |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import gc
import platform
import weakref
import numpy as np
import pytest
import megengine as mge
import megengine.distributed as dist
import megengine.functional as F
from megengine.core._imperative_rt import TensorAttr, imperative
from megengine.core._imperative_rt.imperative import sync
from megengine.core.autodiff.grad import Grad
from megengine.core.ops.builtin import Elemwise
from megengine.core.tensor.raw_tensor import as_raw_tensor
from megengine.core.tensor.tensor import Tensor, apply
from megengine.core.tensor.tensor_wrapper import TensorWrapper
from megengine.functional.distributed import remote_recv, remote_send
def _elwise(mode):
op = Elemwise(mode)
def f(*args):
(result,) = apply(op, *args)
return result
return f
add = _elwise(Elemwise.Mode.ADD)
mul = _elwise(Elemwise.Mode.MUL)
cos = _elwise(Elemwise.Mode.COS)
relu = _elwise(Elemwise.Mode.RELU)
def as_tensor(x):
return Tensor(as_raw_tensor(x, device=mge.device.get_default_device()))
def save_to(self, name="grad"):
def callback(tensor, grad):
setattr(self, name, grad)
return callback
@pytest.mark.isolated_distributed
@pytest.mark.skipif(
platform.system() == "Windows", reason="windows disable MGB_ENABLE_OPR_MM"
)
def test_dist_grad():
world_size = 2
x_np = np.random.rand(10).astype("float32")
port = dist.get_free_ports(1)[0]
server = dist.Server(port)
def worker0():
dist.init_process_group("localhost", port, world_size, 0, 0)
mge.device.set_default_device("gpu0")
grad = Grad()
x = as_tensor(x_np)
grad.wrt(x, callback=save_to(x))
# need a placeholder to trace operator
send_x = remote_send(x, 1)
recv_x = remote_recv(1, x_np.shape, x_np.dtype, "gpu0")
y = recv_x * recv_x
grad([y], [as_tensor(np.ones_like(x_np))])
np.testing.assert_almost_equal(x.grad.numpy(), x.numpy() * 2)
def worker1():
dist.init_process_group("localhost", port, world_size, 1, 1)
mge.device.set_default_device("gpu1")
grad = Grad()
recv_x = remote_recv(0, x_np.shape, x_np.dtype, "gpu1")
send_x = remote_send(recv_x, 0)
grad([], [])
# sync because grad has a send operator
sync()
send_x.device._cn._sync_all()
import multiprocessing as mp
p0 = mp.Process(target=worker0)
p1 = mp.Process(target=worker1)
p0.start()
p1.start()
p0.join(10)
p1.join(10)
assert p0.exitcode == 0 and p1.exitcode == 0
def test_grad():
x_np = np.random.rand(10).astype("float32")
x = as_tensor(x_np)
grad = Grad().wrt(x, callback=save_to(x))
y = cos(x)
grad(y, as_tensor(np.ones_like(x_np)))
np.testing.assert_almost_equal(x.grad.numpy(), -np.sin(x_np))
def test_grad_2():
x_np = np.random.rand(10).astype("float32")
x = as_tensor(x_np)
grad = Grad().wrt(x, callback=save_to(x))
y = mul(x, x)
y = mul(y, y)
grad(y, as_tensor(np.ones_like(x_np)))
np.testing.assert_almost_equal(x.grad.numpy(), 4 * x_np ** 3, decimal=6)
@pytest.mark.skip(reason="high order gradient was not implemented yet")
def test_2nd_grad():
x_np = np.random.rand(10).astype("float32")
x = as_tensor(x_np)
ones = as_tensor(np.ones_like(x_np))
grad = Grad().wrt(x, callback=save_to(x))
grad2 = Grad().wrt(x, callback=save_to(x))
y = cos(x)
grad(y, ones)
np.testing.assert_almost_equal(x.grad.numpy(), -np.sin(x_np), decimal=5)
grad2(x.grad, ones)
np.testing.assert_almost_equal(x.grad.numpy(), -np.cos(x_np))
def test_grad_with_tensor_wrapper():
x_np = np.random.rand(10).astype("float32")
x = TensorWrapper(x_np)
grad = Grad().wrt(x, callback=save_to(x))
y = mul(x, x)
y = mul(y, y)
grad(y, TensorWrapper(np.ones_like(x_np)))
np.testing.assert_almost_equal(x.grad.numpy(), 4 * x_np ** 3, decimal=6)
def test_release():
def check(f):
n = 0
d = None
gc.disable()
try:
for i in range(3):
f()
m = len(gc.get_objects())
d = m - n
n = m
assert d == 0
finally:
gc.enable()
x = TensorWrapper([0.0])
dy = TensorWrapper(np.ones_like(x.numpy()))
@check
def _():
g = Grad().wrt(x)
y = x * x
g(y, dy)
@check
def _():
with Grad().wrt(x) as g:
pass
@check
def _():
with Grad().wrt(x) as g:
y = x * x
def test_grad_inplace():
x_np = np.random.rand(10).astype("float32")
x = TensorWrapper(x_np)
grad = Grad().wrt(x, callback=save_to(x))
y = mul(x, x)
y *= y
grad(y, TensorWrapper(np.ones_like(x_np)))
np.testing.assert_almost_equal(x.grad.numpy(), 4 * x_np ** 3, decimal=6)
def test_elemwise_add():
x_np = np.random.rand(10).astype("float32")
y_np = np.random.rand(10, 10).astype("float32")
dz_np = np.random.rand(10, 10).astype("float32")
x = | TensorWrapper(x_np) | megengine.core.tensor.tensor_wrapper.TensorWrapper |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import gc
import platform
import weakref
import numpy as np
import pytest
import megengine as mge
import megengine.distributed as dist
import megengine.functional as F
from megengine.core._imperative_rt import TensorAttr, imperative
from megengine.core._imperative_rt.imperative import sync
from megengine.core.autodiff.grad import Grad
from megengine.core.ops.builtin import Elemwise
from megengine.core.tensor.raw_tensor import as_raw_tensor
from megengine.core.tensor.tensor import Tensor, apply
from megengine.core.tensor.tensor_wrapper import TensorWrapper
from megengine.functional.distributed import remote_recv, remote_send
def _elwise(mode):
op = Elemwise(mode)
def f(*args):
(result,) = apply(op, *args)
return result
return f
add = _elwise(Elemwise.Mode.ADD)
mul = _elwise(Elemwise.Mode.MUL)
cos = _elwise(Elemwise.Mode.COS)
relu = _elwise(Elemwise.Mode.RELU)
def as_tensor(x):
return Tensor(as_raw_tensor(x, device=mge.device.get_default_device()))
def save_to(self, name="grad"):
def callback(tensor, grad):
setattr(self, name, grad)
return callback
@pytest.mark.isolated_distributed
@pytest.mark.skipif(
platform.system() == "Windows", reason="windows disable MGB_ENABLE_OPR_MM"
)
def test_dist_grad():
world_size = 2
x_np = np.random.rand(10).astype("float32")
port = dist.get_free_ports(1)[0]
server = dist.Server(port)
def worker0():
dist.init_process_group("localhost", port, world_size, 0, 0)
mge.device.set_default_device("gpu0")
grad = Grad()
x = as_tensor(x_np)
grad.wrt(x, callback=save_to(x))
# need a placeholder to trace operator
send_x = remote_send(x, 1)
recv_x = remote_recv(1, x_np.shape, x_np.dtype, "gpu0")
y = recv_x * recv_x
grad([y], [as_tensor(np.ones_like(x_np))])
np.testing.assert_almost_equal(x.grad.numpy(), x.numpy() * 2)
def worker1():
dist.init_process_group("localhost", port, world_size, 1, 1)
mge.device.set_default_device("gpu1")
grad = Grad()
recv_x = remote_recv(0, x_np.shape, x_np.dtype, "gpu1")
send_x = remote_send(recv_x, 0)
grad([], [])
# sync because grad has a send operator
sync()
send_x.device._cn._sync_all()
import multiprocessing as mp
p0 = mp.Process(target=worker0)
p1 = mp.Process(target=worker1)
p0.start()
p1.start()
p0.join(10)
p1.join(10)
assert p0.exitcode == 0 and p1.exitcode == 0
def test_grad():
x_np = np.random.rand(10).astype("float32")
x = as_tensor(x_np)
grad = Grad().wrt(x, callback=save_to(x))
y = cos(x)
grad(y, as_tensor(np.ones_like(x_np)))
np.testing.assert_almost_equal(x.grad.numpy(), -np.sin(x_np))
def test_grad_2():
x_np = np.random.rand(10).astype("float32")
x = as_tensor(x_np)
grad = Grad().wrt(x, callback=save_to(x))
y = mul(x, x)
y = mul(y, y)
grad(y, as_tensor(np.ones_like(x_np)))
np.testing.assert_almost_equal(x.grad.numpy(), 4 * x_np ** 3, decimal=6)
@pytest.mark.skip(reason="high order gradient was not implemented yet")
def test_2nd_grad():
x_np = np.random.rand(10).astype("float32")
x = as_tensor(x_np)
ones = as_tensor(np.ones_like(x_np))
grad = Grad().wrt(x, callback=save_to(x))
grad2 = Grad().wrt(x, callback=save_to(x))
y = cos(x)
grad(y, ones)
np.testing.assert_almost_equal(x.grad.numpy(), -np.sin(x_np), decimal=5)
grad2(x.grad, ones)
np.testing.assert_almost_equal(x.grad.numpy(), -np.cos(x_np))
def test_grad_with_tensor_wrapper():
x_np = np.random.rand(10).astype("float32")
x = TensorWrapper(x_np)
grad = Grad().wrt(x, callback=save_to(x))
y = mul(x, x)
y = mul(y, y)
grad(y, TensorWrapper(np.ones_like(x_np)))
np.testing.assert_almost_equal(x.grad.numpy(), 4 * x_np ** 3, decimal=6)
def test_release():
def check(f):
n = 0
d = None
gc.disable()
try:
for i in range(3):
f()
m = len(gc.get_objects())
d = m - n
n = m
assert d == 0
finally:
gc.enable()
x = TensorWrapper([0.0])
dy = TensorWrapper(np.ones_like(x.numpy()))
@check
def _():
g = Grad().wrt(x)
y = x * x
g(y, dy)
@check
def _():
with Grad().wrt(x) as g:
pass
@check
def _():
with Grad().wrt(x) as g:
y = x * x
def test_grad_inplace():
x_np = np.random.rand(10).astype("float32")
x = TensorWrapper(x_np)
grad = Grad().wrt(x, callback=save_to(x))
y = mul(x, x)
y *= y
grad(y, TensorWrapper(np.ones_like(x_np)))
np.testing.assert_almost_equal(x.grad.numpy(), 4 * x_np ** 3, decimal=6)
def test_elemwise_add():
x_np = np.random.rand(10).astype("float32")
y_np = np.random.rand(10, 10).astype("float32")
dz_np = np.random.rand(10, 10).astype("float32")
x = TensorWrapper(x_np)
y = | TensorWrapper(y_np) | megengine.core.tensor.tensor_wrapper.TensorWrapper |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import gc
import platform
import weakref
import numpy as np
import pytest
import megengine as mge
import megengine.distributed as dist
import megengine.functional as F
from megengine.core._imperative_rt import TensorAttr, imperative
from megengine.core._imperative_rt.imperative import sync
from megengine.core.autodiff.grad import Grad
from megengine.core.ops.builtin import Elemwise
from megengine.core.tensor.raw_tensor import as_raw_tensor
from megengine.core.tensor.tensor import Tensor, apply
from megengine.core.tensor.tensor_wrapper import TensorWrapper
from megengine.functional.distributed import remote_recv, remote_send
def _elwise(mode):
op = Elemwise(mode)
def f(*args):
(result,) = apply(op, *args)
return result
return f
add = _elwise(Elemwise.Mode.ADD)
mul = _elwise(Elemwise.Mode.MUL)
cos = _elwise(Elemwise.Mode.COS)
relu = _elwise(Elemwise.Mode.RELU)
def as_tensor(x):
return Tensor(as_raw_tensor(x, device=mge.device.get_default_device()))
def save_to(self, name="grad"):
def callback(tensor, grad):
setattr(self, name, grad)
return callback
@pytest.mark.isolated_distributed
@pytest.mark.skipif(
platform.system() == "Windows", reason="windows disable MGB_ENABLE_OPR_MM"
)
def test_dist_grad():
world_size = 2
x_np = np.random.rand(10).astype("float32")
port = dist.get_free_ports(1)[0]
server = dist.Server(port)
def worker0():
dist.init_process_group("localhost", port, world_size, 0, 0)
mge.device.set_default_device("gpu0")
grad = Grad()
x = as_tensor(x_np)
grad.wrt(x, callback=save_to(x))
# need a placeholder to trace operator
send_x = remote_send(x, 1)
recv_x = remote_recv(1, x_np.shape, x_np.dtype, "gpu0")
y = recv_x * recv_x
grad([y], [as_tensor(np.ones_like(x_np))])
np.testing.assert_almost_equal(x.grad.numpy(), x.numpy() * 2)
def worker1():
dist.init_process_group("localhost", port, world_size, 1, 1)
mge.device.set_default_device("gpu1")
grad = Grad()
recv_x = remote_recv(0, x_np.shape, x_np.dtype, "gpu1")
send_x = remote_send(recv_x, 0)
grad([], [])
# sync because grad has a send operator
sync()
send_x.device._cn._sync_all()
import multiprocessing as mp
p0 = mp.Process(target=worker0)
p1 = mp.Process(target=worker1)
p0.start()
p1.start()
p0.join(10)
p1.join(10)
assert p0.exitcode == 0 and p1.exitcode == 0
def test_grad():
x_np = np.random.rand(10).astype("float32")
x = as_tensor(x_np)
grad = Grad().wrt(x, callback=save_to(x))
y = cos(x)
grad(y, as_tensor(np.ones_like(x_np)))
np.testing.assert_almost_equal(x.grad.numpy(), -np.sin(x_np))
def test_grad_2():
x_np = np.random.rand(10).astype("float32")
x = as_tensor(x_np)
grad = Grad().wrt(x, callback=save_to(x))
y = mul(x, x)
y = mul(y, y)
grad(y, as_tensor(np.ones_like(x_np)))
np.testing.assert_almost_equal(x.grad.numpy(), 4 * x_np ** 3, decimal=6)
@pytest.mark.skip(reason="high order gradient was not implemented yet")
def test_2nd_grad():
x_np = np.random.rand(10).astype("float32")
x = as_tensor(x_np)
ones = as_tensor(np.ones_like(x_np))
grad = Grad().wrt(x, callback=save_to(x))
grad2 = Grad().wrt(x, callback=save_to(x))
y = cos(x)
grad(y, ones)
np.testing.assert_almost_equal(x.grad.numpy(), -np.sin(x_np), decimal=5)
grad2(x.grad, ones)
np.testing.assert_almost_equal(x.grad.numpy(), -np.cos(x_np))
def test_grad_with_tensor_wrapper():
x_np = np.random.rand(10).astype("float32")
x = TensorWrapper(x_np)
grad = Grad().wrt(x, callback=save_to(x))
y = mul(x, x)
y = mul(y, y)
grad(y, TensorWrapper(np.ones_like(x_np)))
np.testing.assert_almost_equal(x.grad.numpy(), 4 * x_np ** 3, decimal=6)
def test_release():
def check(f):
n = 0
d = None
gc.disable()
try:
for i in range(3):
f()
m = len(gc.get_objects())
d = m - n
n = m
assert d == 0
finally:
gc.enable()
x = TensorWrapper([0.0])
dy = TensorWrapper(np.ones_like(x.numpy()))
@check
def _():
g = Grad().wrt(x)
y = x * x
g(y, dy)
@check
def _():
with Grad().wrt(x) as g:
pass
@check
def _():
with Grad().wrt(x) as g:
y = x * x
def test_grad_inplace():
x_np = np.random.rand(10).astype("float32")
x = TensorWrapper(x_np)
grad = Grad().wrt(x, callback=save_to(x))
y = mul(x, x)
y *= y
grad(y, TensorWrapper(np.ones_like(x_np)))
np.testing.assert_almost_equal(x.grad.numpy(), 4 * x_np ** 3, decimal=6)
def test_elemwise_add():
x_np = np.random.rand(10).astype("float32")
y_np = np.random.rand(10, 10).astype("float32")
dz_np = np.random.rand(10, 10).astype("float32")
x = TensorWrapper(x_np)
y = TensorWrapper(y_np)
dz = | TensorWrapper(dz_np) | megengine.core.tensor.tensor_wrapper.TensorWrapper |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import gc
import platform
import weakref
import numpy as np
import pytest
import megengine as mge
import megengine.distributed as dist
import megengine.functional as F
from megengine.core._imperative_rt import TensorAttr, imperative
from megengine.core._imperative_rt.imperative import sync
from megengine.core.autodiff.grad import Grad
from megengine.core.ops.builtin import Elemwise
from megengine.core.tensor.raw_tensor import as_raw_tensor
from megengine.core.tensor.tensor import Tensor, apply
from megengine.core.tensor.tensor_wrapper import TensorWrapper
from megengine.functional.distributed import remote_recv, remote_send
def _elwise(mode):
op = Elemwise(mode)
def f(*args):
(result,) = apply(op, *args)
return result
return f
add = _elwise(Elemwise.Mode.ADD)
mul = _elwise(Elemwise.Mode.MUL)
cos = _elwise(Elemwise.Mode.COS)
relu = _elwise(Elemwise.Mode.RELU)
def as_tensor(x):
return Tensor(as_raw_tensor(x, device=mge.device.get_default_device()))
def save_to(self, name="grad"):
def callback(tensor, grad):
setattr(self, name, grad)
return callback
@pytest.mark.isolated_distributed
@pytest.mark.skipif(
platform.system() == "Windows", reason="windows disable MGB_ENABLE_OPR_MM"
)
def test_dist_grad():
world_size = 2
x_np = np.random.rand(10).astype("float32")
port = dist.get_free_ports(1)[0]
server = dist.Server(port)
def worker0():
dist.init_process_group("localhost", port, world_size, 0, 0)
mge.device.set_default_device("gpu0")
grad = Grad()
x = as_tensor(x_np)
grad.wrt(x, callback=save_to(x))
# need a placeholder to trace operator
send_x = remote_send(x, 1)
recv_x = remote_recv(1, x_np.shape, x_np.dtype, "gpu0")
y = recv_x * recv_x
grad([y], [as_tensor(np.ones_like(x_np))])
np.testing.assert_almost_equal(x.grad.numpy(), x.numpy() * 2)
def worker1():
dist.init_process_group("localhost", port, world_size, 1, 1)
mge.device.set_default_device("gpu1")
grad = Grad()
recv_x = remote_recv(0, x_np.shape, x_np.dtype, "gpu1")
send_x = remote_send(recv_x, 0)
grad([], [])
# sync because grad has a send operator
sync()
send_x.device._cn._sync_all()
import multiprocessing as mp
p0 = mp.Process(target=worker0)
p1 = mp.Process(target=worker1)
p0.start()
p1.start()
p0.join(10)
p1.join(10)
assert p0.exitcode == 0 and p1.exitcode == 0
def test_grad():
x_np = np.random.rand(10).astype("float32")
x = as_tensor(x_np)
grad = Grad().wrt(x, callback=save_to(x))
y = cos(x)
grad(y, as_tensor(np.ones_like(x_np)))
np.testing.assert_almost_equal(x.grad.numpy(), -np.sin(x_np))
def test_grad_2():
x_np = np.random.rand(10).astype("float32")
x = as_tensor(x_np)
grad = Grad().wrt(x, callback=save_to(x))
y = mul(x, x)
y = mul(y, y)
grad(y, as_tensor(np.ones_like(x_np)))
np.testing.assert_almost_equal(x.grad.numpy(), 4 * x_np ** 3, decimal=6)
@pytest.mark.skip(reason="high order gradient was not implemented yet")
def test_2nd_grad():
x_np = np.random.rand(10).astype("float32")
x = as_tensor(x_np)
ones = as_tensor(np.ones_like(x_np))
grad = Grad().wrt(x, callback=save_to(x))
grad2 = Grad().wrt(x, callback=save_to(x))
y = cos(x)
grad(y, ones)
np.testing.assert_almost_equal(x.grad.numpy(), -np.sin(x_np), decimal=5)
grad2(x.grad, ones)
np.testing.assert_almost_equal(x.grad.numpy(), -np.cos(x_np))
def test_grad_with_tensor_wrapper():
x_np = np.random.rand(10).astype("float32")
x = TensorWrapper(x_np)
grad = Grad().wrt(x, callback=save_to(x))
y = mul(x, x)
y = mul(y, y)
grad(y, TensorWrapper(np.ones_like(x_np)))
np.testing.assert_almost_equal(x.grad.numpy(), 4 * x_np ** 3, decimal=6)
def test_release():
def check(f):
n = 0
d = None
gc.disable()
try:
for i in range(3):
f()
m = len(gc.get_objects())
d = m - n
n = m
assert d == 0
finally:
gc.enable()
x = TensorWrapper([0.0])
dy = TensorWrapper(np.ones_like(x.numpy()))
@check
def _():
g = Grad().wrt(x)
y = x * x
g(y, dy)
@check
def _():
with Grad().wrt(x) as g:
pass
@check
def _():
with Grad().wrt(x) as g:
y = x * x
def test_grad_inplace():
x_np = np.random.rand(10).astype("float32")
x = TensorWrapper(x_np)
grad = Grad().wrt(x, callback=save_to(x))
y = mul(x, x)
y *= y
grad(y, TensorWrapper(np.ones_like(x_np)))
np.testing.assert_almost_equal(x.grad.numpy(), 4 * x_np ** 3, decimal=6)
def test_elemwise_add():
x_np = np.random.rand(10).astype("float32")
y_np = np.random.rand(10, 10).astype("float32")
dz_np = np.random.rand(10, 10).astype("float32")
x = TensorWrapper(x_np)
y = TensorWrapper(y_np)
dz = TensorWrapper(dz_np)
refs = {}
def f(x, y):
x = x * 2
refs["x"] = weakref.ref(x.__wrapped__)
refs["y"] = weakref.ref(y.__wrapped__)
return x + y
grad = Grad().wrt(x, callback=save_to(x))
z = f(x, y)
del y
for k, r in refs.items():
assert r() is None
grad(z, dz)
np.testing.assert_almost_equal(x.grad.numpy(), dz_np.sum(0) * 2, decimal=5)
def test_elemwise_relu():
x_np = [1.0, -1.0]
dz_np = [1.0]
x = | TensorWrapper(x_np) | megengine.core.tensor.tensor_wrapper.TensorWrapper |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import gc
import platform
import weakref
import numpy as np
import pytest
import megengine as mge
import megengine.distributed as dist
import megengine.functional as F
from megengine.core._imperative_rt import TensorAttr, imperative
from megengine.core._imperative_rt.imperative import sync
from megengine.core.autodiff.grad import Grad
from megengine.core.ops.builtin import Elemwise
from megengine.core.tensor.raw_tensor import as_raw_tensor
from megengine.core.tensor.tensor import Tensor, apply
from megengine.core.tensor.tensor_wrapper import TensorWrapper
from megengine.functional.distributed import remote_recv, remote_send
def _elwise(mode):
op = Elemwise(mode)
def f(*args):
(result,) = apply(op, *args)
return result
return f
add = _elwise(Elemwise.Mode.ADD)
mul = _elwise(Elemwise.Mode.MUL)
cos = _elwise(Elemwise.Mode.COS)
relu = _elwise(Elemwise.Mode.RELU)
def as_tensor(x):
return Tensor(as_raw_tensor(x, device=mge.device.get_default_device()))
def save_to(self, name="grad"):
def callback(tensor, grad):
setattr(self, name, grad)
return callback
@pytest.mark.isolated_distributed
@pytest.mark.skipif(
platform.system() == "Windows", reason="windows disable MGB_ENABLE_OPR_MM"
)
def test_dist_grad():
world_size = 2
x_np = np.random.rand(10).astype("float32")
port = dist.get_free_ports(1)[0]
server = dist.Server(port)
def worker0():
dist.init_process_group("localhost", port, world_size, 0, 0)
mge.device.set_default_device("gpu0")
grad = Grad()
x = as_tensor(x_np)
grad.wrt(x, callback=save_to(x))
# need a placeholder to trace operator
send_x = remote_send(x, 1)
recv_x = remote_recv(1, x_np.shape, x_np.dtype, "gpu0")
y = recv_x * recv_x
grad([y], [as_tensor(np.ones_like(x_np))])
np.testing.assert_almost_equal(x.grad.numpy(), x.numpy() * 2)
def worker1():
dist.init_process_group("localhost", port, world_size, 1, 1)
mge.device.set_default_device("gpu1")
grad = Grad()
recv_x = remote_recv(0, x_np.shape, x_np.dtype, "gpu1")
send_x = remote_send(recv_x, 0)
grad([], [])
# sync because grad has a send operator
sync()
send_x.device._cn._sync_all()
import multiprocessing as mp
p0 = mp.Process(target=worker0)
p1 = mp.Process(target=worker1)
p0.start()
p1.start()
p0.join(10)
p1.join(10)
assert p0.exitcode == 0 and p1.exitcode == 0
def test_grad():
x_np = np.random.rand(10).astype("float32")
x = as_tensor(x_np)
grad = Grad().wrt(x, callback=save_to(x))
y = cos(x)
grad(y, as_tensor(np.ones_like(x_np)))
np.testing.assert_almost_equal(x.grad.numpy(), -np.sin(x_np))
def test_grad_2():
x_np = np.random.rand(10).astype("float32")
x = as_tensor(x_np)
grad = Grad().wrt(x, callback=save_to(x))
y = mul(x, x)
y = mul(y, y)
grad(y, as_tensor(np.ones_like(x_np)))
np.testing.assert_almost_equal(x.grad.numpy(), 4 * x_np ** 3, decimal=6)
@pytest.mark.skip(reason="high order gradient was not implemented yet")
def test_2nd_grad():
x_np = np.random.rand(10).astype("float32")
x = as_tensor(x_np)
ones = as_tensor(np.ones_like(x_np))
grad = Grad().wrt(x, callback=save_to(x))
grad2 = Grad().wrt(x, callback=save_to(x))
y = cos(x)
grad(y, ones)
np.testing.assert_almost_equal(x.grad.numpy(), -np.sin(x_np), decimal=5)
grad2(x.grad, ones)
np.testing.assert_almost_equal(x.grad.numpy(), -np.cos(x_np))
def test_grad_with_tensor_wrapper():
x_np = np.random.rand(10).astype("float32")
x = TensorWrapper(x_np)
grad = Grad().wrt(x, callback=save_to(x))
y = mul(x, x)
y = mul(y, y)
grad(y, TensorWrapper(np.ones_like(x_np)))
np.testing.assert_almost_equal(x.grad.numpy(), 4 * x_np ** 3, decimal=6)
def test_release():
def check(f):
n = 0
d = None
gc.disable()
try:
for i in range(3):
f()
m = len(gc.get_objects())
d = m - n
n = m
assert d == 0
finally:
gc.enable()
x = TensorWrapper([0.0])
dy = TensorWrapper(np.ones_like(x.numpy()))
@check
def _():
g = Grad().wrt(x)
y = x * x
g(y, dy)
@check
def _():
with Grad().wrt(x) as g:
pass
@check
def _():
with Grad().wrt(x) as g:
y = x * x
def test_grad_inplace():
x_np = np.random.rand(10).astype("float32")
x = TensorWrapper(x_np)
grad = Grad().wrt(x, callback=save_to(x))
y = mul(x, x)
y *= y
grad(y, TensorWrapper(np.ones_like(x_np)))
np.testing.assert_almost_equal(x.grad.numpy(), 4 * x_np ** 3, decimal=6)
def test_elemwise_add():
x_np = np.random.rand(10).astype("float32")
y_np = np.random.rand(10, 10).astype("float32")
dz_np = np.random.rand(10, 10).astype("float32")
x = TensorWrapper(x_np)
y = TensorWrapper(y_np)
dz = TensorWrapper(dz_np)
refs = {}
def f(x, y):
x = x * 2
refs["x"] = weakref.ref(x.__wrapped__)
refs["y"] = weakref.ref(y.__wrapped__)
return x + y
grad = Grad().wrt(x, callback=save_to(x))
z = f(x, y)
del y
for k, r in refs.items():
assert r() is None
grad(z, dz)
np.testing.assert_almost_equal(x.grad.numpy(), dz_np.sum(0) * 2, decimal=5)
def test_elemwise_relu():
x_np = [1.0, -1.0]
dz_np = [1.0]
x = TensorWrapper(x_np)
dz = | TensorWrapper(dz_np) | megengine.core.tensor.tensor_wrapper.TensorWrapper |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import gc
import platform
import weakref
import numpy as np
import pytest
import megengine as mge
import megengine.distributed as dist
import megengine.functional as F
from megengine.core._imperative_rt import TensorAttr, imperative
from megengine.core._imperative_rt.imperative import sync
from megengine.core.autodiff.grad import Grad
from megengine.core.ops.builtin import Elemwise
from megengine.core.tensor.raw_tensor import as_raw_tensor
from megengine.core.tensor.tensor import Tensor, apply
from megengine.core.tensor.tensor_wrapper import TensorWrapper
from megengine.functional.distributed import remote_recv, remote_send
def _elwise(mode):
op = Elemwise(mode)
def f(*args):
(result,) = apply(op, *args)
return result
return f
add = _elwise(Elemwise.Mode.ADD)
mul = _elwise(Elemwise.Mode.MUL)
cos = _elwise(Elemwise.Mode.COS)
relu = _elwise(Elemwise.Mode.RELU)
def as_tensor(x):
return Tensor(as_raw_tensor(x, device=mge.device.get_default_device()))
def save_to(self, name="grad"):
def callback(tensor, grad):
setattr(self, name, grad)
return callback
@pytest.mark.isolated_distributed
@pytest.mark.skipif(
platform.system() == "Windows", reason="windows disable MGB_ENABLE_OPR_MM"
)
def test_dist_grad():
world_size = 2
x_np = np.random.rand(10).astype("float32")
port = dist.get_free_ports(1)[0]
server = dist.Server(port)
def worker0():
dist.init_process_group("localhost", port, world_size, 0, 0)
mge.device.set_default_device("gpu0")
grad = Grad()
x = as_tensor(x_np)
grad.wrt(x, callback=save_to(x))
# need a placeholder to trace operator
send_x = remote_send(x, 1)
recv_x = remote_recv(1, x_np.shape, x_np.dtype, "gpu0")
y = recv_x * recv_x
grad([y], [as_tensor(np.ones_like(x_np))])
np.testing.assert_almost_equal(x.grad.numpy(), x.numpy() * 2)
def worker1():
dist.init_process_group("localhost", port, world_size, 1, 1)
mge.device.set_default_device("gpu1")
grad = Grad()
recv_x = remote_recv(0, x_np.shape, x_np.dtype, "gpu1")
send_x = remote_send(recv_x, 0)
grad([], [])
# sync because grad has a send operator
sync()
send_x.device._cn._sync_all()
import multiprocessing as mp
p0 = mp.Process(target=worker0)
p1 = mp.Process(target=worker1)
p0.start()
p1.start()
p0.join(10)
p1.join(10)
assert p0.exitcode == 0 and p1.exitcode == 0
def test_grad():
x_np = np.random.rand(10).astype("float32")
x = as_tensor(x_np)
grad = Grad().wrt(x, callback=save_to(x))
y = cos(x)
grad(y, as_tensor(np.ones_like(x_np)))
np.testing.assert_almost_equal(x.grad.numpy(), -np.sin(x_np))
def test_grad_2():
x_np = np.random.rand(10).astype("float32")
x = as_tensor(x_np)
grad = Grad().wrt(x, callback=save_to(x))
y = mul(x, x)
y = mul(y, y)
grad(y, as_tensor(np.ones_like(x_np)))
np.testing.assert_almost_equal(x.grad.numpy(), 4 * x_np ** 3, decimal=6)
@pytest.mark.skip(reason="high order gradient was not implemented yet")
def test_2nd_grad():
x_np = np.random.rand(10).astype("float32")
x = as_tensor(x_np)
ones = as_tensor(np.ones_like(x_np))
grad = Grad().wrt(x, callback=save_to(x))
grad2 = Grad().wrt(x, callback=save_to(x))
y = cos(x)
grad(y, ones)
np.testing.assert_almost_equal(x.grad.numpy(), -np.sin(x_np), decimal=5)
grad2(x.grad, ones)
np.testing.assert_almost_equal(x.grad.numpy(), -np.cos(x_np))
def test_grad_with_tensor_wrapper():
x_np = np.random.rand(10).astype("float32")
x = TensorWrapper(x_np)
grad = Grad().wrt(x, callback=save_to(x))
y = mul(x, x)
y = mul(y, y)
grad(y, TensorWrapper(np.ones_like(x_np)))
np.testing.assert_almost_equal(x.grad.numpy(), 4 * x_np ** 3, decimal=6)
def test_release():
def check(f):
n = 0
d = None
gc.disable()
try:
for i in range(3):
f()
m = len(gc.get_objects())
d = m - n
n = m
assert d == 0
finally:
gc.enable()
x = TensorWrapper([0.0])
dy = TensorWrapper(np.ones_like(x.numpy()))
@check
def _():
g = Grad().wrt(x)
y = x * x
g(y, dy)
@check
def _():
with Grad().wrt(x) as g:
pass
@check
def _():
with Grad().wrt(x) as g:
y = x * x
def test_grad_inplace():
x_np = np.random.rand(10).astype("float32")
x = TensorWrapper(x_np)
grad = Grad().wrt(x, callback=save_to(x))
y = mul(x, x)
y *= y
grad(y, TensorWrapper(np.ones_like(x_np)))
np.testing.assert_almost_equal(x.grad.numpy(), 4 * x_np ** 3, decimal=6)
def test_elemwise_add():
x_np = np.random.rand(10).astype("float32")
y_np = np.random.rand(10, 10).astype("float32")
dz_np = np.random.rand(10, 10).astype("float32")
x = TensorWrapper(x_np)
y = TensorWrapper(y_np)
dz = TensorWrapper(dz_np)
refs = {}
def f(x, y):
x = x * 2
refs["x"] = weakref.ref(x.__wrapped__)
refs["y"] = weakref.ref(y.__wrapped__)
return x + y
grad = Grad().wrt(x, callback=save_to(x))
z = f(x, y)
del y
for k, r in refs.items():
assert r() is None
grad(z, dz)
np.testing.assert_almost_equal(x.grad.numpy(), dz_np.sum(0) * 2, decimal=5)
def test_elemwise_relu():
x_np = [1.0, -1.0]
dz_np = [1.0]
x = TensorWrapper(x_np)
dz = TensorWrapper(dz_np)
refs = {}
def f(x):
x = x * 2
refs["x"] = weakref.ref(x.__wrapped__)
return relu(x)
grad = Grad().wrt(x, callback=save_to(x))
z = f(x)
assert refs["x"]() is None
grad(z, dz)
np.testing.assert_almost_equal(x.grad.numpy(), [2.0, 0])
def test_elemwise_relu_backward_fn():
op = | Elemwise(Elemwise.Mode.RELU) | megengine.core.ops.builtin.Elemwise |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import gc
import platform
import weakref
import numpy as np
import pytest
import megengine as mge
import megengine.distributed as dist
import megengine.functional as F
from megengine.core._imperative_rt import TensorAttr, imperative
from megengine.core._imperative_rt.imperative import sync
from megengine.core.autodiff.grad import Grad
from megengine.core.ops.builtin import Elemwise
from megengine.core.tensor.raw_tensor import as_raw_tensor
from megengine.core.tensor.tensor import Tensor, apply
from megengine.core.tensor.tensor_wrapper import TensorWrapper
from megengine.functional.distributed import remote_recv, remote_send
def _elwise(mode):
op = Elemwise(mode)
def f(*args):
(result,) = apply(op, *args)
return result
return f
add = _elwise(Elemwise.Mode.ADD)
mul = _elwise(Elemwise.Mode.MUL)
cos = _elwise(Elemwise.Mode.COS)
relu = _elwise(Elemwise.Mode.RELU)
def as_tensor(x):
return Tensor(as_raw_tensor(x, device=mge.device.get_default_device()))
def save_to(self, name="grad"):
def callback(tensor, grad):
setattr(self, name, grad)
return callback
@pytest.mark.isolated_distributed
@pytest.mark.skipif(
platform.system() == "Windows", reason="windows disable MGB_ENABLE_OPR_MM"
)
def test_dist_grad():
world_size = 2
x_np = np.random.rand(10).astype("float32")
port = dist.get_free_ports(1)[0]
server = dist.Server(port)
def worker0():
dist.init_process_group("localhost", port, world_size, 0, 0)
mge.device.set_default_device("gpu0")
grad = Grad()
x = as_tensor(x_np)
grad.wrt(x, callback=save_to(x))
# need a placeholder to trace operator
send_x = remote_send(x, 1)
recv_x = remote_recv(1, x_np.shape, x_np.dtype, "gpu0")
y = recv_x * recv_x
grad([y], [as_tensor(np.ones_like(x_np))])
np.testing.assert_almost_equal(x.grad.numpy(), x.numpy() * 2)
def worker1():
dist.init_process_group("localhost", port, world_size, 1, 1)
mge.device.set_default_device("gpu1")
grad = Grad()
recv_x = remote_recv(0, x_np.shape, x_np.dtype, "gpu1")
send_x = remote_send(recv_x, 0)
grad([], [])
# sync because grad has a send operator
sync()
send_x.device._cn._sync_all()
import multiprocessing as mp
p0 = mp.Process(target=worker0)
p1 = mp.Process(target=worker1)
p0.start()
p1.start()
p0.join(10)
p1.join(10)
assert p0.exitcode == 0 and p1.exitcode == 0
def test_grad():
x_np = np.random.rand(10).astype("float32")
x = as_tensor(x_np)
grad = Grad().wrt(x, callback=save_to(x))
y = cos(x)
grad(y, as_tensor(np.ones_like(x_np)))
np.testing.assert_almost_equal(x.grad.numpy(), -np.sin(x_np))
def test_grad_2():
x_np = np.random.rand(10).astype("float32")
x = as_tensor(x_np)
grad = Grad().wrt(x, callback=save_to(x))
y = mul(x, x)
y = mul(y, y)
grad(y, as_tensor(np.ones_like(x_np)))
np.testing.assert_almost_equal(x.grad.numpy(), 4 * x_np ** 3, decimal=6)
@pytest.mark.skip(reason="high order gradient was not implemented yet")
def test_2nd_grad():
x_np = np.random.rand(10).astype("float32")
x = as_tensor(x_np)
ones = as_tensor(np.ones_like(x_np))
grad = Grad().wrt(x, callback=save_to(x))
grad2 = Grad().wrt(x, callback=save_to(x))
y = cos(x)
grad(y, ones)
np.testing.assert_almost_equal(x.grad.numpy(), -np.sin(x_np), decimal=5)
grad2(x.grad, ones)
np.testing.assert_almost_equal(x.grad.numpy(), -np.cos(x_np))
def test_grad_with_tensor_wrapper():
x_np = np.random.rand(10).astype("float32")
x = TensorWrapper(x_np)
grad = Grad().wrt(x, callback=save_to(x))
y = mul(x, x)
y = mul(y, y)
grad(y, TensorWrapper(np.ones_like(x_np)))
np.testing.assert_almost_equal(x.grad.numpy(), 4 * x_np ** 3, decimal=6)
def test_release():
def check(f):
n = 0
d = None
gc.disable()
try:
for i in range(3):
f()
m = len(gc.get_objects())
d = m - n
n = m
assert d == 0
finally:
gc.enable()
x = TensorWrapper([0.0])
dy = TensorWrapper(np.ones_like(x.numpy()))
@check
def _():
g = Grad().wrt(x)
y = x * x
g(y, dy)
@check
def _():
with Grad().wrt(x) as g:
pass
@check
def _():
with Grad().wrt(x) as g:
y = x * x
def test_grad_inplace():
x_np = np.random.rand(10).astype("float32")
x = TensorWrapper(x_np)
grad = Grad().wrt(x, callback=save_to(x))
y = mul(x, x)
y *= y
grad(y, TensorWrapper(np.ones_like(x_np)))
np.testing.assert_almost_equal(x.grad.numpy(), 4 * x_np ** 3, decimal=6)
def test_elemwise_add():
x_np = np.random.rand(10).astype("float32")
y_np = np.random.rand(10, 10).astype("float32")
dz_np = np.random.rand(10, 10).astype("float32")
x = TensorWrapper(x_np)
y = TensorWrapper(y_np)
dz = TensorWrapper(dz_np)
refs = {}
def f(x, y):
x = x * 2
refs["x"] = weakref.ref(x.__wrapped__)
refs["y"] = weakref.ref(y.__wrapped__)
return x + y
grad = Grad().wrt(x, callback=save_to(x))
z = f(x, y)
del y
for k, r in refs.items():
assert r() is None
grad(z, dz)
np.testing.assert_almost_equal(x.grad.numpy(), dz_np.sum(0) * 2, decimal=5)
def test_elemwise_relu():
x_np = [1.0, -1.0]
dz_np = [1.0]
x = TensorWrapper(x_np)
dz = TensorWrapper(dz_np)
refs = {}
def f(x):
x = x * 2
refs["x"] = weakref.ref(x.__wrapped__)
return relu(x)
grad = Grad().wrt(x, callback=save_to(x))
z = f(x)
assert refs["x"]() is None
grad(z, dz)
np.testing.assert_almost_equal(x.grad.numpy(), [2.0, 0])
def test_elemwise_relu_backward_fn():
op = Elemwise(Elemwise.Mode.RELU)
attr = | TensorAttr() | megengine.core._imperative_rt.TensorAttr |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import gc
import platform
import weakref
import numpy as np
import pytest
import megengine as mge
import megengine.distributed as dist
import megengine.functional as F
from megengine.core._imperative_rt import TensorAttr, imperative
from megengine.core._imperative_rt.imperative import sync
from megengine.core.autodiff.grad import Grad
from megengine.core.ops.builtin import Elemwise
from megengine.core.tensor.raw_tensor import as_raw_tensor
from megengine.core.tensor.tensor import Tensor, apply
from megengine.core.tensor.tensor_wrapper import TensorWrapper
from megengine.functional.distributed import remote_recv, remote_send
def _elwise(mode):
op = Elemwise(mode)
def f(*args):
(result,) = apply(op, *args)
return result
return f
add = _elwise(Elemwise.Mode.ADD)
mul = _elwise(Elemwise.Mode.MUL)
cos = _elwise(Elemwise.Mode.COS)
relu = _elwise(Elemwise.Mode.RELU)
def as_tensor(x):
return Tensor(as_raw_tensor(x, device=mge.device.get_default_device()))
def save_to(self, name="grad"):
def callback(tensor, grad):
setattr(self, name, grad)
return callback
@pytest.mark.isolated_distributed
@pytest.mark.skipif(
platform.system() == "Windows", reason="windows disable MGB_ENABLE_OPR_MM"
)
def test_dist_grad():
world_size = 2
x_np = np.random.rand(10).astype("float32")
port = dist.get_free_ports(1)[0]
server = dist.Server(port)
def worker0():
dist.init_process_group("localhost", port, world_size, 0, 0)
mge.device.set_default_device("gpu0")
grad = Grad()
x = as_tensor(x_np)
grad.wrt(x, callback=save_to(x))
# need a placeholder to trace operator
send_x = remote_send(x, 1)
recv_x = remote_recv(1, x_np.shape, x_np.dtype, "gpu0")
y = recv_x * recv_x
grad([y], [as_tensor(np.ones_like(x_np))])
np.testing.assert_almost_equal(x.grad.numpy(), x.numpy() * 2)
def worker1():
dist.init_process_group("localhost", port, world_size, 1, 1)
mge.device.set_default_device("gpu1")
grad = Grad()
recv_x = remote_recv(0, x_np.shape, x_np.dtype, "gpu1")
send_x = remote_send(recv_x, 0)
grad([], [])
# sync because grad has a send operator
sync()
send_x.device._cn._sync_all()
import multiprocessing as mp
p0 = mp.Process(target=worker0)
p1 = mp.Process(target=worker1)
p0.start()
p1.start()
p0.join(10)
p1.join(10)
assert p0.exitcode == 0 and p1.exitcode == 0
def test_grad():
x_np = np.random.rand(10).astype("float32")
x = as_tensor(x_np)
grad = Grad().wrt(x, callback=save_to(x))
y = cos(x)
grad(y, as_tensor(np.ones_like(x_np)))
np.testing.assert_almost_equal(x.grad.numpy(), -np.sin(x_np))
def test_grad_2():
x_np = np.random.rand(10).astype("float32")
x = as_tensor(x_np)
grad = Grad().wrt(x, callback=save_to(x))
y = mul(x, x)
y = mul(y, y)
grad(y, as_tensor(np.ones_like(x_np)))
np.testing.assert_almost_equal(x.grad.numpy(), 4 * x_np ** 3, decimal=6)
@pytest.mark.skip(reason="high order gradient was not implemented yet")
def test_2nd_grad():
x_np = np.random.rand(10).astype("float32")
x = as_tensor(x_np)
ones = as_tensor(np.ones_like(x_np))
grad = Grad().wrt(x, callback=save_to(x))
grad2 = Grad().wrt(x, callback=save_to(x))
y = cos(x)
grad(y, ones)
np.testing.assert_almost_equal(x.grad.numpy(), -np.sin(x_np), decimal=5)
grad2(x.grad, ones)
np.testing.assert_almost_equal(x.grad.numpy(), -np.cos(x_np))
def test_grad_with_tensor_wrapper():
x_np = np.random.rand(10).astype("float32")
x = TensorWrapper(x_np)
grad = Grad().wrt(x, callback=save_to(x))
y = mul(x, x)
y = mul(y, y)
grad(y, TensorWrapper(np.ones_like(x_np)))
np.testing.assert_almost_equal(x.grad.numpy(), 4 * x_np ** 3, decimal=6)
def test_release():
def check(f):
n = 0
d = None
gc.disable()
try:
for i in range(3):
f()
m = len(gc.get_objects())
d = m - n
n = m
assert d == 0
finally:
gc.enable()
x = TensorWrapper([0.0])
dy = TensorWrapper(np.ones_like(x.numpy()))
@check
def _():
g = Grad().wrt(x)
y = x * x
g(y, dy)
@check
def _():
with Grad().wrt(x) as g:
pass
@check
def _():
with Grad().wrt(x) as g:
y = x * x
def test_grad_inplace():
x_np = np.random.rand(10).astype("float32")
x = TensorWrapper(x_np)
grad = Grad().wrt(x, callback=save_to(x))
y = mul(x, x)
y *= y
grad(y, TensorWrapper(np.ones_like(x_np)))
np.testing.assert_almost_equal(x.grad.numpy(), 4 * x_np ** 3, decimal=6)
def test_elemwise_add():
x_np = np.random.rand(10).astype("float32")
y_np = np.random.rand(10, 10).astype("float32")
dz_np = np.random.rand(10, 10).astype("float32")
x = TensorWrapper(x_np)
y = TensorWrapper(y_np)
dz = TensorWrapper(dz_np)
refs = {}
def f(x, y):
x = x * 2
refs["x"] = weakref.ref(x.__wrapped__)
refs["y"] = weakref.ref(y.__wrapped__)
return x + y
grad = Grad().wrt(x, callback=save_to(x))
z = f(x, y)
del y
for k, r in refs.items():
assert r() is None
grad(z, dz)
np.testing.assert_almost_equal(x.grad.numpy(), dz_np.sum(0) * 2, decimal=5)
def test_elemwise_relu():
x_np = [1.0, -1.0]
dz_np = [1.0]
x = TensorWrapper(x_np)
dz = TensorWrapper(dz_np)
refs = {}
def f(x):
x = x * 2
refs["x"] = weakref.ref(x.__wrapped__)
return relu(x)
grad = Grad().wrt(x, callback=save_to(x))
z = f(x)
assert refs["x"]() is None
grad(z, dz)
np.testing.assert_almost_equal(x.grad.numpy(), [2.0, 0])
def test_elemwise_relu_backward_fn():
op = Elemwise(Elemwise.Mode.RELU)
attr = TensorAttr()
attr.dtype = "float32"
attr.comp_node = "xpux"
result = | imperative.make_backward_graph(op, [attr], [True], [True]) | megengine.core._imperative_rt.imperative.make_backward_graph |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import gc
import platform
import weakref
import numpy as np
import pytest
import megengine as mge
import megengine.distributed as dist
import megengine.functional as F
from megengine.core._imperative_rt import TensorAttr, imperative
from megengine.core._imperative_rt.imperative import sync
from megengine.core.autodiff.grad import Grad
from megengine.core.ops.builtin import Elemwise
from megengine.core.tensor.raw_tensor import as_raw_tensor
from megengine.core.tensor.tensor import Tensor, apply
from megengine.core.tensor.tensor_wrapper import TensorWrapper
from megengine.functional.distributed import remote_recv, remote_send
def _elwise(mode):
op = Elemwise(mode)
def f(*args):
(result,) = apply(op, *args)
return result
return f
add = _elwise(Elemwise.Mode.ADD)
mul = _elwise(Elemwise.Mode.MUL)
cos = _elwise(Elemwise.Mode.COS)
relu = _elwise(Elemwise.Mode.RELU)
def as_tensor(x):
return Tensor(as_raw_tensor(x, device=mge.device.get_default_device()))
def save_to(self, name="grad"):
def callback(tensor, grad):
setattr(self, name, grad)
return callback
@pytest.mark.isolated_distributed
@pytest.mark.skipif(
platform.system() == "Windows", reason="windows disable MGB_ENABLE_OPR_MM"
)
def test_dist_grad():
world_size = 2
x_np = np.random.rand(10).astype("float32")
port = dist.get_free_ports(1)[0]
server = dist.Server(port)
def worker0():
dist.init_process_group("localhost", port, world_size, 0, 0)
mge.device.set_default_device("gpu0")
grad = Grad()
x = as_tensor(x_np)
grad.wrt(x, callback=save_to(x))
# need a placeholder to trace operator
send_x = remote_send(x, 1)
recv_x = remote_recv(1, x_np.shape, x_np.dtype, "gpu0")
y = recv_x * recv_x
grad([y], [as_tensor(np.ones_like(x_np))])
np.testing.assert_almost_equal(x.grad.numpy(), x.numpy() * 2)
def worker1():
dist.init_process_group("localhost", port, world_size, 1, 1)
mge.device.set_default_device("gpu1")
grad = Grad()
recv_x = remote_recv(0, x_np.shape, x_np.dtype, "gpu1")
send_x = remote_send(recv_x, 0)
grad([], [])
# sync because grad has a send operator
sync()
send_x.device._cn._sync_all()
import multiprocessing as mp
p0 = mp.Process(target=worker0)
p1 = mp.Process(target=worker1)
p0.start()
p1.start()
p0.join(10)
p1.join(10)
assert p0.exitcode == 0 and p1.exitcode == 0
def test_grad():
x_np = np.random.rand(10).astype("float32")
x = as_tensor(x_np)
grad = Grad().wrt(x, callback=save_to(x))
y = cos(x)
grad(y, as_tensor(np.ones_like(x_np)))
np.testing.assert_almost_equal(x.grad.numpy(), -np.sin(x_np))
def test_grad_2():
x_np = np.random.rand(10).astype("float32")
x = as_tensor(x_np)
grad = Grad().wrt(x, callback=save_to(x))
y = mul(x, x)
y = mul(y, y)
grad(y, as_tensor(np.ones_like(x_np)))
np.testing.assert_almost_equal(x.grad.numpy(), 4 * x_np ** 3, decimal=6)
@pytest.mark.skip(reason="high order gradient was not implemented yet")
def test_2nd_grad():
x_np = np.random.rand(10).astype("float32")
x = as_tensor(x_np)
ones = as_tensor(np.ones_like(x_np))
grad = Grad().wrt(x, callback=save_to(x))
grad2 = Grad().wrt(x, callback=save_to(x))
y = cos(x)
grad(y, ones)
np.testing.assert_almost_equal(x.grad.numpy(), -np.sin(x_np), decimal=5)
grad2(x.grad, ones)
np.testing.assert_almost_equal(x.grad.numpy(), -np.cos(x_np))
def test_grad_with_tensor_wrapper():
x_np = np.random.rand(10).astype("float32")
x = TensorWrapper(x_np)
grad = Grad().wrt(x, callback=save_to(x))
y = mul(x, x)
y = mul(y, y)
grad(y, TensorWrapper(np.ones_like(x_np)))
np.testing.assert_almost_equal(x.grad.numpy(), 4 * x_np ** 3, decimal=6)
def test_release():
def check(f):
n = 0
d = None
gc.disable()
try:
for i in range(3):
f()
m = len(gc.get_objects())
d = m - n
n = m
assert d == 0
finally:
gc.enable()
x = TensorWrapper([0.0])
dy = TensorWrapper(np.ones_like(x.numpy()))
@check
def _():
g = Grad().wrt(x)
y = x * x
g(y, dy)
@check
def _():
with Grad().wrt(x) as g:
pass
@check
def _():
with Grad().wrt(x) as g:
y = x * x
def test_grad_inplace():
x_np = np.random.rand(10).astype("float32")
x = TensorWrapper(x_np)
grad = Grad().wrt(x, callback=save_to(x))
y = mul(x, x)
y *= y
grad(y, TensorWrapper(np.ones_like(x_np)))
np.testing.assert_almost_equal(x.grad.numpy(), 4 * x_np ** 3, decimal=6)
def test_elemwise_add():
x_np = np.random.rand(10).astype("float32")
y_np = np.random.rand(10, 10).astype("float32")
dz_np = np.random.rand(10, 10).astype("float32")
x = TensorWrapper(x_np)
y = TensorWrapper(y_np)
dz = TensorWrapper(dz_np)
refs = {}
def f(x, y):
x = x * 2
refs["x"] = weakref.ref(x.__wrapped__)
refs["y"] = weakref.ref(y.__wrapped__)
return x + y
grad = Grad().wrt(x, callback=save_to(x))
z = f(x, y)
del y
for k, r in refs.items():
assert r() is None
grad(z, dz)
np.testing.assert_almost_equal(x.grad.numpy(), dz_np.sum(0) * 2, decimal=5)
def test_elemwise_relu():
x_np = [1.0, -1.0]
dz_np = [1.0]
x = TensorWrapper(x_np)
dz = TensorWrapper(dz_np)
refs = {}
def f(x):
x = x * 2
refs["x"] = weakref.ref(x.__wrapped__)
return relu(x)
grad = Grad().wrt(x, callback=save_to(x))
z = f(x)
assert refs["x"]() is None
grad(z, dz)
np.testing.assert_almost_equal(x.grad.numpy(), [2.0, 0])
def test_elemwise_relu_backward_fn():
op = Elemwise(Elemwise.Mode.RELU)
attr = TensorAttr()
attr.dtype = "float32"
attr.comp_node = "xpux"
result = imperative.make_backward_graph(op, [attr], [True], [True])
backward_graph, save_for_backward_mask, input_has_grad = result
assert save_for_backward_mask == [False, True, True], save_for_backward_mask
def test_reshape():
x_np = np.random.rand(2, 5).astype("float32")
x = | TensorWrapper(x_np) | megengine.core.tensor.tensor_wrapper.TensorWrapper |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import gc
import platform
import weakref
import numpy as np
import pytest
import megengine as mge
import megengine.distributed as dist
import megengine.functional as F
from megengine.core._imperative_rt import TensorAttr, imperative
from megengine.core._imperative_rt.imperative import sync
from megengine.core.autodiff.grad import Grad
from megengine.core.ops.builtin import Elemwise
from megengine.core.tensor.raw_tensor import as_raw_tensor
from megengine.core.tensor.tensor import Tensor, apply
from megengine.core.tensor.tensor_wrapper import TensorWrapper
from megengine.functional.distributed import remote_recv, remote_send
def _elwise(mode):
op = Elemwise(mode)
def f(*args):
(result,) = apply(op, *args)
return result
return f
add = _elwise(Elemwise.Mode.ADD)
mul = _elwise(Elemwise.Mode.MUL)
cos = _elwise(Elemwise.Mode.COS)
relu = _elwise(Elemwise.Mode.RELU)
def as_tensor(x):
return Tensor(as_raw_tensor(x, device=mge.device.get_default_device()))
def save_to(self, name="grad"):
def callback(tensor, grad):
setattr(self, name, grad)
return callback
@pytest.mark.isolated_distributed
@pytest.mark.skipif(
platform.system() == "Windows", reason="windows disable MGB_ENABLE_OPR_MM"
)
def test_dist_grad():
world_size = 2
x_np = np.random.rand(10).astype("float32")
port = dist.get_free_ports(1)[0]
server = dist.Server(port)
def worker0():
dist.init_process_group("localhost", port, world_size, 0, 0)
mge.device.set_default_device("gpu0")
grad = Grad()
x = as_tensor(x_np)
grad.wrt(x, callback=save_to(x))
# need a placeholder to trace operator
send_x = remote_send(x, 1)
recv_x = remote_recv(1, x_np.shape, x_np.dtype, "gpu0")
y = recv_x * recv_x
grad([y], [as_tensor(np.ones_like(x_np))])
np.testing.assert_almost_equal(x.grad.numpy(), x.numpy() * 2)
def worker1():
dist.init_process_group("localhost", port, world_size, 1, 1)
mge.device.set_default_device("gpu1")
grad = Grad()
recv_x = remote_recv(0, x_np.shape, x_np.dtype, "gpu1")
send_x = remote_send(recv_x, 0)
grad([], [])
# sync because grad has a send operator
sync()
send_x.device._cn._sync_all()
import multiprocessing as mp
p0 = mp.Process(target=worker0)
p1 = mp.Process(target=worker1)
p0.start()
p1.start()
p0.join(10)
p1.join(10)
assert p0.exitcode == 0 and p1.exitcode == 0
def test_grad():
x_np = np.random.rand(10).astype("float32")
x = as_tensor(x_np)
grad = Grad().wrt(x, callback=save_to(x))
y = cos(x)
grad(y, as_tensor(np.ones_like(x_np)))
np.testing.assert_almost_equal(x.grad.numpy(), -np.sin(x_np))
def test_grad_2():
x_np = np.random.rand(10).astype("float32")
x = as_tensor(x_np)
grad = Grad().wrt(x, callback=save_to(x))
y = mul(x, x)
y = mul(y, y)
grad(y, as_tensor(np.ones_like(x_np)))
np.testing.assert_almost_equal(x.grad.numpy(), 4 * x_np ** 3, decimal=6)
@pytest.mark.skip(reason="high order gradient was not implemented yet")
def test_2nd_grad():
x_np = np.random.rand(10).astype("float32")
x = as_tensor(x_np)
ones = as_tensor(np.ones_like(x_np))
grad = Grad().wrt(x, callback=save_to(x))
grad2 = Grad().wrt(x, callback=save_to(x))
y = cos(x)
grad(y, ones)
np.testing.assert_almost_equal(x.grad.numpy(), -np.sin(x_np), decimal=5)
grad2(x.grad, ones)
np.testing.assert_almost_equal(x.grad.numpy(), -np.cos(x_np))
def test_grad_with_tensor_wrapper():
x_np = np.random.rand(10).astype("float32")
x = TensorWrapper(x_np)
grad = Grad().wrt(x, callback=save_to(x))
y = mul(x, x)
y = mul(y, y)
grad(y, TensorWrapper(np.ones_like(x_np)))
np.testing.assert_almost_equal(x.grad.numpy(), 4 * x_np ** 3, decimal=6)
def test_release():
def check(f):
n = 0
d = None
gc.disable()
try:
for i in range(3):
f()
m = len(gc.get_objects())
d = m - n
n = m
assert d == 0
finally:
gc.enable()
x = TensorWrapper([0.0])
dy = TensorWrapper(np.ones_like(x.numpy()))
@check
def _():
g = Grad().wrt(x)
y = x * x
g(y, dy)
@check
def _():
with Grad().wrt(x) as g:
pass
@check
def _():
with Grad().wrt(x) as g:
y = x * x
def test_grad_inplace():
x_np = np.random.rand(10).astype("float32")
x = TensorWrapper(x_np)
grad = Grad().wrt(x, callback=save_to(x))
y = mul(x, x)
y *= y
grad(y, TensorWrapper(np.ones_like(x_np)))
np.testing.assert_almost_equal(x.grad.numpy(), 4 * x_np ** 3, decimal=6)
def test_elemwise_add():
x_np = np.random.rand(10).astype("float32")
y_np = np.random.rand(10, 10).astype("float32")
dz_np = np.random.rand(10, 10).astype("float32")
x = TensorWrapper(x_np)
y = TensorWrapper(y_np)
dz = TensorWrapper(dz_np)
refs = {}
def f(x, y):
x = x * 2
refs["x"] = weakref.ref(x.__wrapped__)
refs["y"] = weakref.ref(y.__wrapped__)
return x + y
grad = Grad().wrt(x, callback=save_to(x))
z = f(x, y)
del y
for k, r in refs.items():
assert r() is None
grad(z, dz)
np.testing.assert_almost_equal(x.grad.numpy(), dz_np.sum(0) * 2, decimal=5)
def test_elemwise_relu():
x_np = [1.0, -1.0]
dz_np = [1.0]
x = TensorWrapper(x_np)
dz = TensorWrapper(dz_np)
refs = {}
def f(x):
x = x * 2
refs["x"] = weakref.ref(x.__wrapped__)
return relu(x)
grad = Grad().wrt(x, callback=save_to(x))
z = f(x)
assert refs["x"]() is None
grad(z, dz)
np.testing.assert_almost_equal(x.grad.numpy(), [2.0, 0])
def test_elemwise_relu_backward_fn():
op = Elemwise(Elemwise.Mode.RELU)
attr = TensorAttr()
attr.dtype = "float32"
attr.comp_node = "xpux"
result = imperative.make_backward_graph(op, [attr], [True], [True])
backward_graph, save_for_backward_mask, input_has_grad = result
assert save_for_backward_mask == [False, True, True], save_for_backward_mask
def test_reshape():
x_np = np.random.rand(2, 5).astype("float32")
x = TensorWrapper(x_np)
grad = Grad().wrt(x, callback=save_to(x))
y = x.reshape(5, 2)
grad(y, F.ones_like(y))
np.testing.assert_equal(np.ones((2, 5), dtype=np.float32), x.grad.numpy())
def test_subtensor():
x_np = np.random.rand(3, 3).astype("float32")
x = | TensorWrapper(x_np) | megengine.core.tensor.tensor_wrapper.TensorWrapper |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import gc
import platform
import weakref
import numpy as np
import pytest
import megengine as mge
import megengine.distributed as dist
import megengine.functional as F
from megengine.core._imperative_rt import TensorAttr, imperative
from megengine.core._imperative_rt.imperative import sync
from megengine.core.autodiff.grad import Grad
from megengine.core.ops.builtin import Elemwise
from megengine.core.tensor.raw_tensor import as_raw_tensor
from megengine.core.tensor.tensor import Tensor, apply
from megengine.core.tensor.tensor_wrapper import TensorWrapper
from megengine.functional.distributed import remote_recv, remote_send
def _elwise(mode):
op = Elemwise(mode)
def f(*args):
(result,) = apply(op, *args)
return result
return f
add = _elwise(Elemwise.Mode.ADD)
mul = _elwise(Elemwise.Mode.MUL)
cos = _elwise(Elemwise.Mode.COS)
relu = _elwise(Elemwise.Mode.RELU)
def as_tensor(x):
return Tensor(as_raw_tensor(x, device=mge.device.get_default_device()))
def save_to(self, name="grad"):
def callback(tensor, grad):
setattr(self, name, grad)
return callback
@pytest.mark.isolated_distributed
@pytest.mark.skipif(
platform.system() == "Windows", reason="windows disable MGB_ENABLE_OPR_MM"
)
def test_dist_grad():
world_size = 2
x_np = np.random.rand(10).astype("float32")
port = dist.get_free_ports(1)[0]
server = dist.Server(port)
def worker0():
dist.init_process_group("localhost", port, world_size, 0, 0)
mge.device.set_default_device("gpu0")
grad = Grad()
x = as_tensor(x_np)
grad.wrt(x, callback=save_to(x))
# need a placeholder to trace operator
send_x = remote_send(x, 1)
recv_x = remote_recv(1, x_np.shape, x_np.dtype, "gpu0")
y = recv_x * recv_x
grad([y], [as_tensor(np.ones_like(x_np))])
np.testing.assert_almost_equal(x.grad.numpy(), x.numpy() * 2)
def worker1():
dist.init_process_group("localhost", port, world_size, 1, 1)
mge.device.set_default_device("gpu1")
grad = Grad()
recv_x = remote_recv(0, x_np.shape, x_np.dtype, "gpu1")
send_x = remote_send(recv_x, 0)
grad([], [])
# sync because grad has a send operator
sync()
send_x.device._cn._sync_all()
import multiprocessing as mp
p0 = mp.Process(target=worker0)
p1 = mp.Process(target=worker1)
p0.start()
p1.start()
p0.join(10)
p1.join(10)
assert p0.exitcode == 0 and p1.exitcode == 0
def test_grad():
x_np = np.random.rand(10).astype("float32")
x = as_tensor(x_np)
grad = Grad().wrt(x, callback=save_to(x))
y = cos(x)
grad(y, as_tensor(np.ones_like(x_np)))
np.testing.assert_almost_equal(x.grad.numpy(), -np.sin(x_np))
def test_grad_2():
x_np = np.random.rand(10).astype("float32")
x = as_tensor(x_np)
grad = Grad().wrt(x, callback=save_to(x))
y = mul(x, x)
y = mul(y, y)
grad(y, as_tensor(np.ones_like(x_np)))
np.testing.assert_almost_equal(x.grad.numpy(), 4 * x_np ** 3, decimal=6)
@pytest.mark.skip(reason="high order gradient was not implemented yet")
def test_2nd_grad():
x_np = np.random.rand(10).astype("float32")
x = as_tensor(x_np)
ones = as_tensor(np.ones_like(x_np))
grad = Grad().wrt(x, callback=save_to(x))
grad2 = Grad().wrt(x, callback=save_to(x))
y = cos(x)
grad(y, ones)
np.testing.assert_almost_equal(x.grad.numpy(), -np.sin(x_np), decimal=5)
grad2(x.grad, ones)
np.testing.assert_almost_equal(x.grad.numpy(), -np.cos(x_np))
def test_grad_with_tensor_wrapper():
x_np = np.random.rand(10).astype("float32")
x = TensorWrapper(x_np)
grad = Grad().wrt(x, callback=save_to(x))
y = mul(x, x)
y = mul(y, y)
grad(y, TensorWrapper(np.ones_like(x_np)))
np.testing.assert_almost_equal(x.grad.numpy(), 4 * x_np ** 3, decimal=6)
def test_release():
def check(f):
n = 0
d = None
gc.disable()
try:
for i in range(3):
f()
m = len(gc.get_objects())
d = m - n
n = m
assert d == 0
finally:
gc.enable()
x = TensorWrapper([0.0])
dy = TensorWrapper(np.ones_like(x.numpy()))
@check
def _():
g = Grad().wrt(x)
y = x * x
g(y, dy)
@check
def _():
with Grad().wrt(x) as g:
pass
@check
def _():
with Grad().wrt(x) as g:
y = x * x
def test_grad_inplace():
x_np = np.random.rand(10).astype("float32")
x = TensorWrapper(x_np)
grad = Grad().wrt(x, callback=save_to(x))
y = mul(x, x)
y *= y
grad(y, TensorWrapper(np.ones_like(x_np)))
np.testing.assert_almost_equal(x.grad.numpy(), 4 * x_np ** 3, decimal=6)
def test_elemwise_add():
x_np = np.random.rand(10).astype("float32")
y_np = np.random.rand(10, 10).astype("float32")
dz_np = np.random.rand(10, 10).astype("float32")
x = TensorWrapper(x_np)
y = TensorWrapper(y_np)
dz = TensorWrapper(dz_np)
refs = {}
def f(x, y):
x = x * 2
refs["x"] = weakref.ref(x.__wrapped__)
refs["y"] = weakref.ref(y.__wrapped__)
return x + y
grad = Grad().wrt(x, callback=save_to(x))
z = f(x, y)
del y
for k, r in refs.items():
assert r() is None
grad(z, dz)
np.testing.assert_almost_equal(x.grad.numpy(), dz_np.sum(0) * 2, decimal=5)
def test_elemwise_relu():
x_np = [1.0, -1.0]
dz_np = [1.0]
x = TensorWrapper(x_np)
dz = TensorWrapper(dz_np)
refs = {}
def f(x):
x = x * 2
refs["x"] = weakref.ref(x.__wrapped__)
return relu(x)
grad = Grad().wrt(x, callback=save_to(x))
z = f(x)
assert refs["x"]() is None
grad(z, dz)
np.testing.assert_almost_equal(x.grad.numpy(), [2.0, 0])
def test_elemwise_relu_backward_fn():
op = Elemwise(Elemwise.Mode.RELU)
attr = TensorAttr()
attr.dtype = "float32"
attr.comp_node = "xpux"
result = imperative.make_backward_graph(op, [attr], [True], [True])
backward_graph, save_for_backward_mask, input_has_grad = result
assert save_for_backward_mask == [False, True, True], save_for_backward_mask
def test_reshape():
x_np = np.random.rand(2, 5).astype("float32")
x = TensorWrapper(x_np)
grad = Grad().wrt(x, callback=save_to(x))
y = x.reshape(5, 2)
grad(y, F.ones_like(y))
np.testing.assert_equal(np.ones((2, 5), dtype=np.float32), x.grad.numpy())
def test_subtensor():
x_np = np.random.rand(3, 3).astype("float32")
x = TensorWrapper(x_np)
grad = Grad().wrt(x, callback=save_to(x))
y = x[1:-1, :2]
grad(y, F.ones_like(y))
np.testing.assert_equal(
np.array([[0, 0, 0], [1, 1, 0], [0, 0, 0]], dtype=np.float32), x.grad.numpy()
)
def test_IndexingMultiAxisVec():
x_np = np.random.rand(3, 3).astype("float32")
x = | TensorWrapper(x_np) | megengine.core.tensor.tensor_wrapper.TensorWrapper |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import gc
import platform
import weakref
import numpy as np
import pytest
import megengine as mge
import megengine.distributed as dist
import megengine.functional as F
from megengine.core._imperative_rt import TensorAttr, imperative
from megengine.core._imperative_rt.imperative import sync
from megengine.core.autodiff.grad import Grad
from megengine.core.ops.builtin import Elemwise
from megengine.core.tensor.raw_tensor import as_raw_tensor
from megengine.core.tensor.tensor import Tensor, apply
from megengine.core.tensor.tensor_wrapper import TensorWrapper
from megengine.functional.distributed import remote_recv, remote_send
def _elwise(mode):
op = Elemwise(mode)
def f(*args):
(result,) = apply(op, *args)
return result
return f
add = _elwise(Elemwise.Mode.ADD)
mul = _elwise(Elemwise.Mode.MUL)
cos = _elwise(Elemwise.Mode.COS)
relu = _elwise(Elemwise.Mode.RELU)
def as_tensor(x):
return Tensor(as_raw_tensor(x, device=mge.device.get_default_device()))
def save_to(self, name="grad"):
def callback(tensor, grad):
setattr(self, name, grad)
return callback
@pytest.mark.isolated_distributed
@pytest.mark.skipif(
platform.system() == "Windows", reason="windows disable MGB_ENABLE_OPR_MM"
)
def test_dist_grad():
world_size = 2
x_np = np.random.rand(10).astype("float32")
port = dist.get_free_ports(1)[0]
server = dist.Server(port)
def worker0():
dist.init_process_group("localhost", port, world_size, 0, 0)
mge.device.set_default_device("gpu0")
grad = Grad()
x = as_tensor(x_np)
grad.wrt(x, callback=save_to(x))
# need a placeholder to trace operator
send_x = remote_send(x, 1)
recv_x = remote_recv(1, x_np.shape, x_np.dtype, "gpu0")
y = recv_x * recv_x
grad([y], [as_tensor(np.ones_like(x_np))])
np.testing.assert_almost_equal(x.grad.numpy(), x.numpy() * 2)
def worker1():
dist.init_process_group("localhost", port, world_size, 1, 1)
mge.device.set_default_device("gpu1")
grad = Grad()
recv_x = remote_recv(0, x_np.shape, x_np.dtype, "gpu1")
send_x = remote_send(recv_x, 0)
grad([], [])
# sync because grad has a send operator
sync()
send_x.device._cn._sync_all()
import multiprocessing as mp
p0 = mp.Process(target=worker0)
p1 = mp.Process(target=worker1)
p0.start()
p1.start()
p0.join(10)
p1.join(10)
assert p0.exitcode == 0 and p1.exitcode == 0
def test_grad():
x_np = np.random.rand(10).astype("float32")
x = as_tensor(x_np)
grad = Grad().wrt(x, callback=save_to(x))
y = cos(x)
grad(y, as_tensor(np.ones_like(x_np)))
np.testing.assert_almost_equal(x.grad.numpy(), -np.sin(x_np))
def test_grad_2():
x_np = np.random.rand(10).astype("float32")
x = as_tensor(x_np)
grad = Grad().wrt(x, callback=save_to(x))
y = mul(x, x)
y = mul(y, y)
grad(y, as_tensor(np.ones_like(x_np)))
np.testing.assert_almost_equal(x.grad.numpy(), 4 * x_np ** 3, decimal=6)
@pytest.mark.skip(reason="high order gradient was not implemented yet")
def test_2nd_grad():
x_np = np.random.rand(10).astype("float32")
x = as_tensor(x_np)
ones = as_tensor(np.ones_like(x_np))
grad = Grad().wrt(x, callback=save_to(x))
grad2 = Grad().wrt(x, callback=save_to(x))
y = cos(x)
grad(y, ones)
np.testing.assert_almost_equal(x.grad.numpy(), -np.sin(x_np), decimal=5)
grad2(x.grad, ones)
np.testing.assert_almost_equal(x.grad.numpy(), -np.cos(x_np))
def test_grad_with_tensor_wrapper():
x_np = np.random.rand(10).astype("float32")
x = TensorWrapper(x_np)
grad = Grad().wrt(x, callback=save_to(x))
y = mul(x, x)
y = mul(y, y)
grad(y, TensorWrapper(np.ones_like(x_np)))
np.testing.assert_almost_equal(x.grad.numpy(), 4 * x_np ** 3, decimal=6)
def test_release():
def check(f):
n = 0
d = None
gc.disable()
try:
for i in range(3):
f()
m = len(gc.get_objects())
d = m - n
n = m
assert d == 0
finally:
gc.enable()
x = TensorWrapper([0.0])
dy = TensorWrapper(np.ones_like(x.numpy()))
@check
def _():
g = Grad().wrt(x)
y = x * x
g(y, dy)
@check
def _():
with Grad().wrt(x) as g:
pass
@check
def _():
with Grad().wrt(x) as g:
y = x * x
def test_grad_inplace():
x_np = np.random.rand(10).astype("float32")
x = TensorWrapper(x_np)
grad = Grad().wrt(x, callback=save_to(x))
y = mul(x, x)
y *= y
grad(y, TensorWrapper(np.ones_like(x_np)))
np.testing.assert_almost_equal(x.grad.numpy(), 4 * x_np ** 3, decimal=6)
def test_elemwise_add():
x_np = np.random.rand(10).astype("float32")
y_np = np.random.rand(10, 10).astype("float32")
dz_np = np.random.rand(10, 10).astype("float32")
x = TensorWrapper(x_np)
y = TensorWrapper(y_np)
dz = TensorWrapper(dz_np)
refs = {}
def f(x, y):
x = x * 2
refs["x"] = weakref.ref(x.__wrapped__)
refs["y"] = weakref.ref(y.__wrapped__)
return x + y
grad = Grad().wrt(x, callback=save_to(x))
z = f(x, y)
del y
for k, r in refs.items():
assert r() is None
grad(z, dz)
np.testing.assert_almost_equal(x.grad.numpy(), dz_np.sum(0) * 2, decimal=5)
def test_elemwise_relu():
x_np = [1.0, -1.0]
dz_np = [1.0]
x = TensorWrapper(x_np)
dz = TensorWrapper(dz_np)
refs = {}
def f(x):
x = x * 2
refs["x"] = weakref.ref(x.__wrapped__)
return relu(x)
grad = Grad().wrt(x, callback=save_to(x))
z = f(x)
assert refs["x"]() is None
grad(z, dz)
np.testing.assert_almost_equal(x.grad.numpy(), [2.0, 0])
def test_elemwise_relu_backward_fn():
op = Elemwise(Elemwise.Mode.RELU)
attr = TensorAttr()
attr.dtype = "float32"
attr.comp_node = "xpux"
result = imperative.make_backward_graph(op, [attr], [True], [True])
backward_graph, save_for_backward_mask, input_has_grad = result
assert save_for_backward_mask == [False, True, True], save_for_backward_mask
def test_reshape():
x_np = np.random.rand(2, 5).astype("float32")
x = TensorWrapper(x_np)
grad = Grad().wrt(x, callback=save_to(x))
y = x.reshape(5, 2)
grad(y, F.ones_like(y))
np.testing.assert_equal(np.ones((2, 5), dtype=np.float32), x.grad.numpy())
def test_subtensor():
x_np = np.random.rand(3, 3).astype("float32")
x = TensorWrapper(x_np)
grad = Grad().wrt(x, callback=save_to(x))
y = x[1:-1, :2]
grad(y, F.ones_like(y))
np.testing.assert_equal(
np.array([[0, 0, 0], [1, 1, 0], [0, 0, 0]], dtype=np.float32), x.grad.numpy()
)
def test_IndexingMultiAxisVec():
x_np = np.random.rand(3, 3).astype("float32")
x = TensorWrapper(x_np)
grad = Grad().wrt(x, callback=save_to(x))
y = x[[0, 2], [0, 2]]
grad(y, F.ones_like(y))
np.testing.assert_equal(
np.array([[1, 0, 0], [0, 0, 0], [0, 0, 1]], dtype=np.float32), x.grad.numpy()
)
def test_AxisAddRemove():
x_np = np.random.rand(1, 5).astype("float32")
x = | TensorWrapper(x_np) | megengine.core.tensor.tensor_wrapper.TensorWrapper |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import gc
import platform
import weakref
import numpy as np
import pytest
import megengine as mge
import megengine.distributed as dist
import megengine.functional as F
from megengine.core._imperative_rt import TensorAttr, imperative
from megengine.core._imperative_rt.imperative import sync
from megengine.core.autodiff.grad import Grad
from megengine.core.ops.builtin import Elemwise
from megengine.core.tensor.raw_tensor import as_raw_tensor
from megengine.core.tensor.tensor import Tensor, apply
from megengine.core.tensor.tensor_wrapper import TensorWrapper
from megengine.functional.distributed import remote_recv, remote_send
def _elwise(mode):
op = Elemwise(mode)
def f(*args):
(result,) = apply(op, *args)
return result
return f
add = _elwise(Elemwise.Mode.ADD)
mul = _elwise(Elemwise.Mode.MUL)
cos = _elwise(Elemwise.Mode.COS)
relu = _elwise(Elemwise.Mode.RELU)
def as_tensor(x):
return Tensor(as_raw_tensor(x, device=mge.device.get_default_device()))
def save_to(self, name="grad"):
def callback(tensor, grad):
setattr(self, name, grad)
return callback
@pytest.mark.isolated_distributed
@pytest.mark.skipif(
platform.system() == "Windows", reason="windows disable MGB_ENABLE_OPR_MM"
)
def test_dist_grad():
world_size = 2
x_np = np.random.rand(10).astype("float32")
port = dist.get_free_ports(1)[0]
server = dist.Server(port)
def worker0():
dist.init_process_group("localhost", port, world_size, 0, 0)
mge.device.set_default_device("gpu0")
grad = Grad()
x = as_tensor(x_np)
grad.wrt(x, callback=save_to(x))
# need a placeholder to trace operator
send_x = remote_send(x, 1)
recv_x = remote_recv(1, x_np.shape, x_np.dtype, "gpu0")
y = recv_x * recv_x
grad([y], [as_tensor(np.ones_like(x_np))])
np.testing.assert_almost_equal(x.grad.numpy(), x.numpy() * 2)
def worker1():
dist.init_process_group("localhost", port, world_size, 1, 1)
mge.device.set_default_device("gpu1")
grad = Grad()
recv_x = remote_recv(0, x_np.shape, x_np.dtype, "gpu1")
send_x = remote_send(recv_x, 0)
grad([], [])
# sync because grad has a send operator
sync()
send_x.device._cn._sync_all()
import multiprocessing as mp
p0 = mp.Process(target=worker0)
p1 = mp.Process(target=worker1)
p0.start()
p1.start()
p0.join(10)
p1.join(10)
assert p0.exitcode == 0 and p1.exitcode == 0
def test_grad():
x_np = np.random.rand(10).astype("float32")
x = as_tensor(x_np)
grad = Grad().wrt(x, callback=save_to(x))
y = cos(x)
grad(y, as_tensor(np.ones_like(x_np)))
np.testing.assert_almost_equal(x.grad.numpy(), -np.sin(x_np))
def test_grad_2():
x_np = np.random.rand(10).astype("float32")
x = as_tensor(x_np)
grad = Grad().wrt(x, callback=save_to(x))
y = mul(x, x)
y = mul(y, y)
grad(y, as_tensor(np.ones_like(x_np)))
np.testing.assert_almost_equal(x.grad.numpy(), 4 * x_np ** 3, decimal=6)
@pytest.mark.skip(reason="high order gradient was not implemented yet")
def test_2nd_grad():
x_np = np.random.rand(10).astype("float32")
x = as_tensor(x_np)
ones = as_tensor(np.ones_like(x_np))
grad = Grad().wrt(x, callback=save_to(x))
grad2 = Grad().wrt(x, callback=save_to(x))
y = cos(x)
grad(y, ones)
np.testing.assert_almost_equal(x.grad.numpy(), -np.sin(x_np), decimal=5)
grad2(x.grad, ones)
np.testing.assert_almost_equal(x.grad.numpy(), -np.cos(x_np))
def test_grad_with_tensor_wrapper():
x_np = np.random.rand(10).astype("float32")
x = TensorWrapper(x_np)
grad = Grad().wrt(x, callback=save_to(x))
y = mul(x, x)
y = mul(y, y)
grad(y, TensorWrapper(np.ones_like(x_np)))
np.testing.assert_almost_equal(x.grad.numpy(), 4 * x_np ** 3, decimal=6)
def test_release():
def check(f):
n = 0
d = None
gc.disable()
try:
for i in range(3):
f()
m = len(gc.get_objects())
d = m - n
n = m
assert d == 0
finally:
gc.enable()
x = TensorWrapper([0.0])
dy = TensorWrapper(np.ones_like(x.numpy()))
@check
def _():
g = Grad().wrt(x)
y = x * x
g(y, dy)
@check
def _():
with Grad().wrt(x) as g:
pass
@check
def _():
with Grad().wrt(x) as g:
y = x * x
def test_grad_inplace():
x_np = np.random.rand(10).astype("float32")
x = TensorWrapper(x_np)
grad = Grad().wrt(x, callback=save_to(x))
y = mul(x, x)
y *= y
grad(y, TensorWrapper(np.ones_like(x_np)))
np.testing.assert_almost_equal(x.grad.numpy(), 4 * x_np ** 3, decimal=6)
def test_elemwise_add():
x_np = np.random.rand(10).astype("float32")
y_np = np.random.rand(10, 10).astype("float32")
dz_np = np.random.rand(10, 10).astype("float32")
x = TensorWrapper(x_np)
y = TensorWrapper(y_np)
dz = TensorWrapper(dz_np)
refs = {}
def f(x, y):
x = x * 2
refs["x"] = weakref.ref(x.__wrapped__)
refs["y"] = weakref.ref(y.__wrapped__)
return x + y
grad = Grad().wrt(x, callback=save_to(x))
z = f(x, y)
del y
for k, r in refs.items():
assert r() is None
grad(z, dz)
np.testing.assert_almost_equal(x.grad.numpy(), dz_np.sum(0) * 2, decimal=5)
def test_elemwise_relu():
x_np = [1.0, -1.0]
dz_np = [1.0]
x = TensorWrapper(x_np)
dz = TensorWrapper(dz_np)
refs = {}
def f(x):
x = x * 2
refs["x"] = weakref.ref(x.__wrapped__)
return relu(x)
grad = Grad().wrt(x, callback=save_to(x))
z = f(x)
assert refs["x"]() is None
grad(z, dz)
np.testing.assert_almost_equal(x.grad.numpy(), [2.0, 0])
def test_elemwise_relu_backward_fn():
op = Elemwise(Elemwise.Mode.RELU)
attr = TensorAttr()
attr.dtype = "float32"
attr.comp_node = "xpux"
result = imperative.make_backward_graph(op, [attr], [True], [True])
backward_graph, save_for_backward_mask, input_has_grad = result
assert save_for_backward_mask == [False, True, True], save_for_backward_mask
def test_reshape():
x_np = np.random.rand(2, 5).astype("float32")
x = TensorWrapper(x_np)
grad = Grad().wrt(x, callback=save_to(x))
y = x.reshape(5, 2)
grad(y, F.ones_like(y))
np.testing.assert_equal(np.ones((2, 5), dtype=np.float32), x.grad.numpy())
def test_subtensor():
x_np = np.random.rand(3, 3).astype("float32")
x = TensorWrapper(x_np)
grad = Grad().wrt(x, callback=save_to(x))
y = x[1:-1, :2]
grad(y, F.ones_like(y))
np.testing.assert_equal(
np.array([[0, 0, 0], [1, 1, 0], [0, 0, 0]], dtype=np.float32), x.grad.numpy()
)
def test_IndexingMultiAxisVec():
x_np = np.random.rand(3, 3).astype("float32")
x = TensorWrapper(x_np)
grad = Grad().wrt(x, callback=save_to(x))
y = x[[0, 2], [0, 2]]
grad(y, F.ones_like(y))
np.testing.assert_equal(
np.array([[1, 0, 0], [0, 0, 0], [0, 0, 1]], dtype=np.float32), x.grad.numpy()
)
def test_AxisAddRemove():
x_np = np.random.rand(1, 5).astype("float32")
x = TensorWrapper(x_np)
grad = Grad().wrt(x, callback=save_to(x))
y = F.squeeze(F.expand_dims(x, 2), 0)
grad(y, F.ones_like(y))
np.testing.assert_equal(
np.array([[1, 1, 1, 1, 1]], dtype=np.float32), x.grad.numpy()
)
def test_Broadcast():
x_np = np.random.rand(3, 3, 1).astype("float32")
x = | TensorWrapper(x_np) | megengine.core.tensor.tensor_wrapper.TensorWrapper |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import gc
import platform
import weakref
import numpy as np
import pytest
import megengine as mge
import megengine.distributed as dist
import megengine.functional as F
from megengine.core._imperative_rt import TensorAttr, imperative
from megengine.core._imperative_rt.imperative import sync
from megengine.core.autodiff.grad import Grad
from megengine.core.ops.builtin import Elemwise
from megengine.core.tensor.raw_tensor import as_raw_tensor
from megengine.core.tensor.tensor import Tensor, apply
from megengine.core.tensor.tensor_wrapper import TensorWrapper
from megengine.functional.distributed import remote_recv, remote_send
def _elwise(mode):
op = Elemwise(mode)
def f(*args):
(result,) = apply(op, *args)
return result
return f
add = _elwise(Elemwise.Mode.ADD)
mul = _elwise(Elemwise.Mode.MUL)
cos = _elwise(Elemwise.Mode.COS)
relu = _elwise(Elemwise.Mode.RELU)
def as_tensor(x):
return Tensor(as_raw_tensor(x, device=mge.device.get_default_device()))
def save_to(self, name="grad"):
def callback(tensor, grad):
setattr(self, name, grad)
return callback
@pytest.mark.isolated_distributed
@pytest.mark.skipif(
platform.system() == "Windows", reason="windows disable MGB_ENABLE_OPR_MM"
)
def test_dist_grad():
world_size = 2
x_np = np.random.rand(10).astype("float32")
port = dist.get_free_ports(1)[0]
server = dist.Server(port)
def worker0():
dist.init_process_group("localhost", port, world_size, 0, 0)
mge.device.set_default_device("gpu0")
grad = Grad()
x = as_tensor(x_np)
grad.wrt(x, callback=save_to(x))
# need a placeholder to trace operator
send_x = remote_send(x, 1)
recv_x = remote_recv(1, x_np.shape, x_np.dtype, "gpu0")
y = recv_x * recv_x
grad([y], [as_tensor(np.ones_like(x_np))])
np.testing.assert_almost_equal(x.grad.numpy(), x.numpy() * 2)
def worker1():
dist.init_process_group("localhost", port, world_size, 1, 1)
mge.device.set_default_device("gpu1")
grad = Grad()
recv_x = remote_recv(0, x_np.shape, x_np.dtype, "gpu1")
send_x = remote_send(recv_x, 0)
grad([], [])
# sync because grad has a send operator
sync()
send_x.device._cn._sync_all()
import multiprocessing as mp
p0 = mp.Process(target=worker0)
p1 = mp.Process(target=worker1)
p0.start()
p1.start()
p0.join(10)
p1.join(10)
assert p0.exitcode == 0 and p1.exitcode == 0
def test_grad():
x_np = np.random.rand(10).astype("float32")
x = as_tensor(x_np)
grad = Grad().wrt(x, callback=save_to(x))
y = cos(x)
grad(y, as_tensor(np.ones_like(x_np)))
np.testing.assert_almost_equal(x.grad.numpy(), -np.sin(x_np))
def test_grad_2():
x_np = np.random.rand(10).astype("float32")
x = as_tensor(x_np)
grad = Grad().wrt(x, callback=save_to(x))
y = mul(x, x)
y = mul(y, y)
grad(y, as_tensor(np.ones_like(x_np)))
np.testing.assert_almost_equal(x.grad.numpy(), 4 * x_np ** 3, decimal=6)
@pytest.mark.skip(reason="high order gradient was not implemented yet")
def test_2nd_grad():
x_np = np.random.rand(10).astype("float32")
x = as_tensor(x_np)
ones = as_tensor(np.ones_like(x_np))
grad = Grad().wrt(x, callback=save_to(x))
grad2 = Grad().wrt(x, callback=save_to(x))
y = cos(x)
grad(y, ones)
np.testing.assert_almost_equal(x.grad.numpy(), -np.sin(x_np), decimal=5)
grad2(x.grad, ones)
np.testing.assert_almost_equal(x.grad.numpy(), -np.cos(x_np))
def test_grad_with_tensor_wrapper():
x_np = np.random.rand(10).astype("float32")
x = TensorWrapper(x_np)
grad = Grad().wrt(x, callback=save_to(x))
y = mul(x, x)
y = mul(y, y)
grad(y, TensorWrapper(np.ones_like(x_np)))
np.testing.assert_almost_equal(x.grad.numpy(), 4 * x_np ** 3, decimal=6)
def test_release():
def check(f):
n = 0
d = None
gc.disable()
try:
for i in range(3):
f()
m = len(gc.get_objects())
d = m - n
n = m
assert d == 0
finally:
gc.enable()
x = TensorWrapper([0.0])
dy = TensorWrapper(np.ones_like(x.numpy()))
@check
def _():
g = Grad().wrt(x)
y = x * x
g(y, dy)
@check
def _():
with Grad().wrt(x) as g:
pass
@check
def _():
with Grad().wrt(x) as g:
y = x * x
def test_grad_inplace():
x_np = np.random.rand(10).astype("float32")
x = TensorWrapper(x_np)
grad = Grad().wrt(x, callback=save_to(x))
y = mul(x, x)
y *= y
grad(y, TensorWrapper(np.ones_like(x_np)))
np.testing.assert_almost_equal(x.grad.numpy(), 4 * x_np ** 3, decimal=6)
def test_elemwise_add():
x_np = np.random.rand(10).astype("float32")
y_np = np.random.rand(10, 10).astype("float32")
dz_np = np.random.rand(10, 10).astype("float32")
x = TensorWrapper(x_np)
y = TensorWrapper(y_np)
dz = TensorWrapper(dz_np)
refs = {}
def f(x, y):
x = x * 2
refs["x"] = weakref.ref(x.__wrapped__)
refs["y"] = weakref.ref(y.__wrapped__)
return x + y
grad = Grad().wrt(x, callback=save_to(x))
z = f(x, y)
del y
for k, r in refs.items():
assert r() is None
grad(z, dz)
np.testing.assert_almost_equal(x.grad.numpy(), dz_np.sum(0) * 2, decimal=5)
def test_elemwise_relu():
x_np = [1.0, -1.0]
dz_np = [1.0]
x = TensorWrapper(x_np)
dz = TensorWrapper(dz_np)
refs = {}
def f(x):
x = x * 2
refs["x"] = weakref.ref(x.__wrapped__)
return relu(x)
grad = Grad().wrt(x, callback=save_to(x))
z = f(x)
assert refs["x"]() is None
grad(z, dz)
np.testing.assert_almost_equal(x.grad.numpy(), [2.0, 0])
def test_elemwise_relu_backward_fn():
op = Elemwise(Elemwise.Mode.RELU)
attr = TensorAttr()
attr.dtype = "float32"
attr.comp_node = "xpux"
result = imperative.make_backward_graph(op, [attr], [True], [True])
backward_graph, save_for_backward_mask, input_has_grad = result
assert save_for_backward_mask == [False, True, True], save_for_backward_mask
def test_reshape():
x_np = np.random.rand(2, 5).astype("float32")
x = TensorWrapper(x_np)
grad = Grad().wrt(x, callback=save_to(x))
y = x.reshape(5, 2)
grad(y, F.ones_like(y))
np.testing.assert_equal(np.ones((2, 5), dtype=np.float32), x.grad.numpy())
def test_subtensor():
x_np = np.random.rand(3, 3).astype("float32")
x = TensorWrapper(x_np)
grad = Grad().wrt(x, callback=save_to(x))
y = x[1:-1, :2]
grad(y, F.ones_like(y))
np.testing.assert_equal(
np.array([[0, 0, 0], [1, 1, 0], [0, 0, 0]], dtype=np.float32), x.grad.numpy()
)
def test_IndexingMultiAxisVec():
x_np = np.random.rand(3, 3).astype("float32")
x = TensorWrapper(x_np)
grad = Grad().wrt(x, callback=save_to(x))
y = x[[0, 2], [0, 2]]
grad(y, F.ones_like(y))
np.testing.assert_equal(
np.array([[1, 0, 0], [0, 0, 0], [0, 0, 1]], dtype=np.float32), x.grad.numpy()
)
def test_AxisAddRemove():
x_np = np.random.rand(1, 5).astype("float32")
x = TensorWrapper(x_np)
grad = Grad().wrt(x, callback=save_to(x))
y = F.squeeze(F.expand_dims(x, 2), 0)
grad(y, F.ones_like(y))
np.testing.assert_equal(
np.array([[1, 1, 1, 1, 1]], dtype=np.float32), x.grad.numpy()
)
def test_Broadcast():
x_np = np.random.rand(3, 3, 1).astype("float32")
x = TensorWrapper(x_np)
grad = Grad().wrt(x, callback=save_to(x))
y = | F.broadcast_to(x, (3, 3, 10)) | megengine.functional.broadcast_to |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import gc
import platform
import weakref
import numpy as np
import pytest
import megengine as mge
import megengine.distributed as dist
import megengine.functional as F
from megengine.core._imperative_rt import TensorAttr, imperative
from megengine.core._imperative_rt.imperative import sync
from megengine.core.autodiff.grad import Grad
from megengine.core.ops.builtin import Elemwise
from megengine.core.tensor.raw_tensor import as_raw_tensor
from megengine.core.tensor.tensor import Tensor, apply
from megengine.core.tensor.tensor_wrapper import TensorWrapper
from megengine.functional.distributed import remote_recv, remote_send
def _elwise(mode):
op = Elemwise(mode)
def f(*args):
(result,) = apply(op, *args)
return result
return f
add = _elwise(Elemwise.Mode.ADD)
mul = _elwise(Elemwise.Mode.MUL)
cos = _elwise(Elemwise.Mode.COS)
relu = _elwise(Elemwise.Mode.RELU)
def as_tensor(x):
return Tensor(as_raw_tensor(x, device=mge.device.get_default_device()))
def save_to(self, name="grad"):
def callback(tensor, grad):
setattr(self, name, grad)
return callback
@pytest.mark.isolated_distributed
@pytest.mark.skipif(
platform.system() == "Windows", reason="windows disable MGB_ENABLE_OPR_MM"
)
def test_dist_grad():
world_size = 2
x_np = np.random.rand(10).astype("float32")
port = dist.get_free_ports(1)[0]
server = dist.Server(port)
def worker0():
dist.init_process_group("localhost", port, world_size, 0, 0)
mge.device.set_default_device("gpu0")
grad = Grad()
x = as_tensor(x_np)
grad.wrt(x, callback=save_to(x))
# need a placeholder to trace operator
send_x = remote_send(x, 1)
recv_x = remote_recv(1, x_np.shape, x_np.dtype, "gpu0")
y = recv_x * recv_x
grad([y], [as_tensor(np.ones_like(x_np))])
np.testing.assert_almost_equal(x.grad.numpy(), x.numpy() * 2)
def worker1():
dist.init_process_group("localhost", port, world_size, 1, 1)
mge.device.set_default_device("gpu1")
grad = Grad()
recv_x = remote_recv(0, x_np.shape, x_np.dtype, "gpu1")
send_x = remote_send(recv_x, 0)
grad([], [])
# sync because grad has a send operator
sync()
send_x.device._cn._sync_all()
import multiprocessing as mp
p0 = mp.Process(target=worker0)
p1 = mp.Process(target=worker1)
p0.start()
p1.start()
p0.join(10)
p1.join(10)
assert p0.exitcode == 0 and p1.exitcode == 0
def test_grad():
x_np = np.random.rand(10).astype("float32")
x = as_tensor(x_np)
grad = Grad().wrt(x, callback=save_to(x))
y = cos(x)
grad(y, as_tensor(np.ones_like(x_np)))
np.testing.assert_almost_equal(x.grad.numpy(), -np.sin(x_np))
def test_grad_2():
x_np = np.random.rand(10).astype("float32")
x = as_tensor(x_np)
grad = Grad().wrt(x, callback=save_to(x))
y = mul(x, x)
y = mul(y, y)
grad(y, as_tensor(np.ones_like(x_np)))
np.testing.assert_almost_equal(x.grad.numpy(), 4 * x_np ** 3, decimal=6)
@pytest.mark.skip(reason="high order gradient was not implemented yet")
def test_2nd_grad():
x_np = np.random.rand(10).astype("float32")
x = as_tensor(x_np)
ones = as_tensor(np.ones_like(x_np))
grad = Grad().wrt(x, callback=save_to(x))
grad2 = Grad().wrt(x, callback=save_to(x))
y = cos(x)
grad(y, ones)
np.testing.assert_almost_equal(x.grad.numpy(), -np.sin(x_np), decimal=5)
grad2(x.grad, ones)
np.testing.assert_almost_equal(x.grad.numpy(), -np.cos(x_np))
def test_grad_with_tensor_wrapper():
x_np = np.random.rand(10).astype("float32")
x = TensorWrapper(x_np)
grad = Grad().wrt(x, callback=save_to(x))
y = mul(x, x)
y = mul(y, y)
grad(y, TensorWrapper(np.ones_like(x_np)))
np.testing.assert_almost_equal(x.grad.numpy(), 4 * x_np ** 3, decimal=6)
def test_release():
def check(f):
n = 0
d = None
gc.disable()
try:
for i in range(3):
f()
m = len(gc.get_objects())
d = m - n
n = m
assert d == 0
finally:
gc.enable()
x = TensorWrapper([0.0])
dy = TensorWrapper(np.ones_like(x.numpy()))
@check
def _():
g = Grad().wrt(x)
y = x * x
g(y, dy)
@check
def _():
with Grad().wrt(x) as g:
pass
@check
def _():
with Grad().wrt(x) as g:
y = x * x
def test_grad_inplace():
x_np = np.random.rand(10).astype("float32")
x = TensorWrapper(x_np)
grad = Grad().wrt(x, callback=save_to(x))
y = mul(x, x)
y *= y
grad(y, TensorWrapper(np.ones_like(x_np)))
np.testing.assert_almost_equal(x.grad.numpy(), 4 * x_np ** 3, decimal=6)
def test_elemwise_add():
x_np = np.random.rand(10).astype("float32")
y_np = np.random.rand(10, 10).astype("float32")
dz_np = np.random.rand(10, 10).astype("float32")
x = TensorWrapper(x_np)
y = TensorWrapper(y_np)
dz = TensorWrapper(dz_np)
refs = {}
def f(x, y):
x = x * 2
refs["x"] = weakref.ref(x.__wrapped__)
refs["y"] = weakref.ref(y.__wrapped__)
return x + y
grad = Grad().wrt(x, callback=save_to(x))
z = f(x, y)
del y
for k, r in refs.items():
assert r() is None
grad(z, dz)
np.testing.assert_almost_equal(x.grad.numpy(), dz_np.sum(0) * 2, decimal=5)
def test_elemwise_relu():
x_np = [1.0, -1.0]
dz_np = [1.0]
x = TensorWrapper(x_np)
dz = TensorWrapper(dz_np)
refs = {}
def f(x):
x = x * 2
refs["x"] = weakref.ref(x.__wrapped__)
return relu(x)
grad = Grad().wrt(x, callback=save_to(x))
z = f(x)
assert refs["x"]() is None
grad(z, dz)
np.testing.assert_almost_equal(x.grad.numpy(), [2.0, 0])
def test_elemwise_relu_backward_fn():
op = Elemwise(Elemwise.Mode.RELU)
attr = TensorAttr()
attr.dtype = "float32"
attr.comp_node = "xpux"
result = imperative.make_backward_graph(op, [attr], [True], [True])
backward_graph, save_for_backward_mask, input_has_grad = result
assert save_for_backward_mask == [False, True, True], save_for_backward_mask
def test_reshape():
x_np = np.random.rand(2, 5).astype("float32")
x = TensorWrapper(x_np)
grad = Grad().wrt(x, callback=save_to(x))
y = x.reshape(5, 2)
grad(y, F.ones_like(y))
np.testing.assert_equal(np.ones((2, 5), dtype=np.float32), x.grad.numpy())
def test_subtensor():
x_np = np.random.rand(3, 3).astype("float32")
x = TensorWrapper(x_np)
grad = Grad().wrt(x, callback=save_to(x))
y = x[1:-1, :2]
grad(y, F.ones_like(y))
np.testing.assert_equal(
np.array([[0, 0, 0], [1, 1, 0], [0, 0, 0]], dtype=np.float32), x.grad.numpy()
)
def test_IndexingMultiAxisVec():
x_np = np.random.rand(3, 3).astype("float32")
x = TensorWrapper(x_np)
grad = Grad().wrt(x, callback=save_to(x))
y = x[[0, 2], [0, 2]]
grad(y, F.ones_like(y))
np.testing.assert_equal(
np.array([[1, 0, 0], [0, 0, 0], [0, 0, 1]], dtype=np.float32), x.grad.numpy()
)
def test_AxisAddRemove():
x_np = np.random.rand(1, 5).astype("float32")
x = TensorWrapper(x_np)
grad = Grad().wrt(x, callback=save_to(x))
y = F.squeeze(F.expand_dims(x, 2), 0)
grad(y, F.ones_like(y))
np.testing.assert_equal(
np.array([[1, 1, 1, 1, 1]], dtype=np.float32), x.grad.numpy()
)
def test_Broadcast():
x_np = np.random.rand(3, 3, 1).astype("float32")
x = TensorWrapper(x_np)
grad = Grad().wrt(x, callback=save_to(x))
y = F.broadcast_to(x, (3, 3, 10))
grad(y, F.ones_like(y))
np.testing.assert_equal(np.ones((3, 3, 1), dtype=np.float32) * 10, x.grad.numpy())
def test_Reduce_sum():
x_np = np.random.rand(3, 3).astype("float32")
x = | TensorWrapper(x_np) | megengine.core.tensor.tensor_wrapper.TensorWrapper |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import gc
import platform
import weakref
import numpy as np
import pytest
import megengine as mge
import megengine.distributed as dist
import megengine.functional as F
from megengine.core._imperative_rt import TensorAttr, imperative
from megengine.core._imperative_rt.imperative import sync
from megengine.core.autodiff.grad import Grad
from megengine.core.ops.builtin import Elemwise
from megengine.core.tensor.raw_tensor import as_raw_tensor
from megengine.core.tensor.tensor import Tensor, apply
from megengine.core.tensor.tensor_wrapper import TensorWrapper
from megengine.functional.distributed import remote_recv, remote_send
def _elwise(mode):
op = Elemwise(mode)
def f(*args):
(result,) = apply(op, *args)
return result
return f
add = _elwise(Elemwise.Mode.ADD)
mul = _elwise(Elemwise.Mode.MUL)
cos = _elwise(Elemwise.Mode.COS)
relu = _elwise(Elemwise.Mode.RELU)
def as_tensor(x):
return Tensor(as_raw_tensor(x, device=mge.device.get_default_device()))
def save_to(self, name="grad"):
def callback(tensor, grad):
setattr(self, name, grad)
return callback
@pytest.mark.isolated_distributed
@pytest.mark.skipif(
platform.system() == "Windows", reason="windows disable MGB_ENABLE_OPR_MM"
)
def test_dist_grad():
world_size = 2
x_np = np.random.rand(10).astype("float32")
port = dist.get_free_ports(1)[0]
server = dist.Server(port)
def worker0():
dist.init_process_group("localhost", port, world_size, 0, 0)
mge.device.set_default_device("gpu0")
grad = Grad()
x = as_tensor(x_np)
grad.wrt(x, callback=save_to(x))
# need a placeholder to trace operator
send_x = remote_send(x, 1)
recv_x = remote_recv(1, x_np.shape, x_np.dtype, "gpu0")
y = recv_x * recv_x
grad([y], [as_tensor(np.ones_like(x_np))])
np.testing.assert_almost_equal(x.grad.numpy(), x.numpy() * 2)
def worker1():
dist.init_process_group("localhost", port, world_size, 1, 1)
mge.device.set_default_device("gpu1")
grad = Grad()
recv_x = remote_recv(0, x_np.shape, x_np.dtype, "gpu1")
send_x = remote_send(recv_x, 0)
grad([], [])
# sync because grad has a send operator
sync()
send_x.device._cn._sync_all()
import multiprocessing as mp
p0 = mp.Process(target=worker0)
p1 = mp.Process(target=worker1)
p0.start()
p1.start()
p0.join(10)
p1.join(10)
assert p0.exitcode == 0 and p1.exitcode == 0
def test_grad():
x_np = np.random.rand(10).astype("float32")
x = as_tensor(x_np)
grad = Grad().wrt(x, callback=save_to(x))
y = cos(x)
grad(y, as_tensor(np.ones_like(x_np)))
np.testing.assert_almost_equal(x.grad.numpy(), -np.sin(x_np))
def test_grad_2():
x_np = np.random.rand(10).astype("float32")
x = as_tensor(x_np)
grad = Grad().wrt(x, callback=save_to(x))
y = mul(x, x)
y = mul(y, y)
grad(y, as_tensor(np.ones_like(x_np)))
np.testing.assert_almost_equal(x.grad.numpy(), 4 * x_np ** 3, decimal=6)
@pytest.mark.skip(reason="high order gradient was not implemented yet")
def test_2nd_grad():
x_np = np.random.rand(10).astype("float32")
x = as_tensor(x_np)
ones = as_tensor(np.ones_like(x_np))
grad = Grad().wrt(x, callback=save_to(x))
grad2 = Grad().wrt(x, callback=save_to(x))
y = cos(x)
grad(y, ones)
np.testing.assert_almost_equal(x.grad.numpy(), -np.sin(x_np), decimal=5)
grad2(x.grad, ones)
np.testing.assert_almost_equal(x.grad.numpy(), -np.cos(x_np))
def test_grad_with_tensor_wrapper():
x_np = np.random.rand(10).astype("float32")
x = TensorWrapper(x_np)
grad = Grad().wrt(x, callback=save_to(x))
y = mul(x, x)
y = mul(y, y)
grad(y, TensorWrapper(np.ones_like(x_np)))
np.testing.assert_almost_equal(x.grad.numpy(), 4 * x_np ** 3, decimal=6)
def test_release():
def check(f):
n = 0
d = None
gc.disable()
try:
for i in range(3):
f()
m = len(gc.get_objects())
d = m - n
n = m
assert d == 0
finally:
gc.enable()
x = TensorWrapper([0.0])
dy = TensorWrapper(np.ones_like(x.numpy()))
@check
def _():
g = Grad().wrt(x)
y = x * x
g(y, dy)
@check
def _():
with Grad().wrt(x) as g:
pass
@check
def _():
with Grad().wrt(x) as g:
y = x * x
def test_grad_inplace():
x_np = np.random.rand(10).astype("float32")
x = TensorWrapper(x_np)
grad = Grad().wrt(x, callback=save_to(x))
y = mul(x, x)
y *= y
grad(y, TensorWrapper(np.ones_like(x_np)))
np.testing.assert_almost_equal(x.grad.numpy(), 4 * x_np ** 3, decimal=6)
def test_elemwise_add():
x_np = np.random.rand(10).astype("float32")
y_np = np.random.rand(10, 10).astype("float32")
dz_np = np.random.rand(10, 10).astype("float32")
x = TensorWrapper(x_np)
y = TensorWrapper(y_np)
dz = TensorWrapper(dz_np)
refs = {}
def f(x, y):
x = x * 2
refs["x"] = weakref.ref(x.__wrapped__)
refs["y"] = weakref.ref(y.__wrapped__)
return x + y
grad = Grad().wrt(x, callback=save_to(x))
z = f(x, y)
del y
for k, r in refs.items():
assert r() is None
grad(z, dz)
np.testing.assert_almost_equal(x.grad.numpy(), dz_np.sum(0) * 2, decimal=5)
def test_elemwise_relu():
x_np = [1.0, -1.0]
dz_np = [1.0]
x = TensorWrapper(x_np)
dz = TensorWrapper(dz_np)
refs = {}
def f(x):
x = x * 2
refs["x"] = weakref.ref(x.__wrapped__)
return relu(x)
grad = Grad().wrt(x, callback=save_to(x))
z = f(x)
assert refs["x"]() is None
grad(z, dz)
np.testing.assert_almost_equal(x.grad.numpy(), [2.0, 0])
def test_elemwise_relu_backward_fn():
op = Elemwise(Elemwise.Mode.RELU)
attr = TensorAttr()
attr.dtype = "float32"
attr.comp_node = "xpux"
result = imperative.make_backward_graph(op, [attr], [True], [True])
backward_graph, save_for_backward_mask, input_has_grad = result
assert save_for_backward_mask == [False, True, True], save_for_backward_mask
def test_reshape():
x_np = np.random.rand(2, 5).astype("float32")
x = TensorWrapper(x_np)
grad = Grad().wrt(x, callback=save_to(x))
y = x.reshape(5, 2)
grad(y, F.ones_like(y))
np.testing.assert_equal(np.ones((2, 5), dtype=np.float32), x.grad.numpy())
def test_subtensor():
x_np = np.random.rand(3, 3).astype("float32")
x = TensorWrapper(x_np)
grad = Grad().wrt(x, callback=save_to(x))
y = x[1:-1, :2]
grad(y, F.ones_like(y))
np.testing.assert_equal(
np.array([[0, 0, 0], [1, 1, 0], [0, 0, 0]], dtype=np.float32), x.grad.numpy()
)
def test_IndexingMultiAxisVec():
x_np = np.random.rand(3, 3).astype("float32")
x = TensorWrapper(x_np)
grad = Grad().wrt(x, callback=save_to(x))
y = x[[0, 2], [0, 2]]
grad(y, F.ones_like(y))
np.testing.assert_equal(
np.array([[1, 0, 0], [0, 0, 0], [0, 0, 1]], dtype=np.float32), x.grad.numpy()
)
def test_AxisAddRemove():
x_np = np.random.rand(1, 5).astype("float32")
x = TensorWrapper(x_np)
grad = Grad().wrt(x, callback=save_to(x))
y = F.squeeze(F.expand_dims(x, 2), 0)
grad(y, F.ones_like(y))
np.testing.assert_equal(
np.array([[1, 1, 1, 1, 1]], dtype=np.float32), x.grad.numpy()
)
def test_Broadcast():
x_np = np.random.rand(3, 3, 1).astype("float32")
x = TensorWrapper(x_np)
grad = Grad().wrt(x, callback=save_to(x))
y = F.broadcast_to(x, (3, 3, 10))
grad(y, F.ones_like(y))
np.testing.assert_equal(np.ones((3, 3, 1), dtype=np.float32) * 10, x.grad.numpy())
def test_Reduce_sum():
x_np = np.random.rand(3, 3).astype("float32")
x = TensorWrapper(x_np)
grad = Grad().wrt(x, callback=save_to(x))
y = x.sum(axis=0)
grad(y, F.ones_like(y))
np.testing.assert_equal(np.ones((3, 3), dtype=np.float32), x.grad.numpy())
def test_Reduce_mean():
x_np = np.random.rand(3, 3).astype("float32")
x = | TensorWrapper(x_np) | megengine.core.tensor.tensor_wrapper.TensorWrapper |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import gc
import platform
import weakref
import numpy as np
import pytest
import megengine as mge
import megengine.distributed as dist
import megengine.functional as F
from megengine.core._imperative_rt import TensorAttr, imperative
from megengine.core._imperative_rt.imperative import sync
from megengine.core.autodiff.grad import Grad
from megengine.core.ops.builtin import Elemwise
from megengine.core.tensor.raw_tensor import as_raw_tensor
from megengine.core.tensor.tensor import Tensor, apply
from megengine.core.tensor.tensor_wrapper import TensorWrapper
from megengine.functional.distributed import remote_recv, remote_send
def _elwise(mode):
op = Elemwise(mode)
def f(*args):
(result,) = | apply(op, *args) | megengine.core.tensor.tensor.apply |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import gc
import platform
import weakref
import numpy as np
import pytest
import megengine as mge
import megengine.distributed as dist
import megengine.functional as F
from megengine.core._imperative_rt import TensorAttr, imperative
from megengine.core._imperative_rt.imperative import sync
from megengine.core.autodiff.grad import Grad
from megengine.core.ops.builtin import Elemwise
from megengine.core.tensor.raw_tensor import as_raw_tensor
from megengine.core.tensor.tensor import Tensor, apply
from megengine.core.tensor.tensor_wrapper import TensorWrapper
from megengine.functional.distributed import remote_recv, remote_send
def _elwise(mode):
op = Elemwise(mode)
def f(*args):
(result,) = apply(op, *args)
return result
return f
add = _elwise(Elemwise.Mode.ADD)
mul = _elwise(Elemwise.Mode.MUL)
cos = _elwise(Elemwise.Mode.COS)
relu = _elwise(Elemwise.Mode.RELU)
def as_tensor(x):
return Tensor(as_raw_tensor(x, device=mge.device.get_default_device()))
def save_to(self, name="grad"):
def callback(tensor, grad):
setattr(self, name, grad)
return callback
@pytest.mark.isolated_distributed
@pytest.mark.skipif(
platform.system() == "Windows", reason="windows disable MGB_ENABLE_OPR_MM"
)
def test_dist_grad():
world_size = 2
x_np = np.random.rand(10).astype("float32")
port = | dist.get_free_ports(1) | megengine.distributed.get_free_ports |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import gc
import platform
import weakref
import numpy as np
import pytest
import megengine as mge
import megengine.distributed as dist
import megengine.functional as F
from megengine.core._imperative_rt import TensorAttr, imperative
from megengine.core._imperative_rt.imperative import sync
from megengine.core.autodiff.grad import Grad
from megengine.core.ops.builtin import Elemwise
from megengine.core.tensor.raw_tensor import as_raw_tensor
from megengine.core.tensor.tensor import Tensor, apply
from megengine.core.tensor.tensor_wrapper import TensorWrapper
from megengine.functional.distributed import remote_recv, remote_send
def _elwise(mode):
op = Elemwise(mode)
def f(*args):
(result,) = apply(op, *args)
return result
return f
add = _elwise(Elemwise.Mode.ADD)
mul = _elwise(Elemwise.Mode.MUL)
cos = _elwise(Elemwise.Mode.COS)
relu = _elwise(Elemwise.Mode.RELU)
def as_tensor(x):
return Tensor(as_raw_tensor(x, device=mge.device.get_default_device()))
def save_to(self, name="grad"):
def callback(tensor, grad):
setattr(self, name, grad)
return callback
@pytest.mark.isolated_distributed
@pytest.mark.skipif(
platform.system() == "Windows", reason="windows disable MGB_ENABLE_OPR_MM"
)
def test_dist_grad():
world_size = 2
x_np = np.random.rand(10).astype("float32")
port = dist.get_free_ports(1)[0]
server = dist.Server(port)
def worker0():
| dist.init_process_group("localhost", port, world_size, 0, 0) | megengine.distributed.init_process_group |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import gc
import platform
import weakref
import numpy as np
import pytest
import megengine as mge
import megengine.distributed as dist
import megengine.functional as F
from megengine.core._imperative_rt import TensorAttr, imperative
from megengine.core._imperative_rt.imperative import sync
from megengine.core.autodiff.grad import Grad
from megengine.core.ops.builtin import Elemwise
from megengine.core.tensor.raw_tensor import as_raw_tensor
from megengine.core.tensor.tensor import Tensor, apply
from megengine.core.tensor.tensor_wrapper import TensorWrapper
from megengine.functional.distributed import remote_recv, remote_send
def _elwise(mode):
op = Elemwise(mode)
def f(*args):
(result,) = apply(op, *args)
return result
return f
add = _elwise(Elemwise.Mode.ADD)
mul = _elwise(Elemwise.Mode.MUL)
cos = _elwise(Elemwise.Mode.COS)
relu = _elwise(Elemwise.Mode.RELU)
def as_tensor(x):
return Tensor(as_raw_tensor(x, device=mge.device.get_default_device()))
def save_to(self, name="grad"):
def callback(tensor, grad):
setattr(self, name, grad)
return callback
@pytest.mark.isolated_distributed
@pytest.mark.skipif(
platform.system() == "Windows", reason="windows disable MGB_ENABLE_OPR_MM"
)
def test_dist_grad():
world_size = 2
x_np = np.random.rand(10).astype("float32")
port = dist.get_free_ports(1)[0]
server = dist.Server(port)
def worker0():
dist.init_process_group("localhost", port, world_size, 0, 0)
| mge.device.set_default_device("gpu0") | megengine.device.set_default_device |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import gc
import platform
import weakref
import numpy as np
import pytest
import megengine as mge
import megengine.distributed as dist
import megengine.functional as F
from megengine.core._imperative_rt import TensorAttr, imperative
from megengine.core._imperative_rt.imperative import sync
from megengine.core.autodiff.grad import Grad
from megengine.core.ops.builtin import Elemwise
from megengine.core.tensor.raw_tensor import as_raw_tensor
from megengine.core.tensor.tensor import Tensor, apply
from megengine.core.tensor.tensor_wrapper import TensorWrapper
from megengine.functional.distributed import remote_recv, remote_send
def _elwise(mode):
op = Elemwise(mode)
def f(*args):
(result,) = apply(op, *args)
return result
return f
add = _elwise(Elemwise.Mode.ADD)
mul = _elwise(Elemwise.Mode.MUL)
cos = _elwise(Elemwise.Mode.COS)
relu = _elwise(Elemwise.Mode.RELU)
def as_tensor(x):
return Tensor(as_raw_tensor(x, device=mge.device.get_default_device()))
def save_to(self, name="grad"):
def callback(tensor, grad):
setattr(self, name, grad)
return callback
@pytest.mark.isolated_distributed
@pytest.mark.skipif(
platform.system() == "Windows", reason="windows disable MGB_ENABLE_OPR_MM"
)
def test_dist_grad():
world_size = 2
x_np = np.random.rand(10).astype("float32")
port = dist.get_free_ports(1)[0]
server = dist.Server(port)
def worker0():
dist.init_process_group("localhost", port, world_size, 0, 0)
mge.device.set_default_device("gpu0")
grad = | Grad() | megengine.core.autodiff.grad.Grad |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import gc
import platform
import weakref
import numpy as np
import pytest
import megengine as mge
import megengine.distributed as dist
import megengine.functional as F
from megengine.core._imperative_rt import TensorAttr, imperative
from megengine.core._imperative_rt.imperative import sync
from megengine.core.autodiff.grad import Grad
from megengine.core.ops.builtin import Elemwise
from megengine.core.tensor.raw_tensor import as_raw_tensor
from megengine.core.tensor.tensor import Tensor, apply
from megengine.core.tensor.tensor_wrapper import TensorWrapper
from megengine.functional.distributed import remote_recv, remote_send
def _elwise(mode):
op = Elemwise(mode)
def f(*args):
(result,) = apply(op, *args)
return result
return f
add = _elwise(Elemwise.Mode.ADD)
mul = _elwise(Elemwise.Mode.MUL)
cos = _elwise(Elemwise.Mode.COS)
relu = _elwise(Elemwise.Mode.RELU)
def as_tensor(x):
return Tensor(as_raw_tensor(x, device=mge.device.get_default_device()))
def save_to(self, name="grad"):
def callback(tensor, grad):
setattr(self, name, grad)
return callback
@pytest.mark.isolated_distributed
@pytest.mark.skipif(
platform.system() == "Windows", reason="windows disable MGB_ENABLE_OPR_MM"
)
def test_dist_grad():
world_size = 2
x_np = np.random.rand(10).astype("float32")
port = dist.get_free_ports(1)[0]
server = dist.Server(port)
def worker0():
dist.init_process_group("localhost", port, world_size, 0, 0)
mge.device.set_default_device("gpu0")
grad = Grad()
x = as_tensor(x_np)
grad.wrt(x, callback=save_to(x))
# need a placeholder to trace operator
send_x = | remote_send(x, 1) | megengine.functional.distributed.remote_send |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import gc
import platform
import weakref
import numpy as np
import pytest
import megengine as mge
import megengine.distributed as dist
import megengine.functional as F
from megengine.core._imperative_rt import TensorAttr, imperative
from megengine.core._imperative_rt.imperative import sync
from megengine.core.autodiff.grad import Grad
from megengine.core.ops.builtin import Elemwise
from megengine.core.tensor.raw_tensor import as_raw_tensor
from megengine.core.tensor.tensor import Tensor, apply
from megengine.core.tensor.tensor_wrapper import TensorWrapper
from megengine.functional.distributed import remote_recv, remote_send
def _elwise(mode):
op = Elemwise(mode)
def f(*args):
(result,) = apply(op, *args)
return result
return f
add = _elwise(Elemwise.Mode.ADD)
mul = _elwise(Elemwise.Mode.MUL)
cos = _elwise(Elemwise.Mode.COS)
relu = _elwise(Elemwise.Mode.RELU)
def as_tensor(x):
return Tensor(as_raw_tensor(x, device=mge.device.get_default_device()))
def save_to(self, name="grad"):
def callback(tensor, grad):
setattr(self, name, grad)
return callback
@pytest.mark.isolated_distributed
@pytest.mark.skipif(
platform.system() == "Windows", reason="windows disable MGB_ENABLE_OPR_MM"
)
def test_dist_grad():
world_size = 2
x_np = np.random.rand(10).astype("float32")
port = dist.get_free_ports(1)[0]
server = dist.Server(port)
def worker0():
dist.init_process_group("localhost", port, world_size, 0, 0)
mge.device.set_default_device("gpu0")
grad = Grad()
x = as_tensor(x_np)
grad.wrt(x, callback=save_to(x))
# need a placeholder to trace operator
send_x = remote_send(x, 1)
recv_x = | remote_recv(1, x_np.shape, x_np.dtype, "gpu0") | megengine.functional.distributed.remote_recv |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import gc
import platform
import weakref
import numpy as np
import pytest
import megengine as mge
import megengine.distributed as dist
import megengine.functional as F
from megengine.core._imperative_rt import TensorAttr, imperative
from megengine.core._imperative_rt.imperative import sync
from megengine.core.autodiff.grad import Grad
from megengine.core.ops.builtin import Elemwise
from megengine.core.tensor.raw_tensor import as_raw_tensor
from megengine.core.tensor.tensor import Tensor, apply
from megengine.core.tensor.tensor_wrapper import TensorWrapper
from megengine.functional.distributed import remote_recv, remote_send
def _elwise(mode):
op = Elemwise(mode)
def f(*args):
(result,) = apply(op, *args)
return result
return f
add = _elwise(Elemwise.Mode.ADD)
mul = _elwise(Elemwise.Mode.MUL)
cos = _elwise(Elemwise.Mode.COS)
relu = _elwise(Elemwise.Mode.RELU)
def as_tensor(x):
return Tensor(as_raw_tensor(x, device=mge.device.get_default_device()))
def save_to(self, name="grad"):
def callback(tensor, grad):
setattr(self, name, grad)
return callback
@pytest.mark.isolated_distributed
@pytest.mark.skipif(
platform.system() == "Windows", reason="windows disable MGB_ENABLE_OPR_MM"
)
def test_dist_grad():
world_size = 2
x_np = np.random.rand(10).astype("float32")
port = dist.get_free_ports(1)[0]
server = dist.Server(port)
def worker0():
dist.init_process_group("localhost", port, world_size, 0, 0)
mge.device.set_default_device("gpu0")
grad = Grad()
x = as_tensor(x_np)
grad.wrt(x, callback=save_to(x))
# need a placeholder to trace operator
send_x = remote_send(x, 1)
recv_x = remote_recv(1, x_np.shape, x_np.dtype, "gpu0")
y = recv_x * recv_x
grad([y], [as_tensor(np.ones_like(x_np))])
np.testing.assert_almost_equal(x.grad.numpy(), x.numpy() * 2)
def worker1():
| dist.init_process_group("localhost", port, world_size, 1, 1) | megengine.distributed.init_process_group |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import gc
import platform
import weakref
import numpy as np
import pytest
import megengine as mge
import megengine.distributed as dist
import megengine.functional as F
from megengine.core._imperative_rt import TensorAttr, imperative
from megengine.core._imperative_rt.imperative import sync
from megengine.core.autodiff.grad import Grad
from megengine.core.ops.builtin import Elemwise
from megengine.core.tensor.raw_tensor import as_raw_tensor
from megengine.core.tensor.tensor import Tensor, apply
from megengine.core.tensor.tensor_wrapper import TensorWrapper
from megengine.functional.distributed import remote_recv, remote_send
def _elwise(mode):
op = Elemwise(mode)
def f(*args):
(result,) = apply(op, *args)
return result
return f
add = _elwise(Elemwise.Mode.ADD)
mul = _elwise(Elemwise.Mode.MUL)
cos = _elwise(Elemwise.Mode.COS)
relu = _elwise(Elemwise.Mode.RELU)
def as_tensor(x):
return Tensor(as_raw_tensor(x, device=mge.device.get_default_device()))
def save_to(self, name="grad"):
def callback(tensor, grad):
setattr(self, name, grad)
return callback
@pytest.mark.isolated_distributed
@pytest.mark.skipif(
platform.system() == "Windows", reason="windows disable MGB_ENABLE_OPR_MM"
)
def test_dist_grad():
world_size = 2
x_np = np.random.rand(10).astype("float32")
port = dist.get_free_ports(1)[0]
server = dist.Server(port)
def worker0():
dist.init_process_group("localhost", port, world_size, 0, 0)
mge.device.set_default_device("gpu0")
grad = Grad()
x = as_tensor(x_np)
grad.wrt(x, callback=save_to(x))
# need a placeholder to trace operator
send_x = remote_send(x, 1)
recv_x = remote_recv(1, x_np.shape, x_np.dtype, "gpu0")
y = recv_x * recv_x
grad([y], [as_tensor(np.ones_like(x_np))])
np.testing.assert_almost_equal(x.grad.numpy(), x.numpy() * 2)
def worker1():
dist.init_process_group("localhost", port, world_size, 1, 1)
| mge.device.set_default_device("gpu1") | megengine.device.set_default_device |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import gc
import platform
import weakref
import numpy as np
import pytest
import megengine as mge
import megengine.distributed as dist
import megengine.functional as F
from megengine.core._imperative_rt import TensorAttr, imperative
from megengine.core._imperative_rt.imperative import sync
from megengine.core.autodiff.grad import Grad
from megengine.core.ops.builtin import Elemwise
from megengine.core.tensor.raw_tensor import as_raw_tensor
from megengine.core.tensor.tensor import Tensor, apply
from megengine.core.tensor.tensor_wrapper import TensorWrapper
from megengine.functional.distributed import remote_recv, remote_send
def _elwise(mode):
op = Elemwise(mode)
def f(*args):
(result,) = apply(op, *args)
return result
return f
add = _elwise(Elemwise.Mode.ADD)
mul = _elwise(Elemwise.Mode.MUL)
cos = _elwise(Elemwise.Mode.COS)
relu = _elwise(Elemwise.Mode.RELU)
def as_tensor(x):
return Tensor(as_raw_tensor(x, device=mge.device.get_default_device()))
def save_to(self, name="grad"):
def callback(tensor, grad):
setattr(self, name, grad)
return callback
@pytest.mark.isolated_distributed
@pytest.mark.skipif(
platform.system() == "Windows", reason="windows disable MGB_ENABLE_OPR_MM"
)
def test_dist_grad():
world_size = 2
x_np = np.random.rand(10).astype("float32")
port = dist.get_free_ports(1)[0]
server = dist.Server(port)
def worker0():
dist.init_process_group("localhost", port, world_size, 0, 0)
mge.device.set_default_device("gpu0")
grad = Grad()
x = as_tensor(x_np)
grad.wrt(x, callback=save_to(x))
# need a placeholder to trace operator
send_x = remote_send(x, 1)
recv_x = remote_recv(1, x_np.shape, x_np.dtype, "gpu0")
y = recv_x * recv_x
grad([y], [as_tensor(np.ones_like(x_np))])
np.testing.assert_almost_equal(x.grad.numpy(), x.numpy() * 2)
def worker1():
dist.init_process_group("localhost", port, world_size, 1, 1)
mge.device.set_default_device("gpu1")
grad = | Grad() | megengine.core.autodiff.grad.Grad |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import gc
import platform
import weakref
import numpy as np
import pytest
import megengine as mge
import megengine.distributed as dist
import megengine.functional as F
from megengine.core._imperative_rt import TensorAttr, imperative
from megengine.core._imperative_rt.imperative import sync
from megengine.core.autodiff.grad import Grad
from megengine.core.ops.builtin import Elemwise
from megengine.core.tensor.raw_tensor import as_raw_tensor
from megengine.core.tensor.tensor import Tensor, apply
from megengine.core.tensor.tensor_wrapper import TensorWrapper
from megengine.functional.distributed import remote_recv, remote_send
def _elwise(mode):
op = Elemwise(mode)
def f(*args):
(result,) = apply(op, *args)
return result
return f
add = _elwise(Elemwise.Mode.ADD)
mul = _elwise(Elemwise.Mode.MUL)
cos = _elwise(Elemwise.Mode.COS)
relu = _elwise(Elemwise.Mode.RELU)
def as_tensor(x):
return Tensor(as_raw_tensor(x, device=mge.device.get_default_device()))
def save_to(self, name="grad"):
def callback(tensor, grad):
setattr(self, name, grad)
return callback
@pytest.mark.isolated_distributed
@pytest.mark.skipif(
platform.system() == "Windows", reason="windows disable MGB_ENABLE_OPR_MM"
)
def test_dist_grad():
world_size = 2
x_np = np.random.rand(10).astype("float32")
port = dist.get_free_ports(1)[0]
server = dist.Server(port)
def worker0():
dist.init_process_group("localhost", port, world_size, 0, 0)
mge.device.set_default_device("gpu0")
grad = Grad()
x = as_tensor(x_np)
grad.wrt(x, callback=save_to(x))
# need a placeholder to trace operator
send_x = remote_send(x, 1)
recv_x = remote_recv(1, x_np.shape, x_np.dtype, "gpu0")
y = recv_x * recv_x
grad([y], [as_tensor(np.ones_like(x_np))])
np.testing.assert_almost_equal(x.grad.numpy(), x.numpy() * 2)
def worker1():
dist.init_process_group("localhost", port, world_size, 1, 1)
mge.device.set_default_device("gpu1")
grad = Grad()
recv_x = | remote_recv(0, x_np.shape, x_np.dtype, "gpu1") | megengine.functional.distributed.remote_recv |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import gc
import platform
import weakref
import numpy as np
import pytest
import megengine as mge
import megengine.distributed as dist
import megengine.functional as F
from megengine.core._imperative_rt import TensorAttr, imperative
from megengine.core._imperative_rt.imperative import sync
from megengine.core.autodiff.grad import Grad
from megengine.core.ops.builtin import Elemwise
from megengine.core.tensor.raw_tensor import as_raw_tensor
from megengine.core.tensor.tensor import Tensor, apply
from megengine.core.tensor.tensor_wrapper import TensorWrapper
from megengine.functional.distributed import remote_recv, remote_send
def _elwise(mode):
op = Elemwise(mode)
def f(*args):
(result,) = apply(op, *args)
return result
return f
add = _elwise(Elemwise.Mode.ADD)
mul = _elwise(Elemwise.Mode.MUL)
cos = _elwise(Elemwise.Mode.COS)
relu = _elwise(Elemwise.Mode.RELU)
def as_tensor(x):
return Tensor(as_raw_tensor(x, device=mge.device.get_default_device()))
def save_to(self, name="grad"):
def callback(tensor, grad):
setattr(self, name, grad)
return callback
@pytest.mark.isolated_distributed
@pytest.mark.skipif(
platform.system() == "Windows", reason="windows disable MGB_ENABLE_OPR_MM"
)
def test_dist_grad():
world_size = 2
x_np = np.random.rand(10).astype("float32")
port = dist.get_free_ports(1)[0]
server = dist.Server(port)
def worker0():
dist.init_process_group("localhost", port, world_size, 0, 0)
mge.device.set_default_device("gpu0")
grad = Grad()
x = as_tensor(x_np)
grad.wrt(x, callback=save_to(x))
# need a placeholder to trace operator
send_x = remote_send(x, 1)
recv_x = remote_recv(1, x_np.shape, x_np.dtype, "gpu0")
y = recv_x * recv_x
grad([y], [as_tensor(np.ones_like(x_np))])
np.testing.assert_almost_equal(x.grad.numpy(), x.numpy() * 2)
def worker1():
dist.init_process_group("localhost", port, world_size, 1, 1)
mge.device.set_default_device("gpu1")
grad = Grad()
recv_x = remote_recv(0, x_np.shape, x_np.dtype, "gpu1")
send_x = | remote_send(recv_x, 0) | megengine.functional.distributed.remote_send |
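The rows above all truncate test_dist_grad before the two workers are actually launched. Below is a minimal sketch of how such a two-rank test is typically driven; it assumes worker0 and worker1 are in scope (in the test they are closures inside test_dist_grad), and both the helper name launch_dist_grad_workers and the multiprocessing driver are illustrations rather than the dataset's own continuation. Presumably worker1 also closes its Grad scope (e.g. grad([], []) followed by sync()) before the join, so the gradient for x flows back to rank 0 through the remote_send edge.

import multiprocessing as mp

def launch_dist_grad_workers():
    # Sketch only: run each rank in its own process so worker0 (gpu0) and
    # worker1 (gpu1) can join the same process group and exchange tensors
    # via remote_send/remote_recv. Assumes a fork start method so the
    # closures defined above can be passed as targets.
    procs = [mp.Process(target=fn) for fn in (worker0, worker1)]
    for p in procs:
        p.start()
    for p in procs:
        p.join(10)  # generous timeout for process-group setup
        assert p.exitcode == 0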